examples/ipsec-secgw: support security offload

The ipsec-secgw application is modified so that it can support the
following types of actions for crypto operations:
1. full protocol offload using crypto devices.
2. inline ipsec using ethernet devices to perform crypto operations.
3. full protocol offload using ethernet devices.
4. non-protocol offload.
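
For reference, these four modes map onto the rte_security session action
types used throughout this patch. A minimal sketch of the keyword-to-type
mapping (the helper name is hypothetical; it mirrors the parse_sa_tokens()
additions in sa.c below):

#include <string.h>
#include <rte_security.h>

/* Hypothetical helper, not part of the patch: map an SA "type"
 * keyword to the corresponding rte_security action type. */
static int
action_type_from_keyword(const char *kw,
		enum rte_security_session_action_type *type)
{
	if (strcmp(kw, "lookaside-protocol-offload") == 0)
		*type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	else if (strcmp(kw, "inline-protocol-offload") == 0)
		*type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
	else if (strcmp(kw, "inline-crypto-offload") == 0)
		*type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
	else if (strcmp(kw, "no-offload") == 0)
		*type = RTE_SECURITY_ACTION_TYPE_NONE;
	else
		return -1;
	return 0;
}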

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Aviad Yehezkel <aviadye@mellanox.com>
Author: Akhil Goyal <akhil.goyal@nxp.com>
Date: 2017-10-25 20:37:27 +05:30
Committed by: Thomas Monjalon
parent 0a23d4b6f4
commit ec17993a14
7 changed files with 540 additions and 128 deletions


@@ -52,13 +52,22 @@ The application classifies the ports as *Protected* and *Unprotected*.
Thus, traffic received on an Unprotected or Protected port is considered
Inbound or Outbound respectively.
The application also supports complete IPsec protocol offload to hardware
(lookaside crypto accelerator or ethernet device). It also supports
inline IPsec processing by the supported ethernet device during transmission.
These modes can be selected during the SA creation configuration.
In case of complete protocol offload, the processing of headers (ESP and outer
IP header) is done by the hardware and the application does not need to
add/remove them during outbound/inbound processing.
The Path for IPsec Inbound traffic is:
* Read packets from the port.
* Classify packets between IPv4 and ESP.
* Perform Inbound SA lookup for ESP packets based on their SPI.
-* Perform Verification/Decryption.
-* Remove ESP and outer IP header
+* Perform Verification/Decryption (Not needed in case of inline ipsec).
+* Remove ESP and outer IP header (Not needed in case of protocol offload).
* Inbound SP check using ACL of decrypted packets and any other IPv4 packets.
* Routing.
* Write packet to port.
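
For inline IPsec the NIC has already verified and decrypted the packet
before it reaches the application, so the inbound path only has to inspect
the mbuf offload flags. A minimal sketch of that check, using the names
from the esp.c changes below:

/* Sketch: inbound status check for an inline-crypto SA,
 * following the esp_inbound_post() change below. */
if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
	if (!(m->ol_flags & PKT_RX_SEC_OFFLOAD))
		return -1; /* NIC did not process this packet */
	if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
		return -1; /* inline verification/decryption failed */
}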
@@ -68,8 +77,8 @@ The Path for the IPsec Outbound traffic is:
* Read packets from the port.
* Perform Outbound SP check using ACL of all IPv4 traffic.
* Perform Outbound SA lookup for packets that need IPsec protection.
-* Add ESP and outer IP header.
-* Perform Encryption/Digest.
+* Add ESP and outer IP header (Not needed in case of protocol offload).
+* Perform Encryption/Digest (Not needed in case of inline ipsec).
* Routing.
* Write packet to port.
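
For outbound inline crypto no crypto operation is enqueued at all; the
application flags the mbuf so the NIC encrypts on transmit. A minimal
sketch, mirroring the esp_outbound_post() and ipsec_enqueue() changes below:

/* Sketch: outbound handling for an inline-crypto SA. */
if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
		rte_security_set_pkt_metadata(sa->security_ctx,
				sa->sec_session, m, NULL);
}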
@@ -389,7 +398,7 @@ The SA rule syntax is shown as follows:
.. code-block:: console
sa <dir> <spi> <cipher_algo> <cipher_key> <auth_algo> <auth_key>
-   <mode> <src_ip> <dst_ip>
+   <mode> <src_ip> <dst_ip> <action_type> <port_id>
where each option means:
@@ -530,6 +539,34 @@ where each option means:
* *dst X.X.X.X* for IPv4
* *dst XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX* for IPv6
``<type>``
* Action type to specify the security action. This option specifies
whether the SA is processed with lookaside protocol offload to an HW
accelerator, protocol offload on an ethernet device, or inline
crypto processing on the ethernet device during transmission.
* Optional: Yes, default type *no-offload*
* Available options:
* *lookaside-protocol-offload*: look aside protocol offload to HW accelerator
* *inline-protocol-offload*: inline protocol offload on ethernet device
* *inline-crypto-offload*: inline crypto processing on ethernet device
* *no-offload*: no offloading to hardware
``<port_id>``
* Port/device ID of the ethernet/crypto accelerator for which the SA is
configured. This option is used when *type* is NOT *no-offload*.
* Optional: No, if *type* is not *no-offload*
* Syntax:
* *port_id X* X is a valid device number in decimal
Example SA rules:
.. code-block:: console
@@ -549,6 +586,11 @@ Example SA rules:
aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \
mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5
sa out 5 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \
auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \
mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 \
type lookaside-protocol-offload port_id 4
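
An SA using inline crypto offload follows the same pattern. The following
rule is illustrative only (not part of the patch) and assumes ethernet
port 0 advertises the security capability:

sa out 6 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \
auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0 \
mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 \
type inline-crypto-offload port_id 0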
Routing rule syntax
^^^^^^^^^^^^^^^^^^^


@@ -58,8 +58,11 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_sym_op *sym_cop;
int32_t payload_len, ip_hdr_len;
RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
return 0;
RTE_ASSERT(m != NULL);
RTE_ASSERT(cop != NULL);
ip4 = rte_pktmbuf_mtod(m, struct ip *);
@@ -175,29 +178,44 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
RTE_ASSERT(sa != NULL);
RTE_ASSERT(cop != NULL);
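/* Inline crypto SAs produce no crypto op result; derive the status
 * from the RX security offload flags set by the NIC instead. */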
if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
else
cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
} else
cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
}
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
return -1;
}
nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
pad_len = nexthdr - 1;
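/* With RX HW trailer offload the NIC has already stripped the ESP
 * padding and ICV; take the next protocol from the mbuf instead. */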
if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
nexthdr = &m->inner_esp_next_proto;
} else {
nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
pad_len = nexthdr - 1;
padding = pad_len - *pad_len;
for (i = 0; i < *pad_len; i++) {
if (padding[i] != i + 1) {
RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
padding = pad_len - *pad_len;
for (i = 0; i < *pad_len; i++) {
if (padding[i] != i + 1) {
RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
return -EINVAL;
}
}
if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
RTE_LOG(ERR, IPSEC_ESP,
"failed to remove pad_len + digest\n");
return -EINVAL;
}
}
if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
RTE_LOG(ERR, IPSEC_ESP,
"failed to remove pad_len + digest\n");
return -EINVAL;
}
if (unlikely(sa->flags == TRANSPORT)) {
ip = rte_pktmbuf_mtod(m, struct ip *);
ip4 = (struct ip *)rte_pktmbuf_adj(m,
@@ -227,14 +245,13 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct ip *ip4;
struct ip6_hdr *ip6;
struct esp_hdr *esp = NULL;
uint8_t *padding, *new_ip, nlp;
uint8_t *padding = NULL, *new_ip, nlp;
struct rte_crypto_sym_op *sym_cop;
int32_t i;
uint16_t pad_payload_len, pad_len, ip_hdr_len;
RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
RTE_ASSERT(cop != NULL);
ip_hdr_len = 0;
@@ -284,12 +301,19 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
return -EINVAL;
}
padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
if (unlikely(padding == NULL)) {
RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
return -ENOSPC;
/* Add trailer padding if it is not constructed by HW */
if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
!(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
sa->digest_len);
if (unlikely(padding == NULL)) {
RTE_LOG(ERR, IPSEC_ESP,
"not enough mbuf trailing space\n");
return -ENOSPC;
}
rte_prefetch0(padding);
}
rte_prefetch0(padding);
switch (sa->flags) {
case IP4_TUNNEL:
@@ -323,15 +347,46 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
esp->spi = rte_cpu_to_be_32(sa->spi);
esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
/* set iv */
uint64_t *iv = (uint64_t *)(esp + 1);
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
*iv = rte_cpu_to_be_64(sa->seq);
} else {
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_AES_CBC:
memset(iv, 0, sa->iv_len);
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
*iv = rte_cpu_to_be_64(sa->seq);
break;
default:
RTE_LOG(ERR, IPSEC_ESP,
"unsupported cipher algorithm %u\n",
sa->cipher_algo);
return -EINVAL;
}
}
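/* Inline crypto: fill the ESP trailer in SW unless the HW builds it;
 * no crypto op is prepared for this SA. */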
if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
/* Set the inner esp next protocol for HW trailer */
m->inner_esp_next_proto = nlp;
m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
} else {
padding[pad_len - 2] = pad_len - 2;
padding[pad_len - 1] = nlp;
}
goto done;
}
RTE_ASSERT(cop != NULL);
sym_cop = get_sym_cop(cop);
sym_cop->m_src = m;
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
uint8_t *aad;
*iv = rte_cpu_to_be_64(sa->seq);
sym_cop->aead.data.offset = ip_hdr_len +
sizeof(struct esp_hdr) + sa->iv_len;
sym_cop->aead.data.length = pad_payload_len;
@@ -361,13 +416,11 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_AES_CBC:
memset(iv, 0, sa->iv_len);
sym_cop->cipher.data.offset = ip_hdr_len +
sizeof(struct esp_hdr);
sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
*iv = rte_cpu_to_be_64(sa->seq);
sym_cop->cipher.data.offset = ip_hdr_len +
sizeof(struct esp_hdr) + sa->iv_len;
sym_cop->cipher.data.length = pad_payload_len;
@@ -409,21 +462,26 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
}
done:
return 0;
}
int
esp_outbound_post(struct rte_mbuf *m __rte_unused,
struct ipsec_sa *sa __rte_unused,
struct rte_crypto_op *cop)
esp_outbound_post(struct rte_mbuf *m,
struct ipsec_sa *sa,
struct rte_crypto_op *cop)
{
RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
return -1;
if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
m->ol_flags |= PKT_TX_SEC_OFFLOAD;
} else {
RTE_ASSERT(cop != NULL);
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
return -1;
}
}
return 0;


@@ -35,16 +35,6 @@
struct mbuf;
/* RFC4303 */
struct esp_hdr {
uint32_t spi;
uint32_t seq;
/* Payload */
/* Padding */
/* Pad Length */
/* Next Header */
/* Integrity Check Value - ICV */
};
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,


@@ -1390,6 +1390,11 @@ port_init(uint16_t portid)
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
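/* Enable inline security offload when the PMD advertises it */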
if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
&port_conf);
if (ret < 0)


@@ -37,7 +37,9 @@
#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>
@@ -49,7 +51,7 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
{
struct rte_cryptodev_info cdev_info;
unsigned long cdev_id_qp = 0;
int32_t ret;
int32_t ret = 0;
struct cdev_key key = { 0 };
key.lcore_id = (uint8_t)rte_lcore_id();
@@ -58,16 +60,19 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
key.auth_algo = (uint8_t)sa->auth_algo;
key.aead_algo = (uint8_t)sa->aead_algo;
ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
(void **)&cdev_id_qp);
if (ret < 0) {
RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
"auth_algo %u, aead_algo %u\n",
key.lcore_id,
key.cipher_algo,
key.auth_algo,
key.aead_algo);
return -1;
if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
(void **)&cdev_id_qp);
if (ret < 0) {
RTE_LOG(ERR, IPSEC,
"No cryptodev: core %u, cipher_algo %u, "
"auth_algo %u, aead_algo %u\n",
key.lcore_id,
key.cipher_algo,
key.auth_algo,
key.aead_algo);
return -1;
}
}
RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
@@ -75,23 +80,153 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp);
sa->crypto_session = rte_cryptodev_sym_session_create(
ipsec_ctx->session_pool);
rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
sa->crypto_session, sa->xforms,
ipsec_ctx->session_pool);
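/* Offloaded actions use an rte_security session; only the
 * no-offload case keeps a plain cryptodev symmetric session. */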
if (sa->type != RTE_SECURITY_ACTION_TYPE_NONE) {
struct rte_security_session_conf sess_conf = {
.action_type = sa->type,
.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
.ipsec = {
.spi = sa->spi,
.salt = sa->salt,
.options = { 0 },
.direction = sa->direction,
.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
.mode = (sa->flags == IP4_TUNNEL ||
sa->flags == IP6_TUNNEL) ?
RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
},
.crypto_xform = sa->xforms
rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
ret = rte_cryptodev_queue_pair_attach_sym_session(
ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp,
sa->crypto_session);
if (ret < 0) {
RTE_LOG(ERR, IPSEC,
"Session cannot be attached to qp %u ",
ipsec_ctx->tbl[cdev_id_qp].qp);
return -1;
};
if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
struct rte_security_ctx *ctx = (struct rte_security_ctx *)
rte_cryptodev_get_sec_ctx(
ipsec_ctx->tbl[cdev_id_qp].id);
if (sess_conf.ipsec.mode ==
RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
struct rte_security_ipsec_tunnel_param *tunnel =
&sess_conf.ipsec.tunnel;
if (sa->flags == IP4_TUNNEL) {
tunnel->type =
RTE_SECURITY_IPSEC_TUNNEL_IPV4;
tunnel->ipv4.ttl = IPDEFTTL;
memcpy((uint8_t *)&tunnel->ipv4.src_ip,
(uint8_t *)&sa->src.ip.ip4, 4);
memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
(uint8_t *)&sa->dst.ip.ip4, 4);
}
/* TODO support for Transport and IPV6 tunnel */
}
sa->sec_session = rte_security_session_create(ctx,
&sess_conf, ipsec_ctx->session_pool);
if (sa->sec_session == NULL) {
RTE_LOG(ERR, IPSEC,
"SEC Session init failed: err: %d\n", ret);
return -1;
}
} else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
struct rte_flow_error err;
struct rte_security_ctx *ctx = (struct rte_security_ctx *)
rte_eth_dev_get_sec_ctx(
sa->portid);
const struct rte_security_capability *sec_cap;
sa->sec_session = rte_security_session_create(ctx,
&sess_conf, ipsec_ctx->session_pool);
if (sa->sec_session == NULL) {
RTE_LOG(ERR, IPSEC,
"SEC Session init failed: err: %d\n", ret);
return -1;
}
sec_cap = rte_security_capabilities_get(ctx);
/* iterate until ESP tunnel */
while (sec_cap->action !=
RTE_SECURITY_ACTION_TYPE_NONE) {
if (sec_cap->action == sa->type &&
sec_cap->protocol ==
RTE_SECURITY_PROTOCOL_IPSEC &&
sec_cap->ipsec.mode ==
RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
sec_cap->ipsec.direction == sa->direction)
break;
sec_cap++;
}
if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
RTE_LOG(ERR, IPSEC,
"No suitable security capability found\n");
return -1;
}
sa->ol_flags = sec_cap->ol_flags;
sa->security_ctx = ctx;
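/* Build an rte_flow rule (ETH / IPv4 or IPv6 / ESP matching the SPI)
 * binding this SA's traffic to the security session. */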
sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
if (sa->flags & IP6_TUNNEL) {
sa->pattern[1].spec = &sa->ipv6_spec;
memcpy(sa->ipv6_spec.hdr.dst_addr,
sa->dst.ip.ip6.ip6_b, 16);
memcpy(sa->ipv6_spec.hdr.src_addr,
sa->src.ip.ip6.ip6_b, 16);
} else {
sa->pattern[1].spec = &sa->ipv4_spec;
sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
}
sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
sa->pattern[2].spec = &sa->esp_spec;
sa->pattern[2].mask = &rte_flow_item_esp_mask;
sa->esp_spec.hdr.spi = sa->spi;
sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
sa->action[0].conf = sa->sec_session;
sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
sa->attr.egress = (sa->direction ==
RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
sa->flow = rte_flow_create(sa->portid,
&sa->attr, sa->pattern, sa->action, &err);
if (sa->flow == NULL) {
RTE_LOG(ERR, IPSEC,
"Failed to create ipsec flow msg: %s\n",
err.message);
return -1;
}
}
} else {
sa->crypto_session = rte_cryptodev_sym_session_create(
ipsec_ctx->session_pool);
rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
sa->crypto_session, sa->xforms,
ipsec_ctx->session_pool);
rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
&cdev_info);
if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
ret = rte_cryptodev_queue_pair_attach_sym_session(
ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp,
sa->crypto_session);
if (ret < 0) {
RTE_LOG(ERR, IPSEC,
"Session cannot be attached to qp %u\n",
ipsec_ctx->tbl[cdev_id_qp].qp);
return -1;
}
}
}
sa->cdev_id_qp = cdev_id_qp;
@@ -129,7 +264,9 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
{
int32_t ret = 0, i;
struct ipsec_mbuf_metadata *priv;
struct rte_crypto_sym_op *sym_cop;
struct ipsec_sa *sa;
struct cdev_qp *cqp;
for (i = 0; i < nb_pkts; i++) {
if (unlikely(sas[i] == NULL)) {
@@ -144,23 +281,76 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
sa = sas[i];
priv->sa = sa;
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
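/* Dispatch on the SA action type: lookaside actions enqueue a crypto
 * op, inline crypto bypasses the crypto device entirely. */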
switch (sa->type) {
case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_prefetch0(&priv->sym_cop);
rte_prefetch0(&priv->sym_cop);
if ((unlikely(sa->crypto_session == NULL)) &&
create_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
if ((unlikely(sa->sec_session == NULL)) &&
create_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
rte_crypto_op_attach_sym_session(&priv->cop,
sa->crypto_session);
sym_cop = get_sym_cop(&priv->cop);
sym_cop->m_src = pkts[i];
ret = xform_func(pkts[i], sa, &priv->cop);
if (unlikely(ret)) {
rte_pktmbuf_free(pkts[i]);
rte_security_attach_session(&priv->cop,
sa->sec_session);
break;
case RTE_SECURITY_ACTION_TYPE_NONE:
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_prefetch0(&priv->sym_cop);
if ((unlikely(sa->crypto_session == NULL)) &&
create_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
rte_crypto_op_attach_sym_session(&priv->cop,
sa->crypto_session);
ret = xform_func(pkts[i], sa, &priv->cop);
if (unlikely(ret)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
break;
case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
break;
case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
rte_prefetch0(&priv->sym_cop);
if ((unlikely(sa->sec_session == NULL)) &&
create_session(ipsec_ctx, sa)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
rte_security_attach_session(&priv->cop,
sa->sec_session);
ret = xform_func(pkts[i], sa, &priv->cop);
if (unlikely(ret)) {
rte_pktmbuf_free(pkts[i]);
continue;
}
cqp = &ipsec_ctx->tbl[sa->cdev_id_qp];
cqp->ol_pkts[cqp->ol_pkts_cnt++] = pkts[i];
if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
rte_security_set_pkt_metadata(
sa->security_ctx,
sa->sec_session, pkts[i], NULL);
continue;
}
@@ -171,7 +361,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
struct rte_mbuf *pkts[], uint16_t max_pkts)
struct rte_mbuf *pkts[], uint16_t max_pkts)
{
int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
struct ipsec_mbuf_metadata *priv;
@@ -186,6 +376,19 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
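/* Drain inline-crypto packets queued on this qp first; they never
 * went through the crypto device. */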
while (cqp->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
pkt = cqp->ol_pkts[--cqp->ol_pkts_cnt];
rte_prefetch0(pkt);
priv = get_priv(pkt);
sa = priv->sa;
ret = xform_func(pkt, sa, &priv->cop);
if (unlikely(ret)) {
rte_pktmbuf_free(pkt);
continue;
}
pkts[nb_pkts++] = pkt;
}
if (cqp->in_flight == 0)
continue;
@@ -203,11 +406,14 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
RTE_ASSERT(sa != NULL);
ret = xform_func(pkt, sa, cops[j]);
if (unlikely(ret))
rte_pktmbuf_free(pkt);
else
pkts[nb_pkts++] = pkt;
if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
ret = xform_func(pkt, sa, cops[j]);
if (unlikely(ret)) {
rte_pktmbuf_free(pkt);
continue;
}
}
pkts[nb_pkts++] = pkt;
}
}


@@ -38,6 +38,8 @@
#include <rte_byteorder.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_flow.h>
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2
@@ -99,7 +101,10 @@ struct ipsec_sa {
uint32_t cdev_id_qp;
uint64_t seq;
uint32_t salt;
struct rte_cryptodev_sym_session *crypto_session;
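/* An SA holds either a cryptodev session or a security session */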
union {
struct rte_cryptodev_sym_session *crypto_session;
struct rte_security_session *sec_session;
};
enum rte_crypto_cipher_algorithm cipher_algo;
enum rte_crypto_auth_algorithm auth_algo;
enum rte_crypto_aead_algorithm aead_algo;
@@ -117,7 +122,28 @@ struct ipsec_sa {
uint8_t auth_key[MAX_KEY_SIZE];
uint16_t auth_key_len;
uint16_t aad_len;
struct rte_crypto_sym_xform *xforms;
union {
struct rte_crypto_sym_xform *xforms;
struct rte_security_ipsec_xform *sec_xform;
};
enum rte_security_session_action_type type;
enum rte_security_ipsec_sa_direction direction;
uint16_t portid;
struct rte_security_ctx *security_ctx;
uint32_t ol_flags;
#define MAX_RTE_FLOW_PATTERN (4)
#define MAX_RTE_FLOW_ACTIONS (2)
struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN];
struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS];
struct rte_flow_attr attr;
union {
struct rte_flow_item_ipv4 ipv4_spec;
struct rte_flow_item_ipv6 ipv6_spec;
};
struct rte_flow_item_esp esp_spec;
struct rte_flow *flow;
struct rte_security_session_conf sess_conf;
} __rte_cache_aligned;
struct ipsec_mbuf_metadata {
@@ -133,6 +159,8 @@ struct cdev_qp {
uint16_t in_flight;
uint16_t len;
struct rte_crypto_op *buf[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
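/* inline-crypto packets bypassing the cryptodev, drained in
 * ipsec_dequeue() */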
struct rte_mbuf *ol_pkts[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
uint16_t ol_pkts_cnt;
};
struct ipsec_ctx {


@@ -41,16 +41,20 @@
#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include "ipsec.h"
#include "esp.h"
#include "parser.h"
#define IPDEFTTL 64
struct supported_cipher_algo {
const char *keyword;
enum rte_crypto_cipher_algorithm algo;
@@ -238,6 +242,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
uint32_t src_p = 0;
uint32_t dst_p = 0;
uint32_t mode_p = 0;
uint32_t type_p = 0;
uint32_t portid_p = 0;
if (strcmp(tokens[0], "in") == 0) {
ri = &nb_sa_in;
@@ -549,6 +555,52 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
continue;
}
if (strcmp(tokens[ti], "type") == 0) {
APP_CHECK_PRESENCE(type_p, tokens[ti], status);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
rule->type =
RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
else if (strcmp(tokens[ti],
"inline-protocol-offload") == 0)
rule->type =
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
else if (strcmp(tokens[ti],
"lookaside-protocol-offload") == 0)
rule->type =
RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
else if (strcmp(tokens[ti], "no-offload") == 0)
rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
else {
APP_CHECK(0, status, "Invalid input \"%s\"",
tokens[ti]);
return;
}
type_p = 1;
continue;
}
if (strcmp(tokens[ti], "port_id") == 0) {
APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
if (status->status < 0)
return;
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
return;
rule->portid = atoi(tokens[ti]);
if (status->status < 0)
return;
portid_p = 1;
continue;
}
/* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
@@ -579,6 +631,14 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
if (status->status < 0)
return;
if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
printf("Missing portid option, falling back to non-offload\n");
if (!type_p || !portid_p) {
rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
rule->portid = -1;
}
*ri = *ri + 1;
}
@@ -646,9 +706,11 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
struct sa_ctx {
struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
struct {
struct rte_crypto_sym_xform a;
struct rte_crypto_sym_xform b;
union {
struct {
struct rte_crypto_sym_xform a;
struct rte_crypto_sym_xform b;
};
} xf[IPSEC_SA_MAX_ENTRIES];
};
@@ -680,6 +742,33 @@ sa_create(const char *name, int32_t socket_id)
return sa_ctx;
}
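/* Check that the ethernet device supports inline security offload in
 * the direction required by the SA. */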
static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound)
{
struct rte_eth_dev_info dev_info;
rte_eth_dev_info_get(portid, &dev_info);
if (inbound) {
if ((dev_info.rx_offload_capa &
DEV_RX_OFFLOAD_SECURITY) == 0) {
RTE_LOG(WARNING, PORT,
"hardware RX IPSec offload is not supported\n");
return -EINVAL;
}
} else { /* outbound */
if ((dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_SECURITY) == 0) {
RTE_LOG(WARNING, PORT,
"hardware TX IPSec offload is not supported\n");
return -EINVAL;
}
}
return 0;
}
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
uint32_t nb_entries, uint32_t inbound)
@@ -699,6 +788,16 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
*sa = entries[i];
sa->seq = 0;
if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
if (check_eth_dev_caps(sa->portid, inbound))
return -EINVAL;
}
sa->direction = (inbound == 1) ?
RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
switch (sa->flags) {
case IP4_TUNNEL:
sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
@@ -708,37 +807,21 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
iv_length = 16;
if (inbound) {
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
sa_ctx->xf[idx].a.aead.key.length =
sa->cipher_key_len;
sa_ctx->xf[idx].a.aead.op =
RTE_CRYPTO_AEAD_OP_DECRYPT;
sa_ctx->xf[idx].a.next = NULL;
sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
sa_ctx->xf[idx].a.aead.iv.length = iv_length;
sa_ctx->xf[idx].a.aead.aad_length =
sa->aad_len;
sa_ctx->xf[idx].a.aead.digest_length =
sa->digest_len;
} else { /* outbound */
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
sa_ctx->xf[idx].a.aead.key.length =
sa->cipher_key_len;
sa_ctx->xf[idx].a.aead.op =
RTE_CRYPTO_AEAD_OP_ENCRYPT;
sa_ctx->xf[idx].a.next = NULL;
sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
sa_ctx->xf[idx].a.aead.iv.length = iv_length;
sa_ctx->xf[idx].a.aead.aad_length =
sa->aad_len;
sa_ctx->xf[idx].a.aead.digest_length =
sa->digest_len;
}
sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
sa_ctx->xf[idx].a.aead.key.length =
sa->cipher_key_len;
sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
RTE_CRYPTO_AEAD_OP_DECRYPT :
RTE_CRYPTO_AEAD_OP_ENCRYPT;
sa_ctx->xf[idx].a.next = NULL;
sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
sa_ctx->xf[idx].a.aead.iv.length = iv_length;
sa_ctx->xf[idx].a.aead.aad_length =
sa->aad_len;
sa_ctx->xf[idx].a.aead.digest_length =
sa->digest_len;
sa->xforms = &sa_ctx->xf[idx].a;