examples/ipsec-secgw: add lookaside event mode

Added base support for lookaside event mode.
Events coming from ethdev are enqueued to the event
crypto adapter, processed there, and enqueued back
to ethdev for transmission.

Signed-off-by: Volodymyr Fialko <vfialko@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
Author:    Volodymyr Fialko, 2022-10-10 18:56:26 +02:00
Committer: Akhil Goyal
Parent:    c12871e437
Commit:    6938fc92c4
7 changed files with 295 additions and 42 deletions
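At a high level, the patch splits IPsec processing across two event types: ETHDEV events are classified, matched to an SA and handed to the event crypto adapter, while CRYPTODEV events carry the completed crypto operation back for routing and transmission. A minimal sketch of that worker loop follows; it is not the patch's actual code, and the device/port ids are illustrative placeholders:

	uint8_t evdev_id = 0, ev_port_id = 0;	/* placeholder ids */
	struct rte_crypto_op *cop;
	struct rte_event ev;

	while (rte_event_dequeue_burst(evdev_id, ev_port_id, &ev, 1, 0)) {
		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETHDEV:
			/* classify, look up the SA, build the crypto op,
			 * then hand the packet to the crypto adapter */
			rte_event_crypto_adapter_enqueue(evdev_id, ev_port_id, &ev, 1);
			break;
		case RTE_EVENT_TYPE_CRYPTODEV:
			/* crypto is complete: recover the mbuf, route it
			 * and pass it on to the Tx adapter */
			cop = ev.event_ptr;
			ev.mbuf = cop->sym->m_src;
			rte_event_eth_tx_adapter_enqueue(evdev_id, ev_port_id, &ev, 1, 0);
			break;
		}
	}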

doc/guides/rel_notes/release_22_11.rst

@@ -271,6 +271,11 @@ New Features
  sysfs entries to adjust the minimum and maximum uncore frequency values,
  which works on Linux with Intel hardware only.

+* **Updated IPsec sample application.**
+
+  Added support for lookaside sessions in event mode.
+  See the :doc:`../sample_app_ug/ipsec_secgw` for more details.
+
* **Rewritten pmdinfo script.**
  The ``dpdk-pmdinfo.py`` script was rewritten to produce valid JSON only.

doc/guides/sample_app_ug/ipsec_secgw.rst

@@ -82,9 +82,10 @@ The application supports two modes of operation: poll mode and event mode.
to help application to have multiple worker threads by maximizing performance from
every type of event device without affecting existing paths/use cases. The worker
to be used will be determined by the operating conditions and the underlying device
-capabilities. **Currently the application provides non-burst, internal port worker
-threads and supports inline protocol only.** It also provides infrastructure for
-non-internal port however does not define any worker threads.
+capabilities.
+**Currently the application provides non-burst, internal port worker threads.**
+It also provides infrastructure for non-internal port
+however does not define any worker threads.
Event mode also supports event vectorization. The event devices, ethernet device
pairs which support the capability ``RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR`` can
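For context, event mode (which the lookaside support below plugs into) is selected on the application's command line; an illustrative invocation, with the EAL arguments, port mask and configuration file as placeholders, could look like:

	dpdk-ipsec-secgw -l 0-1 -n 4 -- -p 0x3 -P -f ep0.cfg \
		--transfer-mode event --event-schedule-type parallel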

examples/ipsec-secgw/ipsec-secgw.c

@@ -3056,7 +3056,8 @@ main(int32_t argc, char **argv)
		if ((socket_ctx[socket_id].session_pool != NULL) &&
				(socket_ctx[socket_id].sa_in == NULL) &&
				(socket_ctx[socket_id].sa_out == NULL)) {
-			sa_init(&socket_ctx[socket_id], socket_id, lcore_conf);
+			sa_init(&socket_ctx[socket_id], socket_id, lcore_conf,
+				eh_conf->mode_params);
sp4_init(&socket_ctx[socket_id], socket_id);
sp6_init(&socket_ctx[socket_id], socket_id);
rt_init(&socket_ctx[socket_id], socket_id);

examples/ipsec-secgw/ipsec.c

@@ -6,6 +6,7 @@
#include <netinet/ip.h>
#include <rte_branch_prediction.h>
+#include <rte_event_crypto_adapter.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
@@ -56,14 +57,17 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
-		struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
-		struct rte_ipsec_session *ips)
+		struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
+		struct ipsec_sa *sa, struct rte_ipsec_session *ips)
{
	uint16_t cdev_id = RTE_CRYPTO_MAX_DEVS;
+	enum rte_crypto_op_sess_type sess_type;
	struct rte_cryptodev_info cdev_info;
+	enum rte_crypto_op_type op_type;
	unsigned long cdev_id_qp = 0;
-	struct cdev_key key = { 0 };
	struct ipsec_ctx *ipsec_ctx;
+	struct cdev_key key = { 0 };
+	void *sess = NULL;
	uint32_t lcore_id;
	int32_t ret = 0;
@@ -158,6 +162,10 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
			return -1;
		}
		ips->security.ctx = ctx;
+
+		sess = ips->security.ses;
+		op_type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+		sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
	} else {
		RTE_LOG(ERR, IPSEC, "Inline not supported\n");
		return -1;
@@ -179,6 +187,28 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
		rte_cryptodev_info_get(cdev_id, &cdev_info);
	}

+	/* Setup meta data required by event crypto adapter */
+	if (em_conf->enable_event_crypto_adapter && sess != NULL) {
+		union rte_event_crypto_metadata m_data;
+		const struct eventdev_params *eventdev_conf;
+
+		eventdev_conf = &(em_conf->eventdev_config[0]);
+		memset(&m_data, 0, sizeof(m_data));
+
+		/* Fill in response information */
+		m_data.response_info.sched_type = em_conf->ext_params.sched_type;
+		m_data.response_info.op = RTE_EVENT_OP_NEW;
+		m_data.response_info.queue_id = eventdev_conf->ev_cpt_queue_id;
+
+		/* Fill in request information */
+		m_data.request_info.cdev_id = cdev_id;
+		m_data.request_info.queue_pair_id = 0;
+
+		/* Attach meta info to session */
+		rte_cryptodev_session_event_mdata_set(cdev_id, sess, op_type,
+				sess_type, &m_data, sizeof(m_data));
+	}
+
	return 0;
}
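The response_info above uses RTE_EVENT_OP_NEW, so the adapter injects a fresh event on the configured crypto queue for each completed operation. Before relying on session-resident metadata like this, an application can check the adapter capabilities for the eventdev/cryptodev pair; a minimal sketch, where the device ids and the helper name are illustrative and not part of this patch:

	uint8_t eventdev_id = 0, cdev_id = 0;	/* placeholder ids */
	uint32_t caps = 0;

	if (rte_event_crypto_adapter_caps_get(eventdev_id, cdev_id, &caps) == 0 &&
	    (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))
		/* the device pair can carry metadata in the session, as done above;
		 * setup_session_event_mdata() is a hypothetical helper */
		setup_session_event_mdata(cdev_id, sess);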

examples/ipsec-secgw/ipsec.h

@@ -14,6 +14,7 @@
#include <rte_flow.h>
#include <rte_ipsec.h>
+#include "event_helper.h"
#include "ipsec-secgw.h"
#define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2
@@ -425,7 +426,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound);
void
sa_init(struct socket_ctx *ctx, int32_t socket_id,
-		struct lcore_conf *lcore_conf);
+		struct lcore_conf *lcore_conf,
+		const struct eventmode_conf *em_conf);
void
rt_init(struct socket_ctx *ctx, int32_t socket_id);
@@ -442,8 +444,8 @@ enqueue_cop_burst(struct cdev_qp *cqp);
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx[],
-		struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
-		struct rte_ipsec_session *ips);
+		struct socket_ctx *skt_ctx, const struct eventmode_conf *em_conf,
+		struct ipsec_sa *sa, struct rte_ipsec_session *ips);
int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,

examples/ipsec-secgw/ipsec_worker.c

@@ -3,6 +3,7 @@
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
+#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>
@@ -11,6 +12,7 @@
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"
+#include "sad.h"
#if defined(__ARM_NEON)
#include "ipsec_lpm_neon.h"
@@ -225,6 +227,47 @@ check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
	ip->num = j;
}

+static inline void
+ipv4_pkt_l3_len_set(struct rte_mbuf *pkt)
+{
+	struct rte_ipv4_hdr *ipv4;
+
+	ipv4 = rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *);
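+	/* ihl holds the IPv4 header length in 32-bit words; convert to bytes */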
+	pkt->l3_len = ipv4->ihl * 4;
+}
+
+static inline int
+ipv6_pkt_l3_len_set(struct rte_mbuf *pkt)
+{
+	struct rte_ipv6_hdr *ipv6;
+	size_t l3_len, ext_len;
+	uint32_t l3_type;
+	int next_proto;
+	uint8_t *p;
+
+	ipv6 = rte_pktmbuf_mtod(pkt, struct rte_ipv6_hdr *);
+	l3_len = sizeof(struct rte_ipv6_hdr);
+	l3_type = pkt->packet_type & RTE_PTYPE_L3_MASK;
+
+	if (l3_type == RTE_PTYPE_L3_IPV6_EXT ||
+	    l3_type == RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) {
+		p = rte_pktmbuf_mtod(pkt, uint8_t *);
+		next_proto = ipv6->proto;
+		while (next_proto != IPPROTO_ESP &&
+		       l3_len < pkt->data_len &&
+		       (next_proto = rte_ipv6_get_next_ext(p + l3_len,
+					next_proto, &ext_len)) >= 0)
+			l3_len += ext_len;
+
+		/* Drop pkt when IPv6 header exceeds first seg size */
+		if (unlikely(l3_len > pkt->data_len))
+			return -EINVAL;
+	}
+
+	pkt->l3_len = l3_len;
+
+	return 0;
+}
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
@@ -284,9 +327,67 @@ get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
	return RTE_MAX_ETHPORTS;
}

+static inline void
+crypto_op_reset(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
+		struct rte_crypto_op *cop[], uint16_t num)
+{
+	struct rte_crypto_sym_op *sop;
+	uint32_t i;
+
+	const struct rte_crypto_op unproc_cop = {
+		.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+		.sess_type = RTE_CRYPTO_OP_SECURITY_SESSION,
+	};
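+	/* one store of the raw union word resets type, status and sess_type together */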
+	for (i = 0; i != num; i++) {
+		cop[i]->raw = unproc_cop.raw;
+		sop = cop[i]->sym;
+		sop->m_src = mb[i];
+		sop->m_dst = NULL;
+		__rte_security_attach_session(sop, ss->security.ses);
+	}
+}
+static inline int
+event_crypto_enqueue(struct ipsec_ctx *ctx __rte_unused, struct rte_mbuf *pkt,
+		struct ipsec_sa *sa, const struct eh_event_link_info *ev_link)
+{
+	struct ipsec_mbuf_metadata *priv;
+	struct rte_ipsec_session *sess;
+	struct rte_crypto_op *cop;
+	struct rte_event cev;
+	int ret;
+
+	/* Get IPsec session */
+	sess = ipsec_get_primary_session(sa);
+
+	/* Get pkt private data */
+	priv = get_priv(pkt);
+	cop = &priv->cop;
+
+	/* Reset crypto operation data */
+	crypto_op_reset(sess, &pkt, &cop, 1);
+
+	/* Update event_ptr with rte_crypto_op */
+	cev.event = 0;
+	cev.event_ptr = cop;
+
+	/* Enqueue event to crypto adapter */
+	ret = rte_event_crypto_adapter_enqueue(ev_link->eventdev_id,
+			ev_link->event_port_id, &cev, 1);
+	if (unlikely(ret <= 0)) {
+		/* pkt will be freed by the caller */
+		RTE_LOG_DP(DEBUG, IPSEC, "Cannot enqueue event: %i (errno: %i)\n", ret, rte_errno);
+		return rte_errno;
+	}
+
+	return 0;
+}
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
-		struct rte_event *ev)
+		const struct eh_event_link_info *ev_link, struct rte_event *ev)
{
struct ipsec_sa *sa = NULL;
struct rte_mbuf *pkt;
@@ -337,7 +438,35 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
goto drop_pkt_and_exit;
}
break;
+	case PKT_TYPE_IPSEC_IPV4:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		ipv4_pkt_l3_len_set(pkt);
+		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
+		sa = ipsec_mask_saptr(sa);
+		if (unlikely(sa == NULL)) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
+			goto drop_pkt_and_exit;
+		}
+
+		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+			goto drop_pkt_and_exit;
+
+		return PKT_POSTED;
+	case PKT_TYPE_IPSEC_IPV6:
+		rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+		if (unlikely(ipv6_pkt_l3_len_set(pkt) != 0))
+			goto drop_pkt_and_exit;
+		sad_lookup(&ctx->sa_ctx->sad, &pkt, (void **)&sa, 1);
+		sa = ipsec_mask_saptr(sa);
+		if (unlikely(sa == NULL)) {
+			RTE_LOG_DP(DEBUG, IPSEC, "Cannot find sa\n");
+			goto drop_pkt_and_exit;
+		}
+
+		if (unlikely(event_crypto_enqueue(ctx, pkt, sa, ev_link)))
+			goto drop_pkt_and_exit;
+
+		return PKT_POSTED;
default:
RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
type);
@@ -386,7 +515,7 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
-		struct rte_event *ev)
+		const struct eh_event_link_info *ev_link, struct rte_event *ev)
{
struct rte_ipsec_session *sess;
struct rte_ether_hdr *ethhdr;
@@ -455,11 +584,9 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

-	/* Allow only inline protocol for now */
-	if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
-		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
-		goto drop_pkt_and_exit;
-	}
+	/* Determine protocol type */
+	if (sess->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
+		goto lookaside;
rte_security_set_pkt_metadata(sess->security.ctx,
sess->security.ses, pkt, NULL);
@@ -484,6 +611,13 @@ process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

+lookaside:
+	/* prepare pkt - advance start to L3 */
+	rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
+
+	if (likely(event_crypto_enqueue(ctx, pkt, sa, ev_link) == 0))
+		return PKT_POSTED;
+
drop_pkt_and_exit:
RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
rte_pktmbuf_free(pkt);
@@ -762,6 +896,67 @@ ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
	rte_mempool_put(rte_mempool_from_obj(vec), vec);
}

+static inline int
+ipsec_ev_cryptodev_process(const struct lcore_conf_ev_tx_int_port_wrkr *lconf,
+			   struct rte_event *ev)
+{
+	struct rte_ether_hdr *ethhdr;
+	struct rte_crypto_op *cop;
+	struct rte_mbuf *pkt;
+	uint16_t port_id;
+	struct ip *ip;
+
+	/* Get pkt data */
+	cop = ev->event_ptr;
+	pkt = cop->sym->m_src;
+
+	/* If operation was not successful, drop the packet */
+	if (unlikely(cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) {
+		RTE_LOG_DP(INFO, IPSEC, "Crypto operation failed\n");
+		free_pkts(&pkt, 1);
+		return PKT_DROPPED;
+	}
+
+	ip = rte_pktmbuf_mtod(pkt, struct ip *);
+
+	/* Prepend Ether layer */
+	ethhdr = (struct rte_ether_hdr *)rte_pktmbuf_prepend(pkt, RTE_ETHER_HDR_LEN);
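+	/* headroom is available here: the Ethernet header was removed with
+	 * rte_pktmbuf_adj() before the packet was handed to the cryptodev
+	 */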
+	/* Route pkt and update required fields */
+	if (ip->ip_v == IPVERSION) {
+		pkt->ol_flags |= lconf->outbound.ipv4_offloads;
+		pkt->l3_len = sizeof(struct ip);
+		pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+
+		port_id = route4_pkt(pkt, lconf->rt.rt4_ctx);
+	} else {
+		pkt->ol_flags |= lconf->outbound.ipv6_offloads;
+		pkt->l3_len = sizeof(struct ip6_hdr);
+		pkt->l2_len = RTE_ETHER_HDR_LEN;
+
+		ethhdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+
+		port_id = route6_pkt(pkt, lconf->rt.rt6_ctx);
+	}
+
+	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
+		RTE_LOG_DP(DEBUG, IPSEC, "Cannot route processed packet\n");
+		free_pkts(&pkt, 1);
+		return PKT_DROPPED;
+	}
+
+	/* Update Ether with port's MAC addresses */
+	memcpy(&ethhdr->src_addr, &ethaddr_tbl[port_id].src, sizeof(struct rte_ether_addr));
+	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[port_id].dst, sizeof(struct rte_ether_addr));
+
+	/* Update event */
+	ev->mbuf = pkt;
+
+	return PKT_FORWARDED;
+}
/*
* Event mode exposes various operating modes depending on the
* capabilities of the event device and the operating mode
@@ -952,6 +1147,14 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

+	ret = ipsec_sad_lcore_cache_init(app_sa_prm.cache_sz);
+	if (ret != 0) {
+		RTE_LOG(ERR, IPSEC,
+			"SAD cache init on lcore %u, failed with code: %d\n",
+			lcore_id, ret);
+		return;
+	}
+
/* Check if it's single link */
if (nb_links != 1) {
RTE_LOG(INFO, IPSEC,
@@ -978,6 +1181,20 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
			ipsec_ev_vector_process(&lconf, links, &ev);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
+			if (is_unprotected_port(ev.mbuf->port))
+				ret = process_ipsec_ev_inbound(&lconf.inbound,
+						&lconf.rt, links, &ev);
+			else
+				ret = process_ipsec_ev_outbound(&lconf.outbound,
+						&lconf.rt, links, &ev);
+			if (ret != 1)
+				/* The pkt has been dropped or posted */
+				continue;
			break;
+		case RTE_EVENT_TYPE_CRYPTODEV:
+			ret = ipsec_ev_cryptodev_process(&lconf, &ev);
+			if (unlikely(ret != PKT_FORWARDED))
+				continue;
+			break;
default:
RTE_LOG(ERR, IPSEC, "Invalid event type %u",
@@ -985,16 +1202,6 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
			continue;
		}

-		if (is_unprotected_port(ev.mbuf->port))
-			ret = process_ipsec_ev_inbound(&lconf.inbound,
-						       &lconf.rt, &ev);
-		else
-			ret = process_ipsec_ev_outbound(&lconf.outbound,
-							&lconf.rt, &ev);
-		if (ret != 1)
-			/* The pkt has been dropped */
-			continue;
-
/*
* Since tx internal port is available, events can be
* directly enqueued to the adapter and it would be

examples/ipsec-secgw/sa.c

@@ -1236,7 +1236,8 @@ static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound,
		struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
{
struct ipsec_sa *sa;
uint32_t i, idx;
@@ -1409,7 +1410,8 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
			return -EINVAL;
		}
	} else {
-		rc = create_lookaside_session(ips_ctx, skt_ctx, sa, ips);
+		rc = create_lookaside_session(ips_ctx, skt_ctx,
+				em_conf, sa, ips);
if (rc != 0) {
RTE_LOG(ERR, IPSEC_ESP,
"create_lookaside_session() failed\n");
@@ -1432,17 +1434,19 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
{
-	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx);
+	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx, ips_ctx, em_conf);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx,
-		struct ipsec_ctx *ips_ctx[])
+		struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
{
-	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx);
+	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx, ips_ctx, em_conf);
}
/*
@@ -1535,7 +1539,8 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
 */
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
-		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[])
+		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
{
int rc;
struct rte_ipsec_sa_prm prm;
@@ -1577,7 +1582,7 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
	if (lsa->fallback_sessions == 1) {
		struct rte_ipsec_session *ipfs = ipsec_get_fallback_session(lsa);
		if (ipfs->security.ses == NULL) {
-			rc = create_lookaside_session(ips_ctx, skt_ctx, lsa, ipfs);
+			rc = create_lookaside_session(ips_ctx, skt_ctx, em_conf, lsa, ipfs);
if (rc != 0)
return rc;
}
@@ -1593,7 +1598,8 @@ ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size,
 */
static int
ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket,
-		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[])
+		struct socket_ctx *skt_ctx, struct ipsec_ctx *ips_ctx[],
+		const struct eventmode_conf *em_conf)
{
int32_t rc, sz;
uint32_t i, idx;
@@ -1631,7 +1637,7 @@ ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket,
		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
		lsa = ctx->sa + idx;
-		rc = ipsec_sa_init(lsa, sa, sz, skt_ctx, ips_ctx);
+		rc = ipsec_sa_init(lsa, sa, sz, skt_ctx, ips_ctx, em_conf);
}
return rc;
@@ -1674,7 +1680,8 @@ sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
void
sa_init(struct socket_ctx *ctx, int32_t socket_id,
-		struct lcore_conf *lcore_conf)
+		struct lcore_conf *lcore_conf,
+		const struct eventmode_conf *em_conf)
{
int32_t rc;
const char *name;
@@ -1706,11 +1713,11 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id,
		rte_exit(EXIT_FAILURE, "failed to init SAD\n");
	RTE_LCORE_FOREACH(lcore_id)
		ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].inbound;
-	sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx);
+	sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx, ipsec_ctx, em_conf);

	if (app_sa_prm.enable != 0) {
		rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
-				socket_id, ctx, ipsec_ctx);
+				socket_id, ctx, ipsec_ctx, em_conf);
if (rc != 0)
rte_exit(EXIT_FAILURE,
"failed to init inbound SAs\n");
@@ -1728,11 +1735,11 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id,
	RTE_LCORE_FOREACH(lcore_id)
		ipsec_ctx[lcore_id] = &lcore_conf[lcore_id].outbound;
-	sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx);
+	sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx, ipsec_ctx, em_conf);

	if (app_sa_prm.enable != 0) {
		rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
-				socket_id, ctx, ipsec_ctx);
+				socket_id, ctx, ipsec_ctx, em_conf);
if (rc != 0)
rte_exit(EXIT_FAILURE,
"failed to init outbound SAs\n");