examples/ipsec-secgw: fix crypto-op might never get dequeued

In some cases crypto-ops could never be dequeued from the crypto device.
The easiest way to reproduce: start ipsec-secgw with a crypto-dev and
send it fewer than 32 packets; no packets will be forwarded.
The reason is that the application dequeues from the crypto queues only
when new packets arrive.
This patch makes the app call dequeue() on a regular basis.
Also, to make the code cleaner and easier to understand, it separates
the crypto-dev enqueue() and dequeue() code paths: process_pkts() now
only enqueues packets into the crypto device, while dequeueing and
final processing are done by the new drain_inbound_crypto_queues() and
drain_outbound_crypto_queues() helpers.

Fixes: c64278c0c1 ("examples/ipsec-secgw: rework processing loop")
Cc: stable@dpdk.org

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
Author:    Konstantin Ananyev <konstantin.ananyev@intel.com>
Date:      2019-01-10 21:09:06 +00:00
Committer: Pablo de Lara
Commit:    d87152e796 (parent: 7622291b64)

3 changed files with 203 additions and 61 deletions
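
To see the failure mode in isolation, here is a small self-contained toy
model (not part of the patch; struct toy_qp, toy_enqueue() and toy_drain()
are invented for illustration). Before the fix, crypto-ops accumulated in
the per-queue buffer until a full burst of MAX_PKT_BURST (32) was
collected, so fewer than 32 packets could sit there forever; the periodic
drain added by this patch flushes partial bursts:

#include <stdio.h>

#define BURST 32

/* Toy model of a cdev_qp enqueue buffer: ops accumulate until a full
 * burst is collected, mirroring enqueue_cop() before this patch. */
struct toy_qp {
	int len;       /* ops buffered, not yet handed to the device */
	int in_flight; /* ops inside the device, awaiting dequeue */
};

static void toy_enqueue(struct toy_qp *qp)
{
	qp->len++;
	if (qp->len == BURST) {      /* flush only on a full burst */
		qp->in_flight += qp->len;
		qp->len = 0;
	}
}

static void toy_drain(struct toy_qp *qp)
{
	/* what drain_crypto_buffers() adds: flush a partial burst */
	qp->in_flight += qp->len;
	qp->len = 0;
}

int main(void)
{
	struct toy_qp qp = {0, 0};
	int i;

	for (i = 0; i < 5; i++)      /* fewer than BURST packets arrive */
		toy_enqueue(&qp);

	printf("before drain: buffered=%d in_flight=%d\n", qp.len, qp.in_flight);
	toy_drain(&qp);              /* the periodic drain rescues them */
	printf("after drain:  buffered=%d in_flight=%d\n", qp.len, qp.in_flight);
	return 0;
}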

examples/ipsec-secgw/ipsec-secgw.c

@@ -469,37 +469,54 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
 	ip->num = j;
 }
 
+static void
+split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
+{
+	uint32_t i, n4, n6;
+	struct ip *ip;
+	struct rte_mbuf *m;
+
+	n4 = trf->ip4.num;
+	n6 = trf->ip6.num;
+
+	for (i = 0; i < num; i++) {
+
+		m = mb[i];
+		ip = rte_pktmbuf_mtod(m, struct ip *);
+
+		if (ip->ip_v == IPVERSION) {
+			trf->ip4.pkts[n4] = m;
+			trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
+					uint8_t *, offsetof(struct ip, ip_p));
+			n4++;
+		} else if (ip->ip_v == IP6_VERSION) {
+			trf->ip6.pkts[n6] = m;
+			trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
+					uint8_t *,
+					offsetof(struct ip6_hdr, ip6_nxt));
+			n6++;
+		} else
+			rte_pktmbuf_free(m);
+	}
+
+	trf->ip4.num = n4;
+	trf->ip6.num = n6;
+}
+
 static inline void
 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
 		struct ipsec_traffic *traffic)
 {
-	struct rte_mbuf *m;
-	uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6;
-
-	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
-			traffic->ipsec.num, MAX_PKT_BURST);
+	uint16_t nb_pkts_in, n_ip4, n_ip6;
 
 	n_ip4 = traffic->ip4.num;
 	n_ip6 = traffic->ip6.num;
 
-	/* SP/ACL Inbound check ipsec and ip4 */
-	for (i = 0; i < nb_pkts_in; i++) {
-		m = traffic->ipsec.pkts[i];
-		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
-		if (ip->ip_v == IPVERSION) {
-			idx = traffic->ip4.num++;
-			traffic->ip4.pkts[idx] = m;
-			traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
-					uint8_t *, offsetof(struct ip, ip_p));
-		} else if (ip->ip_v == IP6_VERSION) {
-			idx = traffic->ip6.num++;
-			traffic->ip6.pkts[idx] = m;
-			traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
-					uint8_t *,
-					offsetof(struct ip6_hdr, ip6_nxt));
-		} else
-			rte_pktmbuf_free(m);
-	}
+	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+			traffic->ipsec.num, MAX_PKT_BURST);
+
+	split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
 
 	inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
 			n_ip4);
@@ -795,7 +812,7 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
 }
 
 static inline void
-drain_buffers(struct lcore_conf *qconf)
+drain_tx_buffers(struct lcore_conf *qconf)
 {
 	struct buffer *buf;
 	uint32_t portid;
@@ -809,6 +826,81 @@ drain_buffers(struct lcore_conf *qconf)
 	}
 }
 
+static inline void
+drain_crypto_buffers(struct lcore_conf *qconf)
+{
+	uint32_t i;
+	struct ipsec_ctx *ctx;
+
+	/* drain inbound buffers*/
+	ctx = &qconf->inbound;
+	for (i = 0; i != ctx->nb_qps; i++) {
+		if (ctx->tbl[i].len != 0)
+			enqueue_cop_burst(ctx->tbl + i);
+	}
+
+	/* drain outbound buffers*/
+	ctx = &qconf->outbound;
+	for (i = 0; i != ctx->nb_qps; i++) {
+		if (ctx->tbl[i].len != 0)
+			enqueue_cop_burst(ctx->tbl + i);
+	}
+}
+
+static void
+drain_inbound_crypto_queues(const struct lcore_conf *qconf,
+		struct ipsec_ctx *ctx)
+{
+	uint32_t n;
+	struct ipsec_traffic trf;
+
+	/* dequeue packets from crypto-queue */
+	n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
+			RTE_DIM(trf.ipsec.pkts));
+	if (n == 0)
+		return;
+
+	trf.ip4.num = 0;
+	trf.ip6.num = 0;
+
+	/* split traffic by ipv4-ipv6 */
+	split46_traffic(&trf, trf.ipsec.pkts, n);
+
+	/* process ipv4 packets */
+	inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
+	route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+
+	/* process ipv6 packets */
+	inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
+	route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
+}
+
+static void
+drain_outbound_crypto_queues(const struct lcore_conf *qconf,
+		struct ipsec_ctx *ctx)
+{
+	uint32_t n;
+	struct ipsec_traffic trf;
+
+	/* dequeue packets from crypto-queue */
+	n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
+			RTE_DIM(trf.ipsec.pkts));
+	if (n == 0)
+		return;
+
+	trf.ip4.num = 0;
+	trf.ip6.num = 0;
+
+	/* split traffic by ipv4-ipv6 */
+	split46_traffic(&trf, trf.ipsec.pkts, n);
+
+	/* process ipv4 packets */
+	route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+
+	/* process ipv6 packets */
+	route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
+}
+
 /* main processing loop */
 static int32_t
 main_loop(__attribute__((unused)) void *dummy)
@@ -870,12 +962,14 @@ main_loop(__attribute__((unused)) void *dummy)
 		diff_tsc = cur_tsc - prev_tsc;
 
 		if (unlikely(diff_tsc > drain_tsc)) {
-			drain_buffers(qconf);
+			drain_tx_buffers(qconf);
+			drain_crypto_buffers(qconf);
 			prev_tsc = cur_tsc;
 		}
 
-		/* Read packet from RX queues */
 		for (i = 0; i < qconf->nb_rx_queue; ++i) {
+			/* Read packets from RX queues */
 			portid = rxql[i].port_id;
 			queueid = rxql[i].queue_id;
 			nb_rx = rte_eth_rx_burst(portid, queueid,
@@ -883,6 +977,14 @@ main_loop(__attribute__((unused)) void *dummy)
 			if (nb_rx > 0)
 				process_pkts(qconf, pkts, nb_rx, portid);
 
+			/* dequeue and process completed crypto-ops */
+			if (UNPROTECTED_PORT(portid))
+				drain_inbound_crypto_queues(qconf,
+					&qconf->inbound);
+			else
+				drain_outbound_crypto_queues(qconf,
+					&qconf->outbound);
 		}
 	}
 }
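
For readers unfamiliar with the dequeue side: ipsec_dequeue() walks the
context's queue pairs and collects completed ops with
rte_cryptodev_dequeue_burst(). A simplified sketch of that per-queue-pair
step (toy_cqp_drain() is an invented name; error handling and the
esp_*_post processing are elided; assumes the app's ipsec.h for
struct cdev_qp):

#include <rte_cryptodev.h>
#include "ipsec.h"	/* struct cdev_qp, as defined by the app */

/* Simplified sketch only -- not the app's actual ipsec_dequeue(). */
static uint16_t
toy_cqp_drain(struct cdev_qp *cqp, struct rte_crypto_op *cops[], uint16_t n)
{
	uint16_t nb;

	if (cqp->in_flight == 0)	/* nothing pending on this qp */
		return 0;

	nb = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cops, n);
	cqp->in_flight -= nb;		/* balance the enqueue-side += */
	return nb;
}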

examples/ipsec-secgw/ipsec.c

@@ -333,33 +333,35 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 	return 0;
 }
 
+/*
+ * queue crypto-ops into PMD queue.
+ */
+void
+enqueue_cop_burst(struct cdev_qp *cqp)
+{
+	uint32_t i, len, ret;
+
+	len = cqp->len;
+	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
+	if (ret < len) {
+		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
+			" enqueued %u crypto ops out of %u\n",
+			cqp->id, cqp->qp, ret, len);
+		/* drop packets that we fail to enqueue */
+		for (i = ret; i < len; i++)
+			rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
+	}
+	cqp->in_flight += ret;
+	cqp->len = 0;
+}
+
 static inline void
 enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
 {
-	int32_t ret = 0, i;
-
 	cqp->buf[cqp->len++] = cop;
-	if (cqp->len == MAX_PKT_BURST) {
-		int enq_size = cqp->len;
-		if ((cqp->in_flight + enq_size) > MAX_INFLIGHT)
-			enq_size -=
-			    (int)((cqp->in_flight + enq_size) - MAX_INFLIGHT);
-
-		if (enq_size > 0)
-			ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
-					cqp->buf, enq_size);
-		if (ret < cqp->len) {
-			RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
-					" enqueued %u crypto ops out of %u\n",
-					cqp->id, cqp->qp,
-					ret, cqp->len);
-			for (i = ret; i < cqp->len; i++)
-				rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
-		}
-		cqp->in_flight += ret;
-		cqp->len = 0;
-	}
+	if (cqp->len == MAX_PKT_BURST)
+		enqueue_cop_burst(cqp);
 }
static inline void
@@ -473,6 +475,32 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 	}
 }
 
+static inline int32_t
+ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
+	    struct rte_mbuf *pkts[], uint16_t max_pkts)
+{
+	int32_t nb_pkts, ret;
+	struct ipsec_mbuf_metadata *priv;
+	struct ipsec_sa *sa;
+	struct rte_mbuf *pkt;
+
+	nb_pkts = 0;
+	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
+		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
+		rte_prefetch0(pkt);
+		priv = get_priv(pkt);
+		sa = priv->sa;
+		ret = xform_func(pkt, sa, &priv->cop);
+		if (unlikely(ret)) {
+			rte_pktmbuf_free(pkt);
+			continue;
+		}
+		pkts[nb_pkts++] = pkt;
+	}
+
+	return nb_pkts;
+}
+
 static inline int
 ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 	    struct rte_mbuf *pkts[], uint16_t max_pkts)
@@ -490,19 +518,6 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
 			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
 
-		while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
-			pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
-			rte_prefetch0(pkt);
-			priv = get_priv(pkt);
-			sa = priv->sa;
-			ret = xform_func(pkt, sa, &priv->cop);
-			if (unlikely(ret)) {
-				rte_pktmbuf_free(pkt);
-				continue;
-			}
-			pkts[nb_pkts++] = pkt;
-		}
-
 		if (cqp->in_flight == 0)
 			continue;
@@ -545,6 +560,13 @@ ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
 
 	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);
 
+	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
+}
+
+uint16_t
+ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+		uint16_t len)
+{
 	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
 }
@@ -558,5 +580,12 @@ ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
 
 	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);
 
+	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
+}
+
+uint16_t
+ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+		uint16_t len)
+{
 	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
 }
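
With the patch applied, enqueue and dequeue are independent operations, so
a polling loop can collect completed crypto-ops whenever convenient. A
hedged fragment of a caller (qconf, MAX_PKT_BURST and the contexts are the
app's; this mirrors what drain_inbound/outbound_crypto_queues() do before
splitting and routing the packets):

	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t n;

	/* completed inbound ops: still need the SP check + routing */
	n = ipsec_inbound_cqp_dequeue(&qconf->inbound, pkts, RTE_DIM(pkts));

	/* completed outbound ops: only need routing */
	n = ipsec_outbound_cqp_dequeue(&qconf->outbound, pkts, RTE_DIM(pkts));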

examples/ipsec-secgw/ipsec.h

@@ -186,6 +186,14 @@ uint16_t
 ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
 		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len);
 
+uint16_t
+ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+		uint16_t len);
+
+uint16_t
+ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+		uint16_t len);
+
 static inline uint16_t
 ipsec_metadata_size(void)
 {
@@ -250,4 +258,7 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
 int
 add_dst_ethaddr(uint16_t port, const struct ether_addr *addr);
 
+void
+enqueue_cop_burst(struct cdev_qp *cqp);
+
 #endif /* __IPSEC_H__ */