examples/ipsec-secgw: support per SA HW reassembly

This adds support for hardware reassembly on a per-SA basis.
In the SA rule, a new parameter reassembly_en is added to enable
HW reassembly for that SA.
For example:
sa in <idx> aead_algo <algo> aead_key <key> mode ipv4-tunnel src <ip>
dst <ip> type inline-protocol-offload port_id <id> reassembly_en

The stats counter frag_dropped reports the number of fragments
dropped due to reassembly failures.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
This commit is contained in:
Rahul Bhansali 2022-10-27 16:35:01 +05:30 committed by Akhil Goyal
parent 8799d66e65
commit d8d51d4f9b
8 changed files with 129 additions and 12 deletions

View File

@ -538,7 +538,7 @@ The SA rule syntax is shown as follows:
sa <dir> <spi> <cipher_algo> <cipher_key> <auth_algo> <auth_key>
<mode> <src_ip> <dst_ip> <action_type> <port_id> <fallback>
<flow-direction> <port_id> <queue_id> <udp-encap>
<flow-direction> <port_id> <queue_id> <udp-encap> <reassembly_en>
where each option means:
@ -794,6 +794,16 @@ where each options means:
* *esn N* N is the initial ESN value
``<reassembly_en>``
* Option to enable HW reassembly per SA.
* Optional: Yes, it is disabled by default
* Syntax:
* *reassembly_en*
Example SA rules:
.. code-block:: console

View File

@ -274,6 +274,7 @@ static void
print_stats_cb(__rte_unused void *param)
{
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
uint64_t total_frag_packets_dropped = 0;
float burst_percent, rx_per_call, tx_per_call;
unsigned int coreid;
@ -303,6 +304,7 @@ print_stats_cb(__rte_unused void *param)
"\nPackets received: %20"PRIu64
"\nPackets sent: %24"PRIu64
"\nPackets dropped: %21"PRIu64
"\nFrag Packets dropped: %16"PRIu64
"\nBurst percent: %23.2f"
"\nPackets per Rx call: %17.2f"
"\nPackets per Tx call: %17.2f",
@ -310,21 +312,25 @@ print_stats_cb(__rte_unused void *param)
core_statistics[coreid].rx,
core_statistics[coreid].tx,
core_statistics[coreid].dropped,
core_statistics[coreid].frag_dropped,
burst_percent,
rx_per_call,
tx_per_call);
total_packets_dropped += core_statistics[coreid].dropped;
total_frag_packets_dropped += core_statistics[coreid].frag_dropped;
total_packets_tx += core_statistics[coreid].tx;
total_packets_rx += core_statistics[coreid].rx;
}
printf("\nAggregate statistics ==============================="
"\nTotal packets received: %14"PRIu64
"\nTotal packets sent: %18"PRIu64
"\nTotal packets dropped: %15"PRIu64,
"\nTotal packets dropped: %15"PRIu64
"\nTotal frag packets dropped: %10"PRIu64,
total_packets_rx,
total_packets_tx,
total_packets_dropped);
total_packets_dropped,
total_frag_packets_dropped);
printf("\n====================================================\n");
rte_eal_alarm_set(stats_interval * US_PER_S, print_stats_cb, NULL);
@ -1867,7 +1873,8 @@ parse_ptype_cb(uint16_t port __rte_unused, uint16_t queue __rte_unused,
}
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads,
uint8_t hw_reassembly)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
@ -1877,6 +1884,7 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
struct lcore_conf *qconf;
struct rte_ether_addr ethaddr;
struct rte_eth_conf local_port_conf = port_conf;
struct rte_eth_ip_reassembly_params reass_capa = {0};
int ptype_supported;
ret = rte_eth_dev_info_get(portid, &dev_info);
@ -2053,6 +2061,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
}
}
if (hw_reassembly) {
rte_eth_ip_reassembly_capability_get(portid, &reass_capa);
reass_capa.timeout_ms = frag_ttl_ns;
rte_eth_ip_reassembly_conf_set(portid, &reass_capa);
}
printf("\n");
}
@ -2598,6 +2612,7 @@ update_lcore_statistics(struct ipsec_core_statistics *total, uint32_t coreid)
total->rx = lcore_stats->rx;
total->dropped = lcore_stats->dropped;
total->frag_dropped = lcore_stats->frag_dropped;
total->tx = lcore_stats->tx;
/* outbound stats */
@ -2876,6 +2891,7 @@ main(int32_t argc, char **argv)
uint16_t portid, nb_crypto_qp, nb_ports = 0;
uint64_t req_rx_offloads[RTE_MAX_ETHPORTS];
uint64_t req_tx_offloads[RTE_MAX_ETHPORTS];
uint8_t req_hw_reassembly[RTE_MAX_ETHPORTS];
struct eh_conf *eh_conf = NULL;
uint32_t ipv4_cksum_port_mask = 0;
size_t sess_sz;
@ -2993,10 +3009,10 @@ main(int32_t argc, char **argv)
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
sa_check_offloads(portid, &req_rx_offloads[portid],
&req_tx_offloads[portid]);
port_init(portid, req_rx_offloads[portid],
req_tx_offloads[portid]);
sa_check_offloads(portid, &req_rx_offloads[portid], &req_tx_offloads[portid],
&req_hw_reassembly[portid]);
port_init(portid, req_rx_offloads[portid], req_tx_offloads[portid],
req_hw_reassembly[portid]);
if ((req_tx_offloads[portid] & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
ipv4_cksum_port_mask |= 1U << portid;
}

View File

@ -5,6 +5,7 @@
#define _IPSEC_SECGW_H_
#include <stdbool.h>
#include <rte_ethdev.h>
#define MAX_RX_QUEUE_PER_LCORE 16
@ -103,6 +104,7 @@ struct ipsec_core_statistics {
uint64_t rx_call;
uint64_t tx_call;
uint64_t dropped;
uint64_t frag_dropped;
uint64_t burst_rx;
struct {
@ -142,7 +144,8 @@ extern volatile bool force_quit;
extern uint32_t nb_bufs_in_pool;
extern bool per_port_pool;
extern int ip_reassembly_dynfield_offset;
extern uint64_t ip_reassembly_dynflag;
extern uint32_t mtu_size;
extern uint32_t frag_tbl_sz;
extern uint32_t qp_desc_nb;
@ -187,6 +190,44 @@ core_stats_update_drop(int n)
core_statistics[lcore_id].dropped += n;
}
/* Credit n dropped reassembly fragments to the current lcore's stats. */
static inline void
core_stats_update_frag_drop(int n)
{
	const int lcore = rte_lcore_id();

	core_statistics[lcore].frag_dropped += n;
}
/*
 * Test whether the mbuf carries the IP-reassembly-incomplete dynflag.
 * Returns 1 when the flag is set, 0 when clear, and -1 when the
 * dynflag has not been registered (ip_reassembly_dynflag == 0).
 */
static inline int
is_ip_reassembly_incomplete(struct rte_mbuf *m)
{
	const uint64_t flag = ip_reassembly_dynflag;

	if (flag == 0)
		return -1;

	return (m->ol_flags & flag) ? 1 : 0;
}
/*
 * Free a packet whose HW IP reassembly did not complete.
 *
 * When the reassembly dynfield is registered, each fragment's dynfield
 * links to the next fragment, so the whole chain is walked and freed and
 * the per-lcore frag_dropped counter is bumped by the fragment count.
 * Otherwise only the single mbuf is freed and counted as a plain drop.
 */
static inline void
free_reassembly_fail_pkt(struct rte_mbuf *mb)
{
	if (ip_reassembly_dynfield_offset >= 0) {
		rte_eth_ip_reassembly_dynfield_t dynfield;
		uint32_t frag_cnt = 0;

		while (mb) {
			/* Copy the dynfield before freeing: it holds next_frag. */
			dynfield = *RTE_MBUF_DYNFIELD(mb,
					ip_reassembly_dynfield_offset,
					rte_eth_ip_reassembly_dynfield_t *);
			rte_pktmbuf_free(mb);
			mb = dynfield.next_frag;
			frag_cnt++;
		}
		core_stats_update_frag_drop(frag_cnt);
	} else {
		/* No dynfield registered: cannot walk fragments. */
		rte_pktmbuf_free(mb);
		core_stats_update_drop(1);
	}
}
/* helper routine to free bulk of packets */
static inline void
free_pkts(struct rte_mbuf *mb[], uint32_t n)

View File

@ -53,6 +53,8 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
ipsec->replay_win_sz = app_sa_prm.window_size;
ipsec->options.esn = app_sa_prm.enable_esn;
ipsec->options.udp_encap = sa->udp_encap;
if (IS_HW_REASSEMBLY_EN(sa->flags))
ipsec->options.ip_reassembly_en = 1;
}
int

View File

@ -132,6 +132,7 @@ struct ipsec_sa {
#define IP4_TRANSPORT (1 << 3)
#define IP6_TRANSPORT (1 << 4)
#define SA_TELEMETRY_ENABLE (1 << 5)
#define SA_REASSEMBLY_ENABLE (1 << 6)
struct ip_addr src;
struct ip_addr dst;
@ -208,6 +209,7 @@ struct ipsec_mbuf_metadata {
#define IS_IP6_TUNNEL(flags) ((flags) & IP6_TUNNEL)
#define IS_HW_REASSEMBLY_EN(flags) ((flags) & SA_REASSEMBLY_ENABLE)
/*
* Macro for getting ipsec_sa flags statuses without version of protocol
* used for transport (IP4_TRANSPORT and IP6_TRANSPORT flags).
@ -434,7 +436,7 @@ rt_init(struct socket_ctx *ctx, int32_t socket_id);
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
uint64_t *tx_offloads);
uint64_t *tx_offloads, uint8_t *hw_reassembly);
int
add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr);

View File

@ -25,6 +25,9 @@ struct port_drv_mode_data {
typedef void (*ipsec_worker_fn_t)(void);
int ip_reassembly_dynfield_offset = -1;
uint64_t ip_reassembly_dynflag;
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
@ -417,6 +420,10 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
/* Get pkt from event */
pkt = ev->mbuf;
if (is_ip_reassembly_incomplete(pkt) > 0) {
free_reassembly_fail_pkt(pkt);
return PKT_DROPPED;
}
/* Check the packet type */
type = process_ipsec_get_pkt_type(pkt, &nlp);
@ -526,7 +533,6 @@ process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
return PKT_FORWARDED;
drop_pkt_and_exit:
RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
free_pkts(&pkt, 1);
ev->mbuf = NULL;
return PKT_DROPPED;
@ -859,6 +865,10 @@ process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
for (i = 0; i < vec->nb_elem; i++) {
/* Get pkt from event */
pkt = vec->mbufs[i];
if (is_ip_reassembly_incomplete(pkt) > 0) {
free_reassembly_fail_pkt(pkt);
continue;
}
if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
if (unlikely(pkt->ol_flags &
@ -1150,6 +1160,23 @@ ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS 2
/*
 * Cache the mbuf dynamic field offset and dynamic flag used by HW IP
 * reassembly, looking each up only if it has not been resolved yet.
 */
static void
ipsec_ip_reassembly_dyn_offset_get(void)
{
	int flag_offset;

	if (ip_reassembly_dynfield_offset < 0)
		ip_reassembly_dynfield_offset = rte_mbuf_dynfield_lookup(
				RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);

	if (ip_reassembly_dynflag != 0)
		return;

	flag_offset = rte_mbuf_dynflag_lookup(
			RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
	if (flag_offset >= 0)
		ip_reassembly_dynflag = RTE_BIT64(flag_offset);
}
/*
* Event mode worker
* Operating parameters : non-burst - Tx internal port - driver mode
@ -1338,6 +1365,8 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
links[0].event_port_id);
ipsec_ip_reassembly_dyn_offset_get();
while (!force_quit) {
/* Read packet from event queues */
nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
@ -1606,6 +1635,8 @@ ipsec_poll_mode_wrkr_inl_pr(void)
lcore_id, portid, queueid);
}
ipsec_ip_reassembly_dyn_offset_get();
while (!force_quit) {
cur_tsc = rte_rdtsc();

View File

@ -130,6 +130,11 @@ prepare_one_packet(struct rte_security_ctx *ctx, struct rte_mbuf *pkt,
uint64_t tx_offload;
uint16_t l3len;
if (is_ip_reassembly_incomplete(pkt) > 0) {
free_reassembly_fail_pkt(pkt);
return;
}
tun_type = ptype & RTE_PTYPE_TUNNEL_MASK;
l3_type = ptype & RTE_PTYPE_L3_MASK;

View File

@ -786,6 +786,11 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
continue;
}
if (strcmp(tokens[ti], "reassembly_en") == 0) {
rule->flags |= SA_REASSEMBLY_ENABLE;
continue;
}
if (strcmp(tokens[ti], "esn") == 0) {
INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
if (status->status < 0)
@ -1813,7 +1818,7 @@ outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
*/
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
uint64_t *tx_offloads)
uint64_t *tx_offloads, uint8_t *hw_reassembly)
{
struct ipsec_sa *rule;
uint32_t idx_sa;
@ -1839,6 +1844,11 @@ sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
&& rule->portid == port_id)
*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
if (IS_HW_REASSEMBLY_EN(rule->flags)) {
*rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
*tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
*hw_reassembly = 1;
}
}
/* Check for outbound rules that use offloads and use this port */