numam-dpdk/lib/librte_node/ip4_lookup.c
commit 84fb33fec1 (Radu Nicolau): build: remove deprecated cpuflag macros
Replace use of the RTE_MACHINE_CPUFLAG macros with regular compiler
macros, which are more complete than those provided by DPDK, so new
instruction sets can be leveraged without extra work to set them up in
DPDK.

Signed-off-by: Sean Morrissey <sean.morrissey@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: David Marchand <david.marchand@redhat.com>
2020-09-25 11:13:57 +02:00

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(C) 2020 Marvell International Ltd.
 */

#include <arpa/inet.h>
#include <sys/socket.h>

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_ip.h>
#include <rte_lpm.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "rte_node_ip4_api.h"

#include "node_private.h"
#define IPV4_L3FWD_LPM_MAX_RULES 1024
#define IPV4_L3FWD_LPM_NUMBER_TBL8S (1 << 8)

/* IP4 Lookup global data struct */
struct ip4_lookup_node_main {
	struct rte_lpm *lpm_tbl[RTE_MAX_NUMA_NODES];
};

static struct ip4_lookup_node_main ip4_lookup_nm;

#if defined(__ARM_NEON)
#include "ip4_lookup_neon.h"
#elif defined(RTE_ARCH_X86)
#include "ip4_lookup_sse.h"
#else

static uint16_t
ip4_lookup_node_process(struct rte_graph *graph, struct rte_node *node,
			void **objs, uint16_t nb_objs)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	void **to_next, **from;
	uint16_t last_spec = 0;
	struct rte_mbuf *mbuf;
	rte_edge_t next_index;
	struct rte_lpm *lpm;
	uint16_t held = 0;
	uint32_t drop_nh;
	int i, rc;

	/* Speculative next */
	next_index = RTE_NODE_IP4_LOOKUP_NEXT_REWRITE;
	/* Drop node */
	drop_nh = ((uint32_t)RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP) << 16;

	/* Get socket specific LPM from ctx */
	lpm = *((struct rte_lpm **)node->ctx);

	from = objs;

	/* Get stream for the speculated next node */
	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
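
	/*
	 * Walk the burst speculatively: packets whose looked-up edge matches
	 * next_index stay batched; a mismatch flushes the batch to the
	 * speculated stream and enqueues the odd packet on its own edge.
	 */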
	for (i = 0; i < nb_objs; i++) {
		uint32_t next_hop;
		uint16_t next;

		mbuf = (struct rte_mbuf *)objs[i];

		/* Extract the destination IP of the mbuf */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
						   sizeof(struct rte_ether_hdr));
		/* Extract cksum, ttl as ipv4 hdr is in cache */
		node_mbuf_priv1(mbuf)->cksum = ipv4_hdr->hdr_checksum;
		node_mbuf_priv1(mbuf)->ttl = ipv4_hdr->time_to_live;

		rc = rte_lpm_lookup(lpm, rte_be_to_cpu_32(ipv4_hdr->dst_addr),
				    &next_hop);
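		/*
		 * The 24-bit LPM user data packs the next edge in the upper
		 * bits and the next-hop id in the lower 16 bits; see
		 * rte_node_ip4_route_add() below.
		 */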
		next_hop = (rc == 0) ? next_hop : drop_nh;

		node_mbuf_priv1(mbuf)->nh = (uint16_t)next_hop;
		next_hop = next_hop >> 16;
		next = (uint16_t)next_hop;

		if (unlikely(next_index != next)) {
			/* Copy everything successfully speculated so far */
			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			/* Enqueue the mismatching packet on its own edge */
			rte_node_enqueue_x1(graph, node, next, from[0]);
			from += 1;
		} else {
			last_spec += 1;
		}
	}

	/* Home run: every packet took the speculated edge, move the stream */
	if (likely(last_spec == nb_objs)) {
		rte_node_next_stream_move(graph, node, next_index);
		return nb_objs;
	}

	held += last_spec;
	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
	rte_node_next_stream_put(graph, node, next_index, held);

	return nb_objs;
}

#endif

int
rte_node_ip4_route_add(uint32_t ip, uint8_t depth, uint16_t next_hop,
		       enum rte_node_ip4_lookup_next next_node)
{
	char abuf[INET6_ADDRSTRLEN];
	struct in_addr in;
	uint8_t socket;
	uint32_t val;
	int ret;

	in.s_addr = htonl(ip);
	inet_ntop(AF_INET, &in, abuf, sizeof(abuf));

	/* Embed the next node id into the 24-bit next hop */
	val = ((next_node << 16) | next_hop) & ((1ull << 24) - 1);
	node_dbg("ip4_lookup", "LPM: Adding route %s / %d nh (0x%x)", abuf,
		 depth, val);
	for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
		if (!ip4_lookup_nm.lpm_tbl[socket])
			continue;

		ret = rte_lpm_add(ip4_lookup_nm.lpm_tbl[socket],
				  ip, depth, val);
		if (ret < 0) {
			node_err("ip4_lookup",
				 "Unable to add entry %s / %d nh (%x) to LPM table on sock %d, rc=%d\n",
				 abuf, depth, val, socket, ret);
			return ret;
		}
	}

	return 0;
}
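
/*
 * Usage sketch (hypothetical prefix and next-hop id): route 10.0.2.0/24
 * to next-hop 2 through the rewrite node:
 *
 *	rte_node_ip4_route_add(RTE_IPV4(10, 0, 2, 0), 24, 2,
 *			       RTE_NODE_IP4_LOOKUP_NEXT_REWRITE);
 */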

static int
setup_lpm(struct ip4_lookup_node_main *nm, int socket)
{
	struct rte_lpm_config config_ipv4;
	char s[RTE_LPM_NAMESIZE];

	/* One LPM table per socket */
	if (nm->lpm_tbl[socket])
		return 0;

	/* Create the LPM table */
	config_ipv4.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
	config_ipv4.number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S;
	config_ipv4.flags = 0;
	snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socket);
	nm->lpm_tbl[socket] = rte_lpm_create(s, socket, &config_ipv4);
	if (nm->lpm_tbl[socket] == NULL)
		return -rte_errno;

	return 0;
}

static int
ip4_lookup_node_init(const struct rte_graph *graph, struct rte_node *node)
{
	struct rte_lpm **lpm_p = (struct rte_lpm **)&node->ctx;
	uint16_t socket, lcore_id;
	static uint8_t init_once;
	int rc;

	if (!init_once) {
		/* Setup LPM tables for all sockets */
		RTE_LCORE_FOREACH(lcore_id)
		{
			socket = rte_lcore_to_socket_id(lcore_id);
			rc = setup_lpm(&ip4_lookup_nm, socket);
			if (rc) {
				node_err("ip4_lookup",
					 "Failed to setup lpm tbl for sock %u, rc=%d",
					 socket, rc);
				return rc;
			}
		}
		init_once = 1;
	}

	/* Stash this graph's per-socket LPM table in the node context */
	*lpm_p = ip4_lookup_nm.lpm_tbl[graph->socket];

	node_dbg("ip4_lookup", "Initialized ip4_lookup node");

	return 0;
}

static struct rte_node_register ip4_lookup_node = {
	.process = ip4_lookup_node_process,
	.name = "ip4_lookup",

	.init = ip4_lookup_node_init,

	.nb_edges = RTE_NODE_IP4_LOOKUP_NEXT_MAX,
	.next_nodes = {
		[RTE_NODE_IP4_LOOKUP_NEXT_REWRITE] = "ip4_rewrite",
		[RTE_NODE_IP4_LOOKUP_NEXT_PKT_DROP] = "pkt_drop",
	},
};

RTE_NODE_REGISTER(ip4_lookup_node);
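
/*
 * Usage sketch (assumed application-side code, not part of this library):
 * the node registered above is pulled into a graph by name, e.g.:
 *
 *	static const char *patterns[] = {
 *		"ip4_lookup", "ip4_rewrite", "pkt_drop",
 *	};
 *	struct rte_graph_param gp = {
 *		.node_patterns = patterns,
 *		.nb_node_patterns = RTE_DIM(patterns),
 *	};
 *	rte_graph_create("graph0", &gp);
 */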