Bring DPDK route lookups to FreeBSD.

This change introduces loadable FIB lookup modules based on the
 DPDK rte_lpm library, targeted at high-speed lookups in large-scale tables.
It is based on the lookup framework described in D27401.

The IPv4 module is called dpdk_lpm4. It wraps around the rte_lpm [1] library,
 which implements a variation of the DIR24-8 [2] lookup algorithm.
The module provides lockless route lookups and in-place incremental updates,
 allowing for good RIB performance.
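
For illustration, here is a minimal, simplified sketch of the DIR24-8 idea
 (this is not the rte_lpm data layout; the struct and function names are made
 up for this example): the top 24 bits of the destination index a 2^24-entry
 table, and entries covering prefixes longer than /24 point into 256-entry
 "tbl8" extension tables indexed by the last octet.

#include <stdint.h>

struct dir24_8_entry {
	uint32_t next_hop : 24;	/* nexthop index, or tbl8 group number */
	uint32_t is_tbl8 : 1;	/* set when the entry points into a tbl8 group */
};

static inline uint32_t
dir24_8_lookup(const struct dir24_8_entry *tbl24,
    const struct dir24_8_entry *tbl8, uint32_t dst)	/* dst in host order */
{
	struct dir24_8_entry e = tbl24[dst >> 8];

	if (e.is_tbl8)
		e = tbl8[e.next_hop * 256 + (dst & 0xff)];
	return (e.next_hop);	/* most lookups cost a single memory access */
}

The common case is a single, often cache-resident, memory access per lookup,
 which is what makes the scheme attractive for large tables.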

The IPv6 module is called dpdk_lpm6. It wraps around the rte_lpm6 [3] library.
The implementation can be seen as a multi-bit trie where the stride, or number
 of bits inspected at each level, varies from level to level.
A lookup can take from 1 to 14 memory accesses, with 5 being the average
 for the prefix lengths most commonly used in IPv6.
The module provides lockless route lookups for global unicast addresses
 and in-place incremental updates, allowing for good RIB performance.
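
In the rte_lpm6 layout the first level consumes 24 bits of the address and
 every subsequent level 8 bits, which is where the 1-to-14 bound comes from
 ((128 - 24) / 8 = 13 additional levels). As a rough illustration, a generic
 variable-stride multi-bit trie lookup could look like the sketch below (this
 is not the rte_lpm6 code; all names are illustrative):

#include <stddef.h>
#include <stdint.h>

struct trie_node {
	uint8_t stride;			/* bits consumed at this level */
	struct trie_node **child;	/* 2^stride children, NULL for a leaf */
	uint32_t next_hop;		/* valid for leaf nodes */
};

static uint32_t
trie_lookup(const struct trie_node *n, const uint8_t key[16])
{
	unsigned int bitpos = 0;

	while (n->child != NULL) {
		unsigned int idx = 0;

		/* Consume n->stride bits of the key, MSB first. */
		for (unsigned int i = 0; i < n->stride; i++, bitpos++)
			idx = (idx << 1) |
			    ((key[bitpos / 8] >> (7 - bitpos % 8)) & 1);
		n = n->child[idx];
	}
	return (n->next_hop);
}

Each level costs one memory access, so a lookup touches as many nodes as
 there are levels on its path.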

Implementation details:
* Wrapper code lives in `sys/contrib/dpdk_rte_lpm/dpdk_lpm[6].c`.
* The rte_lpm[6] implementation contains both RIB and FIB code.
 . The RIB ("rule_") part, backed by an array of hash tables, has been commented out,
  as the base radix trie already provides all the necessary primitives.
* Link-local lookups are currently handled by a plain base radix lookup.
 This part should be converted to something like a read-only radix trie.

Usage details:
Compile the kernel with option FIB_ALGO and load the dpdk_lpm4/dpdk_lpm6
 modules (e.g. with kldload(8)) at any time. They will be picked up
 automatically once the number of routes grows to several thousand.

[1]: https://doc.dpdk.org/guides/prog_guide/lpm_lib.html
[2]: http://yuba.stanford.edu/~nickm/papers/Infocom98_lookup.pdf
[3]: https://doc.dpdk.org/guides/prog_guide/lpm6_lib.html

Differential Revision: https://reviews.freebsd.org/D27412
Author: Alexander V. Chernikov
Date:   2021-01-09 12:08:00 +00:00
Parent: a898ee51c4
Commit: 537d134373
17 changed files with 6030 additions and 0 deletions

sys/contrib/dpdk_rte_lpm/dpdk_lpm.c (new file)
@@ -0,0 +1,423 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2020 Alexander V. Chernikov
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <net/vnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>
#include <netinet/in_fib.h>
#include <netinet/ip.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/route/route_ctl.h>
#include <net/route/fib_algo.h>
#include "rte_shim.h"
#include "rte_lpm.h"
#define LPM_MIN_TBL8 8 /* 2 pages of memory */
#define LPM_MAX_TBL8 65536 * 16 /* 256M */
MALLOC_DECLARE(M_RTABLE);
struct dpdk_lpm_data {
struct rte_lpm *lpm;
uint64_t routes_added;
uint64_t routes_failed;
uint32_t number_tbl8s;
uint32_t fibnum;
uint8_t hit_tables;
uint8_t hit_records;
struct fib_data *fd;
};
/*
* Main datapath routing
*/
static struct nhop_object *
lookup_ptr(void *algo_data, const struct flm_lookup_key key, uint32_t scopeid)
{
struct rte_lpm *lpm;
const struct rte_lpm_external *rte_ext;
uint32_t nhidx = 0;
int ret;
lpm = (struct rte_lpm *)algo_data;
rte_ext = (const struct rte_lpm_external *)lpm;
ret = rte_lpm_lookup(lpm, ntohl(key.addr4.s_addr), &nhidx);
if (ret == 0) {
/* Success! */
return (rte_ext->nh_idx[nhidx]);
} else {
/* Not found. Check default route */
return (rte_ext->nh_idx[rte_ext->default_idx]);
}
return (NULL);
}
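/*
 * Preference callback: for every RIB the fib_algo framework compares the
 * values returned by each registered module's flm_get_pref() (higher is
 * better) and switches to the winner, so this module only takes over once
 * the table has grown to a few thousand prefixes.
 */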
static uint8_t
rte_get_pref(const struct rib_rtable_info *rinfo)
{
if (rinfo->num_prefixes < 10)
return (1);
else if (rinfo->num_prefixes < 1000)
return (rinfo->num_prefixes / 10);
else if (rinfo->num_prefixes < 500000)
return (100 + rinfo->num_prefixes / 3334);
else
return (250);
}
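/*
 * The default (/0) route is not kept in the LPM table itself (rte_lpm
 * depths start at /1); its nexthop index lives in struct rte_lpm_external
 * and is consulted by lookup_ptr() when the table lookup misses.
 */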
static enum flm_op_result
handle_default_change(struct dpdk_lpm_data *dd, struct rib_cmd_info *rc)
{
struct rte_lpm_external *rte_ext;
rte_ext = (struct rte_lpm_external *)dd->lpm;
if (rc->rc_cmd != RTM_DELETE) {
/* Reference new */
uint32_t nhidx = fib_get_nhop_idx(dd->fd, rc->rc_nh_new);
if (nhidx == 0)
return (FLM_REBUILD);
rte_ext->default_idx = nhidx;
} else {
/* No default route */
rte_ext->default_idx = 0;
}
return (FLM_SUCCESS);
}
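/*
 * With the rte_lpm "rule_" database compiled out (the kernel RIB already
 * tracks every prefix), a deletion has to find the covering prefix via the
 * radix RIB so that the vacated tbl24/tbl8 entries can be repointed to the
 * parent's nexthop.
 */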
static void
get_parent_rule(struct dpdk_lpm_data *dd, struct in_addr addr, uint8_t *plen, uint32_t *nhop_idx)
{
struct route_nhop_data rnd;
struct rtentry *rt;
rt = fib4_lookup_rt(dd->fibnum, addr, 0, NHR_UNLOCKED, &rnd);
if (rt != NULL) {
struct in_addr addr4;
uint32_t scopeid;
int inet_plen;
rt_get_inet_prefix_plen(rt, &addr4, &inet_plen, &scopeid);
if (inet_plen > 0) {
*plen = inet_plen;
*nhop_idx = fib_get_nhop_idx(dd->fd, rnd.rnd_nhop);
return;
}
}
*nhop_idx = 0;
*plen = 0;
}
static enum flm_op_result
handle_gu_change(struct dpdk_lpm_data *dd, const struct rib_cmd_info *rc,
const struct in_addr addr, int plen)
{
uint32_t nhidx = 0;
int ret;
char abuf[INET_ADDRSTRLEN];
uint32_t ip;
ip = ntohl(addr.s_addr);
inet_ntop(AF_INET, &addr, abuf, sizeof(abuf));
/* So we get sin, plen and nhidx */
if (rc->rc_cmd != RTM_DELETE) {
/*
* Addition or change. Save nhop in the internal table
* and get index.
*/
nhidx = fib_get_nhop_idx(dd->fd, rc->rc_nh_new);
if (nhidx == 0) {
FIB_PRINTF(LOG_INFO, dd->fd, "nhop limit reached, need rebuild");
return (FLM_REBUILD);
}
ret = rte_lpm_add(dd->lpm, ip, plen, nhidx);
FIB_PRINTF(LOG_DEBUG, dd->fd, "DPDK GU: %s %s/%d nhop %u = %d",
(rc->rc_cmd == RTM_ADD) ? "ADD" : "UPDATE",
abuf, plen, nhidx, ret);
} else {
/*
* Need to lookup parent. Assume deletion happened already
*/
uint8_t parent_plen;
uint32_t parent_nhop_idx;
get_parent_rule(dd, addr, &parent_plen, &parent_nhop_idx);
ret = rte_lpm_delete(dd->lpm, ip, plen, parent_plen, parent_nhop_idx);
FIB_PRINTF(LOG_DEBUG, dd->fd, "DPDK: %s %s/%d nhop %u = %d",
"DEL", abuf, plen, nhidx, ret);
}
if (ret != 0) {
FIB_PRINTF(LOG_INFO, dd->fd, "error: %d", ret);
if (ret == -ENOSPC)
return (FLM_REBUILD);
return (FLM_ERROR);
}
return (FLM_SUCCESS);
}
static enum flm_op_result
handle_rtable_change_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
void *_data)
{
struct dpdk_lpm_data *dd;
enum flm_op_result ret;
struct in_addr addr4;
uint32_t scopeid;
int plen;
dd = (struct dpdk_lpm_data *)_data;
rt_get_inet_prefix_plen(rc->rc_rt, &addr4, &plen, &scopeid);
if (plen != 0)
ret = handle_gu_change(dd, rc, addr4, plen);
else
ret = handle_default_change(dd, rc);
if (ret != 0)
FIB_PRINTF(LOG_INFO, dd->fd, "error handling route");
return (ret);
}
static void
destroy_table(void *_data)
{
struct dpdk_lpm_data *dd = (struct dpdk_lpm_data *)_data;
if (dd->lpm != NULL)
rte_lpm_free(dd->lpm);
free(dd, M_RTABLE);
}
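/*
 * Rebuild/sync path: the framework calls init_table(), then add_route_cb()
 * for every prefix in the RIB, and finally check_dump_success(), which
 * publishes the datapath function and its argument if the dump fit into
 * the allocated tables.
 */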
static enum flm_op_result
add_route_cb(struct rtentry *rt, void *_data)
{
struct dpdk_lpm_data *dd = (struct dpdk_lpm_data *)_data;
struct nhop_object *nh;
int plen, ret;
struct in_addr addr4;
uint32_t scopeid;
nh = rt_get_raw_nhop(rt);
rt_get_inet_prefix_plen(rt, &addr4, &plen, &scopeid);
char abuf[INET_ADDRSTRLEN];
inet_ntop(AF_INET, &addr4, abuf, sizeof(abuf));
FIB_PRINTF(LOG_DEBUG, dd->fd, "Operating on %s/%d", abuf, plen);
if (plen == 0) {
struct rib_cmd_info rc = {
.rc_cmd = RTM_ADD,
.rc_nh_new = nh,
};
FIB_PRINTF(LOG_DEBUG, dd->fd, "Adding default route");
return (handle_default_change(dd, &rc));
}
uint32_t nhidx = fib_get_nhop_idx(dd->fd, nh);
if (nhidx == 0) {
FIB_PRINTF(LOG_INFO, dd->fd, "unable to get nhop index");
return (FLM_REBUILD);
}
ret = rte_lpm_add(dd->lpm, ntohl(addr4.s_addr), plen, nhidx);
FIB_PRINTF(LOG_DEBUG, dd->fd, "ADD %p %s/%d nh %u = %d",
dd->lpm, abuf, plen, nhidx, ret);
if (ret != 0) {
FIB_PRINTF(LOG_INFO, dd->fd, "rte_lpm_add() returned %d", ret);
if (ret == -ENOSPC) {
dd->hit_tables = 1;
return (FLM_REBUILD);
}
dd->routes_failed++;
return (FLM_ERROR);
} else
dd->routes_added++;
return (FLM_SUCCESS);
}
static enum flm_op_result
check_dump_success(void *_data, struct fib_dp *dp)
{
struct dpdk_lpm_data *dd;
dd = (struct dpdk_lpm_data *)_data;
FIB_PRINTF(LOG_INFO, dd->fd, "scan completed. added: %zu failed: %zu",
dd->routes_added, dd->routes_failed);
if (dd->hit_tables || dd->routes_failed > 0)
return (FLM_REBUILD);
FIB_PRINTF(LOG_INFO, dd->fd,
"DPDK lookup engine synced with IPv4 RIB id %u, %zu routes",
dd->fibnum, dd->routes_added);
dp->f = lookup_ptr;
dp->arg = dd->lpm;
return (FLM_SUCCESS);
}
static void
estimate_scale(const struct dpdk_lpm_data *dd_src, struct dpdk_lpm_data *dd)
{
/* XXX: update at 75% capacity */
if (dd_src->hit_tables)
dd->number_tbl8s = dd_src->number_tbl8s * 2;
else
dd->number_tbl8s = dd_src->number_tbl8s;
/* TODO: look into the appropriate RIB to adjust */
}
static struct dpdk_lpm_data *
build_table(struct dpdk_lpm_data *dd_prev, struct fib_data *fd)
{
struct dpdk_lpm_data *dd;
struct rte_lpm *lpm;
dd = malloc(sizeof(struct dpdk_lpm_data), M_RTABLE, M_NOWAIT | M_ZERO);
if (dd == NULL) {
FIB_PRINTF(LOG_INFO, fd, "Unable to allocate base datastructure");
return (NULL);
}
dd->fibnum = dd_prev->fibnum;
dd->fd = fd;
estimate_scale(dd_prev, dd);
struct rte_lpm_config cfg = {.number_tbl8s = dd->number_tbl8s};
lpm = rte_lpm_create("test", 0, &cfg);
if (lpm == NULL) {
FIB_PRINTF(LOG_INFO, fd, "unable to create lpm");
free(dd, M_RTABLE);
return (NULL);
}
dd->lpm = lpm;
struct rte_lpm_external *ext = (struct rte_lpm_external *)lpm;
ext->nh_idx = fib_get_nhop_array(dd->fd);
FIB_PRINTF(LOG_INFO, fd, "allocated %u tbl8s", dd->number_tbl8s);
return (dd);
}
static enum flm_op_result
init_table(uint32_t fibnum, struct fib_data *fd, void *_old_data, void **data)
{
struct dpdk_lpm_data *dd, dd_base;
if (_old_data == NULL) {
bzero(&dd_base, sizeof(struct dpdk_lpm_data));
dd_base.fibnum = fibnum;
/* TODO: get rib statistics */
dd_base.number_tbl8s = LPM_MIN_TBL8;
dd = &dd_base;
} else {
FIB_PRINTF(LOG_DEBUG, fd, "Starting with old data");
dd = (struct dpdk_lpm_data *)_old_data;
}
/* Guaranteed to be in epoch */
dd = build_table(dd, fd);
if (dd == NULL) {
FIB_PRINTF(LOG_NOTICE, fd, "table creation failed");
return (FLM_REBUILD);
}
*data = dd;
return (FLM_SUCCESS);
}
static struct fib_lookup_module dpdk_lpm4 = {
.flm_name = "dpdk_lpm4",
.flm_family = AF_INET,
.flm_init_cb = init_table,
.flm_destroy_cb = destroy_table,
.flm_dump_rib_item_cb = add_route_cb,
.flm_dump_end_cb = check_dump_success,
.flm_change_rib_item_cb = handle_rtable_change_cb,
.flm_get_pref = rte_get_pref,
};
static int
lpm4_modevent(module_t mod, int type, void *unused)
{
int error = 0;
switch (type) {
case MOD_LOAD:
fib_module_register(&dpdk_lpm4);
break;
case MOD_UNLOAD:
error = fib_module_unregister(&dpdk_lpm4);
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
static moduledata_t lpm4mod = {
"dpdk_lpm4",
lpm4_modevent,
0
};
DECLARE_MODULE(lpm4mod, lpm4mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(lpm4mod, 1);

sys/contrib/dpdk_rte_lpm/dpdk_lpm6.c (new file)
@@ -0,0 +1,487 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2020 Alexander V. Chernikov
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet6.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <net/vnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_fib.h>
#include <net/route.h>
#include <net/route/nhop.h>
#include <net/route/route_ctl.h>
#include <net/route/fib_algo.h>
#define RTDEBUG
#include "rte_lpm6.h"
#define LPM6_MIN_TBL8 8 /* 2 pages of memory */
#define LPM6_MAX_TBL8 65536 * 16 /* 256M */
struct fib_algo_calldata {
void *lookup;
void *arg;
};
struct dpdk_lpm6_data {
struct rte_lpm6 *lpm6;
uint64_t routes_added;
uint64_t routes_failed;
uint32_t number_tbl8s;
uint32_t fibnum;
uint8_t hit_tables;
struct fib_data *fd;
};
static struct nhop_object *
lookup_ptr_ll(const struct rte_lpm6 *lpm6, const struct in6_addr *dst6,
uint32_t scopeid)
{
const struct rte_lpm6_external *rte_ext;
rte_ext = (const struct rte_lpm6_external *)lpm6;
return (fib6_radix_lookup_nh(rte_ext->fibnum, dst6, scopeid));
}
/*
* Main datapath routing
*/
static struct nhop_object *
lookup_ptr(void *algo_data, const struct flm_lookup_key key, uint32_t scopeid)
{
const struct rte_lpm6 *lpm6;
const struct rte_lpm6_external *rte_ext;
const struct in6_addr *addr6;
uint32_t nhidx = 0;
int ret;
lpm6 = (const struct rte_lpm6 *)algo_data;
addr6 = key.addr6;
rte_ext = (const struct rte_lpm6_external *)lpm6;
if (!IN6_IS_SCOPE_LINKLOCAL(addr6)) {
ret = rte_lpm6_lookup(lpm6, (const uint8_t *)addr6, &nhidx);
if (ret == 0) {
/* Success! */
return (rte_ext->nh_idx[nhidx]);
} else {
/* Not found. Check default route */
if (rte_ext->default_idx > 0)
return (rte_ext->nh_idx[rte_ext->default_idx]);
else
return (NULL);
}
} else {
/* LL */
return (lookup_ptr_ll(lpm6, addr6, scopeid));
}
}
static uint8_t
rte6_get_pref(const struct rib_rtable_info *rinfo)
{
if (rinfo->num_prefixes < 10)
return (1);
else if (rinfo->num_prefixes < 1000)
return (rinfo->num_prefixes / 10);
else if (rinfo->num_prefixes < 500000)
return (100 + rinfo->num_prefixes / 3334);
else
return (250);
}
static enum flm_op_result
handle_default_change(struct dpdk_lpm6_data *dd, struct rib_cmd_info *rc)
{
struct rte_lpm6_external *rte_ext;
rte_ext = (struct rte_lpm6_external *)dd->lpm6;
if (rc->rc_cmd != RTM_DELETE) {
/* Reference new */
uint32_t nhidx = fib_get_nhop_idx(dd->fd, rc->rc_nh_new);
if (nhidx == 0)
return (FLM_REBUILD);
rte_ext->default_idx = nhidx;
} else {
/* No default route */
rte_ext->default_idx = 0;
}
return (FLM_SUCCESS);
}
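/*
 * Link-local changes are a no-op for the LPM table: lookup_ptr() diverts
 * all link-local destinations to the base radix RIB via lookup_ptr_ll(),
 * so there is nothing to update here.
 */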
static enum flm_op_result
handle_ll_change(struct dpdk_lpm6_data *dd, struct rib_cmd_info *rc,
const struct in6_addr addr6, int plen, uint32_t scopeid)
{
return (FLM_SUCCESS);
}
static struct rte_lpm6_rule *
pack_parent_rule(struct dpdk_lpm6_data *dd, const struct in6_addr *addr6,
char *buffer)
{
struct rte_lpm6_rule *lsp_rule = NULL;
struct route_nhop_data rnd;
struct rtentry *rt;
int plen;
rt = fib6_lookup_rt(dd->fibnum, addr6, 0, NHR_UNLOCKED, &rnd);
/* plen = 0 means default route and it's out of scope */
if (rt != NULL) {
uint32_t scopeid;
struct in6_addr new_addr6;
rt_get_inet6_prefix_plen(rt, &new_addr6, &plen, &scopeid);
if (plen > 0) {
uint32_t nhidx = fib_get_nhop_idx(dd->fd, rnd.rnd_nhop);
if (nhidx == 0) {
/*
* shouldn't happen as we already have parent route.
* It will trigger rebuild automatically.
*/
return (NULL);
}
lsp_rule = fill_rule6(buffer, (uint8_t *)&new_addr6, plen, nhidx);
}
}
return (lsp_rule);
}
static enum flm_op_result
handle_gu_change(struct dpdk_lpm6_data *dd, const struct rib_cmd_info *rc,
const struct in6_addr *addr6, int plen)
{
int ret;
char abuf[INET6_ADDRSTRLEN];
inet_ntop(AF_INET6, addr6, abuf, sizeof(abuf));
/* So we get sin6, plen and nhidx */
if (rc->rc_cmd != RTM_DELETE) {
/*
* Addition or change. Save nhop in the internal table
* and get index.
*/
uint32_t nhidx = fib_get_nhop_idx(dd->fd, rc->rc_nh_new);
if (nhidx == 0) {
FIB_PRINTF(LOG_INFO, dd->fd, "nhop limit reached, need rebuild");
return (FLM_REBUILD);
}
ret = rte_lpm6_add(dd->lpm6, (const uint8_t *)addr6,
plen, nhidx, (rc->rc_cmd == RTM_ADD) ? 1 : 0);
FIB_PRINTF(LOG_DEBUG, dd->fd, "DPDK GU: %s %s/%d nhop %u = %d",
(rc->rc_cmd == RTM_ADD) ? "ADD" : "UPDATE",
abuf, plen, nhidx, ret);
} else {
/*
* Need to lookup parent. Assume deletion happened already
*/
char buffer[RTE_LPM6_RULE_SIZE];
struct rte_lpm6_rule *lsp_rule = NULL;
lsp_rule = pack_parent_rule(dd, addr6, buffer);
ret = rte_lpm6_delete(dd->lpm6, (const uint8_t *)addr6, plen, lsp_rule);
FIB_PRINTF(LOG_DEBUG, dd->fd, "DPDK GU: %s %s/%d nhop ? = %d",
"DEL", abuf, plen, ret);
}
if (ret != 0) {
FIB_PRINTF(LOG_INFO, dd->fd, "error: %d", ret);
if (ret == -ENOSPC)
return (FLM_REBUILD);
return (FLM_ERROR);
}
return (FLM_SUCCESS);
}
static enum flm_op_result
handle_any_change(struct dpdk_lpm6_data *dd, struct rib_cmd_info *rc)
{
enum flm_op_result ret;
struct in6_addr addr6;
uint32_t scopeid;
int plen;
rt_get_inet6_prefix_plen(rc->rc_rt, &addr6, &plen, &scopeid);
if (IN6_IS_SCOPE_LINKLOCAL(&addr6))
ret = handle_ll_change(dd, rc, addr6, plen, scopeid);
else if (plen == 0)
ret = handle_default_change(dd, rc);
else
ret = handle_gu_change(dd, rc, &addr6, plen);
if (ret != 0)
FIB_PRINTF(LOG_INFO, dd->fd, "error handling route");
return (ret);
}
static enum flm_op_result
handle_rtable_change_cb(struct rib_head *rnh, struct rib_cmd_info *rc,
void *_data)
{
struct dpdk_lpm6_data *dd;
dd = (struct dpdk_lpm6_data *)_data;
return (handle_any_change(dd, rc));
}
static void
destroy_dd(struct dpdk_lpm6_data *dd)
{
FIB_PRINTF(LOG_INFO, dd->fd, "destroy dd %p", dd);
if (dd->lpm6 != NULL)
rte_lpm6_free(dd->lpm6);
free(dd, M_TEMP);
}
static void
destroy_table(void *_data)
{
destroy_dd((struct dpdk_lpm6_data *)_data);
}
static enum flm_op_result
add_route_cb(struct rtentry *rt, void *_data)
{
struct dpdk_lpm6_data *dd = (struct dpdk_lpm6_data *)_data;
struct in6_addr addr6;
struct nhop_object *nh;
uint32_t scopeid;
int plen;
int ret;
rt_get_inet6_prefix_plen(rt, &addr6, &plen, &scopeid);
nh = rt_get_raw_nhop(rt);
if (IN6_IS_SCOPE_LINKLOCAL(&addr6)) {
/*
* We don't operate on LL directly, however
* reference them to maintain guarantee on
* ability to refcount nhops in epoch.
*/
fib_get_nhop_idx(dd->fd, nh);
return (FLM_SUCCESS);
}
char abuf[INET6_ADDRSTRLEN];
inet_ntop(AF_INET6, &addr6, abuf, sizeof(abuf));
FIB_PRINTF(LOG_DEBUG, dd->fd, "Operating on %s/%d", abuf, plen);
if (plen == 0) {
struct rib_cmd_info rc = {
.rc_cmd = RTM_ADD,
.rc_nh_new = nh,
};
FIB_PRINTF(LOG_DEBUG, dd->fd, "Adding default route");
return (handle_default_change(dd, &rc));
}
uint32_t nhidx = fib_get_nhop_idx(dd->fd, nh);
if (nhidx == 0) {
FIB_PRINTF(LOG_INFO, dd->fd, "unable to get nhop index");
return (FLM_REBUILD);
}
ret = rte_lpm6_add(dd->lpm6, (const uint8_t *)&addr6, plen, nhidx, 1);
FIB_PRINTF(LOG_DEBUG, dd->fd, "ADD %p %s/%d nh %u = %d",
dd->lpm6, abuf, plen, nhidx, ret);
if (ret != 0) {
FIB_PRINTF(LOG_INFO, dd->fd, "rte_lpm6_add() returned %d", ret);
if (ret == -ENOSPC) {
dd->hit_tables = 1;
return (FLM_REBUILD);
}
dd->routes_failed++;
return (FLM_ERROR);
} else
dd->routes_added++;
return (FLM_SUCCESS);
}
static enum flm_op_result
check_dump_success(void *_data, struct fib_dp *dp)
{
struct dpdk_lpm6_data *dd;
dd = (struct dpdk_lpm6_data *)_data;
FIB_PRINTF(LOG_INFO, dd->fd, "scan completed. added: %zu failed: %zu",
dd->routes_added, dd->routes_failed);
if (dd->hit_tables || dd->routes_failed > 0)
return (FLM_REBUILD);
FIB_PRINTF(LOG_INFO, dd->fd,
"DPDK lookup engine synced with IPv6 RIB id %u, %zu routes",
dd->fibnum, dd->routes_added);
dp->f = lookup_ptr;
dp->arg = dd->lpm6;
return (FLM_SUCCESS);
}
static void
estimate_scale(const struct dpdk_lpm6_data *dd_src, struct dpdk_lpm6_data *dd)
{
/* XXX: update at 75% capacity */
if (dd_src->hit_tables)
dd->number_tbl8s = dd_src->number_tbl8s * 2;
else
dd->number_tbl8s = dd_src->number_tbl8s;
/* TODO: look into the appropriate RIB to adjust */
}
static struct dpdk_lpm6_data *
build_table(struct dpdk_lpm6_data *dd_prev, struct fib_data *fd)
{
struct dpdk_lpm6_data *dd;
struct rte_lpm6 *lpm6;
dd = malloc(sizeof(struct dpdk_lpm6_data), M_TEMP, M_NOWAIT | M_ZERO);
if (dd == NULL) {
FIB_PRINTF(LOG_INFO, fd, "Unable to allocate base datastructure");
return (NULL);
}
dd->fibnum = dd_prev->fibnum;
dd->fd = fd;
estimate_scale(dd_prev, dd);
struct rte_lpm6_config cfg = {.number_tbl8s = dd->number_tbl8s};
lpm6 = rte_lpm6_create("test", 0, &cfg);
if (lpm6 == NULL) {
FIB_PRINTF(LOG_INFO, fd, "unable to create lpm6");
free(dd, M_TEMP);
return (NULL);
}
dd->lpm6 = lpm6;
struct rte_lpm6_external *ext = (struct rte_lpm6_external *)lpm6;
ext->nh_idx = fib_get_nhop_array(dd->fd);
FIB_PRINTF(LOG_INFO, fd, "allocated %u tbl8s", dd->number_tbl8s);
return (dd);
}
static enum flm_op_result
init_table(uint32_t fibnum, struct fib_data *fd, void *_old_data, void **data)
{
struct dpdk_lpm6_data *dd, dd_base;
if (_old_data == NULL) {
bzero(&dd_base, sizeof(struct dpdk_lpm6_data));
dd_base.fibnum = fibnum;
/* TODO: get rib statistics */
dd_base.number_tbl8s = LPM6_MIN_TBL8;
dd = &dd_base;
} else {
FIB_PRINTF(LOG_INFO, fd, "Starting with old data");
dd = (struct dpdk_lpm6_data *)_old_data;
}
/* Guaranteed to be in epoch */
dd = build_table(dd, fd);
if (dd == NULL) {
FIB_PRINTF(LOG_INFO, fd, "table creation failed");
return (FLM_REBUILD);
}
*data = dd;
return (FLM_SUCCESS);
}
static struct fib_lookup_module dpdk_lpm6 = {
.flm_name = "dpdk_lpm6",
.flm_family = AF_INET6,
.flm_init_cb = init_table,
.flm_destroy_cb = destroy_table,
.flm_dump_rib_item_cb = add_route_cb,
.flm_dump_end_cb = check_dump_success,
.flm_change_rib_item_cb = handle_rtable_change_cb,
.flm_get_pref = rte6_get_pref,
};
static int
lpm6_modevent(module_t mod, int type, void *unused)
{
int error = 0;
switch (type) {
case MOD_LOAD:
fib_module_register(&dpdk_lpm6);
break;
case MOD_UNLOAD:
error = fib_module_unregister(&dpdk_lpm6);
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
static moduledata_t lpm6mod = {
"dpdk_lpm6",
lpm6_modevent,
0
};
DECLARE_MODULE(lpm6mod, lpm6mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(lpm6mod, 1);

dpdk_lpm6.h (new file)
@@ -0,0 +1,57 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2020 Alexander V. Chernikov
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Contains various definitions shared between the parts of a routing subsystem.
*
*/
#ifndef _NETINET6_DPDK_LPM6_H_
#define _NETINET6_DPDK_LPM6_H_
/** LPM structure. */
struct rte_lpm6;
/** LPM configuration structure. */
struct rte_lpm6_config {
uint32_t max_rules; /**< Max number of rules. */
uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */
int flags; /**< This field is currently unused. */
};
struct rte_lpm6 *
rte_lpm6_create(const char *name, int socket_id,
const struct rte_lpm6_config *config);
void
rte_lpm6_free(struct rte_lpm6 *lpm);
int
rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
uint32_t next_hop, int is_new_rule);
#endif

rte_branch_prediction.h (new file)
@@ -0,0 +1,41 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
*/
/**
* @file
* Branch Prediction Helpers in RTE
*/
#ifndef _RTE_BRANCH_PREDICTION_H_
#define _RTE_BRANCH_PREDICTION_H_
/**
* Check if a branch is likely to be taken.
*
* This compiler builtin allows the developer to indicate if a branch is
* likely to be taken. Example:
*
* if (likely(x > 1))
* do_stuff();
*
*/
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif /* likely */
/**
* Check if a branch is unlikely to be taken.
*
* This compiler builtin allows the developer to indicate if a branch is
* unlikely to be taken. Example:
*
* if (unlikely(x < 1))
* do_stuff();
*
*/
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif /* unlikely */
#endif /* _RTE_BRANCH_PREDICTION_H_ */

rte_common.h (new file)
@@ -0,0 +1,838 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2019 Intel Corporation
*/
#ifndef _RTE_COMMON_H_
#define _RTE_COMMON_H_
/**
* @file
*
* Generic, commonly-used macro and inline function definitions
* for DPDK.
*/
#ifdef __cplusplus
extern "C" {
#endif
//#include <rte_config.h>
/* OS specific include */
//#include <rte_os.h>
#ifndef typeof
#define typeof __typeof__
#endif
#ifndef asm
#define asm __asm__
#endif
/** C extension macro for environments lacking C11 features. */
#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L
#define RTE_STD_C11 __extension__
#else
#define RTE_STD_C11
#endif
/*
* RTE_TOOLCHAIN_GCC is defined if the target is built with GCC,
* while a host application (like pmdinfogen) may have another compiler.
* RTE_CC_IS_GNU is true if the file is compiled with GCC,
* no matter it is a target or host application.
*/
#define RTE_CC_IS_GNU 0
#if defined __clang__
#define RTE_CC_CLANG
#elif defined __INTEL_COMPILER
#define RTE_CC_ICC
#elif defined __GNUC__
#define RTE_CC_GCC
#undef RTE_CC_IS_GNU
#define RTE_CC_IS_GNU 1
#endif
#if RTE_CC_IS_GNU
#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + \
__GNUC_PATCHLEVEL__)
#endif
/**
* Force alignment
*/
#define __rte_aligned(a) __attribute__((__aligned__(a)))
#ifdef RTE_ARCH_STRICT_ALIGN
typedef uint64_t unaligned_uint64_t __rte_aligned(1);
typedef uint32_t unaligned_uint32_t __rte_aligned(1);
typedef uint16_t unaligned_uint16_t __rte_aligned(1);
#else
typedef uint64_t unaligned_uint64_t;
typedef uint32_t unaligned_uint32_t;
typedef uint16_t unaligned_uint16_t;
#endif
/**
* Force a structure to be packed
*/
#define __rte_packed __attribute__((__packed__))
/******* Macro to mark functions and fields scheduled for removal *****/
#define __rte_deprecated __attribute__((__deprecated__))
/**
* Mark a function or variable to a weak reference.
*/
#define __rte_weak __attribute__((__weak__))
/**
* Force symbol to be generated even if it appears to be unused.
*/
#define __rte_used __attribute__((used))
/*********** Macros to eliminate unused variable warnings ********/
/**
* short definition to mark a function parameter unused
*/
#define __rte_unused __attribute__((__unused__))
/**
* definition to mark a variable or function parameter as used so
* as to avoid a compiler warning
*/
#define RTE_SET_USED(x) (void)(x)
/**
* Check format string and its arguments at compile-time.
*
* GCC on Windows assumes MS-specific format string by default,
* even if the underlying stdio implementation is ANSI-compliant,
* so this must be overridden.
*/
#if RTE_CC_IS_GNU
#define __rte_format_printf(format_index, first_arg) \
__attribute__((format(gnu_printf, format_index, first_arg)))
#else
#define __rte_format_printf(format_index, first_arg) \
__attribute__((format(printf, format_index, first_arg)))
#endif
#define RTE_PRIORITY_LOG 101
#define RTE_PRIORITY_BUS 110
#define RTE_PRIORITY_CLASS 120
#define RTE_PRIORITY_LAST 65535
#define RTE_PRIO(prio) \
RTE_PRIORITY_ ## prio
/**
* Run function before main() with high priority.
*
* @param func
* Constructor function.
* @param prio
* Priority number must be above 100.
* Lowest number is the first to run.
*/
#ifndef RTE_INIT_PRIO /* Allow to override from EAL */
#define RTE_INIT_PRIO(func, prio) \
static void __attribute__((constructor(RTE_PRIO(prio)), used)) func(void)
#endif
/**
* Run function before main() with low priority.
*
* The constructor will be run after prioritized constructors.
*
* @param func
* Constructor function.
*/
#define RTE_INIT(func) \
RTE_INIT_PRIO(func, LAST)
/**
* Run after main() with low priority.
*
* @param func
* Destructor function name.
* @param prio
* Priority number must be above 100.
* Lowest number is the last to run.
*/
#ifndef RTE_FINI_PRIO /* Allow to override from EAL */
#define RTE_FINI_PRIO(func, prio) \
static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void)
#endif
/**
* Run after main() with high priority.
*
* The destructor will be run *before* prioritized destructors.
*
* @param func
* Destructor function name.
*/
#define RTE_FINI(func) \
RTE_FINI_PRIO(func, LAST)
/**
* Hint never returning function
*/
#define __rte_noreturn __attribute__((noreturn))
/**
* Force a function to be inlined
*/
#define __rte_always_inline inline __attribute__((always_inline))
/**
* Force a function to be noinlined
*/
#define __rte_noinline __attribute__((noinline))
/**
* Hint function in the hot path
*/
#define __rte_hot __attribute__((hot))
/**
* Hint function in the cold path
*/
#define __rte_cold __attribute__((cold))
/*********** Macros for pointer arithmetic ********/
/**
* add a byte-value offset to a pointer
*/
#define RTE_PTR_ADD(ptr, x) ((void*)((uintptr_t)(ptr) + (x)))
/**
* subtract a byte-value offset from a pointer
*/
#define RTE_PTR_SUB(ptr, x) ((void*)((uintptr_t)ptr - (x)))
/**
* get the difference between two pointer values, i.e. how far apart
 * in bytes are the locations they point to. It is assumed that
* ptr1 is greater than ptr2.
*/
#define RTE_PTR_DIFF(ptr1, ptr2) ((uintptr_t)(ptr1) - (uintptr_t)(ptr2))
/**
* Workaround to cast a const field of a structure to non-const type.
*/
#define RTE_CAST_FIELD(var, field, type) \
(*(type *)((uintptr_t)(var) + offsetof(typeof(*(var)), field)))
/*********** Macros/static functions for doing alignment ********/
/**
* Macro to align a pointer to a given power-of-two. The resultant
* pointer will be a pointer of the same type as the first parameter, and
* point to an address no higher than the first parameter. Second parameter
* must be a power-of-two value.
*/
#define RTE_PTR_ALIGN_FLOOR(ptr, align) \
((typeof(ptr))RTE_ALIGN_FLOOR((uintptr_t)ptr, align))
/**
* Macro to align a value to a given power-of-two. The resultant value
* will be of the same type as the first parameter, and will be no
* bigger than the first parameter. Second parameter must be a
* power-of-two value.
*/
#define RTE_ALIGN_FLOOR(val, align) \
(typeof(val))((val) & (~((typeof(val))((align) - 1))))
/**
* Macro to align a pointer to a given power-of-two. The resultant
* pointer will be a pointer of the same type as the first parameter, and
* point to an address no lower than the first parameter. Second parameter
* must be a power-of-two value.
*/
#define RTE_PTR_ALIGN_CEIL(ptr, align) \
RTE_PTR_ALIGN_FLOOR((typeof(ptr))RTE_PTR_ADD(ptr, (align) - 1), align)
/**
* Macro to align a value to a given power-of-two. The resultant value
* will be of the same type as the first parameter, and will be no lower
* than the first parameter. Second parameter must be a power-of-two
* value.
*/
#define RTE_ALIGN_CEIL(val, align) \
RTE_ALIGN_FLOOR(((val) + ((typeof(val)) (align) - 1)), align)
/**
* Macro to align a pointer to a given power-of-two. The resultant
* pointer will be a pointer of the same type as the first parameter, and
* point to an address no lower than the first parameter. Second parameter
* must be a power-of-two value.
* This function is the same as RTE_PTR_ALIGN_CEIL
*/
#define RTE_PTR_ALIGN(ptr, align) RTE_PTR_ALIGN_CEIL(ptr, align)
/**
* Macro to align a value to a given power-of-two. The resultant
* value will be of the same type as the first parameter, and
* will be no lower than the first parameter. Second parameter
* must be a power-of-two value.
* This function is the same as RTE_ALIGN_CEIL
*/
#define RTE_ALIGN(val, align) RTE_ALIGN_CEIL(val, align)
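/*
 * Example: RTE_ALIGN(13, 8) == 16 and RTE_ALIGN_FLOOR(13, 8) == 8;
 * RTE_PTR_ALIGN() rounds a pointer up the same way.
 */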
/**
* Macro to align a value to the multiple of given value. The resultant
* value will be of the same type as the first parameter and will be no lower
* than the first parameter.
*/
#define RTE_ALIGN_MUL_CEIL(v, mul) \
(((v + (typeof(v))(mul) - 1) / ((typeof(v))(mul))) * (typeof(v))(mul))
/**
* Macro to align a value to the multiple of given value. The resultant
* value will be of the same type as the first parameter and will be no higher
* than the first parameter.
*/
#define RTE_ALIGN_MUL_FLOOR(v, mul) \
((v / ((typeof(v))(mul))) * (typeof(v))(mul))
/**
* Macro to align value to the nearest multiple of the given value.
* The resultant value might be greater than or less than the first parameter
* whichever difference is the lowest.
*/
#define RTE_ALIGN_MUL_NEAR(v, mul) \
({ \
typeof(v) ceil = RTE_ALIGN_MUL_CEIL(v, mul); \
typeof(v) floor = RTE_ALIGN_MUL_FLOOR(v, mul); \
(ceil - v) > (v - floor) ? floor : ceil; \
})
/**
* Checks if a pointer is aligned to a given power-of-two value
*
* @param ptr
* The pointer whose alignment is to be checked
* @param align
* The power-of-two value to which the ptr should be aligned
*
* @return
* True(1) where the pointer is correctly aligned, false(0) otherwise
*/
static inline int
rte_is_aligned(void *ptr, unsigned align)
{
return RTE_PTR_ALIGN(ptr, align) == ptr;
}
/*********** Macros for compile type checks ********/
/**
* Triggers an error at compilation time if the condition is true.
*/
#define RTE_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
/*********** Cache line related macros ********/
/** Cache line mask. */
#define RTE_CACHE_LINE_MASK (RTE_CACHE_LINE_SIZE-1)
/** Return the first cache-aligned value greater or equal to size. */
#define RTE_CACHE_LINE_ROUNDUP(size) \
(RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / \
RTE_CACHE_LINE_SIZE))
/** Cache line size in terms of log2 */
#if RTE_CACHE_LINE_SIZE == 64
#define RTE_CACHE_LINE_SIZE_LOG2 6
#elif RTE_CACHE_LINE_SIZE == 128
#define RTE_CACHE_LINE_SIZE_LOG2 7
#else
#error "Unsupported cache line size"
#endif
/** Minimum Cache line size. */
#define RTE_CACHE_LINE_MIN_SIZE 64
/** Force alignment to cache line. */
#define __rte_cache_aligned __rte_aligned(RTE_CACHE_LINE_SIZE)
/** Force minimum cache line alignment. */
#define __rte_cache_min_aligned __rte_aligned(RTE_CACHE_LINE_MIN_SIZE)
/*********** PA/IOVA type definitions ********/
/** Physical address */
typedef uint64_t phys_addr_t;
#define RTE_BAD_PHYS_ADDR ((phys_addr_t)-1)
/**
* IO virtual address type.
* When the physical addressing mode (IOVA as PA) is in use,
* the translation from an IO virtual address (IOVA) to a physical address
* is a direct mapping, i.e. the same value.
* Otherwise, in virtual mode (IOVA as VA), an IOMMU may do the translation.
*/
typedef uint64_t rte_iova_t;
#define RTE_BAD_IOVA ((rte_iova_t)-1)
/*********** Structure alignment markers ********/
/** Generic marker for any place in a structure. */
__extension__ typedef void *RTE_MARKER[0];
/** Marker for 1B alignment in a structure. */
__extension__ typedef uint8_t RTE_MARKER8[0];
/** Marker for 2B alignment in a structure. */
__extension__ typedef uint16_t RTE_MARKER16[0];
/** Marker for 4B alignment in a structure. */
__extension__ typedef uint32_t RTE_MARKER32[0];
/** Marker for 8B alignment in a structure. */
__extension__ typedef uint64_t RTE_MARKER64[0];
/**
* Combines 32b inputs most significant set bits into the least
* significant bits to construct a value with the same MSBs as x
* but all 1's under it.
*
* @param x
* The integer whose MSBs need to be combined with its LSBs
* @return
* The combined value.
*/
static inline uint32_t
rte_combine32ms1b(register uint32_t x)
{
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return x;
}
/**
* Combines 64b inputs most significant set bits into the least
* significant bits to construct a value with the same MSBs as x
* but all 1's under it.
*
* @param v
* The integer whose MSBs need to be combined with its LSBs
* @return
* The combined value.
*/
static inline uint64_t
rte_combine64ms1b(register uint64_t v)
{
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v |= v >> 32;
return v;
}
/*********** Macros to work with powers of 2 ********/
/**
* Macro to return 1 if n is a power of 2, 0 otherwise
*/
#define RTE_IS_POWER_OF_2(n) ((n) && !(((n) - 1) & (n)))
/**
* Returns true if n is a power of 2
* @param n
* Number to check
* @return 1 if true, 0 otherwise
*/
static inline int
rte_is_power_of_2(uint32_t n)
{
return n && !(n & (n - 1));
}
/**
* Aligns input parameter to the next power of 2
*
* @param x
* The integer value to align
*
* @return
* Input parameter aligned to the next power of 2
*/
static inline uint32_t
rte_align32pow2(uint32_t x)
{
x--;
x = rte_combine32ms1b(x);
return x + 1;
}
/**
* Aligns input parameter to the previous power of 2
*
* @param x
* The integer value to align
*
* @return
* Input parameter aligned to the previous power of 2
*/
static inline uint32_t
rte_align32prevpow2(uint32_t x)
{
x = rte_combine32ms1b(x);
return x - (x >> 1);
}
/**
* Aligns 64b input parameter to the next power of 2
*
* @param v
* The 64b value to align
*
* @return
* Input parameter aligned to the next power of 2
*/
static inline uint64_t
rte_align64pow2(uint64_t v)
{
v--;
v = rte_combine64ms1b(v);
return v + 1;
}
/**
* Aligns 64b input parameter to the previous power of 2
*
* @param v
* The 64b value to align
*
* @return
* Input parameter aligned to the previous power of 2
*/
static inline uint64_t
rte_align64prevpow2(uint64_t v)
{
v = rte_combine64ms1b(v);
return v - (v >> 1);
}
/*********** Macros for calculating min and max **********/
/**
* Macro to return the minimum of two numbers
*/
#define RTE_MIN(a, b) \
__extension__ ({ \
typeof (a) _a = (a); \
typeof (b) _b = (b); \
_a < _b ? _a : _b; \
})
/**
* Macro to return the maximum of two numbers
*/
#define RTE_MAX(a, b) \
__extension__ ({ \
typeof (a) _a = (a); \
typeof (b) _b = (b); \
_a > _b ? _a : _b; \
})
/*********** Other general functions / macros ********/
/**
* Searches the input parameter for the least significant set bit
* (starting from zero).
* If a least significant 1 bit is found, its bit index is returned.
* If the content of the input parameter is zero, then the content of the return
* value is undefined.
* @param v
* input parameter, should not be zero.
* @return
* least significant set bit in the input parameter.
*/
static inline uint32_t
rte_bsf32(uint32_t v)
{
return (uint32_t)__builtin_ctz(v);
}
/**
* Searches the input parameter for the least significant set bit
* (starting from zero). Safe version (checks for input parameter being zero).
*
* @warning ``pos`` must be a valid pointer. It is not checked!
*
* @param v
* The input parameter.
* @param pos
* If ``v`` was not 0, this value will contain position of least significant
* bit within the input parameter.
* @return
* Returns 0 if ``v`` was 0, otherwise returns 1.
*/
static inline int
rte_bsf32_safe(uint64_t v, uint32_t *pos)
{
if (v == 0)
return 0;
*pos = rte_bsf32(v);
return 1;
}
/**
 * Return the rounded-up log2 of an integer.
*
* @note Contrary to the logarithm mathematical operation,
* rte_log2_u32(0) == 0 and not -inf.
*
* @param v
* The input parameter.
* @return
* The rounded-up log2 of the input, or 0 if the input is 0.
*/
static inline uint32_t
rte_log2_u32(uint32_t v)
{
if (v == 0)
return 0;
v = rte_align32pow2(v);
return rte_bsf32(v);
}
/**
* Return the last (most-significant) bit set.
*
* @note The last (most significant) bit is at position 32.
* @note rte_fls_u32(0) = 0, rte_fls_u32(1) = 1, rte_fls_u32(0x80000000) = 32
*
* @param x
* The input parameter.
* @return
* The last (most-significant) bit set, or 0 if the input is 0.
*/
static inline int
rte_fls_u32(uint32_t x)
{
return (x == 0) ? 0 : 32 - __builtin_clz(x);
}
/**
* Searches the input parameter for the least significant set bit
* (starting from zero).
* If a least significant 1 bit is found, its bit index is returned.
* If the content of the input parameter is zero, then the content of the return
* value is undefined.
* @param v
* input parameter, should not be zero.
* @return
* least significant set bit in the input parameter.
*/
static inline int
rte_bsf64(uint64_t v)
{
return (uint32_t)__builtin_ctzll(v);
}
/**
* Searches the input parameter for the least significant set bit
* (starting from zero). Safe version (checks for input parameter being zero).
*
* @warning ``pos`` must be a valid pointer. It is not checked!
*
* @param v
* The input parameter.
* @param pos
* If ``v`` was not 0, this value will contain position of least significant
* bit within the input parameter.
* @return
* Returns 0 if ``v`` was 0, otherwise returns 1.
*/
static inline int
rte_bsf64_safe(uint64_t v, uint32_t *pos)
{
if (v == 0)
return 0;
*pos = rte_bsf64(v);
return 1;
}
/**
* Return the last (most-significant) bit set.
*
* @note The last (most significant) bit is at position 64.
* @note rte_fls_u64(0) = 0, rte_fls_u64(1) = 1,
* rte_fls_u64(0x8000000000000000) = 64
*
* @param x
* The input parameter.
* @return
* The last (most-significant) bit set, or 0 if the input is 0.
*/
static inline int
rte_fls_u64(uint64_t x)
{
return (x == 0) ? 0 : 64 - __builtin_clzll(x);
}
/**
* Return the rounded-up log2 of a 64-bit integer.
*
* @note Contrary to the logarithm mathematical operation,
* rte_log2_u64(0) == 0 and not -inf.
*
* @param v
* The input parameter.
* @return
* The rounded-up log2 of the input, or 0 if the input is 0.
*/
static inline uint32_t
rte_log2_u64(uint64_t v)
{
if (v == 0)
return 0;
v = rte_align64pow2(v);
/* we checked for v being 0 already, so no undefined behavior */
return rte_bsf64(v);
}
#ifndef offsetof
/** Return the offset of a field in a structure. */
#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER)
#endif
/**
* Return pointer to the wrapping struct instance.
*
* Example:
*
* struct wrapper {
* ...
* struct child c;
* ...
* };
*
* struct child *x = obtain(...);
* struct wrapper *w = container_of(x, struct wrapper, c);
*/
#ifndef container_of
#define container_of(ptr, type, member) __extension__ ({ \
const typeof(((type *)0)->member) *_ptr = (ptr); \
__rte_unused type *_target_ptr = \
(type *)(ptr); \
(type *)(((uintptr_t)_ptr) - offsetof(type, member)); \
})
#endif
/**
* Get the size of a field in a structure.
*
* @param type
* The type of the structure.
* @param field
* The field in the structure.
* @return
* The size of the field in the structure, in bytes.
*/
#define RTE_SIZEOF_FIELD(type, field) (sizeof(((type *)0)->field))
#define _RTE_STR(x) #x
/** Take a macro value and get a string version of it */
#define RTE_STR(x) _RTE_STR(x)
/**
* ISO C helpers to modify format strings using variadic macros.
* This is a replacement for the ", ## __VA_ARGS__" GNU extension.
* An empty %s argument is appended to avoid a dangling comma.
*/
#define RTE_FMT(fmt, ...) fmt "%.0s", __VA_ARGS__ ""
#define RTE_FMT_HEAD(fmt, ...) fmt
#define RTE_FMT_TAIL(fmt, ...) __VA_ARGS__
/** Mask value of type "tp" for the first "ln" bit set. */
#define RTE_LEN2MASK(ln, tp) \
((tp)((uint64_t)-1 >> (sizeof(uint64_t) * CHAR_BIT - (ln))))
/** Number of elements in the array. */
#define RTE_DIM(a) (sizeof (a) / sizeof ((a)[0]))
/**
* Converts a numeric string to the equivalent uint64_t value.
* As well as straight number conversion, also recognises the suffixes
* k, m and g for kilobytes, megabytes and gigabytes respectively.
*
 * If a negative number is passed in, i.e. a string with the first non-blank
* character being "-", zero is returned. Zero is also returned in the case of
* an error with the strtoull call in the function.
*
* @param str
* String containing number to convert.
* @return
* Number.
*/
#if 0
static inline uint64_t
rte_str_to_size(const char *str)
{
char *endptr;
unsigned long long size;
while (isspace((int)*str))
str++;
if (*str == '-')
return 0;
errno = 0;
size = strtoull(str, &endptr, 0);
if (errno)
return 0;
if (*endptr == ' ')
endptr++; /* allow 1 space gap */
switch (*endptr){
case 'G': case 'g': size *= 1024; /* fall-through */
case 'M': case 'm': size *= 1024; /* fall-through */
case 'K': case 'k': size *= 1024; /* fall-through */
default:
break;
}
return size;
}
#endif
/**
* Function to terminate the application immediately, printing an error
* message and returning the exit_code back to the shell.
*
* This function never returns
*
* @param exit_code
* The exit code to be returned by the application
* @param format
* The format string to be used for printing the message. This can include
* printf format characters which will be expanded using any further parameters
* to the function.
*/
__rte_noreturn void
rte_exit(int exit_code, const char *format, ...)
__rte_format_printf(2, 3);
#ifdef __cplusplus
}
#endif
#endif

rte_debug.h (new file)
@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _RTE_DEBUG_H_
#define _RTE_DEBUG_H_
/**
* @file
*
* Debug Functions in RTE
*
* This file defines a generic API for debug operations. Part of
* the implementation is architecture-specific.
*/
//#include "rte_log.h"
#include "rte_branch_prediction.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Dump the stack of the calling core to the console.
*/
void rte_dump_stack(void);
/**
* Dump the registers of the calling core to the console.
*
* Note: Not implemented in a userapp environment; use gdb instead.
*/
void rte_dump_registers(void);
/**
* Provide notification of a critical non-recoverable error and terminate
* execution abnormally.
*
* Display the format string and its expanded arguments (printf-like).
*
* In a linux environment, this function dumps the stack and calls
* abort() resulting in a core dump if enabled.
*
* The function never returns.
*
* @param ...
* The format string, followed by the variable list of arguments.
*/
#define rte_panic(...) rte_panic_(__func__, __VA_ARGS__, "dummy")
#define rte_panic_(func, format, ...) __rte_panic(func, format "%.0s", __VA_ARGS__)
#ifdef RTE_ENABLE_ASSERT
#define RTE_ASSERT(exp) RTE_VERIFY(exp)
#else
#define RTE_ASSERT(exp) do {} while (0)
#endif
#define RTE_VERIFY(exp) do { \
if (unlikely(!(exp))) \
rte_panic("line %d\tassert \"%s\" failed\n", __LINE__, #exp); \
} while (0)
/*
* Provide notification of a critical non-recoverable error and stop.
*
* This function should not be called directly. Refer to rte_panic() macro
* documentation.
*/
void __rte_panic(const char *funcname, const char *format, ...)
#ifdef __GNUC__
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
	__rte_cold
#endif
#endif
	/* __rte_noreturn and __rte_format_printf(2, 3) attributes omitted */
	;
#ifdef __cplusplus
}
#endif
#endif /* _RTE_DEBUG_H_ */

rte_jhash.h (new file)
@@ -0,0 +1,379 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2015 Intel Corporation.
*/
#ifndef _RTE_JHASH_H
#define _RTE_JHASH_H
/**
* @file
*
* jhash functions.
*/
#ifdef __cplusplus
extern "C" {
#endif
//#include <rte_byteorder.h>
/* jhash.h: Jenkins hash support.
*
* Copyright (C) 2006 Bob Jenkins (bob_jenkins@burtleburtle.net)
*
* http://burtleburtle.net/bob/hash/
*
* These are the credits from Bob's sources:
*
* lookup3.c, by Bob Jenkins, May 2006, Public Domain.
*
* These are functions for producing 32-bit hashes for hash table lookup.
* hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
* are externally useful functions. Routines to test the hash are included
* if SELF_TEST is defined. You can use this free for any purpose. It's in
* the public domain. It has no warranty.
*
* $FreeBSD$
*/
#define rot(x, k) (((x) << (k)) | ((x) >> (32-(k))))
/** @internal Internal function. NOTE: Arguments are modified. */
#define __rte_jhash_mix(a, b, c) do { \
a -= c; a ^= rot(c, 4); c += b; \
b -= a; b ^= rot(a, 6); a += c; \
c -= b; c ^= rot(b, 8); b += a; \
a -= c; a ^= rot(c, 16); c += b; \
b -= a; b ^= rot(a, 19); a += c; \
c -= b; c ^= rot(b, 4); b += a; \
} while (0)
#define __rte_jhash_final(a, b, c) do { \
c ^= b; c -= rot(b, 14); \
a ^= c; a -= rot(c, 11); \
b ^= a; b -= rot(a, 25); \
c ^= b; c -= rot(b, 16); \
a ^= c; a -= rot(c, 4); \
b ^= a; b -= rot(a, 14); \
c ^= b; c -= rot(b, 24); \
} while (0)
/** The golden ratio: an arbitrary value. */
#define RTE_JHASH_GOLDEN_RATIO 0xdeadbeef
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define BIT_SHIFT(x, y, k) (((x) >> (k)) | ((uint64_t)(y) << (32-(k))))
#else
#define BIT_SHIFT(x, y, k) (((uint64_t)(x) << (k)) | ((y) >> (32-(k))))
#endif
#define LOWER8b_MASK rte_le_to_cpu_32(0xff)
#define LOWER16b_MASK rte_le_to_cpu_32(0xffff)
#define LOWER24b_MASK rte_le_to_cpu_32(0xffffff)
static inline void
__rte_jhash_2hashes(const void *key, uint32_t length, uint32_t *pc,
uint32_t *pb, unsigned check_align)
{
uint32_t a, b, c;
/* Set up the internal state */
a = b = c = RTE_JHASH_GOLDEN_RATIO + ((uint32_t)length) + *pc;
c += *pb;
/*
* Check key alignment. For x86 architecture, first case is always optimal
* If check_align is not set, first case will be used
*/
#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_I686) || defined(RTE_ARCH_X86_X32)
const uint32_t *k = (const uint32_t *)key;
const uint32_t s = 0;
#else
const uint32_t *k = (uint32_t *)((uintptr_t)key & (uintptr_t)~3);
const uint32_t s = ((uintptr_t)key & 3) * CHAR_BIT;
#endif
if (!check_align || s == 0) {
while (length > 12) {
a += k[0];
b += k[1];
c += k[2];
__rte_jhash_mix(a, b, c);
k += 3;
length -= 12;
}
switch (length) {
case 12:
c += k[2]; b += k[1]; a += k[0]; break;
case 11:
c += k[2] & LOWER24b_MASK; b += k[1]; a += k[0]; break;
case 10:
c += k[2] & LOWER16b_MASK; b += k[1]; a += k[0]; break;
case 9:
c += k[2] & LOWER8b_MASK; b += k[1]; a += k[0]; break;
case 8:
b += k[1]; a += k[0]; break;
case 7:
b += k[1] & LOWER24b_MASK; a += k[0]; break;
case 6:
b += k[1] & LOWER16b_MASK; a += k[0]; break;
case 5:
b += k[1] & LOWER8b_MASK; a += k[0]; break;
case 4:
a += k[0]; break;
case 3:
a += k[0] & LOWER24b_MASK; break;
case 2:
a += k[0] & LOWER16b_MASK; break;
case 1:
a += k[0] & LOWER8b_MASK; break;
/* zero length strings require no mixing */
case 0:
*pc = c;
*pb = b;
return;
};
} else {
/* all but the last block: affect some 32 bits of (a, b, c) */
while (length > 12) {
a += BIT_SHIFT(k[0], k[1], s);
b += BIT_SHIFT(k[1], k[2], s);
c += BIT_SHIFT(k[2], k[3], s);
__rte_jhash_mix(a, b, c);
k += 3;
length -= 12;
}
/* last block: affect all 32 bits of (c) */
switch (length) {
case 12:
a += BIT_SHIFT(k[0], k[1], s);
b += BIT_SHIFT(k[1], k[2], s);
c += BIT_SHIFT(k[2], k[3], s);
break;
case 11:
a += BIT_SHIFT(k[0], k[1], s);
b += BIT_SHIFT(k[1], k[2], s);
c += BIT_SHIFT(k[2], k[3], s) & LOWER24b_MASK;
break;
case 10:
a += BIT_SHIFT(k[0], k[1], s);
b += BIT_SHIFT(k[1], k[2], s);
c += BIT_SHIFT(k[2], k[3], s) & LOWER16b_MASK;
break;
case 9:
a += BIT_SHIFT(k[0], k[1], s);
b += BIT_SHIFT(k[1], k[2], s);
c += BIT_SHIFT(k[2], k[3], s) & LOWER8b_MASK;
break;
case 8:
a += BIT_SHIFT(k[0], k[1], s);
b += BIT_SHIFT(k[1], k[2], s);
break;
case 7:
a += BIT_SHIFT(k[0], k[1], s);
b += BIT_SHIFT(k[1], k[2], s) & LOWER24b_MASK;
break;
case 6:
a += BIT_SHIFT(k[0], k[1], s);
b += BIT_SHIFT(k[1], k[2], s) & LOWER16b_MASK;
break;
case 5:
a += BIT_SHIFT(k[0], k[1], s);
b += BIT_SHIFT(k[1], k[2], s) & LOWER8b_MASK;
break;
case 4:
a += BIT_SHIFT(k[0], k[1], s);
break;
case 3:
a += BIT_SHIFT(k[0], k[1], s) & LOWER24b_MASK;
break;
case 2:
a += BIT_SHIFT(k[0], k[1], s) & LOWER16b_MASK;
break;
case 1:
a += BIT_SHIFT(k[0], k[1], s) & LOWER8b_MASK;
break;
/* zero length strings require no mixing */
case 0:
*pc = c;
*pb = b;
return;
}
}
__rte_jhash_final(a, b, c);
*pc = c;
*pb = b;
}
/**
* Same as rte_jhash, but takes two seeds and return two uint32_ts.
* pc and pb must be non-null, and *pc and *pb must both be initialized
* with seeds. If you pass in (*pb)=0, the output (*pc) will be
* the same as the return value from rte_jhash.
*
* @param key
* Key to calculate hash of.
* @param length
* Length of key in bytes.
* @param pc
* IN: seed OUT: primary hash value.
* @param pb
* IN: second seed OUT: secondary hash value.
*/
static inline void
rte_jhash_2hashes(const void *key, uint32_t length, uint32_t *pc, uint32_t *pb)
{
__rte_jhash_2hashes(key, length, pc, pb, 1);
}
/**
* Same as rte_jhash_32b, but takes two seeds and return two uint32_ts.
* pc and pb must be non-null, and *pc and *pb must both be initialized
* with seeds. If you pass in (*pb)=0, the output (*pc) will be
* the same as the return value from rte_jhash_32b.
*
* @param k
* Key to calculate hash of.
* @param length
* Length of key in units of 4 bytes.
* @param pc
* IN: seed OUT: primary hash value.
* @param pb
* IN: second seed OUT: secondary hash value.
*/
static inline void
rte_jhash_32b_2hashes(const uint32_t *k, uint32_t length, uint32_t *pc, uint32_t *pb)
{
__rte_jhash_2hashes((const void *) k, (length << 2), pc, pb, 0);
}
/**
* The most generic version, hashes an arbitrary sequence
* of bytes. No alignment or length assumptions are made about
* the input key. For keys not aligned to four byte boundaries
* or a multiple of four bytes in length, the memory region
* just after may be read (but not used in the computation).
* This may cross a page boundary.
*
* @param key
* Key to calculate hash of.
* @param length
* Length of key in bytes.
* @param initval
* Initialising value of hash.
* @return
* Calculated hash value.
*/
static inline uint32_t
rte_jhash(const void *key, uint32_t length, uint32_t initval)
{
uint32_t initval2 = 0;
rte_jhash_2hashes(key, length, &initval, &initval2);
return initval;
}
/**
* A special optimized version that handles 1 or more of uint32_ts.
* The length parameter here is the number of uint32_ts in the key.
*
* @param k
* Key to calculate hash of.
* @param length
* Length of key in units of 4 bytes.
* @param initval
* Initialising value of hash.
* @return
* Calculated hash value.
*/
static inline uint32_t
rte_jhash_32b(const uint32_t *k, uint32_t length, uint32_t initval)
{
uint32_t initval2 = 0;
rte_jhash_32b_2hashes(k, length, &initval, &initval2);
return initval;
}
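/*
 * Usage example (illustrative): hash a three-word flow key with a seed.
 *
 *	uint32_t key[3] = { src_ip, dst_ip, ports };
 *	uint32_t h = rte_jhash_32b(key, 3, seed);
 *
 * For byte-oriented keys, rte_jhash(&blob, sizeof(blob), seed) is the
 * equivalent entry point.
 */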
static inline uint32_t
__rte_jhash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
{
a += RTE_JHASH_GOLDEN_RATIO + initval;
b += RTE_JHASH_GOLDEN_RATIO + initval;
c += RTE_JHASH_GOLDEN_RATIO + initval;
__rte_jhash_final(a, b, c);
return c;
}
/**
* A special ultra-optimized versions that knows it is hashing exactly
* 3 words.
*
* @param a
* First word to calculate hash of.
* @param b
* Second word to calculate hash of.
* @param c
* Third word to calculate hash of.
* @param initval
* Initialising value of hash.
* @return
* Calculated hash value.
*/
static inline uint32_t
rte_jhash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
{
return __rte_jhash_3words(a + 12, b + 12, c + 12, initval);
}
/**
* A special ultra-optimized versions that knows it is hashing exactly
* 2 words.
*
* @param a
* First word to calculate hash of.
* @param b
* Second word to calculate hash of.
* @param initval
* Initialising value of hash.
* @return
* Calculated hash value.
*/
static inline uint32_t
rte_jhash_2words(uint32_t a, uint32_t b, uint32_t initval)
{
return __rte_jhash_3words(a + 8, b + 8, 8, initval);
}
/**
 * A special ultra-optimized version that knows it is hashing exactly
* 1 word.
*
* @param a
* Word to calculate hash of.
* @param initval
* Initialising value of hash.
* @return
* Calculated hash value.
*/
static inline uint32_t
rte_jhash_1word(uint32_t a, uint32_t initval)
{
return __rte_jhash_3words(a + 4, 4, 4, initval);
}
#ifdef __cplusplus
}
#endif
#endif /* _RTE_JHASH_H */
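As a rough illustration of the hash entry points declared above, a minimal userland-style sketch follows; the key bytes and seed are made up, and the bare include assumes the header can be used standalone (in the kernel build it is pulled in through rte_shim.h).

#include <stdint.h>
#include <stdio.h>
#include "rte_jhash.h"

int
main(void)
{
	const char key[] = "192.0.2.1";			/* arbitrary byte key */
	const uint32_t words[4] = { 1, 2, 3, 4 };	/* 4 x 32-bit key */
	uint32_t seed = 0xdeadbeef;			/* hypothetical seed */
	uint32_t pc = seed, pb = 0;

	/* Generic byte-oriented hash; length is in bytes. */
	uint32_t h1 = rte_jhash(key, sizeof(key) - 1, seed);

	/* Optimized variant; length is in 32-bit words, not bytes. */
	uint32_t h2 = rte_jhash_32b(words, 4, seed);

	/*
	 * Two independent 32-bit hashes over the same key in one pass;
	 * with *pb == 0 the primary result matches rte_jhash().
	 */
	rte_jhash_2hashes(key, sizeof(key) - 1, &pc, &pb);

	printf("%#x %#x %#x/%#x\n", h1, h2, pc, pb);
	return (0);
}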

View File

@ -0,0 +1,383 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2017 Intel Corporation
*/
#ifndef _RTE_LOG_H_
#define _RTE_LOG_H_
/**
* @file
*
* RTE Logs API
*
* This file provides a log API to RTE applications.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdbool.h>
#include <sys/queue.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_compat.h>
struct rte_log_dynamic_type;
/** The rte_log structure. */
struct rte_logs {
uint32_t type; /**< Bitfield with enabled logs. */
uint32_t level; /**< Log level. */
FILE *file; /**< Output file set by rte_openlog_stream, or NULL. */
size_t dynamic_types_len;
struct rte_log_dynamic_type *dynamic_types;
};
/** Global log information */
extern struct rte_logs rte_logs;
/* SDK log type */
#define RTE_LOGTYPE_EAL 0 /**< Log related to eal. */
#define RTE_LOGTYPE_MALLOC 1 /**< Log related to malloc. */
#define RTE_LOGTYPE_RING 2 /**< Log related to ring. */
#define RTE_LOGTYPE_MEMPOOL 3 /**< Log related to mempool. */
#define RTE_LOGTYPE_TIMER 4 /**< Log related to timers. */
#define RTE_LOGTYPE_PMD 5 /**< Log related to poll mode driver. */
#define RTE_LOGTYPE_HASH 6 /**< Log related to hash table. */
#define RTE_LOGTYPE_LPM 7 /**< Log related to LPM. */
#define RTE_LOGTYPE_KNI 8 /**< Log related to KNI. */
#define RTE_LOGTYPE_ACL 9 /**< Log related to ACL. */
#define RTE_LOGTYPE_POWER 10 /**< Log related to power. */
#define RTE_LOGTYPE_METER 11 /**< Log related to QoS meter. */
#define RTE_LOGTYPE_SCHED 12 /**< Log related to QoS port scheduler. */
#define RTE_LOGTYPE_PORT 13 /**< Log related to port. */
#define RTE_LOGTYPE_TABLE 14 /**< Log related to table. */
#define RTE_LOGTYPE_PIPELINE 15 /**< Log related to pipeline. */
#define RTE_LOGTYPE_MBUF 16 /**< Log related to mbuf. */
#define RTE_LOGTYPE_CRYPTODEV 17 /**< Log related to cryptodev. */
#define RTE_LOGTYPE_EFD 18 /**< Log related to EFD. */
#define RTE_LOGTYPE_EVENTDEV 19 /**< Log related to eventdev. */
#define RTE_LOGTYPE_GSO 20 /**< Log related to GSO. */
/* these log types can be used in an application */
#define RTE_LOGTYPE_USER1 24 /**< User-defined log type 1. */
#define RTE_LOGTYPE_USER2 25 /**< User-defined log type 2. */
#define RTE_LOGTYPE_USER3 26 /**< User-defined log type 3. */
#define RTE_LOGTYPE_USER4 27 /**< User-defined log type 4. */
#define RTE_LOGTYPE_USER5 28 /**< User-defined log type 5. */
#define RTE_LOGTYPE_USER6 29 /**< User-defined log type 6. */
#define RTE_LOGTYPE_USER7 30 /**< User-defined log type 7. */
#define RTE_LOGTYPE_USER8 31 /**< User-defined log type 8. */
/** First identifier for extended logs */
#define RTE_LOGTYPE_FIRST_EXT_ID 32
/* Can't use 0, as it gives compiler warnings */
#define RTE_LOG_EMERG 1U /**< System is unusable. */
#define RTE_LOG_ALERT 2U /**< Action must be taken immediately. */
#define RTE_LOG_CRIT 3U /**< Critical conditions. */
#define RTE_LOG_ERR 4U /**< Error conditions. */
#define RTE_LOG_WARNING 5U /**< Warning conditions. */
#define RTE_LOG_NOTICE 6U /**< Normal but significant condition. */
#define RTE_LOG_INFO 7U /**< Informational. */
#define RTE_LOG_DEBUG 8U /**< Debug-level messages. */
/**
* Change the stream that will be used by the logging system.
*
* This can be done at any time. The f argument represents the stream
* to be used to send the logs. If f is NULL, the default output is
* used (stderr).
*
* @param f
* Pointer to the stream.
* @return
* - 0 on success.
* - Negative on error.
*/
int rte_openlog_stream(FILE *f);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
* Retrieve the stream used by the logging system (see rte_openlog_stream()
* to change it).
*
* @return
* Pointer to the stream.
*/
__rte_experimental
FILE *rte_log_get_stream(void);
/**
* Set the global log level.
*
* After this call, logs with a level lower or equal than the level
* passed as argument will be displayed.
*
* @param level
* Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
*/
void rte_log_set_global_level(uint32_t level);
/**
* Get the global log level.
*
* @return
* The current global log level.
*/
uint32_t rte_log_get_global_level(void);
/**
* Get the log level for a given type.
*
* @param logtype
* The log type identifier.
* @return
* 0 on success, a negative value if logtype is invalid.
*/
int rte_log_get_level(uint32_t logtype);
/**
* For a given `logtype`, check if a log with `loglevel` can be printed.
*
* @param logtype
* The log type identifier
* @param loglevel
* Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
* @return
* Returns 'true' if log can be printed and 'false' if it can't.
*/
__rte_experimental
bool rte_log_can_log(uint32_t logtype, uint32_t loglevel);
/**
* Set the log level for a given type based on shell pattern.
*
* @param pattern
* The match pattern identifying the log type.
* @param level
* The level to be set.
* @return
* 0 on success, a negative value if level is invalid.
*/
int rte_log_set_level_pattern(const char *pattern, uint32_t level);
/**
* Set the log level for a given type based on regular expression.
*
* @param regex
* The regular expression identifying the log type.
* @param level
* The level to be set.
* @return
* 0 on success, a negative value if level is invalid.
*/
int rte_log_set_level_regexp(const char *regex, uint32_t level);
/**
* Set the log level for a given type.
*
* @param logtype
* The log type identifier.
* @param level
* The level to be set.
* @return
* 0 on success, a negative value if logtype or level is invalid.
*/
int rte_log_set_level(uint32_t logtype, uint32_t level);
/**
* Get the current loglevel for the message being processed.
*
* Before calling the user-defined stream for logging, the log
* subsystem sets a per-lcore variable containing the loglevel and the
* logtype of the message being processed. This information can be
* accessed by the user-defined log output function through this
* function.
*
* @return
* The loglevel of the message being processed.
*/
int rte_log_cur_msg_loglevel(void);
/**
* Get the current logtype for the message being processed.
*
* Before calling the user-defined stream for logging, the log
* subsystem sets a per-lcore variable containing the loglevel and the
* logtype of the message being processed. This information can be
* accessed by the user-defined log output function through this
* function.
*
* @return
* The logtype of the message being processed.
*/
int rte_log_cur_msg_logtype(void);
/**
* Register a dynamic log type
*
 * If a log type is already registered with the same name, the returned
 * value is the same as the previous one.
*
* @param name
* The string identifying the log type.
* @return
* - >0: success, the returned value is the log type identifier.
* - (-ENOMEM): cannot allocate memory.
*/
int rte_log_register(const char *name);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
* Register a dynamic log type and try to pick its level from EAL options
*
 * rte_log_register() is called internally. If successful, the function
 * searches the list of EAL log level options for a matching regexp and
 * picks the level from the last matching entry. If nothing from the list
 * can be applied, the level is set to the user-defined default value.
*
* @param name
* Name for the log type to be registered
* @param level_def
* Fallback level to be set if the global list has no matching options
* @return
* - >=0: the newly registered log type
* - <0: rte_log_register() error value
*/
__rte_experimental
int rte_log_register_type_and_pick_level(const char *name, uint32_t level_def);
/**
* Dump log information.
*
* Dump the global level and the registered log types.
*
* @param f
* The output stream where the dump should be sent.
*/
void rte_log_dump(FILE *f);
/**
* Generates a log message.
*
* The message will be sent in the stream defined by the previous call
* to rte_openlog_stream().
*
* The level argument determines if the log should be displayed or
* not, depending on the global rte_logs variable.
*
* The preferred alternative is the RTE_LOG() because it adds the
* level and type in the logged string.
*
* @param level
* Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
* @param logtype
* The log type, for example, RTE_LOGTYPE_EAL.
* @param format
* The format string, as in printf(3), followed by the variable arguments
* required by the format.
* @return
* - 0: Success.
* - Negative on error.
*/
int rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
#ifdef __GNUC__
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
__rte_cold
#endif
#endif
__rte_format_printf(3, 4);
/**
* Generates a log message.
*
* The message will be sent in the stream defined by the previous call
* to rte_openlog_stream().
*
* The level argument determines if the log should be displayed or
* not, depending on the global rte_logs variable. A trailing
* newline may be added if needed.
*
* The preferred alternative is the RTE_LOG() because it adds the
* level and type in the logged string.
*
* @param level
* Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
* @param logtype
* The log type, for example, RTE_LOGTYPE_EAL.
* @param format
* The format string, as in printf(3), followed by the variable arguments
* required by the format.
* @param ap
* The va_list of the variable arguments required by the format.
* @return
* - 0: Success.
* - Negative on error.
*/
int rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap)
__rte_format_printf(3, 0);
/**
* Generates a log message.
*
* The RTE_LOG() is a helper that prefixes the string with the log level
* and type, and call rte_log().
*
* @param l
* Log level. A value between EMERG (1) and DEBUG (8). The short name is
* expanded by the macro, so it cannot be an integer value.
* @param t
* The log type, for example, EAL. The short name is expanded by the
* macro, so it cannot be an integer value.
* @param ...
* The fmt string, as in printf(3), followed by the variable arguments
* required by the format.
* @return
* - 0: Success.
* - Negative on error.
*/
#define RTE_LOG(l, t, ...) \
rte_log(RTE_LOG_ ## l, \
RTE_LOGTYPE_ ## t, # t ": " __VA_ARGS__)
/**
* Generates a log message for data path.
*
* Similar to RTE_LOG(), except that it is removed at compilation time
* if the RTE_LOG_DP_LEVEL configuration option is lower than the log
* level argument.
*
* @param l
* Log level. A value between EMERG (1) and DEBUG (8). The short name is
* expanded by the macro, so it cannot be an integer value.
* @param t
* The log type, for example, EAL. The short name is expanded by the
* macro, so it cannot be an integer value.
* @param ...
* The fmt string, as in printf(3), followed by the variable arguments
* required by the format.
* @return
* - 0: Success.
* - Negative on error.
*/
#define RTE_LOG_DP(l, t, ...) \
(void)((RTE_LOG_ ## l <= RTE_LOG_DP_LEVEL) ? \
rte_log(RTE_LOG_ ## l, \
RTE_LOGTYPE_ ## t, # t ": " __VA_ARGS__) : \
0)
#ifdef __cplusplus
}
#endif
#endif /* _RTE_LOG_H_ */
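For orientation, the typical stock-DPDK use of this API is to register a dynamic log type once and then log through the macros; the type name and messages below are invented for the sketch, and note that in this FreeBSD port rte_shim.h remaps RTE_LOG() to printf().

#include <rte_log.h>

/* Hypothetical component-private dynamic log type. */
static int my_logtype;

static void
my_component_init(void)
{
	my_logtype = rte_log_register("user.mycomp");	/* made-up name */
	if (my_logtype >= 0)
		rte_log_set_level((uint32_t)my_logtype, RTE_LOG_INFO);

	/* Static SDK types are used directly through the macro. */
	RTE_LOG(INFO, USER1, "component initialized\n");

	/* Dynamic types go through rte_log() with the returned id. */
	rte_log(RTE_LOG_DEBUG, (uint32_t)my_logtype, "debug detail %d\n", 42);
}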

File diff suppressed because it is too large

View File

@ -0,0 +1,403 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _RTE_LPM_H_
#define _RTE_LPM_H_
/**
* @file
* RTE Longest Prefix Match (LPM)
*/
/*
#include <errno.h>
#include <sys/queue.h>
#include <stdint.h>
#include <stdlib.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_config.h>
#include <rte_memory.h>
#include <rte_common.h>
#include <rte_vect.h>
*/
#include "rte_branch_prediction.h"
#ifdef __cplusplus
extern "C" {
#endif
/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE 16
/** Maximum depth value possible for IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH 32
/** @internal Total number of tbl24 entries. */
#define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)
/** @internal Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
/** @internal Max number of tbl8 groups in the tbl8. */
#define RTE_LPM_MAX_TBL8_NUM_GROUPS (1 << 24)
/** @internal Total number of tbl8 groups in the tbl8. */
#define RTE_LPM_TBL8_NUM_GROUPS 256
/** @internal Total number of tbl8 entries. */
#define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \
RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
/** @internal Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif
/** @internal bitmask with valid and valid_group fields set */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000
/** Bitmask used to indicate successful lookup */
#define RTE_LPM_LOOKUP_SUCCESS 0x01000000
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24 entry structure. */
__extension__
struct rte_lpm_tbl_entry {
/**
* Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
* a group index pointing to a tbl8 structure (tbl24 only, when
* valid_group is set)
*/
uint32_t next_hop :24;
	/* The following flag and depth fields share the same 32-bit word. */
uint32_t valid :1; /**< Validation flag. */
/**
* For tbl24:
* - valid_group == 0: entry stores a next hop
* - valid_group == 1: entry stores a group_index pointing to a tbl8
* For tbl8:
* - valid_group indicates whether the current tbl8 is in use or not
*/
uint32_t valid_group :1;
uint32_t depth :6; /**< Rule depth. */
};
#else
__extension__
struct rte_lpm_tbl_entry {
uint32_t depth :6;
uint32_t valid_group :1;
uint32_t valid :1;
uint32_t next_hop :24;
};
#endif
/** LPM configuration structure. */
struct rte_lpm_config {
uint32_t max_rules; /**< Max number of rules. */
uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */
int flags; /**< This field is currently unused. */
};
/** @internal Rule structure. */
struct rte_lpm_rule {
uint32_t ip; /**< Rule IP address. */
uint32_t next_hop; /**< Rule next hop. */
};
/** @internal Contains metadata about the rules table. */
struct rte_lpm_rule_info {
uint32_t used_rules; /**< Used rules so far. */
uint32_t first_rule; /**< Indexes the first rule of a given depth. */
};
struct nhop_object;
struct rte_lpm_external {
	struct nhop_object **nh_idx;	/**< nexthop idx -> nhop_object mappings */
uint32_t default_idx; /* nhop index of default route */
uint32_t fibnum; /* fib index */
};
/** @internal LPM structure. */
struct rte_lpm {
/* LPM metadata. */
struct rte_lpm_external ext;
char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
uint32_t max_rules; /**< Max. balanced rules per lpm. */
uint32_t number_tbl8s; /**< Number of tbl8s. */
struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
/* LPM Tables. */
struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
__rte_cache_aligned; /**< LPM tbl24 table. */
struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
};
/**
* Create an LPM object.
*
* @param name
* LPM object name
* @param socket_id
* NUMA socket ID for LPM table memory allocation
* @param config
* Structure containing the configuration
* @return
* Handle to LPM object on success, NULL otherwise with rte_errno set
* to an appropriate values. Possible rte_errno values include:
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
* - E_RTE_SECONDARY - function was called from a secondary process instance
* - EINVAL - invalid parameter passed to function
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
* - ENOMEM - no appropriate memory area found in which to create memzone
*/
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
const struct rte_lpm_config *config);
/**
* Find an existing LPM object and return a pointer to it.
*
* @param name
* Name of the lpm object as passed to rte_lpm_create()
* @return
* Pointer to lpm object or NULL if object not found with rte_errno
* set appropriately. Possible rte_errno values include:
* - ENOENT - required entry not available to return.
*/
struct rte_lpm *
rte_lpm_find_existing(const char *name);
/**
* Free an LPM object.
*
* @param lpm
* LPM object handle
* @return
* None
*/
void
rte_lpm_free(struct rte_lpm *lpm);
/**
* Add a rule to the LPM table.
*
* @param lpm
* LPM object handle
* @param ip
* IP of the rule to be added to the LPM table
* @param depth
* Depth of the rule to be added to the LPM table
* @param next_hop
* Next hop of the rule to be added to the LPM table
* @return
* 0 on success, negative value otherwise
*/
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
/**
* Check if a rule is present in the LPM table,
* and provide its next hop if it is.
*
* @param lpm
* LPM object handle
* @param ip
* IP of the rule to be searched
* @param depth
 *   Depth of the rule to be searched
* @param next_hop
* Next hop of the rule (valid only if it is found)
* @return
* 1 if the rule exists, 0 if it does not, a negative value on failure
*/
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint32_t *next_hop);
/**
* Delete a rule from the LPM table.
*
* @param lpm
* LPM object handle
* @param ip
* IP of the rule to be deleted from the LPM table
* @param depth
* Depth of the rule to be deleted from the LPM table
 * @param sub_rule_depth
 *   Depth of the parent (covering) rule
 * @param sub_rule_nhop
 *   Nexthop index of the parent (covering) rule
* @return
* 0 on success, negative value otherwise
*/
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint8_t sub_rule_depth, uint32_t sub_rule_nhop);
/**
* Delete all rules from the LPM table.
*
* @param lpm
* LPM object handle
*/
void
rte_lpm_delete_all(struct rte_lpm *lpm);
/**
* Lookup an IP into the LPM table.
*
* @param lpm
* LPM object handle
* @param ip
* IP to be looked up in the LPM table
* @param next_hop
* Next hop of the most specific rule found for IP (valid on lookup hit only)
* @return
* -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
*/
static inline int
rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
{
unsigned tbl24_index = (ip >> 8);
uint32_t tbl_entry;
const uint32_t *ptbl;
/* DEBUG: Check user input arguments. */
RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
/* Copy tbl24 entry */
ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
tbl_entry = *ptbl;
/* Memory ordering is not required in lookup. Because dataflow
* dependency exists, compiler or HW won't be able to re-order
* the operations.
*/
/* Copy tbl8 entry (only if needed) */
if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
unsigned tbl8_index = (uint8_t)ip +
(((uint32_t)tbl_entry & 0x00FFFFFF) *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
tbl_entry = *ptbl;
}
*next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
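/*
 * Worked example (illustrative only): looking up 192.0.2.1 == 0xc0000201.
 * tbl24_index = ip >> 8 = 0xc00002, i.e. the entry covering the
 * 192.0.2.0/24 chunk is fetched first.  If that entry has both the valid
 * and valid_group bits set (RTE_LPM_VALID_EXT_ENTRY_BITMASK), its low
 * 24 bits are a tbl8 group index g and the final entry is
 * tbl8[g * 256 + (ip & 0xff)] = tbl8[g * 256 + 1]; otherwise the low
 * 24 bits of the tbl24 entry already hold the next-hop index.
 */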
/**
* Lookup multiple IP addresses in an LPM table. This may be implemented as a
* macro, so the address of the function should not be used.
*
* @param lpm
* LPM object handle
* @param ips
* Array of IPs to be looked up in the LPM table
* @param next_hops
* Next hop of the most specific rule found for IP (valid on lookup hit only).
 *   This is an array of uint32_t values. Bit 24 (RTE_LPM_LOOKUP_SUCCESS)
 *   in each value indicates whether the lookup was successful. The lower
 *   24 bits hold the actual next hop.
* @param n
* Number of elements in ips (and next_hops) array to lookup. This should be a
* compile time constant, and divisible by 8 for best performance.
* @return
* -EINVAL for incorrect arguments, otherwise 0
*/
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
static inline int
rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
uint32_t *next_hops, const unsigned n)
{
unsigned i;
unsigned tbl24_indexes[n];
const uint32_t *ptbl;
/* DEBUG: Check user input arguments. */
RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
(next_hops == NULL)), -EINVAL);
for (i = 0; i < n; i++) {
tbl24_indexes[i] = ips[i] >> 8;
}
for (i = 0; i < n; i++) {
/* Simply copy tbl24 entry to output */
ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
next_hops[i] = *ptbl;
/* Overwrite output with tbl8 entry if needed */
if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
unsigned tbl8_index = (uint8_t)ips[i] +
(((uint32_t)next_hops[i] & 0x00FFFFFF) *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
next_hops[i] = *ptbl;
}
}
return 0;
}
/* Mask four results. */
#define RTE_LPM_MASKX4_RES UINT64_C(0x00ffffff00ffffff)
/**
* Lookup four IP addresses in an LPM table.
*
* @param lpm
* LPM object handle
* @param ip
* Four IPs to be looked up in the LPM table
* @param hop
* Next hop of the most specific rule found for IP (valid on lookup hit only).
 *   This is a 4-element array of uint32_t values.
 *   If the lookup was successful for the given IP, the lower 24 bits of the
 *   corresponding element hold the actual next hop and the most significant
 *   byte is zero.
 *   If the lookup for the given IP failed, the corresponding element
 *   contains the default value; see the description of the next parameter.
 * @param defv
 *   Default value to populate into the corresponding element of the hop[]
 *   array if the lookup fails.
*/
#if 0
static inline void
rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
uint32_t defv);
#if defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64)
#include "rte_lpm_neon.h"
#elif defined(RTE_ARCH_PPC_64)
#include "rte_lpm_altivec.h"
#else
#include "rte_lpm_sse.h"
#endif
#endif
#ifdef __cplusplus
}
#endif
#endif /* _RTE_LPM_H_ */
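To show how the pieces above fit together, here is a minimal, hypothetical create/add/lookup sequence; the table sizes, addresses and next-hop index are arbitrary, and in the kernel build the allocations behind rte_lpm_create() go through rte_shim.h rather than the DPDK EAL.

#include <stdio.h>
#include "rte_lpm.h"

static void
lpm_demo(void)
{
	struct rte_lpm_config cfg = {
		.max_rules = 1024,	/* arbitrary sizing */
		.number_tbl8s = 256,
	};
	struct rte_lpm *lpm;
	uint32_t nh;

	lpm = rte_lpm_create("demo", 0 /* socket */, &cfg);
	if (lpm == NULL)
		return;

	/* 192.0.2.0/24 -> next-hop index 1 (addresses in host byte order). */
	if (rte_lpm_add(lpm, 0xc0000200, 24, 1) == 0 &&
	    rte_lpm_lookup(lpm, 0xc0000201, &nh) == 0)
		printf("next hop index: %u\n", nh);

	rte_lpm_free(lpm);
}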

File diff suppressed because it is too large

View File

@ -0,0 +1,209 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _RTE_LPM6_H_
#define _RTE_LPM6_H_
/**
* @file
* RTE Longest Prefix Match for IPv6 (LPM6)
*/
#ifdef __cplusplus
extern "C" {
#endif
#define RTE_LPM6_MAX_DEPTH 128
#define RTE_LPM6_IPV6_ADDR_SIZE 16
/** Max number of characters in LPM name. */
#define RTE_LPM6_NAMESIZE 32
/** LPM structure. */
struct rte_lpm6;
struct nhop_object;
struct rte_lpm6_external {
	struct nhop_object **nh_idx;	/**< nexthop idx -> nhop_object mappings */
uint32_t default_idx; /* nhop index of default route */
uint32_t fibnum; /* fib index */
};
/** LPM configuration structure. */
struct rte_lpm6_config {
uint32_t max_rules; /**< Max number of rules. */
uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */
int flags; /**< This field is currently unused. */
};
#define RTE_LPM6_RULE_SIZE 32
struct rte_lpm6_rule *fill_rule6(char *buffer, const uint8_t *ip,
uint8_t depth, uint32_t next_hop);
/**
* Create an LPM object.
*
* @param name
* LPM object name
* @param socket_id
* NUMA socket ID for LPM table memory allocation
* @param config
* Structure containing the configuration
* @return
* Handle to LPM object on success, NULL otherwise with rte_errno set
* to an appropriate values. Possible rte_errno values include:
* - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
* - E_RTE_SECONDARY - function was called from a secondary process instance
* - EINVAL - invalid parameter passed to function
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
* - ENOMEM - no appropriate memory area found in which to create memzone
*/
struct rte_lpm6 *
rte_lpm6_create(const char *name, int socket_id,
const struct rte_lpm6_config *config);
/**
* Find an existing LPM object and return a pointer to it.
*
* @param name
* Name of the lpm object as passed to rte_lpm6_create()
* @return
* Pointer to lpm object or NULL if object not found with rte_errno
* set appropriately. Possible rte_errno values include:
* - ENOENT - required entry not available to return.
*/
struct rte_lpm6 *
rte_lpm6_find_existing(const char *name);
/**
* Free an LPM object.
*
* @param lpm
* LPM object handle
* @return
* None
*/
void
rte_lpm6_free(struct rte_lpm6 *lpm);
/**
* Add a rule to the LPM table.
*
* @param lpm
* LPM object handle
* @param ip
* IP of the rule to be added to the LPM table
* @param depth
* Depth of the rule to be added to the LPM table
* @param next_hop
* Next hop of the rule to be added to the LPM table
* @return
* 0 on success, negative value otherwise
*/
int
rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
uint32_t next_hop, int is_new_rule);
/**
* Check if a rule is present in the LPM table,
* and provide its next hop if it is.
*
* @param lpm
* LPM object handle
* @param ip
* IP of the rule to be searched
* @param depth
 *   Depth of the rule to be searched
* @param next_hop
* Next hop of the rule (valid only if it is found)
* @return
* 1 if the rule exists, 0 if it does not, a negative value on failure
*/
int
rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
uint32_t *next_hop);
/**
* Delete a rule from the LPM table.
*
* @param lpm
* LPM object handle
* @param ip
* IP of the rule to be deleted from the LPM table
* @param depth
 *   Depth of the rule to be deleted from the LPM table
 * @param lsp_rule
 *   Pointer to the covering (less specific) rule to fall back to, if any
* @return
* 0 on success, negative value otherwise
*/
int
rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
struct rte_lpm6_rule *lsp_rule);
/**
 * Delete multiple rules from the LPM table.
*
* @param lpm
* LPM object handle
* @param ips
* Array of IPs to be deleted from the LPM table
* @param depths
* Array of depths of the rules to be deleted from the LPM table
* @param n
* Number of rules to be deleted from the LPM table
* @return
* 0 on success, negative value otherwise.
*/
int
rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm,
uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths, unsigned n);
/**
* Delete all rules from the LPM table.
*
* @param lpm
* LPM object handle
*/
void
rte_lpm6_delete_all(struct rte_lpm6 *lpm);
/**
* Lookup an IP into the LPM table.
*
* @param lpm
* LPM object handle
* @param ip
* IP to be looked up in the LPM table
* @param next_hop
* Next hop of the most specific rule found for IP (valid on lookup hit only)
* @return
* -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
*/
int
rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip, uint32_t *next_hop);
/**
* Lookup multiple IP addresses in an LPM table.
*
* @param lpm
* LPM object handle
* @param ips
* Array of IPs to be looked up in the LPM table
* @param next_hops
* Next hop of the most specific rule found for IP (valid on lookup hit only).
 *   This is an array of int32_t values. On success the next hop is stored
 *   at the corresponding position; otherwise the position is set to -1.
* @param n
* Number of elements in ips (and next_hops) array to lookup.
* @return
* -EINVAL for incorrect arguments, otherwise 0
*/
int
rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
int32_t *next_hops, unsigned int n);
#ifdef __cplusplus
}
#endif
#endif
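A comparable IPv6 sketch against the prototypes above; the prefix, sizes and next-hop index are arbitrary, and the trailing is_new_rule argument of this FreeBSD-modified rte_lpm6_add() is passed as 1 on the assumption that the prefix is not yet present.

#include <stdint.h>
#include <stdio.h>
#include "rte_lpm6.h"

static void
lpm6_demo(void)
{
	struct rte_lpm6_config cfg = {
		.max_rules = 1024,	/* arbitrary sizing */
		.number_tbl8s = 8192,
	};
	/* 2001:db8::/32; remaining bytes are zero. */
	uint8_t prefix[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8 };
	uint8_t addr[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8,
	    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 };
	struct rte_lpm6 *lpm;
	uint32_t nh;

	lpm = rte_lpm6_create("demo6", 0 /* socket */, &cfg);
	if (lpm == NULL)
		return;

	/* Install 2001:db8::/32 -> next-hop index 1; assume it is new. */
	if (rte_lpm6_add(lpm, prefix, 32, 1, 1) == 0 &&
	    rte_lpm6_lookup(lpm, addr, &nh) == 0)
		printf("next hop index: %u\n", nh);

	rte_lpm6_free(lpm);
}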

View File

@ -0,0 +1,31 @@
#ifndef _RTE_SHIM_H_
#define _RTE_SHIM_H_
#define rte_malloc(_type, _size, _align) malloc(_size, M_TEMP, M_NOWAIT)
#define rte_free(_ptr) free(_ptr, M_TEMP)
#define rte_zmalloc(_type, _size, _align) malloc(_size, M_TEMP, M_NOWAIT | M_ZERO)
#define rte_zmalloc_socket(_type, _size, _align, _s) malloc(_size, M_TEMP, M_NOWAIT | M_ZERO)
#define rte_mcfg_tailq_write_unlock()
#define rte_mcfg_tailq_write_lock()
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define strtoull strtoul
#define assert(_s) KASSERT((_s), ("DPDK: assert failed"))
#define rte_memcpy memcpy
#define rte_strerror(_err) "strerror_not_implemented"
#define RTE_LOG(_sev, _sub, _fmt, ...) printf("DPDK::" #_sev "::" #_sub " %s: " _fmt, __func__ , ## __VA_ARGS__)
#include "sys/endian.h"
#define RTE_BYTE_ORDER BYTE_ORDER
#define RTE_LITTLE_ENDIAN LITTLE_ENDIAN
#define RTE_BIG_ENDIAN BIG_ENDIAN
#include "sys/limits.h" // CHAR_BIT
#define rte_le_to_cpu_32 le32toh
#include "rte_jhash.h"
#include "rte_common.h"
#endif
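To make the effect of these shims concrete, a DPDK-style allocation in the wrapped library code compiles down to a plain kernel malloc(9) call; the variable names below are illustrative only.

/* A DPDK-style allocation in the wrapped code, for example: */
tbl8 = rte_zmalloc_socket(NULL, tbl8_size, RTE_CACHE_LINE_SIZE, socket_id);

/* expands, through the macros above, into the FreeBSD kernel call: */
tbl8 = malloc(tbl8_size, M_TEMP, M_NOWAIT | M_ZERO);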

View File

@ -0,0 +1,140 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _RTE_TAILQ_H_
#define _RTE_TAILQ_H_
/**
* @file
 * This file defines the rte_tailq APIs, intended for internal use only.
*
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/queue.h>
//#include <stdio.h>
#include <netinet6/rte_debug.h>
/** dummy structure type used by the rte_tailq APIs */
struct rte_tailq_entry {
TAILQ_ENTRY(rte_tailq_entry) next; /**< Pointer entries for a tailq list */
void *data; /**< Pointer to the data referenced by this tailq entry */
};
/** dummy */
TAILQ_HEAD(rte_tailq_entry_head, rte_tailq_entry);
#define RTE_TAILQ_NAMESIZE 32
/**
* The structure defining a tailq header entry for storing
* in the rte_config structure in shared memory. Each tailq
* is identified by name.
 * Any library storing a set of objects (e.g. rings, mempools, hash tables)
 * is recommended to use an entry here, so as to make it easy for
 * a multi-process app to find already-created elements in shared memory.
*/
struct rte_tailq_head {
struct rte_tailq_entry_head tailq_head; /**< NOTE: must be first element */
char name[RTE_TAILQ_NAMESIZE];
};
struct rte_tailq_elem {
/**
* Reference to head in shared mem, updated at init time by
* rte_eal_tailqs_init()
*/
struct rte_tailq_head *head;
TAILQ_ENTRY(rte_tailq_elem) next;
const char name[RTE_TAILQ_NAMESIZE];
};
/**
* Return the first tailq entry cast to the right struct.
*/
#define RTE_TAILQ_CAST(tailq_entry, struct_name) \
(struct struct_name *)&(tailq_entry)->tailq_head
/**
 * Utility macro to make looking up a tail queue for a particular struct easier.
*
* @param name
* The name of tailq
*
* @param struct_name
* The name of the list type we are using. (Generally this is the same as the
* first parameter passed to TAILQ_HEAD macro)
*
* @return
* The return value from rte_eal_tailq_lookup, typecast to the appropriate
* structure pointer type.
* NULL on error, since the tailq_head is the first
* element in the rte_tailq_head structure.
*/
#define RTE_TAILQ_LOOKUP(name, struct_name) \
RTE_TAILQ_CAST(rte_eal_tailq_lookup(name), struct_name)
/**
* Dump tail queues to a file.
*
* @param f
* A pointer to a file for output
*/
//void rte_dump_tailq(FILE *f);
/**
* Lookup for a tail queue.
*
* Get a pointer to a tail queue header of a tail
* queue identified by the name given as an argument.
* Note: this function is not multi-thread safe, and should only be called from
* a single thread at a time
*
* @param name
* The name of the queue.
* @return
* A pointer to the tail queue head structure.
*/
struct rte_tailq_head *rte_eal_tailq_lookup(const char *name);
/**
* Register a tail queue.
*
* Register a tail queue from shared memory.
* This function is mainly used by EAL_REGISTER_TAILQ macro which is used to
 * register tailqs from the different dpdk libraries. Since this macro is a
 * constructor, the function has no access to dpdk shared memory, so the
 * registered tailq cannot be used before the call to rte_eal_init(), which
 * calls rte_eal_tailqs_init().
*
* @param t
* The tailq element which contains the name of the tailq you want to
* create (/retrieve when in secondary process).
* @return
* 0 on success or -1 in case of an error.
*/
int rte_eal_tailq_register(struct rte_tailq_elem *t);
#define EAL_REGISTER_TAILQ(t) \
RTE_INIT(tailqinitfn_ ##t) \
{ \
if (rte_eal_tailq_register(&t) < 0) \
rte_panic("Cannot initialize tailq: %s\n", t.name); \
}
/* This macro permits both remove and free var within the loop safely.*/
#ifndef TAILQ_FOREACH_SAFE
#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = TAILQ_FIRST((head)); \
(var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
(var) = (tvar))
#endif
#ifdef __cplusplus
}
#endif
#endif /* _RTE_TAILQ_H_ */
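The registration machinery above is used by the wrapped libraries roughly as in the sketch below, which mirrors the pattern in DPDK's rte_lpm.c; lpm_find_demo() is a hypothetical helper modelled on rte_lpm_find_existing(), and the includes assume a userland-style build.

#include <string.h>
#include "rte_lpm.h"
#include "rte_tailq.h"

/* Typed list head plus the per-library tailq descriptor. */
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

static struct rte_lpm *
lpm_find_demo(const char *name)
{
	struct rte_lpm_list *list;
	struct rte_tailq_entry *te;
	struct rte_lpm *lpm;

	list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
	TAILQ_FOREACH(te, list, next) {
		/* te->data points at an object registered on this tailq. */
		lpm = te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			return (lpm);
	}
	return (NULL);
}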

View File

@ -104,6 +104,8 @@ SUBDIR= \
dc \
dcons \
dcons_crom \
${_dpdk_lpm4} \
${_dpdk_lpm6} \
${_dpms} \
dummynet \
${_efirt} \
@ -472,6 +474,14 @@ _ipfw_nptv6= ipfw_nptv6
_ipfilter= ipfilter
.endif
.if ${MK_INET_SUPPORT} != "no" && ${KERN_OPTS:MFIB_ALGO}
_dpdk_lpm4= dpdk_lpm4
.endif
.if ${MK_INET6_SUPPORT} != "no" && ${KERN_OPTS:MFIB_ALGO}
_dpdk_lpm6= dpdk_lpm6
.endif
.if ${MK_ISCSI} != "no" || defined(ALL_MODULES)
SUBDIR+= cfiscsi
SUBDIR+= iscsi

View File

@ -0,0 +1,12 @@
# $FreeBSD$
SYSDIR?=${SRCTOP}/sys
.include "${SYSDIR}/conf/kern.opts.mk"
.PATH: ${SYSDIR}/contrib/dpdk_rte_lpm
KMOD= dpdk_lpm4
SRCS= opt_inet.h
SRCS.INET=dpdk_lpm.c rte_lpm.c
.include <bsd.kmod.mk>

View File

@ -0,0 +1,12 @@
# $FreeBSD$
SYSDIR?=${SRCTOP}/sys
.include "${SYSDIR}/conf/kern.opts.mk"
.PATH: ${SYSDIR}/contrib/dpdk_rte_lpm
KMOD= dpdk_lpm6
SRCS= opt_inet6.h
SRCS.INET6=dpdk_lpm6.c rte_lpm6.c
.include <bsd.kmod.mk>