examples/server_node_efd: renamed from flow_distributor
To avoid confusion with the distributor sample app, this commit renames the
flow_distributor sample app to server_node_efd, since it shows how to use the
EFD library and is based on a server/nodes model.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
parent a085723ac8 · commit ed2a80fdf6
@@ -551,8 +551,8 @@ M: Pablo de Lara Guarch <pablo.de.lara.guarch@intel.com>
 F: lib/librte_efd/
 F: doc/guides/prog_guide/efd_lib.rst
 F: app/test/test_efd*
-F: examples/flow_distributor/
-F: doc/guides/sample_app_ug/flow_distributor.rst
+F: examples/server_node_efd/
+F: doc/guides/sample_app_ug/server_node_efd.rst

 Hashes
 M: Bruce Richardson <bruce.richardson@intel.com>
@@ -52,10 +52,6 @@
 @example load_balancer/init.c
 @example load_balancer/main.c
 @example load_balancer/runtime.c
-@example flow_distributor/distributor/args.c
-@example flow_distributor/distributor/init.c
-@example flow_distributor/distributor/main.c
-@example flow_distributor/node/node.c
 @example multi_process/client_server_mp/mp_client/client.c
 @example multi_process/client_server_mp/mp_server/args.c
 @example multi_process/client_server_mp/mp_server/init.c
@@ -94,6 +90,10 @@
 @example quota_watermark/qw/init.c
 @example quota_watermark/qw/main.c
 @example rxtx_callbacks/main.c
+@example server_node_efd/server/args.c
+@example server_node_efd/server/init.c
+@example server_node_efd/server/main.c
+@example server_node_efd/node/node.c
 @example skeleton/basicfwd.c
 @example tep_termination/main.c
 @example tep_termination/vxlan.c
(binary image renamed; 74 KiB, dimensions unchanged)
@@ -57,7 +57,7 @@ Sample Applications User Guides
     l3_forward_virtual
     link_status_intr
     load_balancer
-    flow_distributor
+    server_node_efd
     multi_process
     qos_metering
     qos_scheduler
@@ -133,6 +133,8 @@ Sample Applications User Guides

 :numref:`figure_ptpclient_highlevel` :ref:`figure_ptpclient_highlevel`

+:numref:`figure_efd_sample_app_overview` :ref:`figure_efd_sample_app_overview`
+
 **Tables**

 :numref:`table_qos_metering_1` :ref:`table_qos_metering_1`
@@ -28,8 +28,8 @@
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-Flow Distributor Sample Application
-===================================
+Server-Node EFD Sample Application
+==================================

 This sample application demonstrates the use of EFD library as a flow-level
 load balancer, for more information about the EFD Library please refer to the
@@ -48,12 +48,12 @@ presented in the following figure.

 .. _figure_efd_sample_app_overview:

-.. figure:: img/flow_distributor.*
+.. figure:: img/server_node_efd.*

    Using EFD as a Flow-Level Load Balancer

 As shown in :numref:`figure_efd_sample_app_overview`,
-the sample application consists of a front-end node (distributor)
+the sample application consists of a front-end node (server)
 using the EFD library to create a load-balancing table for flows,
 for each flow a target backend worker node is specified. The EFD table does not
 store the flow key (unlike a regular hash table), and hence, it can
@@ -61,12 +61,12 @@ individually load-balance millions of flows (number of targets * maximum number
 of flows fit in a flow table per target) while still fitting in CPU cache.

 It should be noted that although they are referred to as nodes, the frontend
-distributor and worker nodes are processes running on the same platform.
+server and worker nodes are processes running on the same platform.

-Front-end Distributor
-~~~~~~~~~~~~~~~~~~~~~
+Front-end Server
+~~~~~~~~~~~~~~~~

-Upon initializing, the frontend distributor node (process) creates a flow
+Upon initializing, the frontend server node (process) creates a flow
 distributor table (based on the EFD library) which is populated with flow
 information and its intended target node.

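As background for the renamed sections above: a minimal sketch of the server-side
flow table setup, assuming the rte_efd_* API of this DPDK release; the flow count
and the round-robin node assignment are illustrative rather than the sample's
exact values.

.. code-block:: c

    /* Sketch: create an EFD table sized for the expected flows, then map
     * each flow key (IPv4 destination address) to a target node id. */
    #include <stdint.h>
    #include <stdlib.h>
    #include <rte_efd.h>
    #include <rte_lcore.h>
    #include <rte_debug.h>

    #define NUM_FLOWS (1 << 20)   /* illustrative: ~1M flows, the default */
    #define NUM_NODES 2           /* illustrative node count */

    static struct rte_efd_table *efd_table;

    static void
    setup_efd(void)
    {
        uint8_t socket_id = rte_socket_id();
        uint32_t i;

        efd_table = rte_efd_create("flow table", NUM_FLOWS * 2,
                sizeof(uint32_t), 1 << socket_id, socket_id);
        if (efd_table == NULL)
            rte_exit(EXIT_FAILURE, "Problem creating the EFD table\n");

        for (i = 0; i < NUM_FLOWS; i++) {
            uint32_t ip_dst = i;                  /* flow key */
            uint8_t node_id = i % NUM_NODES;      /* target value */

            if (rte_efd_update(efd_table, socket_id,
                    (void *)&ip_dst, (efd_value_t)node_id) < 0)
                rte_exit(EXIT_FAILURE, "Unable to add entry %u\n", i);
        }
    }

Note that the value stored per key is only the small node id; since EFD does not
store the key itself, the whole table can stay cache-resident.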
@@ -81,7 +81,7 @@ the IP destination addresses as follows:

 then the pair of <key,target> is inserted into the flow distribution table.

-The main loop of the the distributor node receives a burst of packets, then for
+The main loop of the server process receives a burst of packets, then for
 each packet, a flow key (IP destination address) is extracted. The flow
 distributor table is looked up and the target node id is returned. Packets are
 then enqueued to the specified target node id.
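A condensed sketch of that receive/lookup/enqueue loop, assuming rte_efd_lookup()
plus standard mbuf and ring calls; node_rings[] is a hypothetical array of the
per-node shared rings, and TX buffering and statistics are omitted.

.. code-block:: c

    #include <rte_byteorder.h>
    #include <rte_efd.h>
    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>
    #include <rte_ring.h>

    static void
    dispatch_burst(struct rte_efd_table *table, unsigned int socket_id,
            struct rte_mbuf **pkts, uint16_t n, struct rte_ring **node_rings)
    {
        uint16_t i;

        for (i = 0; i < n; i++) {
            /* flow key: the packet's IPv4 destination address */
            struct ipv4_hdr *ip = rte_pktmbuf_mtod_offset(pkts[i],
                    struct ipv4_hdr *, sizeof(struct ether_hdr));
            uint32_t ip_dst = rte_be_to_cpu_32(ip->dst_addr);

            /* the stored value is the target node id for this flow */
            efd_value_t node_id = rte_efd_lookup(table, socket_id,
                    (void *)&ip_dst);

            /* pass the mbuf pointer through the node's shared ring */
            if (rte_ring_enqueue(node_rings[node_id], pkts[i]) < 0)
                rte_pktmbuf_free(pkts[i]);
        }
    }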
@@ -121,7 +121,7 @@ The sequence of steps used to build the application is:

 .. code-block:: console

-    cd ${RTE_SDK}/examples/flow_distributor/
+    cd ${RTE_SDK}/examples/server_node_efd/
     make

 For more details on how to build the DPDK libraries and sample
@@ -132,12 +132,12 @@ The sequence of steps used to build the application is:
 Running the Application
 -----------------------

-The application has two binaries to be run: the front-end distributor
+The application has two binaries to be run: the front-end server
 and the back-end node.

-The frontend distributor (distributor) has the following command line options::
+The frontend server (server) has the following command line options::

-    ./distributor [EAL options] -- -p PORTMASK -n NUM_NODES -f NUM_FLOWS
+    ./server [EAL options] -- -p PORTMASK -n NUM_NODES -f NUM_FLOWS

 Where,

@@ -154,7 +154,7 @@ Where,
 * ``-n NODE_ID:`` Node ID, which cannot be equal or higher than NUM_MODES


-First, the distributor app must be launched, with the number of nodes that will be run.
+First, the server app must be launched, with the number of nodes that will be run.
 Once it has been started, the node instances can be run, with different NODE_ID.
 These instances have to be run as secondary processes, with ``--proc-type=secondary``
 in the EAL options, which will attach to the primary process memory, and therefore,
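A hypothetical sketch of the corresponding check on the node side: verify the
process really was started as a DPDK secondary process before looking up any
server-owned objects.

.. code-block:: c

    #include <stdlib.h>
    #include <rte_eal.h>
    #include <rte_debug.h>

    int
    main(int argc, char **argv)
    {
        if (rte_eal_init(argc, argv) < 0)
            rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");

        /* rings/mempools created by the server (primary) process are
         * only visible if we attached as a secondary process */
        if (rte_eal_process_type() != RTE_PROC_SECONDARY)
            rte_exit(EXIT_FAILURE, "Node must be run with "
                    "--proc-type=secondary\n");

        /* ... rte_ring_lookup()/rte_mempool_lookup() may now succeed ... */
        return 0;
    }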
@@ -176,7 +176,7 @@ Explanation

 As described in previous sections, there are two processes in this example.

-The first process, the front-end distributor, creates and populates the EFD table,
+The first process, the front-end server, creates and populates the EFD table,
 which is used to distribute packets to nodes, which the number of flows
 specified in the command line (1 million, by default).

@@ -184,7 +184,7 @@ specified in the command line (1 million, by default).
 .. code-block:: c

     static void
-    create_flow_distributor_table(void)
+    create_efd_table(void)
     {
         uint8_t socket_id = rte_socket_id();

@@ -197,7 +197,7 @@ specified in the command line (1 million, by default).
     }

     static void
-    populate_flow_distributor_table(void)
+    populate_efd_table(void)
     {
         unsigned int i;
         int32_t ret;
@@ -214,7 +214,7 @@ specified in the command line (1 million, by default).
                 (void *)&ip_dst, (efd_value_t)node_id);
         if (ret < 0)
             rte_exit(EXIT_FAILURE, "Unable to add entry %u in "
-                    "flow distributor table\n", i);
+                    "EFD table\n", i);
     }

     printf("EFD table: Adding 0x%x keys\n", num_flows);
@@ -269,7 +269,7 @@ which tells the node where the packet has to be distributed.
     }

 The burst of packets received is enqueued in temporary buffers (per node),
-and enqueued in the shared ring between the distributor and the node.
+and enqueued in the shared ring between the server and the node.
 After this, a new burst of packets is received and this process is
 repeated infinitely.

@@ -297,9 +297,9 @@ repeated infinitely.
     }

 The second process, the back-end node, receives the packets from the shared
-ring with the distributor and send them out, if they belong to the node.
+ring with the server and send them out, if they belong to the node.

-At initialization, it attaches to the distributor process memory, to have
+At initialization, it attaches to the server process memory, to have
 access to the shared ring, parameters and statistics.

 .. code-block:: c
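The shared statistics block can be attached in the same way as the ring and
mempool lookups shown in the next hunk; a minimal sketch, assuming the
MZ_SHARED_INFO name defined in the sample's shared header.

.. code-block:: c

    #include <stdlib.h>
    #include <rte_debug.h>
    #include <rte_memzone.h>

    #define MZ_SHARED_INFO "MProc_shared_info"   /* from shared.h */

    struct shared_info *info;   /* declared in the sample's shared header */

    static void
    attach_shared_info(void)
    {
        const struct rte_memzone *mz = rte_memzone_lookup(MZ_SHARED_INFO);

        if (mz == NULL)
            rte_exit(EXIT_FAILURE, "Cannot get info structure - "
                    "is server process running?\n");
        info = mz->addr;   /* memzone was created by the server process */
    }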
@@ -307,7 +307,7 @@ access to the shared ring, parameters and statistics.
     rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));
     if (rx_ring == NULL)
         rte_exit(EXIT_FAILURE, "Cannot get RX ring - "
-                "is distributor process running?\n");
+                "is server process running?\n");

     mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
     if (mp == NULL)
@@ -381,7 +381,7 @@ by the node is created and populated.
     }

 After initialization, packets are dequeued from the shared ring
-(from the distributor) and, like in the distributor process,
+(from the server) and, like in the server process,
 the IPv4 address from the packets is used as a key to look up in the hash table.
 If there is a hit, packet is stored in a buffer, to be eventually transmitted
 in one of the enabled ports. If key is not there, packet is dropped, since the
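A compressed sketch of that node main loop, assuming the ring dequeue API of
this release (no extra out-parameter) and a hypothetical tx_one() helper
standing in for the sample's TX buffering.

.. code-block:: c

    #include <rte_ether.h>
    #include <rte_hash.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>
    #include <rte_ring.h>

    #define PKT_READ_SIZE 32

    static void
    tx_one(struct rte_mbuf *pkt)   /* placeholder for buffered TX */
    {
        rte_pktmbuf_free(pkt);     /* real code buffers and transmits */
    }

    static void
    node_loop(struct rte_ring *rx_ring, struct rte_hash *h)
    {
        struct rte_mbuf *pkts[PKT_READ_SIZE];

        for (;;) {
            unsigned int n = rte_ring_dequeue_burst(rx_ring,
                    (void **)pkts, PKT_READ_SIZE);
            unsigned int i;

            for (i = 0; i < n; i++) {
                struct ipv4_hdr *ip = rte_pktmbuf_mtod_offset(pkts[i],
                        struct ipv4_hdr *, sizeof(struct ether_hdr));

                /* negative return: flow not handled by this node */
                if (rte_hash_lookup(h, &ip->dst_addr) < 0)
                    rte_pktmbuf_free(pkts[i]);
                else
                    tx_one(pkts[i]);
            }
        }
    }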
@@ -421,7 +421,7 @@ flow is not handled by the node.
     }

 Finally, note that both processes updates statistics, such as transmitted, received
-and dropped packets, which are shown and refreshed by the distributor app.
+and dropped packets, which are shown and refreshed by the server app.

 .. code-block:: c

@@ -470,7 +470,7 @@ and dropped packets, which are shown and refreshed by the server app.
             port_tx[i]);
     }

-    printf("\nFLOW DISTRIBUTOR\n");
+    printf("\nSERVER\n");
     printf("-----\n");
     printf("distributed: %9"PRIu64", drop: %9"PRIu64"\n",
             flow_dist_stats.distributed, flow_dist_stats.drop);
@@ -45,7 +45,7 @@ DIRS-y += dpdk_qat
 endif
 DIRS-y += ethtool
 DIRS-y += exception_path
-DIRS-$(CONFIG_RTE_LIBRTE_EFD) += flow_distributor
+DIRS-$(CONFIG_RTE_LIBRTE_EFD) += server_node_efd
 DIRS-y += helloworld
 DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += ip_pipeline
 ifeq ($(CONFIG_RTE_LIBRTE_LPM),y)
@@ -38,7 +38,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc

 include $(RTE_SDK)/mk/rte.vars.mk

-DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += distributor
+DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += server
 DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += node

 include $(RTE_SDK)/mk/rte.extsubdir.mk
@@ -84,7 +84,7 @@ static uint8_t output_ports[RTE_MAX_ETHPORTS];
 /* buffers up a set of packet that are ready to send */
 struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];

-/* shared data from distributor. We update statistics here */
+/* shared data from server. We update statistics here */
 static struct tx_stats *tx_stats;

 static struct filter_stats *filter_stats;
@@ -357,7 +357,7 @@ main(int argc, char *argv[])
        rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));
        if (rx_ring == NULL)
                rte_exit(EXIT_FAILURE, "Cannot get RX ring - "
-                       "is distributor process running?\n");
+                       "is server process running?\n");

        mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
        if (mp == NULL)
@@ -44,7 +44,7 @@ please change the definition of the RTE_TARGET environment variable)
 endif

 # binary name
-APP = distributor
+APP = server

 # all source are stored in SRCS-y
 SRCS-y := main.c init.c args.c
@@ -85,10 +85,10 @@ struct rte_mempool *pktmbuf_pool;
 /* array of info/queues for nodes */
 struct node *nodes;

-/* Flow distributor table */
+/* EFD table */
 struct rte_efd_table *efd_table;

-/* Shared info between distributor and nodes */
+/* Shared info between server and nodes */
 struct shared_info *info;

 /**
@@ -176,7 +176,7 @@ init_port(uint8_t port_num)

 /**
  * Set up the DPDK rings which will be used to pass packets, via
- * pointers, between the multi-process distributor and node processes.
+ * pointers, between the multi-process server and node processes.
  * Each node needs one RX queue.
  */
 static int
@@ -208,11 +208,11 @@ init_shm_rings(void)
 }

 /*
- * Create flow distributor table which will contain all the flows
+ * Create EFD table which will contain all the flows
  * that will be distributed among the nodes
  */
 static void
-create_flow_distributor_table(void)
+create_efd_table(void)
 {
        uint8_t socket_id = rte_socket_id();

@@ -225,7 +225,7 @@ create_flow_distributor_table(void)
 }

 static void
-populate_flow_distributor_table(void)
+populate_efd_table(void)
 {
        unsigned int i;
        int32_t ret;
@@ -242,7 +242,7 @@ populate_flow_distributor_table(void)
                        (void *)&ip_dst, (efd_value_t)node_id);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Unable to add entry %u in "
-                       "flow distributor table\n", i);
+                       "EFD table\n", i);
 }

 printf("EFD table: Adding 0x%x keys\n", num_flows);
@@ -304,7 +304,7 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
 }

 /**
- * Main init function for the multi-process distributor app,
+ * Main init function for the multi-process server app,
  * calls subfunctions to do each stage of the initialisation.
  */
 int
@@ -356,11 +356,11 @@ init(int argc, char *argv[])
        /* initialise the node queues/rings for inter-eu comms */
        init_shm_rings();

-       /* Create the flow distributor table */
-       create_flow_distributor_table();
+       /* Create the EFD table */
+       create_efd_table();

-       /* Populate the flow distributor table */
-       populate_flow_distributor_table();
+       /* Populate the EFD table */
+       populate_efd_table();

        /* Share the total number of nodes */
        info->num_nodes = num_nodes;
@@ -61,7 +61,7 @@ extern struct rte_efd_table *efd_table;
 extern struct node *nodes;

 /*
- * shared information between distributor and nodes: number of clients,
+ * shared information between server and nodes: number of nodes,
  * port numbers, rx and tx stats etc.
  */
 extern struct shared_info *info;
@@ -88,7 +88,7 @@ struct node_rx_buf {
        uint16_t count;
 };

-struct flow_distributor_stats {
+struct efd_stats {
        uint64_t distributed;
        uint64_t drop;
 } flow_dist_stats;
@@ -120,7 +120,7 @@ get_printable_mac_addr(uint8_t port)
  * This function displays the recorded statistics for each port
  * and for each node. It uses ANSI terminal codes to clear
  * screen when called. It is called from a single non-master
- * thread in the distributor process, when the process is run with more
+ * thread in the server process, when the process is run with more
  * than one lcore enabled.
  */
 static void
@@ -168,7 +168,7 @@ do_stats_display(void)
                        port_tx[i]);
        }

-       printf("\nFLOW DISTRIBUTOR\n");
+       printf("\nSERVER\n");
        printf("-----\n");
        printf("distributed: %9"PRIu64", drop: %9"PRIu64"\n",
                        flow_dist_stats.distributed, flow_dist_stats.drop);
@@ -39,12 +39,12 @@

 #define MAX_NODES 16
 /*
- * Shared port info, including statistics information for display by distributor.
+ * Shared port info, including statistics information for display by server.
  * Structure will be put in a memzone.
  * - All port id values share one cache line as this data will be read-only
  * during operation.
  * - All rx statistic values share cache lines, as this data is written only
- * by the distributor process. (rare reads by stats display)
+ * by the server process. (rare reads by stats display)
  * - The tx statistics have values for all ports per cache line, but the stats
  * themselves are written by the nodes, so we have a distinct set, on different
  * cache lines for each node to use.
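A sketch of the cache-line discipline that comment describes, using DPDK's
__rte_cache_aligned attribute; the field names and port count are illustrative.

.. code-block:: c

    #include <stdint.h>
    #include <rte_memory.h>   /* __rte_cache_aligned */

    #define MAX_NODES 16
    #define MAX_PORTS 4       /* illustrative */

    /* read-only after init: all port ids can share one cache line */
    struct port_info {
        uint8_t num_ports;
        uint8_t id[MAX_PORTS];
    };

    /* written only by the server process */
    struct rx_stats {
        uint64_t rx[MAX_PORTS];
    } __rte_cache_aligned;

    /* one writer (a node) per element: give each node its own cache
     * line so node updates never falsely share a line */
    struct tx_stats {
        uint64_t tx[MAX_PORTS];
        uint64_t tx_drop[MAX_PORTS];
    } __rte_cache_aligned;

    struct shared_info_sketch {
        struct port_info ports;
        struct rx_stats rx_stats;
        struct tx_stats tx_stats[MAX_NODES];
    };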
@@ -73,7 +73,7 @@ struct shared_info {
        struct filter_stats filter_stats[MAX_NODES];
 };

-/* define common names for structures shared between distributor and node */
+/* define common names for structures shared between server and node */
 #define MP_NODE_RXQ_NAME "MProc_Node_%u_RX"
 #define PKTMBUF_POOL_NAME "MProc_pktmbuf_pool"
 #define MZ_SHARED_INFO "MProc_shared_info"