kni: fix multi-process support

Signed-off-by: Intel
Intel 2013-06-03 00:00:00 +00:00 committed by Thomas Monjalon
parent d27a7e4e0b
commit f2e7592c47
13 changed files with 963 additions and 124 deletions

app/test/Makefile

@ -88,6 +88,7 @@ SRCS-$(CONFIG_RTE_APP_TEST) += test_cmdline_lib.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_red.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_sched.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_meter.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_kni.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_pmac_pm.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_pmac_acl.c
SRCS-$(CONFIG_RTE_APP_TEST) += test_power.c

app/test/autotest_data.py

@ -336,6 +336,19 @@ def all_sockets(num):
# tests that should not be run when any other tests are running
non_parallel_test_group_list = [
{
"Prefix" : "kni",
"Memory" : "512",
"Tests" :
[
{
"Name" : "KNI autotest",
"Command" : "kni_autotest",
"Func" : default_autotest,
"Report" : None,
},
]
},
{
"Prefix": "mempool_perf",
"Memory" : all_sockets(256),

app/test/commands.c

@ -173,6 +173,8 @@ static void cmd_autotest_parsed(void *parsed_result,
ret |= test_sched();
if (all || !strcmp(res->autotest, "meter_autotest"))
ret |= test_meter();
if (all || !strcmp(res->autotest, "kni_autotest"))
ret |= test_kni();
if (all || !strcmp(res->autotest, "pm_autotest"))
ret |= test_pmac_pm();
if (all || !strcmp(res->autotest, "acl_autotest"))
@ -208,8 +210,8 @@ cmdline_parse_token_string_t cmd_autotest_autotest =
"cmdline_autotest#func_reentrancy_autotest#"
"mempool_perf_autotest#hash_perf_autotest#"
"red_autotest#meter_autotest#sched_autotest#"
"memcpy_perf_autotest#pm_autotest#"
"acl_autotest#power_autotest#"
"memcpy_perf_autotest#kni_autotest#"
"pm_autotest#acl_autotest#power_autotest#"
"all_autotests");
cmdline_parse_inst_t cmd_autotest = {

app/test/test.h

@ -89,6 +89,7 @@ int test_sched(void);
int test_meter(void);
int test_pmac_pm(void);
int test_pmac_acl(void);
int test_kni(void);
int test_power(void);
int test_pci_run;

app/test/test_kni.c (new file, 537 lines)

@ -0,0 +1,537 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <sys/wait.h>
#include <cmdline_parse.h>
#include "test.h"
#ifdef RTE_LIBRTE_KNI
#include <rte_mempool.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_kni.h>
#define NB_MBUF (8192 * 16)
#define MAX_PACKET_SZ 2048
#define MBUF_SZ \
(MAX_PACKET_SZ + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define PKT_BURST_SZ 32
#define MEMPOOL_CACHE_SZ PKT_BURST_SZ
#define SOCKET 0
#define NB_RXD 128
#define NB_TXD 512
#define KNI_TIMEOUT_MS 5000 /* ms */
#define IFCONFIG "/sbin/ifconfig"
/* The threshold number of mbufs to be transmitted or received. */
#define KNI_NUM_MBUF_THRESHOLD 100
static int kni_pkt_mtu = 0;
struct test_kni_stats {
volatile uint64_t ingress;
volatile uint64_t egress;
};
static const struct rte_eth_rxconf rx_conf = {
.rx_thresh = {
.pthresh = 8,
.hthresh = 8,
.wthresh = 4,
},
.rx_free_thresh = 0,
};
static const struct rte_eth_txconf tx_conf = {
.tx_thresh = {
.pthresh = 36,
.hthresh = 0,
.wthresh = 0,
},
.tx_free_thresh = 0,
.tx_rs_thresh = 0,
};
static const struct rte_eth_conf port_conf = {
.rxmode = {
.header_split = 0,
.hw_ip_checksum = 0,
.hw_vlan_filter = 0,
.jumbo_frame = 0,
.hw_strip_crc = 0,
},
.txmode = {
.mq_mode = ETH_DCB_NONE,
},
};
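/*
* Default kni ops with no request callbacks; the multi-process test
* registers real handlers later via rte_kni_register_handlers().
*/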
static struct rte_kni_ops kni_ops = {
.change_mtu = NULL,
.config_network_if = NULL,
};
static unsigned lcore_master, lcore_ingress, lcore_egress;
static struct rte_kni *test_kni_ctx;
static struct test_kni_stats stats;
static volatile uint32_t test_kni_processing_flag;
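/*
* Look up the mempool first and create it only when not found, so that
* repeated test runs reuse the same pool instead of failing on a
* duplicate name.
*/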
static struct rte_mempool *
test_kni_create_mempool(void)
{
struct rte_mempool * mp;
mp = rte_mempool_lookup("kni_mempool");
if (!mp)
mp = rte_mempool_create("kni_mempool",
NB_MBUF,
MBUF_SZ,
MEMPOOL_CACHE_SZ,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init,
NULL,
rte_pktmbuf_init,
NULL,
SOCKET,
0);
return mp;
}
static struct rte_mempool *
test_kni_lookup_mempool(void)
{
return rte_mempool_lookup("kni_mempool");
}
/* Callback for request of changing MTU */
static int
kni_change_mtu(uint8_t port_id, unsigned new_mtu)
{
printf("Change MTU of port %d to %u\n", port_id, new_mtu);
kni_pkt_mtu = new_mtu;
printf("Change MTU of port %d to %i successfully.\n",
port_id, kni_pkt_mtu);
return 0;
}
/**
* This loop fully tests the basic functions of KNI, e.g. transmitting
* to and receiving from kernel space, and handling kernel requests.
*
* This is the loop that transmits/receives mbufs to/from the kernel
* interface, with the support of the KNI kernel module. The ingress lcore
* allocates mbufs and transmits them to kernel space, while the egress
* lcore receives the mbufs from kernel space and frees them.
* On the master lcore, several commands are run to check the handling of
* kernel requests, and it finally sets the flag to stop the KNI
* transmitting/receiving to/from kernel space.
*
* Note: To support this test, the KNI kernel module needs to be loaded
* in one of its loopback modes.
*/
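/*
* For example, assuming a default module build, one of:
*     insmod rte_kni.ko lo_mode=lo_mode_fifo
*     insmod rte_kni.ko lo_mode=lo_mode_fifo_skb
*/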
static int
test_kni_loop(__rte_unused void *arg)
{
int ret = 0;
unsigned nb_rx, nb_tx, num, i;
const unsigned lcore_id = rte_lcore_id();
struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
if (lcore_id == lcore_master) {
rte_delay_ms(KNI_TIMEOUT_MS);
/* tests of handling kernel request */
if (system(IFCONFIG " vEth0 up") == -1)
ret = -1;
if (system(IFCONFIG " vEth0 mtu 1400") == -1)
ret = -1;
if (system(IFCONFIG " vEth0 down") == -1)
ret = -1;
rte_delay_ms(KNI_TIMEOUT_MS);
test_kni_processing_flag = 1;
} else if (lcore_id == lcore_ingress) {
struct rte_mempool *mp = test_kni_lookup_mempool();
if (mp == NULL)
return -1;
while (1) {
if (test_kni_processing_flag)
break;
for (nb_rx = 0; nb_rx < PKT_BURST_SZ; nb_rx++) {
pkts_burst[nb_rx] = rte_pktmbuf_alloc(mp);
if (!pkts_burst[nb_rx])
break;
}
num = rte_kni_tx_burst(test_kni_ctx, pkts_burst,
nb_rx);
stats.ingress += num;
rte_kni_handle_request(test_kni_ctx);
if (num < nb_rx) {
for (i = num; i < nb_rx; i++) {
rte_pktmbuf_free(pkts_burst[i]);
}
}
}
} else if (lcore_id == lcore_egress) {
while (1) {
if (test_kni_processing_flag)
break;
num = rte_kni_rx_burst(test_kni_ctx, pkts_burst,
PKT_BURST_SZ);
stats.egress += num;
for (nb_tx = 0; nb_tx < num; nb_tx++)
rte_pktmbuf_free(pkts_burst[nb_tx]);
}
}
return ret;
}
static int
test_kni_allocate_lcores(void)
{
unsigned i, count = 0;
lcore_master = rte_get_master_lcore();
printf("master lcore: %u\n", lcore_master);
for (i = 0; i < RTE_MAX_LCORE; i++) {
if (count >= 2)
break;
if (rte_lcore_is_enabled(i) && i != lcore_master) {
count++;
if (count == 1)
lcore_ingress = i;
else if (count == 2)
lcore_egress = i;
}
}
printf("count: %u\n", count);
return (count == 2 ? 0 : -1);
}
static int
test_kni_processing(uint8_t pid, struct rte_mempool *mp)
{
int ret = 0;
unsigned i;
struct rte_kni *kni;
int p_id, p_ret;
int status;
if (!mp)
return -1;
/* basic test of kni processing */
kni = rte_kni_create(pid, MAX_PACKET_SZ, mp, &kni_ops);
if (!kni) {
printf("fail to create kni\n");
return -1;
}
if (rte_kni_get_port_id(kni) != pid) {
printf("fail to get port id\n");
ret = -1;
goto fail_kni;
}
test_kni_ctx = kni;
test_kni_processing_flag = 0;
stats.ingress = 0;
stats.egress = 0;
/* create a child process to test the multi-process support APIs */
p_id = fork();
if (p_id == 0) {
struct rte_kni *kni_test;
#define TEST_MTU_SIZE 1450
kni_test = rte_kni_info_get(RTE_MAX_ETHPORTS);
if (kni_test) {
printf("unexpectedly gets kni successfully with an invalid "
"port id\n");
exit(-1);
}
kni_test = rte_kni_info_get(pid);
if (NULL == kni_test) {
printf("Failed to get KNI info of the port %d\n",pid);
exit(-1);
}
struct rte_kni_ops kni_ops_test = {
.change_mtu = kni_change_mtu,
.config_network_if = NULL,
};
/* test of registering kni with NULL ops */
if (rte_kni_register_handlers(kni_test, NULL) == 0) {
printf("unexpectedly registered kni handlers "
"successfully with NULL ops\n");
exit(-1);
}
if (rte_kni_register_handlers(kni_test, &kni_ops_test) < 0) {
printf("Failed to register KNI request handlers "
"of port %d\n", pid);
exit(-1);
}
if (system(IFCONFIG " vEth0 mtu 1450") == -1)
exit(-1);
rte_kni_handle_request(kni_test);
if (kni_pkt_mtu != TEST_MTU_SIZE) {
printf("Failed to change kni MTU\n");
exit(-1);
}
/* test of unregistering kni request */
kni_pkt_mtu = 0;
if (rte_kni_unregister_handlers(kni_test) < 0) {
printf("Failed to unregister kni request handlers\n");
exit(-1);
}
if (system(IFCONFIG " vEth0 mtu 1450") == -1)
exit(-1);
rte_kni_handle_request(kni_test);
if (kni_pkt_mtu != 0) {
printf("Failed to test kni unregister handlers\n");
exit(-1);
}
exit(0);
} else if (p_id < 0) {
printf("Failed to fork a process\n");
return -1;
} else {
p_ret = wait(&status);
if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
printf("test of multi-process API passed.\n");
else {
printf("KNI test: the child process %d exited "
"abnormally.\n", p_ret);
return -1;
}
}
rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MASTER);
RTE_LCORE_FOREACH_SLAVE(i) {
if (rte_eal_wait_lcore(i) < 0) {
ret = -1;
goto fail_kni;
}
}
/**
* Check that the number of mbufs transmitted to kernel space and the
* number received from kernel space each reach the expected threshold
*/
if (stats.ingress < KNI_NUM_MBUF_THRESHOLD ||
stats.egress < KNI_NUM_MBUF_THRESHOLD) {
printf("The ingress/egress number should not be "
"less than %u\n", (unsigned)KNI_NUM_MBUF_THRESHOLD);
ret = -1;
goto fail_kni;
}
/* test of creating kni on a port which has been used for a kni */
if (rte_kni_create(pid, MAX_PACKET_SZ, mp, &kni_ops) != NULL) {
printf("should not create a kni successfully for a port which"
"has been used for a kni\n");
ret = -1;
goto fail_kni;
}
if (rte_kni_release(kni) < 0) {
printf("fail to release kni\n");
return -1;
}
test_kni_ctx = NULL;
/* test of releasing a released kni device */
if (rte_kni_release(kni) == 0) {
printf("should not release a released kni device\n");
return -1;
}
/* test of reusing memzone */
kni = rte_kni_create(pid, MAX_PACKET_SZ, mp, &kni_ops);
if (!kni) {
printf("fail to create kni\n");
return -1;
}
/* Release the kni for following testing */
if (rte_kni_release(kni) < 0) {
printf("fail to release kni\n");
return -1;
}
return ret;
fail_kni:
if (rte_kni_release(kni) < 0) {
printf("fail to release kni\n");
ret = -1;
}
return ret;
}
int
test_kni(void)
{
int ret = -1;
uint8_t nb_ports, pid;
struct rte_kni *kni;
struct rte_mempool * mp;
if (test_kni_allocate_lcores() < 0) {
printf("No enough lcores for kni processing\n");
return -1;
}
mp = test_kni_create_mempool();
if (!mp) {
printf("fail to create mempool for kni\n");
return -1;
}
ret = rte_pmd_init_all();
if (ret < 0) {
printf("fail to initialize PMD\n");
return -1;
}
ret = rte_eal_pci_probe();
if (ret < 0) {
printf("fail to probe PCI devices\n");
return -1;
}
nb_ports = rte_eth_dev_count();
if (nb_ports == 0) {
printf("no supported nic port found\n");
return -1;
}
/* configuring port 0 for the test is enough */
pid = 0;
ret = rte_eth_dev_configure(pid, 1, 1, &port_conf);
if (ret < 0) {
printf("fail to configure port %d\n", pid);
return -1;
}
ret = rte_eth_rx_queue_setup(pid, 0, NB_RXD, SOCKET, &rx_conf, mp);
if (ret < 0) {
printf("fail to setup rx queue for port %d\n", pid);
return -1;
}
ret = rte_eth_tx_queue_setup(pid, 0, NB_TXD, SOCKET, &tx_conf);
if (ret < 0) {
printf("fail to setup tx queue for port %d\n", pid);
return -1;
}
ret = rte_eth_dev_start(pid);
if (ret < 0) {
printf("fail to start port %d\n", pid);
return -1;
}
rte_eth_promiscuous_enable(pid);
/* basic test of kni processing */
ret = test_kni_processing(pid, mp);
if (ret < 0)
goto fail;
/* test of creating kni with a port id exceeding the maximum */
kni = rte_kni_create(RTE_MAX_ETHPORTS, MAX_PACKET_SZ, mp, &kni_ops);
if (kni) {
printf("unexpectedly creates kni successfully with an invalid "
"port id\n");
goto fail;
}
/* test of creating kni with NULL mempool pointer */
kni = rte_kni_create(pid, MAX_PACKET_SZ, NULL, &kni_ops);
if (kni) {
printf("unexpectedly creates kni successfully with NULL "
"mempool pointer\n");
goto fail;
}
/* test of creating kni with NULL ops */
kni = rte_kni_create(pid, MAX_PACKET_SZ, mp, NULL);
if (!kni) {
printf("unexpectedly creates kni falied with NULL ops\n");
goto fail;
}
/* test of releasing kni with NULL ops */
if (rte_kni_release(kni) < 0) {
printf("fail to release kni\n");
goto fail;
}
/* test of getting port id according to NULL kni context */
if (rte_kni_get_port_id(NULL) < RTE_MAX_ETHPORTS) {
printf("unexpectedly get port id successfully by NULL kni "
"pointer\n");
goto fail;
}
/* test of releasing NULL kni context */
ret = rte_kni_release(NULL);
if (ret == 0) {
printf("unexpectedly release kni successfully\n");
goto fail;
}
ret = 0;
fail:
rte_eth_dev_stop(pid);
return ret;
}
#else /* RTE_LIBRTE_KNI */
int
test_kni(void)
{
printf("The KNI library is not included in this build\n");
return 0;
}
#endif /* RTE_LIBRTE_KNI */

examples/kni/main.c

@ -107,6 +107,9 @@
/* Total octets in the FCS */
#define KNI_ENET_FCS_SIZE 4
#define KNI_US_PER_SECOND 1000000
#define KNI_SECOND_PER_DAY 86400
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
@ -209,6 +212,8 @@ static struct rte_kni_ops kni_ops = {
.config_network_if = kni_config_network_interface,
};
static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
/* Print out statistics on packets handled */
static void
print_stats(void)
@ -235,7 +240,7 @@ print_stats(void)
printf("====== ============== ============ ============ ============ ============\n");
}
/* Custom handling of signals to handle stats */
/* Custom handling of signals to handle stats and kni processing */
static void
signal_handler(int signum)
{
@ -250,6 +255,14 @@ signal_handler(int signum)
printf("\n**Statistics have been reset**\n");
return;
}
/* When we receive SIGRTMIN, stop the kni processing */
if (signum == SIGRTMIN) {
printf("SIGRTMIN received, KNI processing is going to stop\n");
rte_atomic32_inc(&kni_stop);
return;
}
}
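/*
* The stop signal can be sent from a shell with e.g. "kill -RTMIN <pid>";
* the shell resolves RTMIN to the process's SIGRTMIN number.
*/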
static void
@ -290,6 +303,7 @@ kni_ingress(struct rte_kni *kni)
num = rte_kni_tx_burst(kni, pkts_burst, nb_rx);
kni_stats[port_id].rx_packets += num;
rte_kni_handle_request(kni);
if (unlikely(num < nb_rx)) {
/* Free mbufs not tx to kni interface */
kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num);
@ -329,18 +343,14 @@ kni_egress(struct rte_kni *kni)
}
/* Main processing loop */
static __attribute__((noreturn)) int
static int
main_loop(__rte_unused void *arg)
{
uint8_t pid;
const unsigned lcore_id = rte_lcore_id();
struct rte_kni *kni = kni_lcore_to_kni(lcore_id);
if (kni == NULL) {
RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n", lcore_id);
for (;;)
; /* loop doing nothing */
} else {
if (kni != NULL) {
pid = rte_kni_get_port_id(kni);
if (pid >= RTE_MAX_ETHPORTS)
rte_exit(EXIT_FAILURE, "Failure: port id >= %d\n",
@ -353,8 +363,13 @@ main_loop(__rte_unused void *arg)
fflush(stdout);
/* rx loop */
while (1)
while (1) {
int32_t flag = rte_atomic32_read(&kni_stop);
if (flag)
break;
kni_ingress(kni);
}
} else if (kni_port_info[pid].lcore_id_egress == lcore_id) {
/* Running on lcores for output packets */
RTE_LOG(INFO, APP, "Lcore %u is writing to port %d\n",
@ -362,15 +377,20 @@ main_loop(__rte_unused void *arg)
fflush(stdout);
/* tx loop */
while (1)
while (1) {
int32_t flag = rte_atomic32_read(&kni_stop);
if (flag)
break;
kni_egress(kni);
} else {
RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n",
lcore_id);
for (;;)
; /* loop doing nothing */
}
}
}
/* fallthrough to here if we don't have any work */
RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n", lcore_id);
return 0;
}
/* Display usage instructions */
@ -379,10 +399,11 @@ print_usage(const char *prgname)
{
RTE_LOG(INFO, APP, "\nUsage: %s [EAL options] -- -p PORTMASK "
"-i IN_CORES -o OUT_CORES\n"
" -p PORTMASK: hex bitmask of ports to use\n"
" -i IN_CORES: hex bitmask of cores which read "
" -p PORTMASK: hex bitmask of ports to use\n"
" -i IN_CORES: hex bitmask of cores which read "
"from NIC\n"
" -o OUT_CORES: hex bitmask of cores which write to NIC\n",
" -o OUT_CORES: hex bitmask of cores which write "
"to NIC\n",
prgname);
}
@ -436,7 +457,7 @@ kni_setup_port_affinities(uint8_t nb_port)
}
if (in_lcore != 0) {
/* It is be for packet receiving */
/* It is for packet receiving */
while ((rx_port < nb_port) &&
((ports_mask & (1 << rx_port)) == 0))
rx_port++;
@ -702,6 +723,7 @@ main(int argc, char** argv)
/* Associate signal_handler function with USR signals */
signal(SIGUSR1, signal_handler);
signal(SIGUSR2, signal_handler);
signal(SIGRTMIN, signal_handler);
/* Initialise EAL */
ret = rte_eal_init(argc, argv);
@ -781,6 +803,13 @@ main(int argc, char** argv)
return -1;
}
for (port = 0; port < nb_sys_ports; port++) {
struct rte_kni *kni = kni_port_info[port].kni;
if (kni != NULL)
rte_kni_release(kni);
}
return 0;
}

lib/librte_eal/common/include/rte_log.h

@ -70,7 +70,7 @@ extern struct rte_logs rte_logs;
#define RTE_LOGTYPE_PMD 0x00000020 /**< Log related to poll mode driver. */
#define RTE_LOGTYPE_HASH 0x00000040 /**< Log related to hash table. */
#define RTE_LOGTYPE_LPM 0x00000080 /**< Log related to LPM. */
#define RTE_LOGTYPE_KNI 0X00000100 /**< Log related to KNI. */
#define RTE_LOGTYPE_KNI 0x00000100 /**< Log related to KNI. */
#define RTE_LOGTYPE_PMAC 0x00000200 /**< Log related to PMAC. */
#define RTE_LOGTYPE_POWER 0x00000400 /**< Log related to power. */
#define RTE_LOGTYPE_METER 0x00000800 /**< Log related to QoS meter. */

lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h

@ -144,6 +144,7 @@ struct rte_kni_device_info
uint8_t bus; /**< Device bus */
uint8_t devid; /**< Device ID */
uint8_t function; /**< Device function. */
uint8_t port_id; /**< Port ID */
/* mbuf size */
unsigned mbuf_size;
@ -151,7 +152,8 @@ struct rte_kni_device_info
#define KNI_DEVICE "kni"
#define RTE_KNI_IOCTL_TEST _IOWR(0, 1, int)
#define RTE_KNI_IOCTL_CREATE _IOWR(0, 2, struct rte_kni_device_info)
#define RTE_KNI_IOCTL_TEST _IOWR(0, 1, int)
#define RTE_KNI_IOCTL_CREATE _IOWR(0, 2, struct rte_kni_device_info)
#define RTE_KNI_IOCTL_RELEASE _IOWR(0, 3, uint8_t)
#endif /* _RTE_KNI_COMMON_H_ */

lib/librte_eal/linuxapp/kni/kni_dev.h

@ -31,6 +31,7 @@
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#define KNI_KTHREAD_RESCHEDULE_INTERVAL 10 /* us */
@ -38,9 +39,12 @@
* A structure describing the private information for a kni device.
*/
struct kni_dev {
/* kni list */
struct list_head list;
struct net_device_stats stats;
int status;
int idx;
int port_id;
/* wait queue for req/resp */
wait_queue_head_t wq;

lib/librte_eal/linuxapp/kni/kni_misc.c

@ -28,6 +28,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/rwsem.h>
#include <exec-env/rte_kni_common.h>
#include "kni_dev.h"
@ -79,14 +80,17 @@ static struct miscdevice kni_misc = {
/* loopback mode */
static char *lo_mode = NULL;
static struct kni_dev *kni_devs[KNI_MAX_DEVICES];
static volatile int num_devs; /* number of kni devices */
#define KNI_DEV_IN_USE_BIT_NUM 0 /* Bit number for device in use */
static volatile unsigned long device_in_use; /* device in use flag */
static struct task_struct *kni_kthread;
/* kni list lock */
static DECLARE_RWSEM(kni_list_lock);
/* kni list */
static struct list_head kni_list_head = LIST_HEAD_INIT(kni_list_head);
static int __init
kni_init(void)
{
@ -122,9 +126,6 @@ kni_open(struct inode *inode, struct file *file)
if (test_and_set_bit(KNI_DEV_IN_USE_BIT_NUM, &device_in_use))
return -EBUSY;
memset(kni_devs, 0, sizeof(kni_devs));
num_devs = 0;
/* Create kernel thread for RX */
kni_kthread = kthread_run(kni_thread, NULL, "kni_thread");
if (IS_ERR(kni_kthread)) {
@ -140,7 +141,7 @@ kni_open(struct inode *inode, struct file *file)
static int
kni_release(struct inode *inode, struct file *file)
{
int i;
struct kni_dev *dev, *n;
KNI_PRINT("Stopping KNI thread...");
@ -148,28 +149,26 @@ kni_release(struct inode *inode, struct file *file)
kthread_stop(kni_kthread);
kni_kthread = NULL;
for (i = 0; i < KNI_MAX_DEVICES; i++) {
if (kni_devs[i] != NULL) {
/* Call the remove part to restore pci dev */
switch (kni_devs[i]->device_id) {
#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) case (dev):
#include <rte_pci_dev_ids.h>
igb_kni_remove(kni_devs[i]->pci_dev);
break;
#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) case (dev):
#include <rte_pci_dev_ids.h>
ixgbe_kni_remove(kni_devs[i]->pci_dev);
break;
default:
break;
}
unregister_netdev(kni_devs[i]->net_dev);
free_netdev(kni_devs[i]->net_dev);
kni_devs[i] = NULL;
down_write(&kni_list_lock);
list_for_each_entry_safe(dev, n, &kni_list_head, list) {
/* Call the remove part to restore pci dev */
switch (dev->device_id) {
#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) case (dev):
#include <rte_pci_dev_ids.h>
igb_kni_remove(dev->pci_dev);
break;
#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) case (dev):
#include <rte_pci_dev_ids.h>
ixgbe_kni_remove(dev->pci_dev);
break;
default:
break;
}
unregister_netdev(dev->net_dev);
free_netdev(dev->net_dev);
list_del(&dev->list);
}
num_devs = 0;
up_write(&kni_list_lock);
/* Clear the bit of device in use */
clear_bit(KNI_DEV_IN_USE_BIT_NUM, &device_in_use);
@ -182,22 +181,20 @@ kni_release(struct inode *inode, struct file *file)
static int
kni_thread(void *unused)
{
int i, j;
int j;
struct kni_dev *dev, *n;
KNI_PRINT("Kernel thread for KNI started\n");
while (!kthread_should_stop()) {
int n_devs = num_devs;
down_read(&kni_list_lock);
for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
for (i = 0; i < n_devs; i++) {
/* This shouldn't be needed */
if (kni_devs[i]) {
kni_net_rx(kni_devs[i]);
kni_net_poll_resp(kni_devs[i]);
}
else
KNI_ERR("kni_thread -no kni found!!!");
list_for_each_entry_safe(dev, n,
&kni_list_head, list) {
kni_net_rx(dev);
kni_net_poll_resp(dev);
}
}
up_read(&kni_list_lock);
/* reschedule out for a while */
schedule_timeout_interruptible(usecs_to_jiffies( \
KNI_KTHREAD_RESCHEDULE_INTERVAL));
@ -216,23 +213,32 @@ kni_ioctl_create(unsigned int ioctl_num, unsigned long ioctl_param)
struct pci_dev *found_pci = NULL;
struct net_device *net_dev = NULL;
struct net_device *lad_dev = NULL;
struct kni_dev *kni;
if (num_devs == KNI_MAX_DEVICES)
return -EBUSY;
struct kni_dev *kni, *dev, *n;
printk(KERN_INFO "KNI: Creating kni...\n");
/* Check the buffer size, to avoid warning */
if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
return -EINVAL;
/* Copy kni info from user space */
ret = copy_from_user(&dev_info, (void *)ioctl_param,
_IOC_SIZE(ioctl_num));
ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
if (ret) {
KNI_ERR("copy_from_user");
KNI_ERR("copy_from_user in kni_ioctl_create");
return -EIO;
}
/* Check if it has been created */
down_read(&kni_list_lock);
list_for_each_entry_safe(dev, n, &kni_list_head, list) {
if (dev->port_id == dev_info.port_id) {
up_read(&kni_list_lock);
KNI_ERR("Port %d has already been created\n",
dev_info.port_id);
return -EINVAL;
}
}
up_read(&kni_list_lock);
net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
kni_net_init);
if (net_dev == NULL) {
@ -243,7 +249,7 @@ kni_ioctl_create(unsigned int ioctl_num, unsigned long ioctl_param)
kni = netdev_priv(net_dev);
kni->net_dev = net_dev;
kni->idx = num_devs;
kni->port_id = dev_info.port_id;
/* Translate user space info into kernel space info */
kni->tx_q = phys_to_virt(dev_info.tx_phys);
@ -337,11 +343,61 @@ kni_ioctl_create(unsigned int ioctl_num, unsigned long ioctl_param)
return -ENODEV;
}
kni_devs[num_devs++] = kni;
down_write(&kni_list_lock);
list_add(&kni->list, &kni_list_head);
up_write(&kni_list_lock);
printk(KERN_INFO "KNI: Successfully create kni for port %d\n",
dev_info.port_id);
return 0;
}
static int
kni_ioctl_release(unsigned int ioctl_num, unsigned long ioctl_param)
{
int ret = -EINVAL;
uint8_t port_id;
struct kni_dev *dev, *n;
if (_IOC_SIZE(ioctl_num) > sizeof(port_id))
return -EINVAL;
ret = copy_from_user(&port_id, (void *)ioctl_param, sizeof(port_id));
if (ret) {
KNI_ERR("copy_from_user in kni_ioctl_release");
return -EIO;
}
down_write(&kni_list_lock);
list_for_each_entry_safe(dev, n, &kni_list_head, list) {
if (dev->port_id != port_id)
continue;
switch (dev->device_id) {
#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) case (dev):
#include <rte_pci_dev_ids.h>
igb_kni_remove(dev->pci_dev);
break;
#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) case (dev):
#include <rte_pci_dev_ids.h>
ixgbe_kni_remove(dev->pci_dev);
break;
default:
break;
}
unregister_netdev(dev->net_dev);
free_netdev(dev->net_dev);
list_del(&dev->list);
ret = 0;
break;
}
up_write(&kni_list_lock);
printk(KERN_INFO "KNI: %s release kni for port %d\n",
(ret == 0 ? "Successfully" : "Unsuccessfully"), port_id);
return ret;
}
static int
kni_ioctl(struct inode *inode,
unsigned int ioctl_num,
@ -361,6 +417,9 @@ kni_ioctl(struct inode *inode,
case _IOC_NR(RTE_KNI_IOCTL_CREATE):
ret = kni_ioctl_create(ioctl_num, ioctl_param);
break;
case _IOC_NR(RTE_KNI_IOCTL_RELEASE):
ret = kni_ioctl_release(ioctl_num, ioctl_param);
break;
default:
KNI_DBG("IOCTL default \n");
break;

lib/librte_eal/linuxapp/kni/kni_net.c

@ -132,7 +132,7 @@ kni_net_open(struct net_device *dev)
struct rte_kni_request req;
struct kni_dev *kni = netdev_priv(dev);
KNI_DBG("kni_net_open %d\n", kni->idx);
KNI_DBG("kni_net_open %d\n", kni->port_id);
/*
* Assign the hardware address of the board: use "\0KNIx", where
@ -144,7 +144,7 @@ kni_net_open(struct net_device *dev)
memcpy(dev->dev_addr, kni->lad_dev->dev_addr, ETH_ALEN);
else {
memcpy(dev->dev_addr, "\0KNI0", ETH_ALEN);
dev->dev_addr[ETH_ALEN-1] += kni->idx; /* \0KNI1 */
dev->dev_addr[ETH_ALEN-1] += kni->port_id; /* \0KNI1 */
}
netif_start_queue(dev);
@ -247,7 +247,7 @@ kni_net_rx_normal(struct kni_dev *kni)
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* Call netif interface */
netif_rx(skb);
netif_receive_skb(skb);
/* Update statistics */
kni->stats.rx_bytes += len;
@ -543,7 +543,7 @@ static int
kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct kni_dev *kni = netdev_priv(dev);
KNI_DBG("kni_net_ioctl %d\n", kni->idx);
KNI_DBG("kni_net_ioctl %d\n", kni->port_id);
return 0;
}

lib/librte_kni/rte_kni.c

@ -61,6 +61,8 @@
#define KNI_REQUEST_MBUF_NUM_MAX 32
#define KNI_MZ_CHECK(mz) do { if (mz) goto fail; } while (0)
/**
* KNI context
*/
@ -78,16 +80,33 @@ struct rte_kni {
/* For request & response */
struct rte_kni_fifo *req_q; /**< Request queue */
struct rte_kni_fifo *resp_q; /**< Response queue */
void * sync_addr; /**< Req/Resp Mem address */
void * sync_addr; /**< Req/Resp Mem address */
struct rte_kni_ops ops; /**< operations for request */
uint8_t port_in_use : 1; /**< kni creation flag */
};
enum kni_ops_status {
KNI_REQ_NO_REGISTER = 0,
KNI_REQ_REGISTERED,
};
static void kni_free_mbufs(struct rte_kni *kni);
static void kni_allocate_mbufs(struct rte_kni *kni);
static int kni_fd = -1;
static volatile int kni_fd = -1;
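/*
* Look up the memzone by name first and reserve it only when it does not
* exist yet, so the same zone is reused across a kni release/re-create
* cycle and can be looked up from another process.
*/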
static const struct rte_memzone *
kni_memzone_reserve(const char *name, size_t len, int socket_id,
unsigned flags)
{
const struct rte_memzone *mz = rte_memzone_lookup(name);
if (mz == NULL)
mz = rte_memzone_reserve(name, len, socket_id, flags);
return mz;
}
struct rte_kni *
rte_kni_create(uint8_t port_id,
@ -95,15 +114,17 @@ rte_kni_create(uint8_t port_id,
struct rte_mempool *pktmbuf_pool,
struct rte_kni_ops *ops)
{
int ret;
struct rte_kni_device_info dev_info;
struct rte_eth_dev_info eth_dev_info;
struct rte_kni *ctx;
char itf_name[IFNAMSIZ];
#define OBJNAMSIZ 32
char obj_name[OBJNAMSIZ];
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
if (port_id >= RTE_MAX_ETHPORTS || pktmbuf_pool == NULL || !ops)
if (port_id >= RTE_MAX_ETHPORTS || pktmbuf_pool == NULL)
return NULL;
/* Check FD and open once */
@ -128,11 +149,21 @@ rte_kni_create(uint8_t port_id,
dev_info.function = eth_dev_info.pci_dev->addr.function;
dev_info.vendor_id = eth_dev_info.pci_dev->id.vendor_id;
dev_info.device_id = eth_dev_info.pci_dev->id.device_id;
dev_info.port_id = port_id;
ctx = rte_zmalloc("kni devs", sizeof(struct rte_kni), 0);
if (ctx == NULL)
rte_panic("Cannot allocate memory for kni dev\n");
memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));
rte_snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%d", port_id);
mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
SOCKET_ID_ANY, 0);
KNI_MZ_CHECK(mz == NULL);
ctx = mz->addr;
if (ctx->port_in_use != 0) {
RTE_LOG(ERR, KNI, "Port %d has been used\n", port_id);
goto fail;
}
memset(ctx, 0, sizeof(struct rte_kni));
if (ops)
memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));
rte_snprintf(itf_name, IFNAMSIZ, "vEth%u", port_id);
rte_snprintf(ctx->name, IFNAMSIZ, itf_name);
@ -140,73 +171,64 @@ rte_kni_create(uint8_t port_id,
/* TX RING */
rte_snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", port_id);
mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
if (mz == NULL || mz->addr == NULL)
rte_panic("Cannot create kni_tx_%d queue\n", port_id);
mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
KNI_MZ_CHECK(mz == NULL);
ctx->tx_q = mz->addr;
kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
dev_info.tx_phys = mz->phys_addr;
/* RX RING */
rte_snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", port_id);
mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
if (mz == NULL || mz->addr == NULL)
rte_panic("Cannot create kni_rx_%d queue\n", port_id);
mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
KNI_MZ_CHECK(mz == NULL);
ctx->rx_q = mz->addr;
kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
dev_info.rx_phys = mz->phys_addr;
/* ALLOC RING */
rte_snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", port_id);
mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
if (mz == NULL || mz->addr == NULL)
rte_panic("Cannot create kni_alloc_%d queue\n", port_id);
mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
KNI_MZ_CHECK(mz == NULL);
ctx->alloc_q = mz->addr;
kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
dev_info.alloc_phys = mz->phys_addr;
/* FREE RING */
rte_snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", port_id);
mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
if (mz == NULL || mz->addr == NULL)
rte_panic("Cannot create kni_free_%d queue\n", port_id);
mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
KNI_MZ_CHECK(mz == NULL);
ctx->free_q = mz->addr;
kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
dev_info.free_phys = mz->phys_addr;
/* Request RING */
rte_snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", port_id);
mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
if (mz == NULL || mz->addr == NULL)
rte_panic("Cannot create kni_req_%d ring\n", port_id);
mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
KNI_MZ_CHECK(mz == NULL);
ctx->req_q = mz->addr;
kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
dev_info.req_phys = mz->phys_addr;
/* Response RING */
rte_snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", port_id);
mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
if (mz == NULL || mz->addr == NULL)
rte_panic("Cannot create kni_resp_%d ring\n", port_id);
mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
KNI_MZ_CHECK(mz == NULL);
ctx->resp_q = mz->addr;
kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
dev_info.resp_phys = mz->phys_addr;
/* Req/Resp sync mem area */
rte_snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", port_id);
mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
if (mz == NULL || mz->addr == NULL)
rte_panic("Cannot create kni_sync_%d mem\n", port_id);
mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
KNI_MZ_CHECK(mz == NULL);
ctx->sync_addr = mz->addr;
dev_info.sync_va = mz->addr;
dev_info.sync_phys = mz->phys_addr;
/* MBUF mempool */
mz = rte_memzone_lookup("MP_mbuf_pool");
if (mz == NULL) {
RTE_LOG(ERR, KNI, "Can not find MP_mbuf_pool\n");
goto fail;
}
rte_snprintf(mz_name, sizeof(mz_name), "MP_%s", pktmbuf_pool->name);
mz = rte_memzone_lookup(mz_name);
KNI_MZ_CHECK(mz == NULL);
dev_info.mbuf_va = mz->addr;
dev_info.mbuf_phys = mz->phys_addr;
ctx->pktmbuf_pool = pktmbuf_pool;
@ -216,28 +238,54 @@ rte_kni_create(uint8_t port_id,
/* Configure the buffer size which will be checked in kernel module */
dev_info.mbuf_size = ctx->mbuf_size;
if (ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info) < 0) {
RTE_LOG(ERR, KNI, "Fail to create kni device\n");
goto fail;
}
ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
KNI_MZ_CHECK(ret < 0);
ctx->port_in_use = 1;
return ctx;
fail:
if (ctx != NULL)
rte_free(ctx);
return NULL;
}
/**
* It is called in the same lcore of receiving packets, and polls the request
* mbufs sent from kernel space. Then analyzes it and calls the specific
* actions for the specific requests. Finally constructs the response mbuf and
* puts it back to the resp_q.
*/
static int
kni_request_handler(struct rte_kni *kni)
static void
kni_free_fifo(struct rte_kni_fifo *fifo)
{
int ret;
struct rte_mbuf *pkt;
do {
ret = kni_fifo_get(fifo, (void **)&pkt, 1);
if (ret)
rte_pktmbuf_free(pkt);
} while (ret);
}
int
rte_kni_release(struct rte_kni *kni)
{
if (!kni || kni->port_in_use == 0)
return -1;
if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &kni->port_id) < 0) {
RTE_LOG(ERR, KNI, "Fail to release kni device\n");
return -1;
}
/* mbufs in all fifos should be released, except request/response */
kni_free_fifo(kni->tx_q);
kni_free_fifo(kni->rx_q);
kni_free_fifo(kni->alloc_q);
kni_free_fifo(kni->free_q);
memset(kni, 0, sizeof(struct rte_kni));
return 0;
}
int
rte_kni_handle_request(struct rte_kni *kni)
{
unsigned ret;
struct rte_kni_request *req;
@ -290,9 +338,6 @@ rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
/* Get mbufs from free_q and then free them */
kni_free_mbufs(kni);
/* Handle the requests from kernel space */
kni_request_handler(kni);
return ret;
}
@ -365,3 +410,81 @@ rte_kni_get_port_id(struct rte_kni *kni)
return kni->port_id;
}
struct rte_kni *
rte_kni_info_get(uint8_t port_id)
{
struct rte_kni *kni;
const struct rte_memzone *mz;
char mz_name[RTE_MEMZONE_NAMESIZE];
if (port_id >= RTE_MAX_ETHPORTS)
return NULL;
rte_snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%d", port_id);
mz = rte_memzone_lookup(mz_name);
if (NULL == mz)
return NULL;
kni = mz->addr;
if (0 == kni->port_in_use)
return NULL;
return kni;
}
static enum kni_ops_status
kni_check_request_register(struct rte_kni_ops *ops)
{
/* check if KNI request ops have been registered */
if (NULL == ops)
return KNI_REQ_NO_REGISTER;
if ((NULL == ops->change_mtu) && (NULL == ops->config_network_if))
return KNI_REQ_NO_REGISTER;
return KNI_REQ_REGISTERED;
}
int
rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops)
{
enum kni_ops_status req_status;
if (NULL == ops) {
RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n");
return -1;
}
if (NULL == kni) {
RTE_LOG(ERR, KNI, "Invalid kni info.\n");
return -1;
}
req_status = kni_check_request_register(&kni->ops);
if (KNI_REQ_REGISTERED == req_status) {
RTE_LOG(ERR, KNI, "The KNI request operation "
"has already been registered.\n");
return -1;
}
memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
return 0;
}
int
rte_kni_unregister_handlers(struct rte_kni *kni)
{
if (NULL == kni) {
RTE_LOG(ERR, KNI, "Invalid kni info.\n");
return -1;
}
if (NULL == &kni->ops) {
RTE_LOG(ERR, KNI, "The invalid KNI unregister operation.\n");
return -1;
}
kni->ops.change_mtu = NULL;
kni->ops.config_network_if = NULL;
return 0;
}

lib/librte_kni/rte_kni.h

@ -86,6 +86,34 @@ struct rte_kni_ops {
extern struct rte_kni *rte_kni_create(uint8_t port_id, unsigned mbuf_size,
struct rte_mempool *pktmbuf_pool, struct rte_kni_ops *ops);
/**
* Release the kni interface according to the context. It will also release
* the paired KNI interface in kernel space. All processing on the specific
* kni context needs to be stopped before calling this interface.
*
* @param kni
* The pointer to the context of an existent kni interface.
*
* @return
* - 0 indicates success.
* - negative value indicates failure.
*/
extern int rte_kni_release(struct rte_kni *kni);
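/*
* A minimal create/release sketch, assuming port 0 and an already
* initialized pktmbuf pool "mp" (illustrative names, not part of this
* header):
*
*     struct rte_kni *kni = rte_kni_create(0, 2048, mp, NULL);
*
*     if (kni != NULL) {
*             ... exchange traffic with rte_kni_rx_burst() and
*             rte_kni_tx_burst() ...
*             rte_kni_release(kni);
*     }
*/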
/**
* It is used to handle the request mbufs sent from kernel space.
* It analyzes the requests, calls the specific actions for them, and
* finally constructs the response mbufs and puts them back into the resp_q.
*
* @param kni
* The pointer to the context of an existent kni interface.
*
* @return
* - 0 indicates success.
* - negative value indicates failure.
*/
extern int rte_kni_handle_request(struct rte_kni *kni);
/**
* Retrieve a burst of packets from a kni interface. The retrieved packets are
* stored in rte_mbuf structures whose pointers are supplied in the array of
@ -136,6 +164,46 @@ extern unsigned rte_kni_tx_burst(struct rte_kni *kni,
*/
extern uint8_t rte_kni_get_port_id(struct rte_kni *kni);
/**
* Get the kni context of the specified port.
*
* @param port_id
* The port id.
*
* @return
* On success: Pointer to kni interface.
* On failure: NULL
*/
extern struct rte_kni *rte_kni_info_get(uint8_t port_id);
/**
* Register kni request handling for a specified port; it can be
* called by either the master process or a slave process.
*
* @param kni
* pointer to struct rte_kni.
* @param ops
* pointer to struct rte_kni_ops.
*
* @return
* On success: 0
* On failure: -1
*/
extern int rte_kni_register_handlers(struct rte_kni *kni,
struct rte_kni_ops *ops);
/**
* Unregister kni request handling for a specified port.
*
* @param kni
* pointer to struct rte_kni.
*
* @return
* On success: 0
* On failure: -1
*/
extern int rte_kni_unregister_handlers(struct rte_kni *kni);
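/*
* A minimal sketch of the multi-process flow, assuming the primary
* process has already created the kni device for port 0 and that the
* application defines a change_mtu callback "app_change_mtu" (both are
* assumptions, not part of this header):
*
*     struct rte_kni *kni = rte_kni_info_get(0);
*     struct rte_kni_ops ops = {
*             .change_mtu = app_change_mtu,
*             .config_network_if = NULL,
*     };
*
*     if (kni == NULL || rte_kni_register_handlers(kni, &ops) < 0)
*             return -1;
*     ... poll rte_kni_handle_request(kni) in the packet loop, and call
*     rte_kni_unregister_handlers(kni) on shutdown ...
*/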
#ifdef __cplusplus
}
#endif