2017-12-19 15:49:01 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
* Copyright(c) 2016-2017 Intel Corporation
|
2016-12-12 08:38:38 -06:00
|
|
|
*/
|
|
|
|
|
2017-03-14 13:51:58 +01:00
|
|
|
#include <rte_atomic.h>
|
2017-03-16 09:59:21 +01:00
|
|
|
#include <rte_branch_prediction.h>
|
2017-05-22 13:20:43 +02:00
|
|
|
#include <rte_byteorder.h>
|
2017-03-14 13:51:58 +01:00
|
|
|
#include <rte_common.h>
|
2016-12-12 08:38:38 -06:00
|
|
|
#include <rte_mbuf.h>
|
2018-01-22 00:16:22 +00:00
|
|
|
#include <rte_ethdev_driver.h>
|
2017-04-11 17:44:12 +02:00
|
|
|
#include <rte_ethdev_vdev.h>
|
2016-12-12 08:38:38 -06:00
|
|
|
#include <rte_malloc.h>
|
2017-11-07 06:54:21 +00:00
|
|
|
#include <rte_bus_vdev.h>
|
2016-12-12 08:38:38 -06:00
|
|
|
#include <rte_kvargs.h>
|
2017-03-15 15:48:18 +01:00
|
|
|
#include <rte_net.h>
|
2017-05-12 15:01:36 +02:00
|
|
|
#include <rte_debug.h>
|
2017-05-22 13:20:43 +02:00
|
|
|
#include <rte_ip.h>
|
2018-03-12 11:33:00 +00:00
|
|
|
#include <rte_string_fns.h>
|
2016-12-12 08:38:38 -06:00
|
|
|
|
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/ioctl.h>
|
net/tap: add basic flow API patterns and actions
Supported flow rules are now mapped to TC rules on the tap netdevice.
The netlink message used for creating the TC rule is stored in struct
rte_flow. That way, by simply changing a metadata in it, we can require
for the rule deletion without further parsing.
Supported items:
- eth: src and dst (with variable masks), and eth_type (0xffff mask).
- vlan: vid, pcp, tpid, but not eid.
- ipv4/6: src and dst (with variable masks), and ip_proto (0xffff mask).
- udp/tcp: src and dst port (0xffff) mask.
Supported actions:
- DROP
- QUEUE
- PASSTHRU
It is generally not possible to provide a "last" item. However, if the
"last" item, once masked, is identical to the masked spec, then it is
supported.
Only IPv4/6 and MAC addresses can use a variable mask. All other
items need a full mask (exact match).
Support for VLAN requires kernel headers >= 4.9, checked using
auto-config.sh.
Signed-off-by: Pascal Mazon <pascal.mazon@6wind.com>
Acked-by: Olga Shern <olgas@mellanox.com>
Acked-by: Keith Wiles <keith.wiles@intel.com>
2017-03-23 09:33:57 +01:00
|
|
|
#include <sys/utsname.h>
|
2016-12-12 08:38:38 -06:00
|
|
|
#include <sys/mman.h>
|
2017-03-14 13:51:58 +01:00
|
|
|
#include <errno.h>
|
|
|
|
#include <signal.h>
|
2018-01-17 16:04:33 +02:00
|
|
|
#include <stdbool.h>
|
2017-03-14 13:51:58 +01:00
|
|
|
#include <stdint.h>
|
2017-03-16 09:59:21 +01:00
|
|
|
#include <sys/uio.h>
|
2016-12-12 08:38:38 -06:00
|
|
|
#include <unistd.h>
|
|
|
|
#include <arpa/inet.h>
|
net/tap: add basic flow API patterns and actions
Supported flow rules are now mapped to TC rules on the tap netdevice.
The netlink message used for creating the TC rule is stored in struct
rte_flow. That way, by simply changing a metadata in it, we can require
for the rule deletion without further parsing.
Supported items:
- eth: src and dst (with variable masks), and eth_type (0xffff mask).
- vlan: vid, pcp, tpid, but not eid.
- ipv4/6: src and dst (with variable masks), and ip_proto (0xffff mask).
- udp/tcp: src and dst port (0xffff) mask.
Supported actions:
- DROP
- QUEUE
- PASSTHRU
It is generally not possible to provide a "last" item. However, if the
"last" item, once masked, is identical to the masked spec, then it is
supported.
Only IPv4/6 and MAC addresses can use a variable mask. All other
items need a full mask (exact match).
Support for VLAN requires kernel headers >= 4.9, checked using
auto-config.sh.
Signed-off-by: Pascal Mazon <pascal.mazon@6wind.com>
Acked-by: Olga Shern <olgas@mellanox.com>
Acked-by: Keith Wiles <keith.wiles@intel.com>
2017-03-23 09:33:57 +01:00
|
|
|
#include <net/if.h>
|
2016-12-12 08:38:38 -06:00
|
|
|
#include <linux/if_tun.h>
|
|
|
|
#include <linux/if_ether.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
|
2018-05-08 17:07:35 +00:00
|
|
|
#include <tap_rss.h>
|
2017-03-23 09:33:54 +01:00
|
|
|
#include <rte_eth_tap.h>
|
2017-03-23 09:33:55 +01:00
|
|
|
#include <tap_flow.h>
|
2017-03-22 09:40:01 +01:00
|
|
|
#include <tap_netlink.h>
|
net/tap: add basic flow API patterns and actions
Supported flow rules are now mapped to TC rules on the tap netdevice.
The netlink message used for creating the TC rule is stored in struct
rte_flow. That way, by simply changing a metadata in it, we can require
for the rule deletion without further parsing.
Supported items:
- eth: src and dst (with variable masks), and eth_type (0xffff mask).
- vlan: vid, pcp, tpid, but not eid.
- ipv4/6: src and dst (with variable masks), and ip_proto (0xffff mask).
- udp/tcp: src and dst port (0xffff) mask.
Supported actions:
- DROP
- QUEUE
- PASSTHRU
It is generally not possible to provide a "last" item. However, if the
"last" item, once masked, is identical to the masked spec, then it is
supported.
Only IPv4/6 and MAC addresses can use a variable mask. All other
items need a full mask (exact match).
Support for VLAN requires kernel headers >= 4.9, checked using
auto-config.sh.
Signed-off-by: Pascal Mazon <pascal.mazon@6wind.com>
Acked-by: Olga Shern <olgas@mellanox.com>
Acked-by: Keith Wiles <keith.wiles@intel.com>
2017-03-23 09:33:57 +01:00
|
|
|
#include <tap_tcmsgs.h>
|
2017-03-23 09:33:54 +01:00
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
/* Linux based path to the TUN device */
#define TUN_TAP_DEV_PATH "/dev/net/tun"
/* Default interface name prefixes (presumably suffixed with a unit index
 * from tap_unit/tun_unit below -- confirm against the probe code).
 */
#define DEFAULT_TAP_NAME "dtap"
#define DEFAULT_TUN_NAME "dtun"

/* Device arguments accepted by this vdev driver */
#define ETH_TAP_IFACE_ARG "iface"
#define ETH_TAP_REMOTE_ARG "remote"
#define ETH_TAP_MAC_ARG "mac"
#define ETH_TAP_MAC_FIXED "fixed"

/* Accepted spellings for the "mac" argument value */
#define ETH_TAP_USR_MAC_FMT "xx:xx:xx:xx:xx:xx"
#define ETH_TAP_CMP_MAC_FMT "0123456789ABCDEFabcdef"
#define ETH_TAP_MAC_ARG_FMT ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT

static struct rte_vdev_driver pmd_tap_drv;
static struct rte_vdev_driver pmd_tun_drv;

/* NULL-terminated list of recognized kvargs keys */
static const char *valid_arguments[] = {
	ETH_TAP_IFACE_ARG,
	ETH_TAP_REMOTE_ARG,
	ETH_TAP_MAC_ARG,
	NULL
};

/* Per-driver instance counters */
static int tap_unit;
static int tun_unit;

/* Nonzero selects TAP (L2) behavior, zero selects TUN (L3); see tun_alloc() */
static int tap_type;
/* Short driver name used in log messages ("TUN"/"TAP" presumably --
 * set elsewhere in this file).
 */
static char tuntap_name[8];

static volatile uint32_t tap_trigger;	/* Rx trigger */

/* Fixed link properties advertised by this virtual device */
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};
|
|
|
|
|
2017-03-14 13:51:58 +01:00
|
|
|
/* SIGIO handler installed by tun_alloc(): advance the global Rx trigger so
 * pmd_rx_burst() knows data may be pending on the tap file descriptors.
 * Only async-signal-safe work is done here (a single volatile store).
 */
static void
tap_trigger_cb(int sig __rte_unused)
{
	/* Valid trigger values are nonzero */
	tap_trigger = (tap_trigger + 1) | 0x80000000;
}
|
|
|
|
|
2017-03-31 15:54:10 +02:00
|
|
|
/* Specifies on what netdevices the ioctl should be applied */
enum ioctl_mode {
	LOCAL_AND_REMOTE,	/* tap netdevice, then the remote netdevice */
	LOCAL_ONLY,		/* tap netdevice only */
	REMOTE_ONLY,		/* remote netdevice only (no-op without one) */
};
|
|
|
|
|
2017-03-22 09:40:01 +01:00
|
|
|
static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
|
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
/* Tun/Tap allocation routine
|
|
|
|
*
|
|
|
|
* name is the number of the interface to use, unless NULL to take the host
|
|
|
|
* supplied name.
|
|
|
|
*/
|
|
|
|
static int
|
2017-05-12 15:01:39 +02:00
|
|
|
tun_alloc(struct pmd_internals *pmd)
|
2016-12-12 08:38:38 -06:00
|
|
|
{
|
|
|
|
struct ifreq ifr;
|
2017-02-06 13:40:35 -06:00
|
|
|
#ifdef IFF_MULTI_QUEUE
|
2016-12-12 08:38:38 -06:00
|
|
|
unsigned int features;
|
2017-02-06 13:40:35 -06:00
|
|
|
#endif
|
2016-12-12 08:38:38 -06:00
|
|
|
int fd;
|
|
|
|
|
|
|
|
memset(&ifr, 0, sizeof(struct ifreq));
|
|
|
|
|
2017-03-16 09:59:21 +01:00
|
|
|
/*
|
|
|
|
* Do not set IFF_NO_PI as packet information header will be needed
|
|
|
|
* to check if a received packet has been truncated.
|
|
|
|
*/
|
2018-04-03 03:07:47 +05:30
|
|
|
ifr.ifr_flags = (tap_type) ? IFF_TAP : IFF_TUN | IFF_POINTOPOINT;
|
2017-02-17 09:43:04 -06:00
|
|
|
snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);
|
2017-02-06 13:40:35 -06:00
|
|
|
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, "ifr_name '%s'", ifr.ifr_name);
|
2016-12-12 08:38:38 -06:00
|
|
|
|
|
|
|
fd = open(TUN_TAP_DEV_PATH, O_RDWR);
|
|
|
|
if (fd < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
|
2016-12-12 08:38:38 -06:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2017-02-06 13:40:35 -06:00
|
|
|
#ifdef IFF_MULTI_QUEUE
|
|
|
|
/* Grab the TUN features to verify we can work multi-queue */
|
2016-12-12 08:38:38 -06:00
|
|
|
if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s unable to get TUN/TAP features",
|
|
|
|
tuntap_name);
|
2016-12-12 08:38:38 -06:00
|
|
|
goto error;
|
|
|
|
}
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, "%s Features %08x", tuntap_name, features);
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2017-02-06 13:40:35 -06:00
|
|
|
if (features & IFF_MULTI_QUEUE) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, " Multi-queue support for %d queues",
|
2016-12-12 08:38:38 -06:00
|
|
|
RTE_PMD_TAP_MAX_QUEUES);
|
2017-02-06 13:40:35 -06:00
|
|
|
ifr.ifr_flags |= IFF_MULTI_QUEUE;
|
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
{
|
2017-01-20 08:30:25 -06:00
|
|
|
ifr.ifr_flags |= IFF_ONE_QUEUE;
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, " Single queue only support");
|
2017-01-20 08:30:25 -06:00
|
|
|
}
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2017-02-06 13:40:35 -06:00
|
|
|
/* Set the TUN/TAP configuration and set the name if needed */
|
2016-12-12 08:38:38 -06:00
|
|
|
if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(WARNING, "Unable to set TUNSETIFF for %s: %s",
|
|
|
|
ifr.ifr_name, strerror(errno));
|
2016-12-12 08:38:38 -06:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Always set the file descriptor to non-blocking */
|
|
|
|
if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(WARNING,
|
|
|
|
"Unable to set %s to nonblocking: %s",
|
|
|
|
ifr.ifr_name, strerror(errno));
|
2016-12-12 08:38:38 -06:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2017-03-14 13:51:58 +01:00
|
|
|
/* Set up trigger to optimize empty Rx bursts */
|
|
|
|
errno = 0;
|
|
|
|
do {
|
|
|
|
struct sigaction sa;
|
|
|
|
int flags = fcntl(fd, F_GETFL);
|
|
|
|
|
|
|
|
if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
|
|
|
|
break;
|
|
|
|
if (sa.sa_handler != tap_trigger_cb) {
|
|
|
|
/*
|
|
|
|
* Make sure SIGIO is not already taken. This is done
|
|
|
|
* as late as possible to leave the application a
|
|
|
|
* chance to set up its own signal handler first.
|
|
|
|
*/
|
|
|
|
if (sa.sa_handler != SIG_IGN &&
|
|
|
|
sa.sa_handler != SIG_DFL) {
|
|
|
|
errno = EBUSY;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
sa = (struct sigaction){
|
|
|
|
.sa_flags = SA_RESTART,
|
|
|
|
.sa_handler = tap_trigger_cb,
|
|
|
|
};
|
|
|
|
if (sigaction(SIGIO, &sa, NULL) == -1)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* Enable SIGIO on file descriptor */
|
|
|
|
fcntl(fd, F_SETFL, flags | O_ASYNC);
|
|
|
|
fcntl(fd, F_SETOWN, getpid());
|
|
|
|
} while (0);
|
2018-04-25 08:56:37 -07:00
|
|
|
|
2017-03-14 13:51:58 +01:00
|
|
|
if (errno) {
|
|
|
|
/* Disable trigger globally in case of error */
|
|
|
|
tap_trigger = 0;
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(WARNING, "Rx trigger disabled: %s",
|
2017-03-14 13:51:58 +01:00
|
|
|
strerror(errno));
|
|
|
|
}
|
|
|
|
|
2017-03-23 09:42:11 +01:00
|
|
|
return fd;
|
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
error:
|
|
|
|
if (fd > 0)
|
|
|
|
close(fd);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2017-05-22 13:20:43 +02:00
|
|
|
/* Verify Rx checksums of a received mbuf and report the result through
 * mbuf->ol_flags (PKT_RX_IP_CKSUM_GOOD/BAD, PKT_RX_L4_CKSUM_GOOD/BAD).
 *
 * Relies on mbuf->packet_type having been filled in beforehand (the caller
 * runs rte_net_get_ptype() first).  Skips verification whenever the headers
 * cannot be safely read: L2/L3 headers not fully inside the first segment,
 * multi-segment packets for L4, or IPv6 with extension headers.
 */
static void
tap_verify_csum(struct rte_mbuf *mbuf)
{
	uint32_t l2 = mbuf->packet_type & RTE_PTYPE_L2_MASK;
	uint32_t l3 = mbuf->packet_type & RTE_PTYPE_L3_MASK;
	uint32_t l4 = mbuf->packet_type & RTE_PTYPE_L4_MASK;
	unsigned int l2_len = sizeof(struct ether_hdr);
	unsigned int l3_len;
	uint16_t cksum = 0;
	void *l3_hdr;
	void *l4_hdr;

	/* Account for VLAN (4B) / QinQ (8B) tags in the L2 header length */
	if (l2 == RTE_PTYPE_L2_ETHER_VLAN)
		l2_len += 4;
	else if (l2 == RTE_PTYPE_L2_ETHER_QINQ)
		l2_len += 8;
	/* Don't verify checksum for packets with discontinuous L2 header */
	if (unlikely(l2_len + sizeof(struct ipv4_hdr) >
		     rte_pktmbuf_data_len(mbuf)))
		return;
	l3_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len);
	if (l3 == RTE_PTYPE_L3_IPV4 || l3 == RTE_PTYPE_L3_IPV4_EXT) {
		struct ipv4_hdr *iph = l3_hdr;

		/* ihl contains the number of 4-byte words in the header */
		l3_len = 4 * (iph->version_ihl & 0xf);
		if (unlikely(l2_len + l3_len > rte_pktmbuf_data_len(mbuf)))
			return;

		/* a valid IPv4 header sums (one's complement) to 0xffff */
		cksum = ~rte_raw_cksum(iph, l3_len);
		mbuf->ol_flags |= cksum ?
			PKT_RX_IP_CKSUM_BAD :
			PKT_RX_IP_CKSUM_GOOD;
	} else if (l3 == RTE_PTYPE_L3_IPV6) {
		/* IPv6 has no header checksum; fixed 40-byte header */
		l3_len = sizeof(struct ipv6_hdr);
	} else {
		/* IPv6 extensions are not supported */
		return;
	}
	if (l4 == RTE_PTYPE_L4_UDP || l4 == RTE_PTYPE_L4_TCP) {
		l4_hdr = rte_pktmbuf_mtod_offset(mbuf, void *, l2_len + l3_len);
		/* Don't verify checksum for multi-segment packets. */
		if (mbuf->nb_segs > 1)
			return;
		if (l3 == RTE_PTYPE_L3_IPV4)
			cksum = ~rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
		else if (l3 == RTE_PTYPE_L3_IPV6)
			cksum = ~rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
		mbuf->ol_flags |= cksum ?
			PKT_RX_L4_CKSUM_BAD :
			PKT_RX_L4_CKSUM_GOOD;
	}
}
|
|
|
|
|
2018-01-17 16:04:34 +02:00
|
|
|
/* Port-level Rx offload capabilities.
 *
 * Every Rx offload of this PMD is configurable per queue, so nothing is
 * reported at port scope.
 */
static uint64_t
tap_rx_offload_get_port_capa(void)
{
	uint64_t port_capa = 0;

	return port_capa;
}
|
|
|
|
|
|
|
|
static uint64_t
|
|
|
|
tap_rx_offload_get_queue_capa(void)
|
|
|
|
{
|
|
|
|
return DEV_RX_OFFLOAD_SCATTER |
|
|
|
|
DEV_RX_OFFLOAD_IPV4_CKSUM |
|
|
|
|
DEV_RX_OFFLOAD_UDP_CKSUM |
|
2018-02-13 08:14:25 +00:00
|
|
|
DEV_RX_OFFLOAD_TCP_CKSUM |
|
|
|
|
DEV_RX_OFFLOAD_CRC_STRIP;
|
2018-01-17 16:04:34 +02:00
|
|
|
}
|
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
/* Callback to handle the rx burst of packets to the correct interface and
 * file descriptor(s) in a multi-queue setup.
 *
 * Uses the SIGIO-driven tap_trigger counter to skip the readv() syscall
 * entirely when no new data arrived since the previous burst on this queue.
 * Each readv() fills iovecs[0] with the tun_pi packet-info header and the
 * remaining iovecs with the pre-posted mbuf chain in rxq->pool, which is
 * re-filled (segment by segment) as packets are handed to the caller.
 */
static uint16_t
pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct rx_queue *rxq = queue;
	uint16_t num_rx;
	unsigned long num_rx_bytes = 0;
	uint32_t trigger = tap_trigger;

	/* Nothing happened on the fd since this value was last seen */
	if (trigger == rxq->trigger_seen)
		return 0;
	if (trigger)
		rxq->trigger_seen = trigger;
	/* keep the trigger snapshot ordered before the reads below */
	rte_compiler_barrier();
	for (num_rx = 0; num_rx < nb_pkts; ) {
		struct rte_mbuf *mbuf = rxq->pool;
		struct rte_mbuf *seg = NULL;
		struct rte_mbuf *new_tail = NULL;
		uint16_t data_off = rte_pktmbuf_headroom(mbuf);
		int len;

		/* one iovec for pi, plus one per descriptor when scattering */
		len = readv(rxq->fd, *rxq->iovecs,
			    1 +
			    (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
			     rxq->nb_rx_desc : 1));
		if (len < (int)sizeof(struct tun_pi))
			break;

		/* Packet couldn't fit in the provided mbuf */
		if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
			rxq->stats.ierrors++;
			continue;
		}

		len -= sizeof(struct tun_pi);

		mbuf->pkt_len = len;
		mbuf->port = rxq->in_port;
		/* Walk the received data, replacing each consumed segment of
		 * rxq->pool with a freshly allocated mbuf for the next burst.
		 */
		while (1) {
			struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);

			if (unlikely(!buf)) {
				rxq->stats.rx_nombuf++;
				/* No new buf has been allocated: do nothing */
				if (!new_tail || !seg)
					goto end;

				/* drop the partially rebuilt packet */
				seg->next = NULL;
				rte_pktmbuf_free(mbuf);

				goto end;
			}
			seg = seg ? seg->next : mbuf;
			if (rxq->pool == mbuf)
				rxq->pool = buf;
			if (new_tail)
				new_tail->next = buf;
			new_tail = buf;
			new_tail->next = seg->next;

			/* iovecs[0] is reserved for packet info (pi) */
			(*rxq->iovecs)[mbuf->nb_segs].iov_len =
				buf->buf_len - data_off;
			(*rxq->iovecs)[mbuf->nb_segs].iov_base =
				(char *)buf->buf_addr + data_off;

			seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
			seg->data_off = data_off;

			len -= seg->data_len;
			if (len <= 0)
				break;
			mbuf->nb_segs++;
			/* First segment has headroom, not the others */
			data_off = 0;
		}
		seg->next = NULL;
		mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
						      RTE_PTYPE_ALL_MASK);
		if (rxq->rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
			tap_verify_csum(mbuf);

		/* account for the receive frame */
		bufs[num_rx++] = mbuf;
		num_rx_bytes += mbuf->pkt_len;
	}
end:
	rxq->stats.ipackets += num_rx;
	rxq->stats.ibytes += num_rx_bytes;

	return num_rx;
}
|
|
|
|
|
2018-01-17 16:04:33 +02:00
|
|
|
/* Port-level Tx offload capabilities.
 *
 * Every Tx offload of this PMD is configurable per queue, so nothing is
 * reported at port scope.
 */
static uint64_t
tap_tx_offload_get_port_capa(void)
{
	uint64_t port_capa = 0;

	return port_capa;
}
|
|
|
|
|
|
|
|
static uint64_t
|
|
|
|
tap_tx_offload_get_queue_capa(void)
|
|
|
|
{
|
2018-02-05 10:59:08 +00:00
|
|
|
return DEV_TX_OFFLOAD_MULTI_SEGS |
|
|
|
|
DEV_TX_OFFLOAD_IPV4_CKSUM |
|
2018-01-17 16:04:33 +02:00
|
|
|
DEV_TX_OFFLOAD_UDP_CKSUM |
|
|
|
|
DEV_TX_OFFLOAD_TCP_CKSUM;
|
|
|
|
}
|
|
|
|
|
2017-05-22 13:20:43 +02:00
|
|
|
/* Compute IP and/or L4 checksums in place on a Tx packet copy.
 *
 * @param packet  Linear buffer holding the whole frame (a copy, see caller).
 * @param ol_flags  The mbuf's offload request flags (PKT_TX_*).
 * @param l2_len / l3_len  Header lengths supplied by the application.
 *
 * IPv4 header checksum is filled when requested; the UDP/TCP checksum is
 * computed over pseudo-header + payload per RFC 768/793 (zero result is
 * transmitted as 0xffff).
 */
static void
tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
	       unsigned int l3_len)
{
	void *l3_hdr = packet + l2_len;

	if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
		struct ipv4_hdr *iph = l3_hdr;
		uint16_t cksum;

		/* field must be zero while summing the header */
		iph->hdr_checksum = 0;
		cksum = rte_raw_cksum(iph, l3_len);
		iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
	}
	if (ol_flags & PKT_TX_L4_MASK) {
		uint16_t l4_len;
		uint32_t cksum;
		uint16_t *l4_cksum;
		void *l4_hdr;

		l4_hdr = packet + l2_len + l3_len;
		/* locate the checksum field inside the UDP/TCP header */
		if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
			l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
		else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
			l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
		else
			return;
		*l4_cksum = 0;
		if (ol_flags & PKT_TX_IPV4) {
			struct ipv4_hdr *iph = l3_hdr;

			l4_len = rte_be_to_cpu_16(iph->total_length) - l3_len;
			cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
		} else {
			struct ipv6_hdr *ip6h = l3_hdr;

			/* payload_len does not include ext headers */
			l4_len = rte_be_to_cpu_16(ip6h->payload_len) -
				l3_len + sizeof(struct ipv6_hdr);
			cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
		}
		cksum += rte_raw_cksum(l4_hdr, l4_len);
		/* fold carries and take one's complement */
		cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
		cksum = (~cksum) & 0xffff;
		/* 0 means "no checksum" for UDP; transmit as 0xffff */
		if (cksum == 0)
			cksum = 0xffff;
		*l4_cksum = cksum;
	}
}
|
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
/* Callback to handle sending packets from the tap interface
 *
 * Writes each mbuf chain to the queue's tap fd with a single writev():
 * iovecs[0] carries the tun_pi packet-info header (required because the
 * device is created without IFF_NO_PI), iovecs[1..nb_segs] carry the data.
 */
static uint16_t
pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct tx_queue *txq = queue;
	uint16_t num_tx = 0;
	unsigned long num_tx_bytes = 0;
	uint32_t max_size;
	int i;

	if (unlikely(nb_pkts == 0))
		return 0;

	/* largest frame the kernel side will accept for this MTU */
	max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = bufs[num_tx];
		struct iovec iovecs[mbuf->nb_segs + 1];
		struct tun_pi pi = { .flags = 0, .proto = 0x00 };
		struct rte_mbuf *seg = mbuf;
		/* VLA scratch copy used only when checksums are rewritten */
		char m_copy[mbuf->data_len];
		int n;
		int j;

		/* stats.errs will be incremented */
		if (rte_pktmbuf_pkt_len(mbuf) > max_size)
			break;

		/*
		 * TUN and TAP are created with IFF_NO_PI disabled.
		 * For TUN PMD this mandatory as fields are used by
		 * Kernel tun.c to determine whether its IP or non IP
		 * packets.
		 *
		 * The logic fetches the first byte of data from mbuf.
		 * compares whether its v4 or v6. If none matches default
		 * value 0x00 is taken for protocol field.
		 */
		/* NOTE(review): 0x0008/0xdd86 look like byte-swapped
		 * ETHER_TYPE_IPv4/IPv6 (0x0800/0x86dd), i.e. written for
		 * little-endian hosts -- verify on big-endian targets.
		 */
		char *buff_data = rte_pktmbuf_mtod(seg, void *);
		j = (*buff_data & 0xf0);
		pi.proto = (j == 0x40) ? 0x0008 :
				(j == 0x60) ? 0xdd86 : 0x00;

		iovecs[0].iov_base = &pi;
		iovecs[0].iov_len = sizeof(pi);
		for (j = 1; j <= mbuf->nb_segs; j++) {
			iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
			iovecs[j].iov_base =
				rte_pktmbuf_mtod(seg, void *);
			seg = seg->next;
		}
		if (txq->csum &&
		    ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
		     (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
		     (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
			/* Support only packets with all data in the same seg */
			if (mbuf->nb_segs > 1)
				break;
			/* To change checksums, work on a copy of data. */
			rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
				   rte_pktmbuf_data_len(mbuf));
			tap_tx_offload(m_copy, mbuf->ol_flags,
				       mbuf->l2_len, mbuf->l3_len);
			/* transmit the patched copy instead of the mbuf data */
			iovecs[1].iov_base = m_copy;
		}
		/* copy the tx frame data */
		n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
		if (n <= 0)
			break;

		num_tx++;
		num_tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	/* packets not sent in this burst are reported as errors */
	txq->stats.opackets += num_tx;
	txq->stats.errs += nb_pkts - num_tx;
	txq->stats.obytes += num_tx_bytes;

	return num_tx;
}
|
|
|
|
|
2017-05-12 15:01:36 +02:00
|
|
|
/* Translate a known ioctl request code into its symbolic name for logs. */
static const char *
tap_ioctl_req2str(unsigned long request)
{
	static const struct {
		unsigned long req;
		const char *name;
	} req_names[] = {
		{ SIOCSIFFLAGS, "SIOCSIFFLAGS" },
		{ SIOCGIFFLAGS, "SIOCGIFFLAGS" },
		{ SIOCGIFHWADDR, "SIOCGIFHWADDR" },
		{ SIOCSIFHWADDR, "SIOCSIFHWADDR" },
		{ SIOCSIFMTU, "SIOCSIFMTU" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(req_names) / sizeof(req_names[0]); i++) {
		if (req_names[i].req == request)
			return req_names[i].name;
	}
	return "UNKNOWN";
}
|
|
|
|
|
2017-02-06 13:40:36 -06:00
|
|
|
static int
|
2017-03-15 15:48:13 +01:00
|
|
|
tap_ioctl(struct pmd_internals *pmd, unsigned long request,
|
2017-03-31 15:54:10 +02:00
|
|
|
struct ifreq *ifr, int set, enum ioctl_mode mode)
|
2017-02-02 17:18:03 +01:00
|
|
|
{
|
2017-03-15 15:48:13 +01:00
|
|
|
short req_flags = ifr->ifr_flags;
|
2017-03-31 15:54:10 +02:00
|
|
|
int remote = pmd->remote_if_index &&
|
|
|
|
(mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);
|
2017-02-02 17:18:03 +01:00
|
|
|
|
2017-03-31 15:54:10 +02:00
|
|
|
if (!pmd->remote_if_index && mode == REMOTE_ONLY)
|
|
|
|
return 0;
|
2017-03-23 09:42:11 +01:00
|
|
|
/*
|
|
|
|
* If there is a remote netdevice, apply ioctl on it, then apply it on
|
|
|
|
* the tap netdevice.
|
|
|
|
*/
|
|
|
|
apply:
|
|
|
|
if (remote)
|
|
|
|
snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->remote_iface);
|
2017-03-31 15:54:10 +02:00
|
|
|
else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
|
2017-03-23 09:42:11 +01:00
|
|
|
snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->name);
|
2017-03-15 15:48:13 +01:00
|
|
|
switch (request) {
|
|
|
|
case SIOCSIFFLAGS:
|
|
|
|
/* fetch current flags to leave other flags untouched */
|
|
|
|
if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
|
|
|
|
goto error;
|
|
|
|
if (set)
|
|
|
|
ifr->ifr_flags |= req_flags;
|
|
|
|
else
|
|
|
|
ifr->ifr_flags &= ~req_flags;
|
|
|
|
break;
|
2017-03-22 09:40:00 +01:00
|
|
|
case SIOCGIFFLAGS:
|
2017-03-15 15:48:14 +01:00
|
|
|
case SIOCGIFHWADDR:
|
|
|
|
case SIOCSIFHWADDR:
|
2017-03-15 15:48:15 +01:00
|
|
|
case SIOCSIFMTU:
|
2017-03-15 15:48:14 +01:00
|
|
|
break;
|
2017-03-15 15:48:13 +01:00
|
|
|
default:
|
2018-03-12 19:31:33 +00:00
|
|
|
RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
|
|
|
|
pmd->name);
|
|
|
|
return -EINVAL;
|
2017-03-15 15:48:13 +01:00
|
|
|
}
|
|
|
|
if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
|
|
|
|
goto error;
|
2017-03-31 15:54:10 +02:00
|
|
|
if (remote-- && mode == LOCAL_AND_REMOTE)
|
2017-03-23 09:42:11 +01:00
|
|
|
goto apply;
|
2017-02-02 17:18:03 +01:00
|
|
|
return 0;
|
2017-03-15 15:48:13 +01:00
|
|
|
|
|
|
|
error:
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, "%s(%s) failed: %s(%d)", ifr->ifr_name,
|
|
|
|
tap_ioctl_req2str(request), strerror(errno), errno);
|
2017-03-15 15:48:13 +01:00
|
|
|
return -errno;
|
2017-02-02 17:18:03 +01:00
|
|
|
}
|
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
static int
|
2017-02-02 17:18:03 +01:00
|
|
|
tap_link_set_down(struct rte_eth_dev *dev)
|
2016-12-12 08:38:38 -06:00
|
|
|
{
|
2017-02-02 17:18:03 +01:00
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
2017-03-15 15:48:13 +01:00
|
|
|
struct ifreq ifr = { .ifr_flags = IFF_UP };
|
2017-02-02 17:18:03 +01:00
|
|
|
|
|
|
|
dev->data->dev_link.link_status = ETH_LINK_DOWN;
|
2017-06-27 14:33:15 +02:00
|
|
|
return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_ONLY);
|
2017-02-02 17:18:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
tap_link_set_up(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
2017-03-15 15:48:13 +01:00
|
|
|
struct ifreq ifr = { .ifr_flags = IFF_UP };
|
2017-02-02 17:18:03 +01:00
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
dev->data->dev_link.link_status = ETH_LINK_UP;
|
2017-03-31 15:54:10 +02:00
|
|
|
return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
|
2017-02-02 17:18:03 +01:00
|
|
|
}
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2017-02-02 17:18:03 +01:00
|
|
|
/* dev_start callback: enable Rx interrupts, then bring the link up. */
static int
tap_dev_start(struct rte_eth_dev *dev)
{
	int ret = tap_intr_handle_set(dev, 1);

	if (ret == 0)
		ret = tap_link_set_up(dev);
	return ret;
}
|
|
|
|
|
|
|
|
/* This function gets called when the current port gets stopped.
 * It disables Rx interrupts and brings the link (and tap netdevice) down;
 * both return values are intentionally ignored here (void callback).
 */
static void
tap_dev_stop(struct rte_eth_dev *dev)
{
	tap_intr_handle_set(dev, 0);
	tap_link_set_down(dev);
}
|
|
|
|
|
|
|
|
static int
|
2017-09-16 22:32:38 +00:00
|
|
|
tap_dev_configure(struct rte_eth_dev *dev)
|
2016-12-12 08:38:38 -06:00
|
|
|
{
|
2017-09-16 22:32:38 +00:00
|
|
|
if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR,
|
|
|
|
"%s: number of rx queues %d exceeds max num of queues %d",
|
2017-09-16 22:32:38 +00:00
|
|
|
dev->device->name,
|
|
|
|
dev->data->nb_rx_queues,
|
|
|
|
RTE_PMD_TAP_MAX_QUEUES);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR,
|
|
|
|
"%s: number of tx queues %d exceeds max num of queues %d",
|
2017-09-16 22:32:38 +00:00
|
|
|
dev->device->name,
|
|
|
|
dev->data->nb_tx_queues,
|
|
|
|
RTE_PMD_TAP_MAX_QUEUES);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(INFO, "%s: %p: TX configured queues number: %u",
|
|
|
|
dev->device->name, (void *)dev, dev->data->nb_tx_queues);
|
2017-09-16 22:32:38 +00:00
|
|
|
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(INFO, "%s: %p: RX configured queues number: %u",
|
|
|
|
dev->device->name, (void *)dev, dev->data->nb_rx_queues);
|
2017-09-16 22:32:38 +00:00
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-03-15 15:48:16 +01:00
|
|
|
static uint32_t
|
|
|
|
tap_dev_speed_capa(void)
|
|
|
|
{
|
|
|
|
uint32_t speed = pmd_link.link_speed;
|
|
|
|
uint32_t capa = 0;
|
|
|
|
|
|
|
|
if (speed >= ETH_SPEED_NUM_10M)
|
|
|
|
capa |= ETH_LINK_SPEED_10M;
|
|
|
|
if (speed >= ETH_SPEED_NUM_100M)
|
|
|
|
capa |= ETH_LINK_SPEED_100M;
|
|
|
|
if (speed >= ETH_SPEED_NUM_1G)
|
|
|
|
capa |= ETH_LINK_SPEED_1G;
|
|
|
|
if (speed >= ETH_SPEED_NUM_5G)
|
|
|
|
capa |= ETH_LINK_SPEED_2_5G;
|
|
|
|
if (speed >= ETH_SPEED_NUM_5G)
|
|
|
|
capa |= ETH_LINK_SPEED_5G;
|
|
|
|
if (speed >= ETH_SPEED_NUM_10G)
|
|
|
|
capa |= ETH_LINK_SPEED_10G;
|
|
|
|
if (speed >= ETH_SPEED_NUM_20G)
|
|
|
|
capa |= ETH_LINK_SPEED_20G;
|
|
|
|
if (speed >= ETH_SPEED_NUM_25G)
|
|
|
|
capa |= ETH_LINK_SPEED_25G;
|
|
|
|
if (speed >= ETH_SPEED_NUM_40G)
|
|
|
|
capa |= ETH_LINK_SPEED_40G;
|
|
|
|
if (speed >= ETH_SPEED_NUM_50G)
|
|
|
|
capa |= ETH_LINK_SPEED_50G;
|
|
|
|
if (speed >= ETH_SPEED_NUM_56G)
|
|
|
|
capa |= ETH_LINK_SPEED_56G;
|
|
|
|
if (speed >= ETH_SPEED_NUM_100G)
|
|
|
|
capa |= ETH_LINK_SPEED_100G;
|
|
|
|
|
|
|
|
return capa;
|
|
|
|
}
|
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
/* dev_infos_get callback: report static device capabilities and limits. */
static void
tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
	dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
	dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
	dev_info->min_rx_bufsize = 0;
	dev_info->speed_capa = tap_dev_speed_capa();
	/* port capabilities are a superset of the per-queue ones */
	dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
	dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
	dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
				    dev_info->tx_queue_offload_capa;
	dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
	/*
	 * limitation: TAP supports all of the following hash
	 * functions together and not in partial combinations
	 */
	dev_info->flow_type_rss_offloads =
		ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;
}
|
|
|
|
|
2017-10-10 20:20:18 +00:00
|
|
|
static int
|
2016-12-12 08:38:38 -06:00
|
|
|
tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
|
|
|
|
{
|
|
|
|
unsigned int i, imax;
|
|
|
|
unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
|
|
|
|
unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
|
2017-03-16 09:59:21 +01:00
|
|
|
unsigned long rx_nombuf = 0, ierrors = 0;
|
2016-12-12 08:38:38 -06:00
|
|
|
const struct pmd_internals *pmd = dev->data->dev_private;
|
|
|
|
|
2017-09-16 22:32:38 +00:00
|
|
|
/* rx queue statistics */
|
|
|
|
imax = (dev->data->nb_rx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
|
|
|
|
dev->data->nb_rx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
|
2016-12-12 08:38:38 -06:00
|
|
|
for (i = 0; i < imax; i++) {
|
|
|
|
tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
|
|
|
|
tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
|
|
|
|
rx_total += tap_stats->q_ipackets[i];
|
|
|
|
rx_bytes_total += tap_stats->q_ibytes[i];
|
2017-03-16 09:59:21 +01:00
|
|
|
rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
|
|
|
|
ierrors += pmd->rxq[i].stats.ierrors;
|
2017-09-16 22:32:38 +00:00
|
|
|
}
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2017-09-16 22:32:38 +00:00
|
|
|
/* tx queue statistics */
|
|
|
|
imax = (dev->data->nb_tx_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
|
|
|
|
dev->data->nb_tx_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
|
|
|
|
|
|
|
|
for (i = 0; i < imax; i++) {
|
2016-12-12 08:38:38 -06:00
|
|
|
tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
|
|
|
|
tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
|
|
|
|
tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
|
|
|
|
tx_total += tap_stats->q_opackets[i];
|
|
|
|
tx_err_total += tap_stats->q_errors[i];
|
|
|
|
tx_bytes_total += tap_stats->q_obytes[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
tap_stats->ipackets = rx_total;
|
|
|
|
tap_stats->ibytes = rx_bytes_total;
|
2017-03-16 09:59:21 +01:00
|
|
|
tap_stats->ierrors = ierrors;
|
|
|
|
tap_stats->rx_nombuf = rx_nombuf;
|
2016-12-12 08:38:38 -06:00
|
|
|
tap_stats->opackets = tx_total;
|
|
|
|
tap_stats->oerrors = tx_err_total;
|
|
|
|
tap_stats->obytes = tx_bytes_total;
|
2017-10-10 20:20:18 +00:00
|
|
|
return 0;
|
2016-12-12 08:38:38 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tap_stats_reset(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
|
|
|
|
2017-09-16 22:32:38 +00:00
|
|
|
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
|
2016-12-12 08:38:38 -06:00
|
|
|
pmd->rxq[i].stats.ipackets = 0;
|
|
|
|
pmd->rxq[i].stats.ibytes = 0;
|
2017-03-16 09:59:21 +01:00
|
|
|
pmd->rxq[i].stats.ierrors = 0;
|
|
|
|
pmd->rxq[i].stats.rx_nombuf = 0;
|
2016-12-12 08:38:38 -06:00
|
|
|
|
|
|
|
pmd->txq[i].stats.opackets = 0;
|
|
|
|
pmd->txq[i].stats.errs = 0;
|
|
|
|
pmd->txq[i].stats.obytes = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-05-12 11:33:03 +01:00
|
|
|
tap_dev_close(struct rte_eth_dev *dev)
|
2016-12-12 08:38:38 -06:00
|
|
|
{
|
2017-02-06 13:40:38 -06:00
|
|
|
int i;
|
|
|
|
struct pmd_internals *internals = dev->data->dev_private;
|
|
|
|
|
|
|
|
tap_link_set_down(dev);
|
2017-03-23 09:33:55 +01:00
|
|
|
tap_flow_flush(dev, NULL);
|
2017-03-23 09:42:11 +01:00
|
|
|
tap_flow_implicit_flush(internals, NULL);
|
2017-02-06 13:40:38 -06:00
|
|
|
|
2017-09-16 22:32:38 +00:00
|
|
|
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
|
|
|
|
if (internals->rxq[i].fd != -1) {
|
2017-02-06 13:40:38 -06:00
|
|
|
close(internals->rxq[i].fd);
|
2017-09-16 22:32:38 +00:00
|
|
|
internals->rxq[i].fd = -1;
|
|
|
|
}
|
|
|
|
if (internals->txq[i].fd != -1) {
|
|
|
|
close(internals->txq[i].fd);
|
|
|
|
internals->txq[i].fd = -1;
|
|
|
|
}
|
2017-02-06 13:40:38 -06:00
|
|
|
}
|
2017-06-27 14:33:15 +02:00
|
|
|
|
|
|
|
if (internals->remote_if_index) {
|
|
|
|
/* Restore initial remote state */
|
|
|
|
ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
|
|
|
|
&internals->remote_initial_flags);
|
|
|
|
}
|
2016-12-12 08:38:38 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tap_rx_queue_release(void *queue)
|
|
|
|
{
|
|
|
|
struct rx_queue *rxq = queue;
|
|
|
|
|
|
|
|
if (rxq && (rxq->fd > 0)) {
|
|
|
|
close(rxq->fd);
|
|
|
|
rxq->fd = -1;
|
2017-03-16 09:59:21 +01:00
|
|
|
rte_pktmbuf_free(rxq->pool);
|
|
|
|
rte_free(rxq->iovecs);
|
|
|
|
rxq->pool = NULL;
|
|
|
|
rxq->iovecs = NULL;
|
2016-12-12 08:38:38 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tap_tx_queue_release(void *queue)
|
|
|
|
{
|
|
|
|
struct tx_queue *txq = queue;
|
|
|
|
|
|
|
|
if (txq && (txq->fd > 0)) {
|
|
|
|
close(txq->fd);
|
|
|
|
txq->fd = -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2017-03-22 09:40:00 +01:00
|
|
|
tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
|
2016-12-12 08:38:38 -06:00
|
|
|
{
|
2017-03-22 09:40:00 +01:00
|
|
|
struct rte_eth_link *dev_link = &dev->data->dev_link;
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
|
|
|
struct ifreq ifr = { .ifr_flags = 0 };
|
|
|
|
|
|
|
|
if (pmd->remote_if_index) {
|
2017-03-31 15:54:10 +02:00
|
|
|
tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
|
2017-03-22 09:40:00 +01:00
|
|
|
if (!(ifr.ifr_flags & IFF_UP) ||
|
|
|
|
!(ifr.ifr_flags & IFF_RUNNING)) {
|
|
|
|
dev_link->link_status = ETH_LINK_DOWN;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
2017-03-31 15:54:10 +02:00
|
|
|
tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
|
2017-03-22 09:40:00 +01:00
|
|
|
dev_link->link_status =
|
|
|
|
((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
|
|
|
|
ETH_LINK_UP :
|
|
|
|
ETH_LINK_DOWN);
|
2016-12-12 08:38:38 -06:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-02 17:18:04 +01:00
|
|
|
static void
|
|
|
|
tap_promisc_enable(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
2017-03-15 15:48:13 +01:00
|
|
|
struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
|
2017-02-02 17:18:04 +01:00
|
|
|
|
|
|
|
dev->data->promiscuous = 1;
|
2017-03-31 15:54:10 +02:00
|
|
|
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
|
2017-05-24 17:41:12 +02:00
|
|
|
if (pmd->remote_if_index && !pmd->flow_isolate)
|
2017-03-23 09:42:11 +01:00
|
|
|
tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
|
2017-02-02 17:18:04 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tap_promisc_disable(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
2017-03-15 15:48:13 +01:00
|
|
|
struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
|
2017-02-02 17:18:04 +01:00
|
|
|
|
|
|
|
dev->data->promiscuous = 0;
|
2017-03-31 15:54:10 +02:00
|
|
|
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
|
2017-05-24 17:41:12 +02:00
|
|
|
if (pmd->remote_if_index && !pmd->flow_isolate)
|
2017-03-23 09:42:11 +01:00
|
|
|
tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
|
2017-02-02 17:18:04 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tap_allmulti_enable(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
2017-03-15 15:48:13 +01:00
|
|
|
struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
|
2017-02-02 17:18:04 +01:00
|
|
|
|
|
|
|
dev->data->all_multicast = 1;
|
2017-03-31 15:54:10 +02:00
|
|
|
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
|
2017-05-24 17:41:12 +02:00
|
|
|
if (pmd->remote_if_index && !pmd->flow_isolate)
|
2017-03-23 09:42:11 +01:00
|
|
|
tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
|
2017-02-02 17:18:04 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tap_allmulti_disable(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
2017-03-15 15:48:13 +01:00
|
|
|
struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
|
2017-02-02 17:18:04 +01:00
|
|
|
|
|
|
|
dev->data->all_multicast = 0;
|
2017-03-31 15:54:10 +02:00
|
|
|
tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
|
2017-05-24 17:41:12 +02:00
|
|
|
if (pmd->remote_if_index && !pmd->flow_isolate)
|
2017-03-23 09:42:11 +01:00
|
|
|
tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
|
2017-02-02 17:18:04 +01:00
|
|
|
}
|
|
|
|
|
2018-04-11 18:32:51 +02:00
|
|
|
static int
|
2017-03-15 15:48:14 +01:00
|
|
|
tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
|
|
|
|
{
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
2017-05-12 15:01:40 +02:00
|
|
|
enum ioctl_mode mode = LOCAL_ONLY;
|
2017-03-15 15:48:14 +01:00
|
|
|
struct ifreq ifr;
|
2018-04-11 18:32:51 +02:00
|
|
|
int ret;
|
2017-03-15 15:48:14 +01:00
|
|
|
|
|
|
|
if (is_zero_ether_addr(mac_addr)) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s: can't set an empty MAC address",
|
2017-06-09 19:36:05 +01:00
|
|
|
dev->device->name);
|
2018-04-11 18:32:51 +02:00
|
|
|
return -EINVAL;
|
2017-03-15 15:48:14 +01:00
|
|
|
}
|
2017-03-31 15:54:11 +02:00
|
|
|
/* Check the actual current MAC address on the tap netdevice */
|
2018-04-11 18:32:51 +02:00
|
|
|
ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2017-03-31 15:54:11 +02:00
|
|
|
if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
|
|
|
|
mac_addr))
|
2018-04-11 18:32:51 +02:00
|
|
|
return 0;
|
2017-05-12 15:01:40 +02:00
|
|
|
/* Check the current MAC address on the remote */
|
2018-04-11 18:32:51 +02:00
|
|
|
ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2017-05-12 15:01:40 +02:00
|
|
|
if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
|
|
|
|
mac_addr))
|
|
|
|
mode = LOCAL_AND_REMOTE;
|
2017-03-15 15:48:14 +01:00
|
|
|
ifr.ifr_hwaddr.sa_family = AF_LOCAL;
|
|
|
|
rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
|
2018-04-11 18:32:51 +02:00
|
|
|
ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2017-03-15 15:48:14 +01:00
|
|
|
rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
|
2017-05-24 17:41:12 +02:00
|
|
|
if (pmd->remote_if_index && !pmd->flow_isolate) {
|
2017-03-31 15:54:11 +02:00
|
|
|
/* Replace MAC redirection rule after a MAC change */
|
2018-04-11 18:32:51 +02:00
|
|
|
ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
|
|
|
|
if (ret < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR,
|
|
|
|
"%s: Couldn't delete MAC redirection rule",
|
2017-06-09 19:36:05 +01:00
|
|
|
dev->device->name);
|
2018-04-11 18:32:51 +02:00
|
|
|
return ret;
|
2017-03-31 15:54:11 +02:00
|
|
|
}
|
2018-04-11 18:32:51 +02:00
|
|
|
ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
|
|
|
|
if (ret < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR,
|
|
|
|
"%s: Couldn't add MAC redirection rule",
|
2017-06-09 19:36:05 +01:00
|
|
|
dev->device->name);
|
2018-04-11 18:32:51 +02:00
|
|
|
return ret;
|
|
|
|
}
|
2017-03-31 15:54:11 +02:00
|
|
|
}
|
2018-04-11 18:32:51 +02:00
|
|
|
|
|
|
|
return 0;
|
2017-03-15 15:48:14 +01:00
|
|
|
}
|
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
static int
|
|
|
|
tap_setup_queue(struct rte_eth_dev *dev,
|
|
|
|
struct pmd_internals *internals,
|
2017-09-16 22:32:38 +00:00
|
|
|
uint16_t qid,
|
|
|
|
int is_rx)
|
2016-12-12 08:38:38 -06:00
|
|
|
{
|
2017-09-16 22:32:38 +00:00
|
|
|
int *fd;
|
|
|
|
int *other_fd;
|
|
|
|
const char *dir;
|
2017-02-02 17:17:58 +01:00
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
2016-12-12 08:38:38 -06:00
|
|
|
struct rx_queue *rx = &internals->rxq[qid];
|
|
|
|
struct tx_queue *tx = &internals->txq[qid];
|
|
|
|
|
2017-09-16 22:32:38 +00:00
|
|
|
if (is_rx) {
|
|
|
|
fd = &rx->fd;
|
|
|
|
other_fd = &tx->fd;
|
|
|
|
dir = "rx";
|
|
|
|
} else {
|
|
|
|
fd = &tx->fd;
|
|
|
|
other_fd = &rx->fd;
|
|
|
|
dir = "tx";
|
|
|
|
}
|
|
|
|
if (*fd != -1) {
|
|
|
|
/* fd for this queue already exists */
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
|
2017-09-16 22:32:38 +00:00
|
|
|
pmd->name, *fd, dir, qid);
|
|
|
|
} else if (*other_fd != -1) {
|
|
|
|
/* Only other_fd exists. dup it */
|
|
|
|
*fd = dup(*other_fd);
|
|
|
|
if (*fd < 0) {
|
|
|
|
*fd = -1;
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s: dup() failed.", pmd->name);
|
2017-09-16 22:32:38 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, "%s: dup fd %d for %s queue qid %d (%d)",
|
2017-09-16 22:32:38 +00:00
|
|
|
pmd->name, *other_fd, dir, qid, *fd);
|
|
|
|
} else {
|
|
|
|
/* Both RX and TX fds do not exist (equal -1). Create fd */
|
|
|
|
*fd = tun_alloc(pmd);
|
|
|
|
if (*fd < 0) {
|
|
|
|
*fd = -1; /* restore original value */
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s: tun_alloc() failed.", pmd->name);
|
2017-05-12 15:01:38 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, "%s: add %s queue for qid %d fd %d",
|
2017-09-16 22:32:38 +00:00
|
|
|
pmd->name, dir, qid, *fd);
|
2016-12-12 08:38:38 -06:00
|
|
|
}
|
|
|
|
|
2017-03-15 16:09:08 +01:00
|
|
|
tx->mtu = &dev->data->mtu;
|
2017-03-16 09:59:21 +01:00
|
|
|
rx->rxmode = &dev->data->dev_conf.rxmode;
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2017-09-16 22:32:38 +00:00
|
|
|
return *fd;
|
2016-12-12 08:38:38 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
tap_rx_queue_setup(struct rte_eth_dev *dev,
|
|
|
|
uint16_t rx_queue_id,
|
2017-03-16 09:59:21 +01:00
|
|
|
uint16_t nb_rx_desc,
|
|
|
|
unsigned int socket_id,
|
2016-12-12 08:38:38 -06:00
|
|
|
const struct rte_eth_rxconf *rx_conf __rte_unused,
|
|
|
|
struct rte_mempool *mp)
|
|
|
|
{
|
|
|
|
struct pmd_internals *internals = dev->data->dev_private;
|
2017-03-16 09:59:21 +01:00
|
|
|
struct rx_queue *rxq = &internals->rxq[rx_queue_id];
|
|
|
|
struct rte_mbuf **tmp = &rxq->pool;
|
2017-04-27 15:51:42 +02:00
|
|
|
long iov_max = sysconf(_SC_IOV_MAX);
|
|
|
|
uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
|
|
|
|
struct iovec (*iovecs)[nb_desc + 1];
|
2017-03-16 09:59:21 +01:00
|
|
|
int data_off = RTE_PKTMBUF_HEADROOM;
|
|
|
|
int ret = 0;
|
2016-12-12 08:38:38 -06:00
|
|
|
int fd;
|
2017-03-16 09:59:21 +01:00
|
|
|
int i;
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2017-09-16 22:32:38 +00:00
|
|
|
if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(WARNING,
|
|
|
|
"nb_rx_queues %d too small or mempool NULL",
|
2017-09-16 22:32:38 +00:00
|
|
|
dev->data->nb_rx_queues);
|
2016-12-12 08:38:38 -06:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2017-03-16 09:59:21 +01:00
|
|
|
rxq->mp = mp;
|
|
|
|
rxq->trigger_seen = 1; /* force initial burst */
|
|
|
|
rxq->in_port = dev->data->port_id;
|
2017-04-27 15:51:42 +02:00
|
|
|
rxq->nb_rx_desc = nb_desc;
|
2017-06-09 19:36:05 +01:00
|
|
|
iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
|
2017-03-16 09:59:21 +01:00
|
|
|
socket_id);
|
|
|
|
if (!iovecs) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(WARNING,
|
|
|
|
"%s: Couldn't allocate %d RX descriptors",
|
2017-06-09 19:36:05 +01:00
|
|
|
dev->device->name, nb_desc);
|
2017-03-16 09:59:21 +01:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
rxq->iovecs = iovecs;
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2017-05-12 15:01:37 +02:00
|
|
|
dev->data->rx_queues[rx_queue_id] = rxq;
|
2017-09-16 22:32:38 +00:00
|
|
|
fd = tap_setup_queue(dev, internals, rx_queue_id, 1);
|
2017-03-16 09:59:21 +01:00
|
|
|
if (fd == -1) {
|
|
|
|
ret = fd;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
(*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
|
|
|
|
(*rxq->iovecs)[0].iov_base = &rxq->pi;
|
|
|
|
|
2017-04-27 15:51:42 +02:00
|
|
|
for (i = 1; i <= nb_desc; i++) {
|
2017-03-16 09:59:21 +01:00
|
|
|
*tmp = rte_pktmbuf_alloc(rxq->mp);
|
|
|
|
if (!*tmp) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(WARNING,
|
|
|
|
"%s: couldn't allocate memory for queue %d",
|
2017-06-09 19:36:05 +01:00
|
|
|
dev->device->name, rx_queue_id);
|
2017-03-16 09:59:21 +01:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
(*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
|
|
|
|
(*rxq->iovecs)[i].iov_base =
|
|
|
|
(char *)(*tmp)->buf_addr + data_off;
|
|
|
|
data_off = 0;
|
|
|
|
tmp = &(*tmp)->next;
|
|
|
|
}
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, " RX TUNTAP device name %s, qid %d on fd %d",
|
2017-02-02 17:17:58 +01:00
|
|
|
internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);
|
2016-12-12 08:38:38 -06:00
|
|
|
|
|
|
|
return 0;
|
2017-03-16 09:59:21 +01:00
|
|
|
|
|
|
|
error:
|
|
|
|
rte_pktmbuf_free(rxq->pool);
|
|
|
|
rxq->pool = NULL;
|
|
|
|
rte_free(rxq->iovecs);
|
|
|
|
rxq->iovecs = NULL;
|
|
|
|
return ret;
|
2016-12-12 08:38:38 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
tap_tx_queue_setup(struct rte_eth_dev *dev,
|
|
|
|
uint16_t tx_queue_id,
|
|
|
|
uint16_t nb_tx_desc __rte_unused,
|
|
|
|
unsigned int socket_id __rte_unused,
|
2018-01-17 16:04:33 +02:00
|
|
|
const struct rte_eth_txconf *tx_conf)
|
2016-12-12 08:38:38 -06:00
|
|
|
{
|
|
|
|
struct pmd_internals *internals = dev->data->dev_private;
|
2018-01-17 16:04:33 +02:00
|
|
|
struct tx_queue *txq;
|
2016-12-12 08:38:38 -06:00
|
|
|
int ret;
|
2018-05-10 19:56:55 +08:00
|
|
|
uint64_t offloads;
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2017-09-16 22:32:38 +00:00
|
|
|
if (tx_queue_id >= dev->data->nb_tx_queues)
|
2016-12-12 08:38:38 -06:00
|
|
|
return -1;
|
2017-05-12 15:01:37 +02:00
|
|
|
dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
|
2018-01-17 16:04:33 +02:00
|
|
|
txq = dev->data->tx_queues[tx_queue_id];
|
2018-05-10 19:56:55 +08:00
|
|
|
|
|
|
|
offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
|
|
|
|
txq->csum = !!(offloads &
|
|
|
|
(DEV_TX_OFFLOAD_IPV4_CKSUM |
|
|
|
|
DEV_TX_OFFLOAD_UDP_CKSUM |
|
|
|
|
DEV_TX_OFFLOAD_TCP_CKSUM));
|
|
|
|
|
2017-09-16 22:32:38 +00:00
|
|
|
ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
|
2016-12-12 08:38:38 -06:00
|
|
|
if (ret == -1)
|
|
|
|
return -1;
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG,
|
|
|
|
" TX TUNTAP device name %s, qid %d on fd %d csum %s",
|
2018-01-17 16:04:33 +02:00
|
|
|
internals->name, tx_queue_id, internals->txq[tx_queue_id].fd,
|
|
|
|
txq->csum ? "on" : "off");
|
2016-12-12 08:38:38 -06:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-03-15 15:48:15 +01:00
|
|
|
static int
|
|
|
|
tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
|
|
|
{
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
|
|
|
struct ifreq ifr = { .ifr_mtu = mtu };
|
|
|
|
int err = 0;
|
|
|
|
|
2017-03-31 15:54:10 +02:00
|
|
|
err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
|
2017-03-15 15:48:15 +01:00
|
|
|
if (!err)
|
|
|
|
dev->data->mtu = mtu;
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2017-03-15 15:48:17 +01:00
|
|
|
static int
|
|
|
|
tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
|
|
|
|
struct ether_addr *mc_addr_set __rte_unused,
|
|
|
|
uint32_t nb_mc_addr __rte_unused)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Nothing to do actually: the tap has no filtering whatsoever, every
|
|
|
|
* packet is received.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-03-22 09:40:01 +01:00
|
|
|
static int
|
|
|
|
tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev = arg;
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
|
|
|
struct ifinfomsg *info = NLMSG_DATA(nh);
|
|
|
|
|
|
|
|
if (nh->nlmsg_type != RTM_NEWLINK ||
|
|
|
|
(info->ifi_index != pmd->if_index &&
|
|
|
|
info->ifi_index != pmd->remote_if_index))
|
|
|
|
return 0;
|
|
|
|
return tap_link_update(dev, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-04-06 20:42:22 +08:00
|
|
|
tap_dev_intr_handler(void *cb_arg)
|
2017-03-22 09:40:01 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev = cb_arg;
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
|
|
|
|
2017-12-15 11:34:37 +00:00
|
|
|
tap_nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
|
2017-03-22 09:40:01 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2018-01-28 12:45:35 +02:00
|
|
|
tap_lsc_intr_handle_set(struct rte_eth_dev *dev, int set)
|
2017-03-22 09:40:01 +01:00
|
|
|
{
|
|
|
|
struct pmd_internals *pmd = dev->data->dev_private;
|
|
|
|
|
|
|
|
/* In any case, disable interrupt if the conf is no longer there. */
|
|
|
|
if (!dev->data->dev_conf.intr_conf.lsc) {
|
2017-09-18 19:47:35 +01:00
|
|
|
if (pmd->intr_handle.fd != -1) {
|
2017-12-15 11:34:37 +00:00
|
|
|
tap_nl_final(pmd->intr_handle.fd);
|
2017-09-18 19:47:35 +01:00
|
|
|
rte_intr_callback_unregister(&pmd->intr_handle,
|
|
|
|
tap_dev_intr_handler, dev);
|
|
|
|
}
|
2017-03-22 09:40:01 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (set) {
|
2017-12-15 11:34:37 +00:00
|
|
|
pmd->intr_handle.fd = tap_nl_init(RTMGRP_LINK);
|
2017-03-22 09:40:01 +01:00
|
|
|
if (unlikely(pmd->intr_handle.fd == -1))
|
|
|
|
return -EBADF;
|
|
|
|
return rte_intr_callback_register(
|
|
|
|
&pmd->intr_handle, tap_dev_intr_handler, dev);
|
|
|
|
}
|
2017-12-15 11:34:37 +00:00
|
|
|
tap_nl_final(pmd->intr_handle.fd);
|
2017-03-22 09:40:01 +01:00
|
|
|
return rte_intr_callback_unregister(&pmd->intr_handle,
|
|
|
|
tap_dev_intr_handler, dev);
|
|
|
|
}
|
|
|
|
|
2018-01-28 12:45:35 +02:00
|
|
|
/*
 * Toggle both interrupt sources (LSC and RX). If enabling RX interrupts
 * fails after LSC succeeded, roll the LSC registration back.
 */
static int
tap_intr_handle_set(struct rte_eth_dev *dev, int set)
{
	int err;

	err = tap_lsc_intr_handle_set(dev, set);
	if (err)
		return err;
	err = tap_rx_intr_vec_set(dev, set);
	if (err && set)
		tap_lsc_intr_handle_set(dev, 0);
	return err;
}
|
|
|
|
|
2017-03-15 15:48:18 +01:00
|
|
|
static const uint32_t*
|
|
|
|
tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
|
|
|
|
{
|
|
|
|
static const uint32_t ptypes[] = {
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER,
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER_VLAN,
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER_QINQ,
|
|
|
|
RTE_PTYPE_INNER_L3_IPV4,
|
|
|
|
RTE_PTYPE_INNER_L3_IPV4_EXT,
|
|
|
|
RTE_PTYPE_INNER_L3_IPV6,
|
|
|
|
RTE_PTYPE_INNER_L3_IPV6_EXT,
|
|
|
|
RTE_PTYPE_INNER_L4_FRAG,
|
|
|
|
RTE_PTYPE_INNER_L4_UDP,
|
|
|
|
RTE_PTYPE_INNER_L4_TCP,
|
|
|
|
RTE_PTYPE_INNER_L4_SCTP,
|
|
|
|
RTE_PTYPE_L2_ETHER,
|
|
|
|
RTE_PTYPE_L2_ETHER_VLAN,
|
|
|
|
RTE_PTYPE_L2_ETHER_QINQ,
|
|
|
|
RTE_PTYPE_L3_IPV4,
|
|
|
|
RTE_PTYPE_L3_IPV4_EXT,
|
|
|
|
RTE_PTYPE_L3_IPV6_EXT,
|
|
|
|
RTE_PTYPE_L3_IPV6,
|
|
|
|
RTE_PTYPE_L4_FRAG,
|
|
|
|
RTE_PTYPE_L4_UDP,
|
|
|
|
RTE_PTYPE_L4_TCP,
|
|
|
|
RTE_PTYPE_L4_SCTP,
|
|
|
|
};
|
|
|
|
|
|
|
|
return ptypes;
|
|
|
|
}
|
|
|
|
|
2017-03-15 15:48:19 +01:00
|
|
|
static int
|
|
|
|
tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
|
|
|
|
struct rte_eth_fc_conf *fc_conf)
|
|
|
|
{
|
|
|
|
fc_conf->mode = RTE_FC_NONE;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
|
|
|
|
struct rte_eth_fc_conf *fc_conf)
|
|
|
|
{
|
|
|
|
if (fc_conf->mode != RTE_FC_NONE)
|
|
|
|
return -ENOTSUP;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
static const struct eth_dev_ops ops = {
|
|
|
|
.dev_start = tap_dev_start,
|
|
|
|
.dev_stop = tap_dev_stop,
|
|
|
|
.dev_close = tap_dev_close,
|
|
|
|
.dev_configure = tap_dev_configure,
|
|
|
|
.dev_infos_get = tap_dev_info,
|
|
|
|
.rx_queue_setup = tap_rx_queue_setup,
|
|
|
|
.tx_queue_setup = tap_tx_queue_setup,
|
|
|
|
.rx_queue_release = tap_rx_queue_release,
|
|
|
|
.tx_queue_release = tap_tx_queue_release,
|
2017-03-15 15:48:19 +01:00
|
|
|
.flow_ctrl_get = tap_flow_ctrl_get,
|
|
|
|
.flow_ctrl_set = tap_flow_ctrl_set,
|
2016-12-12 08:38:38 -06:00
|
|
|
.link_update = tap_link_update,
|
2017-02-02 17:18:03 +01:00
|
|
|
.dev_set_link_up = tap_link_set_up,
|
|
|
|
.dev_set_link_down = tap_link_set_down,
|
2017-02-02 17:18:04 +01:00
|
|
|
.promiscuous_enable = tap_promisc_enable,
|
|
|
|
.promiscuous_disable = tap_promisc_disable,
|
|
|
|
.allmulticast_enable = tap_allmulti_enable,
|
|
|
|
.allmulticast_disable = tap_allmulti_disable,
|
2017-03-15 15:48:14 +01:00
|
|
|
.mac_addr_set = tap_mac_set,
|
2017-03-15 15:48:15 +01:00
|
|
|
.mtu_set = tap_mtu_set,
|
2017-03-15 15:48:17 +01:00
|
|
|
.set_mc_addr_list = tap_set_mc_addr_list,
|
2016-12-12 08:38:38 -06:00
|
|
|
.stats_get = tap_stats_get,
|
|
|
|
.stats_reset = tap_stats_reset,
|
2017-03-15 15:48:18 +01:00
|
|
|
.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
|
2017-03-23 09:33:55 +01:00
|
|
|
.filter_ctrl = tap_dev_filter_ctrl,
|
2016-12-12 08:38:38 -06:00
|
|
|
};
|
|
|
|
|
|
|
|
static int
|
2017-04-11 17:44:12 +02:00
|
|
|
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
|
2018-03-13 03:23:52 +05:30
|
|
|
char *remote_iface, struct ether_addr *mac_addr)
|
2016-12-12 08:38:38 -06:00
|
|
|
{
|
|
|
|
int numa_node = rte_socket_id();
|
2017-04-11 17:44:12 +02:00
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
struct pmd_internals *pmd;
|
|
|
|
struct rte_eth_dev_data *data;
|
2017-05-12 15:01:39 +02:00
|
|
|
struct ifreq ifr;
|
2017-02-06 13:40:35 -06:00
|
|
|
int i;
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, "%s device on numa %u",
|
2018-04-03 03:07:48 +05:30
|
|
|
tuntap_name, rte_socket_id());
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2017-04-11 17:44:12 +02:00
|
|
|
dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
|
2016-12-12 08:38:38 -06:00
|
|
|
if (!dev) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s Unable to allocate device struct",
|
2018-04-03 03:07:48 +05:30
|
|
|
tuntap_name);
|
2018-02-05 18:17:20 +02:00
|
|
|
goto error_exit_nodev;
|
2016-12-12 08:38:38 -06:00
|
|
|
}
|
|
|
|
|
2017-04-11 17:44:12 +02:00
|
|
|
pmd = dev->data->dev_private;
|
2017-09-16 22:32:38 +00:00
|
|
|
pmd->dev = dev;
|
2016-12-12 08:38:38 -06:00
|
|
|
snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
|
|
|
|
|
2017-03-15 15:48:13 +01:00
|
|
|
pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
|
|
|
|
if (pmd->ioctl_sock == -1) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR,
|
|
|
|
"%s Unable to get a socket for management: %s",
|
2018-04-03 03:07:48 +05:30
|
|
|
tuntap_name, strerror(errno));
|
2017-03-15 15:48:13 +01:00
|
|
|
goto error_exit;
|
|
|
|
}
|
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
/* Setup some default values */
|
2018-04-24 05:51:23 +00:00
|
|
|
data = dev->data;
|
2016-12-12 08:38:38 -06:00
|
|
|
data->dev_private = pmd;
|
2017-10-24 12:35:38 +02:00
|
|
|
data->dev_flags = RTE_ETH_DEV_INTR_LSC;
|
2016-12-12 08:38:38 -06:00
|
|
|
data->numa_node = numa_node;
|
|
|
|
|
|
|
|
data->dev_link = pmd_link;
|
|
|
|
data->mac_addrs = &pmd->eth_addr;
|
2017-09-16 22:32:38 +00:00
|
|
|
/* Set the number of RX and TX queues */
|
|
|
|
data->nb_rx_queues = 0;
|
|
|
|
data->nb_tx_queues = 0;
|
2016-12-12 08:38:38 -06:00
|
|
|
|
|
|
|
dev->dev_ops = &ops;
|
|
|
|
dev->rx_pkt_burst = pmd_rx_burst;
|
|
|
|
dev->tx_pkt_burst = pmd_tx_burst;
|
|
|
|
|
2017-03-22 09:40:01 +01:00
|
|
|
pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
|
|
|
|
pmd->intr_handle.fd = -1;
|
2018-01-28 12:45:35 +02:00
|
|
|
dev->intr_handle = &pmd->intr_handle;
|
2017-03-22 09:40:01 +01:00
|
|
|
|
2017-02-06 13:40:35 -06:00
|
|
|
/* Presetup the fds to -1 as being not valid */
|
|
|
|
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
|
2016-12-12 08:38:38 -06:00
|
|
|
pmd->rxq[i].fd = -1;
|
|
|
|
pmd->txq[i].fd = -1;
|
|
|
|
}
|
|
|
|
|
2018-04-03 03:07:47 +05:30
|
|
|
if (tap_type) {
|
|
|
|
if (is_zero_ether_addr(mac_addr))
|
|
|
|
eth_random_addr((uint8_t *)&pmd->eth_addr);
|
|
|
|
else
|
|
|
|
rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr));
|
|
|
|
}
|
2017-04-12 09:30:21 +02:00
|
|
|
|
2017-05-12 15:01:39 +02:00
|
|
|
/* Immediately create the netdevice (this will create the 1st queue). */
|
2017-09-16 22:32:38 +00:00
|
|
|
/* rx queue */
|
|
|
|
if (tap_setup_queue(dev, pmd, 0, 1) == -1)
|
|
|
|
goto error_exit;
|
|
|
|
/* tx queue */
|
|
|
|
if (tap_setup_queue(dev, pmd, 0, 0) == -1)
|
2017-05-12 15:01:39 +02:00
|
|
|
goto error_exit;
|
|
|
|
|
|
|
|
ifr.ifr_mtu = dev->data->mtu;
|
|
|
|
if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
|
|
|
|
goto error_exit;
|
|
|
|
|
2018-04-03 03:07:47 +05:30
|
|
|
if (tap_type) {
|
|
|
|
memset(&ifr, 0, sizeof(struct ifreq));
|
|
|
|
ifr.ifr_hwaddr.sa_family = AF_LOCAL;
|
|
|
|
rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
|
|
|
|
ETHER_ADDR_LEN);
|
|
|
|
if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
|
|
|
|
goto error_exit;
|
|
|
|
}
|
2017-05-12 15:01:39 +02:00
|
|
|
|
net/tap: add basic flow API patterns and actions
Supported flow rules are now mapped to TC rules on the tap netdevice.
The netlink message used for creating the TC rule is stored in struct
rte_flow. That way, by simply changing a metadata in it, we can require
for the rule deletion without further parsing.
Supported items:
- eth: src and dst (with variable masks), and eth_type (0xffff mask).
- vlan: vid, pcp, tpid, but not eid.
- ipv4/6: src and dst (with variable masks), and ip_proto (0xffff mask).
- udp/tcp: src and dst port (0xffff) mask.
Supported actions:
- DROP
- QUEUE
- PASSTHRU
It is generally not possible to provide a "last" item. However, if the
"last" item, once masked, is identical to the masked spec, then it is
supported.
Only IPv4/6 and MAC addresses can use a variable mask. All other
items need a full mask (exact match).
Support for VLAN requires kernel headers >= 4.9, checked using
auto-config.sh.
Signed-off-by: Pascal Mazon <pascal.mazon@6wind.com>
Acked-by: Olga Shern <olgas@mellanox.com>
Acked-by: Keith Wiles <keith.wiles@intel.com>
2017-03-23 09:33:57 +01:00
|
|
|
/*
|
2017-05-12 15:01:39 +02:00
|
|
|
* Set up everything related to rte_flow:
|
|
|
|
* - netlink socket
|
|
|
|
* - tap / remote if_index
|
|
|
|
* - mandatory QDISCs
|
|
|
|
* - rte_flow actual/implicit lists
|
|
|
|
* - implicit rules
|
net/tap: add basic flow API patterns and actions
Supported flow rules are now mapped to TC rules on the tap netdevice.
The netlink message used for creating the TC rule is stored in struct
rte_flow. That way, by simply changing a metadata in it, we can require
for the rule deletion without further parsing.
Supported items:
- eth: src and dst (with variable masks), and eth_type (0xffff mask).
- vlan: vid, pcp, tpid, but not eid.
- ipv4/6: src and dst (with variable masks), and ip_proto (0xffff mask).
- udp/tcp: src and dst port (0xffff) mask.
Supported actions:
- DROP
- QUEUE
- PASSTHRU
It is generally not possible to provide a "last" item. However, if the
"last" item, once masked, is identical to the masked spec, then it is
supported.
Only IPv4/6 and MAC addresses can use a variable mask. All other
items need a full mask (exact match).
Support for VLAN requires kernel headers >= 4.9, checked using
auto-config.sh.
Signed-off-by: Pascal Mazon <pascal.mazon@6wind.com>
Acked-by: Olga Shern <olgas@mellanox.com>
Acked-by: Keith Wiles <keith.wiles@intel.com>
2017-03-23 09:33:57 +01:00
|
|
|
*/
|
2017-12-15 11:34:37 +00:00
|
|
|
pmd->nlsk_fd = tap_nl_init(0);
|
2017-05-12 15:01:39 +02:00
|
|
|
if (pmd->nlsk_fd == -1) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(WARNING, "%s: failed to create netlink socket.",
|
2017-05-12 15:01:39 +02:00
|
|
|
pmd->name);
|
|
|
|
goto disable_rte_flow;
|
|
|
|
}
|
|
|
|
pmd->if_index = if_nametoindex(pmd->name);
|
|
|
|
if (!pmd->if_index) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s: failed to get if_index.", pmd->name);
|
2017-05-12 15:01:39 +02:00
|
|
|
goto disable_rte_flow;
|
|
|
|
}
|
|
|
|
if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s: failed to create multiq qdisc.",
|
2017-05-12 15:01:39 +02:00
|
|
|
pmd->name);
|
|
|
|
goto disable_rte_flow;
|
|
|
|
}
|
|
|
|
if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
|
2017-05-12 15:01:39 +02:00
|
|
|
pmd->name);
|
|
|
|
goto disable_rte_flow;
|
|
|
|
}
|
|
|
|
LIST_INIT(&pmd->flows);
|
2017-03-31 15:54:10 +02:00
|
|
|
|
2017-05-12 15:01:39 +02:00
|
|
|
if (strlen(remote_iface)) {
|
2017-03-23 09:42:11 +01:00
|
|
|
pmd->remote_if_index = if_nametoindex(remote_iface);
|
2017-05-12 15:01:39 +02:00
|
|
|
if (!pmd->remote_if_index) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s: failed to get %s if_index.",
|
2017-05-12 15:01:39 +02:00
|
|
|
pmd->name, remote_iface);
|
|
|
|
goto error_remote;
|
|
|
|
}
|
2017-03-23 09:42:11 +01:00
|
|
|
snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
|
|
|
|
"%s", remote_iface);
|
2017-06-27 14:33:15 +02:00
|
|
|
|
|
|
|
/* Save state of remote device */
|
|
|
|
tap_ioctl(pmd, SIOCGIFFLAGS, &pmd->remote_initial_flags, 0, REMOTE_ONLY);
|
|
|
|
|
|
|
|
/* Replicate remote MAC address */
|
2017-05-12 15:01:39 +02:00
|
|
|
if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s: failed to get %s MAC address.",
|
2017-05-12 15:01:39 +02:00
|
|
|
pmd->name, pmd->remote_iface);
|
|
|
|
goto error_remote;
|
2017-03-31 15:54:10 +02:00
|
|
|
}
|
|
|
|
rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
|
|
|
|
ETHER_ADDR_LEN);
|
2017-05-12 15:01:39 +02:00
|
|
|
/* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
|
|
|
|
if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s: failed to get %s MAC address.",
|
2017-05-12 15:01:39 +02:00
|
|
|
pmd->name, remote_iface);
|
|
|
|
goto error_remote;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush usually returns negative value because it tries to
|
|
|
|
* delete every QDISC (and on a running device, one QDISC at
|
|
|
|
* least is needed). Ignore negative return value.
|
|
|
|
*/
|
|
|
|
qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
|
|
|
|
if (qdisc_create_ingress(pmd->nlsk_fd,
|
|
|
|
pmd->remote_if_index) < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
|
2017-05-12 15:01:39 +02:00
|
|
|
pmd->remote_iface);
|
|
|
|
goto error_remote;
|
|
|
|
}
|
|
|
|
LIST_INIT(&pmd->implicit_flows);
|
|
|
|
if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0 ||
|
|
|
|
tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
|
|
|
|
tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
|
|
|
|
tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR,
|
|
|
|
"%s: failed to create implicit rules.",
|
2017-05-12 15:01:39 +02:00
|
|
|
pmd->name);
|
|
|
|
goto error_remote;
|
|
|
|
}
|
2017-03-23 09:42:11 +01:00
|
|
|
}
|
2017-03-23 09:33:55 +01:00
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
return 0;
|
|
|
|
|
2017-05-12 15:01:39 +02:00
|
|
|
disable_rte_flow:
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, " Disabling rte flow support: %s(%d)",
|
2017-05-12 15:01:39 +02:00
|
|
|
strerror(errno), errno);
|
|
|
|
if (strlen(remote_iface)) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "Remote feature requires flow support.");
|
2017-05-12 15:01:39 +02:00
|
|
|
goto error_exit;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
error_remote:
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, " Can't set up remote feature: %s(%d)",
|
2017-05-12 15:01:39 +02:00
|
|
|
strerror(errno), errno);
|
|
|
|
tap_flow_implicit_flush(pmd, NULL);
|
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
error_exit:
|
2018-02-05 18:17:20 +02:00
|
|
|
if (pmd->ioctl_sock > 0)
|
|
|
|
close(pmd->ioctl_sock);
|
|
|
|
rte_eth_dev_release_port(dev);
|
|
|
|
|
|
|
|
error_exit_nodev:
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "%s Unable to initialize %s",
|
2018-04-03 03:07:48 +05:30
|
|
|
tuntap_name, rte_vdev_device_name(vdev));
|
2016-12-12 08:38:38 -06:00
|
|
|
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
set_interface_name(const char *key __rte_unused,
|
|
|
|
const char *value,
|
|
|
|
void *extra_args)
|
|
|
|
{
|
|
|
|
char *name = (char *)extra_args;
|
|
|
|
|
|
|
|
if (value)
|
2018-03-12 11:33:00 +00:00
|
|
|
strlcpy(name, value, RTE_ETH_NAME_MAX_LEN - 1);
|
2016-12-12 08:38:38 -06:00
|
|
|
else
|
|
|
|
snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
|
|
|
|
DEFAULT_TAP_NAME, (tap_unit - 1));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-03-23 09:42:11 +01:00
|
|
|
static int
|
|
|
|
set_remote_iface(const char *key __rte_unused,
|
|
|
|
const char *value,
|
|
|
|
void *extra_args)
|
|
|
|
{
|
|
|
|
char *name = (char *)extra_args;
|
|
|
|
|
|
|
|
if (value)
|
2018-03-12 11:33:00 +00:00
|
|
|
strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
|
2017-03-23 09:42:11 +01:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-03-13 03:23:52 +05:30
|
|
|
static int parse_user_mac(struct ether_addr *user_mac,
|
|
|
|
const char *value)
|
|
|
|
{
|
|
|
|
unsigned int index = 0;
|
|
|
|
char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL;
|
|
|
|
|
|
|
|
if (user_mac == NULL || value == NULL)
|
|
|
|
return 0;
|
|
|
|
|
2018-03-12 11:33:00 +00:00
|
|
|
strlcpy(mac_temp, value, sizeof(mac_temp));
|
2018-03-13 03:23:52 +05:30
|
|
|
mac_byte = strtok(mac_temp, ":");
|
|
|
|
|
|
|
|
while ((mac_byte != NULL) &&
|
|
|
|
(strlen(mac_byte) <= 2) &&
|
|
|
|
(strlen(mac_byte) == strspn(mac_byte,
|
|
|
|
ETH_TAP_CMP_MAC_FMT))) {
|
|
|
|
user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16);
|
|
|
|
mac_byte = strtok(NULL, ":");
|
|
|
|
}
|
|
|
|
|
|
|
|
return index;
|
|
|
|
}
|
|
|
|
|
2017-04-12 09:30:21 +02:00
|
|
|
static int
|
|
|
|
set_mac_type(const char *key __rte_unused,
|
|
|
|
const char *value,
|
|
|
|
void *extra_args)
|
|
|
|
{
|
2018-03-13 03:23:52 +05:30
|
|
|
struct ether_addr *user_mac = extra_args;
|
|
|
|
|
|
|
|
if (!value)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
|
|
|
|
static int iface_idx;
|
|
|
|
|
|
|
|
/* fixed mac = 00:64:74:61:70:<iface_idx> */
|
|
|
|
memcpy((char *)user_mac->addr_bytes, "\0dtap", ETHER_ADDR_LEN);
|
|
|
|
user_mac->addr_bytes[ETHER_ADDR_LEN - 1] = iface_idx++ + '0';
|
|
|
|
goto success;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (parse_user_mac(user_mac, value) != 6)
|
|
|
|
goto error;
|
|
|
|
success:
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, "TAP user MAC param (%s)", value);
|
2017-04-12 09:30:21 +02:00
|
|
|
return 0;
|
2018-03-13 03:23:52 +05:30
|
|
|
|
|
|
|
error:
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "TAP user MAC (%s) is not in format (%s|%s)",
|
2018-03-13 03:23:52 +05:30
|
|
|
value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT);
|
|
|
|
return -1;
|
2017-04-12 09:30:21 +02:00
|
|
|
}
|
|
|
|
|
2018-04-03 03:07:47 +05:30
|
|
|
/*
|
|
|
|
* Open a TUN interface device. TUN PMD
|
|
|
|
* 1) sets tap_type as false
|
|
|
|
* 2) intakes iface as argument.
|
|
|
|
* 3) as interface is virtual set speed to 10G
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
rte_pmd_tun_probe(struct rte_vdev_device *dev)
|
|
|
|
{
|
|
|
|
const char *name, *params;
|
|
|
|
int ret;
|
|
|
|
struct rte_kvargs *kvlist = NULL;
|
|
|
|
char tun_name[RTE_ETH_NAME_MAX_LEN];
|
|
|
|
char remote_iface[RTE_ETH_NAME_MAX_LEN];
|
|
|
|
|
|
|
|
tap_type = 0;
|
|
|
|
strcpy(tuntap_name, "TUN");
|
|
|
|
|
|
|
|
name = rte_vdev_device_name(dev);
|
|
|
|
params = rte_vdev_device_args(dev);
|
|
|
|
memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
|
|
|
|
|
|
|
|
if (params && (params[0] != '\0')) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, "parameters (%s)", params);
|
2018-04-03 03:07:47 +05:30
|
|
|
|
|
|
|
kvlist = rte_kvargs_parse(params, valid_arguments);
|
|
|
|
if (kvlist) {
|
|
|
|
if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
|
|
|
|
ret = rte_kvargs_process(kvlist,
|
|
|
|
ETH_TAP_IFACE_ARG,
|
|
|
|
&set_interface_name,
|
|
|
|
tun_name);
|
|
|
|
|
|
|
|
if (ret == -1)
|
|
|
|
goto leave;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pmd_link.link_speed = ETH_SPEED_NUM_10G;
|
|
|
|
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(NOTICE, "Initializing pmd_tun for %s as %s",
|
2018-04-03 03:07:47 +05:30
|
|
|
name, tun_name);
|
|
|
|
|
|
|
|
ret = eth_dev_tap_create(dev, tun_name, remote_iface, 0);
|
|
|
|
|
|
|
|
leave:
|
|
|
|
if (ret == -1) {
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(ERR, "Failed to create pmd for %s as %s",
|
2018-04-03 03:07:47 +05:30
|
|
|
name, tun_name);
|
|
|
|
tun_unit--; /* Restore the unit number */
|
|
|
|
}
|
|
|
|
rte_kvargs_free(kvlist);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
/* Open a TAP interface device.
 *
 * Probe entry point for the "net_tap" vdev: parses the optional "iface",
 * "remote" and "mac" kvargs, then creates the TAP ethdev. In a secondary
 * process invoked without arguments it only attaches to the port already
 * created by the primary.
 */
static int
rte_pmd_tap_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	int ret;
	struct rte_kvargs *kvlist = NULL;
	int speed;
	char tap_name[RTE_ETH_NAME_MAX_LEN];
	char remote_iface[RTE_ETH_NAME_MAX_LEN];
	struct ether_addr user_mac = { .addr_bytes = {0} };
	struct rte_eth_dev *eth_dev;

	/* Globals consumed later by eth_dev_tap_create(). */
	tap_type = 1;
	strcpy(tuntap_name, "TAP");

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
	    strlen(params) == 0) {
		/* Secondary process: reuse the primary's port, no new tap. */
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			TAP_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		return 0;
	}

	speed = ETH_SPEED_NUM_10G;
	/* Default netdevice name; may be overridden by the "iface" kvarg. */
	snprintf(tap_name, sizeof(tap_name), "%s%d",
		 DEFAULT_TAP_NAME, tap_unit++);
	memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);

	if (params && (params[0] != '\0')) {
		TAP_LOG(DEBUG, "parameters (%s)", params);

		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist) {
			/* "iface": explicit kernel netdevice name. */
			if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_IFACE_ARG,
							 &set_interface_name,
							 tap_name);
				if (ret == -1)
					goto leave;
			}

			/* "remote": mirror an existing kernel netdevice. */
			if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_REMOTE_ARG,
							 &set_remote_iface,
							 remote_iface);
				if (ret == -1)
					goto leave;
			}

			/* "mac": "fixed" or an explicit MAC string. */
			if (rte_kvargs_count(kvlist, ETH_TAP_MAC_ARG) == 1) {
				ret = rte_kvargs_process(kvlist,
							 ETH_TAP_MAC_ARG,
							 &set_mac_type,
							 &user_mac);
				if (ret == -1)
					goto leave;
			}
		}
	}
	pmd_link.link_speed = speed;

	TAP_LOG(NOTICE, "Initializing pmd_tap for %s as %s",
		name, tap_name);

	ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac);

leave:	/* fall through on success; jumped to on kvargs failure */
	if (ret == -1) {
		TAP_LOG(ERR, "Failed to create pmd for %s as %s",
			name, tap_name);
		tap_unit--; /* Restore the unit number */
	}
	rte_kvargs_free(kvlist);

	return ret;
}
|
|
|
|
|
2018-04-03 03:07:47 +05:30
|
|
|
/* detach a TUNTAP device.
|
2016-12-12 08:38:38 -06:00
|
|
|
*/
|
|
|
|
static int
|
2017-04-11 17:44:13 +02:00
|
|
|
rte_pmd_tap_remove(struct rte_vdev_device *dev)
|
2016-12-12 08:38:38 -06:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *eth_dev = NULL;
|
|
|
|
struct pmd_internals *internals;
|
|
|
|
int i;
|
|
|
|
|
2018-04-25 08:56:37 -07:00
|
|
|
TAP_LOG(DEBUG, "Closing TUN/TAP Ethernet device on numa %u",
|
2016-12-12 08:38:38 -06:00
|
|
|
rte_socket_id());
|
|
|
|
|
|
|
|
/* find the ethdev entry */
|
2017-04-11 17:44:13 +02:00
|
|
|
eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
|
2016-12-12 08:38:38 -06:00
|
|
|
if (!eth_dev)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
internals = eth_dev->data->dev_private;
|
2017-07-17 12:00:20 +03:00
|
|
|
if (internals->nlsk_fd) {
|
net/tap: add basic flow API patterns and actions
Supported flow rules are now mapped to TC rules on the tap netdevice.
The netlink message used for creating the TC rule is stored in struct
rte_flow. That way, by simply changing a metadata in it, we can require
for the rule deletion without further parsing.
Supported items:
- eth: src and dst (with variable masks), and eth_type (0xffff mask).
- vlan: vid, pcp, tpid, but not eid.
- ipv4/6: src and dst (with variable masks), and ip_proto (0xffff mask).
- udp/tcp: src and dst port (0xffff) mask.
Supported actions:
- DROP
- QUEUE
- PASSTHRU
It is generally not possible to provide a "last" item. However, if the
"last" item, once masked, is identical to the masked spec, then it is
supported.
Only IPv4/6 and MAC addresses can use a variable mask. All other
items need a full mask (exact match).
Support for VLAN requires kernel headers >= 4.9, checked using
auto-config.sh.
Signed-off-by: Pascal Mazon <pascal.mazon@6wind.com>
Acked-by: Olga Shern <olgas@mellanox.com>
Acked-by: Keith Wiles <keith.wiles@intel.com>
2017-03-23 09:33:57 +01:00
|
|
|
tap_flow_flush(eth_dev, NULL);
|
2017-03-23 09:42:11 +01:00
|
|
|
tap_flow_implicit_flush(internals, NULL);
|
2017-12-15 11:34:37 +00:00
|
|
|
tap_nl_final(internals->nlsk_fd);
|
net/tap: add basic flow API patterns and actions
Supported flow rules are now mapped to TC rules on the tap netdevice.
The netlink message used for creating the TC rule is stored in struct
rte_flow. That way, by simply changing a metadata in it, we can require
for the rule deletion without further parsing.
Supported items:
- eth: src and dst (with variable masks), and eth_type (0xffff mask).
- vlan: vid, pcp, tpid, but not eid.
- ipv4/6: src and dst (with variable masks), and ip_proto (0xffff mask).
- udp/tcp: src and dst port (0xffff) mask.
Supported actions:
- DROP
- QUEUE
- PASSTHRU
It is generally not possible to provide a "last" item. However, if the
"last" item, once masked, is identical to the masked spec, then it is
supported.
Only IPv4/6 and MAC addresses can use a variable mask. All other
items need a full mask (exact match).
Support for VLAN requires kernel headers >= 4.9, checked using
auto-config.sh.
Signed-off-by: Pascal Mazon <pascal.mazon@6wind.com>
Acked-by: Olga Shern <olgas@mellanox.com>
Acked-by: Keith Wiles <keith.wiles@intel.com>
2017-03-23 09:33:57 +01:00
|
|
|
}
|
2017-09-16 22:32:38 +00:00
|
|
|
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
|
|
|
|
if (internals->rxq[i].fd != -1) {
|
2017-02-06 13:40:33 -06:00
|
|
|
close(internals->rxq[i].fd);
|
2017-09-16 22:32:38 +00:00
|
|
|
internals->rxq[i].fd = -1;
|
|
|
|
}
|
|
|
|
if (internals->txq[i].fd != -1) {
|
|
|
|
close(internals->txq[i].fd);
|
|
|
|
internals->txq[i].fd = -1;
|
|
|
|
}
|
|
|
|
}
|
2016-12-12 08:38:38 -06:00
|
|
|
|
2017-03-15 15:48:13 +01:00
|
|
|
close(internals->ioctl_sock);
|
2016-12-12 08:38:38 -06:00
|
|
|
rte_free(eth_dev->data->dev_private);
|
|
|
|
|
|
|
|
rte_eth_dev_release_port(eth_dev);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-04-03 03:07:47 +05:30
|
|
|
/* TUN virtual device driver: probed via "net_tun" vdev arguments. */
static struct rte_vdev_driver pmd_tun_drv = {
	.probe = rte_pmd_tun_probe,
	.remove = rte_pmd_tap_remove, /* teardown is shared with TAP */
};

/* TAP virtual device driver: probed via "net_tap" vdev arguments. */
static struct rte_vdev_driver pmd_tap_drv = {
	.probe = rte_pmd_tap_probe,
	.remove = rte_pmd_tap_remove,
};
|
2018-04-25 08:56:37 -07:00
|
|
|
|
2016-12-12 08:38:38 -06:00
|
|
|
/* Register both vdev drivers and advertise their accepted kvargs. */
RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
RTE_PMD_REGISTER_VDEV(net_tun, pmd_tun_drv);
/* Legacy "eth_tap" name kept for backward compatibility. */
RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
RTE_PMD_REGISTER_PARAM_STRING(net_tun,
			      ETH_TAP_IFACE_ARG "=<string> ");
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
			      ETH_TAP_IFACE_ARG "=<string> "
			      ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
			      ETH_TAP_REMOTE_ARG "=<string>");
|
2018-04-25 08:56:37 -07:00
|
|
|
/* Log type identifier shared by the whole driver (used by TAP_LOG). */
int tap_logtype;

RTE_INIT(tap_init_log);
/* Constructor: register the "pmd.net.tap" log type, defaulting to NOTICE. */
static void
tap_init_log(void)
{
	tap_logtype = rte_log_register("pmd.net.tap");
	if (tap_logtype >= 0)
		rte_log_set_level(tap_logtype, RTE_LOG_NOTICE);
}
|