/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1) */

/*
 * Copyright(c) 2007-2014 Intel Corporation.
 */
|
|
|
|
|
|
|
|
#ifndef _RTE_KNI_COMMON_H_
|
|
|
|
#define _RTE_KNI_COMMON_H_
|
|
|
|
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
#include <linux/if.h>
|
2018-10-08 17:11:45 +08:00
|
|
|
#include <asm/barrier.h>
|
2016-09-08 14:25:06 +02:00
|
|
|
#define RTE_STD_C11
|
|
|
|
#else
|
|
|
|
#include <rte_common.h>
|
2017-12-21 14:00:04 +01:00
|
|
|
#include <rte_config.h>
|
2012-12-20 00:00:00 +01:00
|
|
|
#endif
|
|
|
|
|
2019-11-15 14:03:28 +01:00
|
|
|
/*
 * KNI name is part of the memzone name. It must not exceed IFNAMSIZ.
 */
|
2019-11-15 14:03:28 +01:00
|
|
|
#define RTE_KNI_NAMESIZE 16
|
2013-09-18 12:00:00 +02:00
|
|
|
|
2016-01-29 13:15:53 +05:30
|
|
|
#define RTE_CACHE_LINE_MIN_SIZE 64
|
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
/*
 * Request id.
 */
|
|
|
|
/*
 * Identifier of a request passed between userspace and the kernel
 * KNI module (see struct rte_kni_request below).
 */
enum rte_kni_req_id {
	RTE_KNI_REQ_UNKNOWN = 0,         /**< Unrecognized request */
	RTE_KNI_REQ_CHANGE_MTU = 1,      /**< Change the interface MTU */
	RTE_KNI_REQ_CFG_NETWORK_IF = 2,  /**< Set the interface up or down */
	RTE_KNI_REQ_CHANGE_MAC_ADDR = 3, /**< Change the interface MAC address */
	RTE_KNI_REQ_CHANGE_PROMISC = 4,  /**< Toggle promiscuous mode */
	RTE_KNI_REQ_CHANGE_ALLMULTI = 5, /**< Toggle all-multicast mode */
	RTE_KNI_REQ_MAX = 6,             /**< Upper bound of valid request ids */
};
|
|
|
|
|
|
|
|
/*
 * Structure for a KNI request.
 */
|
|
|
|
/*
 * Packed so the layout is identical on both sides of the kernel/userspace
 * boundary; do not reorder or resize fields.
 */
struct rte_kni_request {
	uint32_t req_id;             /**< Request id (enum rte_kni_req_id) */
	RTE_STD_C11
	union {
		/* Payload: which member is valid depends on req_id. */
		uint32_t new_mtu;    /**< New MTU */
		uint8_t if_up;       /**< 1: interface up, 0: interface down */
		uint8_t mac_addr[6]; /**< MAC address for interface */
		uint8_t promiscusity;/**< 1: promisc mode enable, 0: disable */
		uint8_t allmulti;    /**< 1: all-multicast mode enable, 0: disable */
	};
	int32_t result;              /**< Result for processing request */
} __attribute__((__packed__));
|
|
|
|
|
|
|
|
/*
 * Fifo struct mapped in a shared memory. It describes a circular buffer FIFO.
 * Write and read wrap around; the FIFO is empty when write == read.
 * Writing must never overwrite the read position.
 */
|
|
|
|
struct rte_kni_fifo {
#ifdef RTE_USE_C11_MEM_MODEL
	/* NOTE(review): without volatile, cross-domain visibility is
	 * presumably ensured by C11 atomic accessors elsewhere — the
	 * accessor code is not visible in this header. */
	unsigned write;          /**< Next position to be written*/
	unsigned read;           /**< Next position to be read */
#else
	volatile unsigned write; /**< Next position to be written*/
	volatile unsigned read;  /**< Next position to be read */
#endif
	unsigned len;            /**< Circular buffer length */
	unsigned elem_size;      /**< Pointer size - for 32/64 bit OS */
	/* Flexible array member: the ring storage follows the header
	 * in the same shared-memory region. */
	void *volatile buffer[]; /**< The buffer contains mbuf pointers */
};
|
|
|
|
|
|
|
|
/*
 * The kernel image of the rte_mbuf struct, with only the relevant fields.
 * Padding is necessary to assure that the offsets of these fields match
 * those of the real struct rte_mbuf.
 */
|
|
|
|
struct rte_kni_mbuf {
	/* First cache line: fields the kernel reads on the RX fast path.
	 * The padN members keep field offsets in sync with struct rte_mbuf;
	 * never reorder or resize without updating the real mbuf layout. */
	void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
	uint64_t buf_iova;
	uint16_t data_off;  /**< Start address of data in segment buffer. */
	char pad1[2];
	uint16_t nb_segs;   /**< Number of segments. */
	char pad4[2];
	uint64_t ol_flags;  /**< Offload features. */
	char pad2[4];
	uint32_t pkt_len;   /**< Total pkt len: sum of all segment data_len. */
	uint16_t data_len;  /**< Amount of data in segment buffer. */
	char pad3[14];
	void *pool;

	/* fields on second cache line */
	__attribute__((__aligned__(RTE_CACHE_LINE_MIN_SIZE)))
	void *next;         /**< Physical address of next mbuf in kernel. */
};
|
2012-12-20 00:00:00 +01:00
|
|
|
|
|
|
|
/*
 * Struct used to create a KNI device. Passed to the kernel in an IOCTL call.
 */
|
|
|
|
|
2013-09-18 12:00:00 +02:00
|
|
|
struct rte_kni_device_info {
	char name[RTE_KNI_NAMESIZE]; /**< Network device name for KNI */

	/* Physical addresses of the shared rte_kni_fifo rings;
	 * the kernel module maps them into its own address space. */
	phys_addr_t tx_phys;
	phys_addr_t rx_phys;
	phys_addr_t alloc_phys;
	phys_addr_t free_phys;

	/* Used by Ethtool */
	phys_addr_t req_phys;
	phys_addr_t resp_phys;
	phys_addr_t sync_phys;
	void * sync_va;   /**< Userspace virtual address of the sync area */

	/* mbuf mempool */
	void * mbuf_va;   /**< Userspace virtual address of the mbuf pool */
	phys_addr_t mbuf_phys;

	uint16_t group_id;       /**< Group ID */
	uint32_t core_id;        /**< core ID to bind for kernel thread */

	__extension__
	uint8_t force_bind : 1;  /**< Flag for kernel thread binding */

	/* mbuf size */
	unsigned mbuf_size;
	unsigned int mtu;        /**< Initial MTU of the interface */
	unsigned int min_mtu;    /**< Lower MTU bound advertised to the kernel */
	unsigned int max_mtu;    /**< Upper MTU bound advertised to the kernel */
	uint8_t mac_addr[6];     /**< Initial MAC address of the interface */
	uint8_t iova_mode;       /**< NOTE(review): presumably selects PA vs VA
				  *   addressing for the shared data — confirm
				  *   against the kernel module. */
};
|
|
|
|
|
|
|
|
#define KNI_DEVICE "kni"
|
|
|
|
|
2013-06-03 00:00:00 +00:00
|
|
|
#define RTE_KNI_IOCTL_TEST _IOWR(0, 1, int)
|
|
|
|
#define RTE_KNI_IOCTL_CREATE _IOWR(0, 2, struct rte_kni_device_info)
|
2013-09-18 12:00:00 +02:00
|
|
|
#define RTE_KNI_IOCTL_RELEASE _IOWR(0, 3, struct rte_kni_device_info)
|
2012-12-20 00:00:00 +01:00
|
|
|
|
|
|
|
#endif /* _RTE_KNI_COMMON_H_ */
|