ethdev: add Tx preparation
Added API for `rte_eth_tx_prepare` uint16_t rte_eth_tx_prepare(uint8_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) Added fields to the `struct rte_eth_desc_lim`: uint16_t nb_seg_max; /**< Max number of segments per whole packet. */ uint16_t nb_mtu_seg_max; /**< Max number of segments per one MTU */ These fields can be used to create valid packets according to the following rules: * For non-TSO packet, a single transmit packet may span up to "nb_mtu_seg_max" buffers. * For TSO packet the total number of data descriptors is "nb_seg_max", and each segment within the TSO may span up to "nb_mtu_seg_max". Added functions: int rte_validate_tx_offload(struct rte_mbuf *m) to validate general requirements for tx offload set in mbuf of packet such as flag completeness. In the current implementation this function is called optionally when RTE_LIBRTE_ETHDEV_DEBUG is enabled. int rte_net_intel_cksum_prepare(struct rte_mbuf *m) to prepare pseudo header checksum for TSO and non-TSO tcp/udp packets before hardware tx checksum offload. - for non-TSO tcp/udp packets full pseudo-header checksum is counted and set. - for TSO the IP payload length is not included. int rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags) this function uses the same logic as rte_net_intel_cksum_prepare, but allows the application to choose which offloads should be taken into account, if full preparation is not required. PERFORMANCE TESTS ----------------- This feature was tested with modified csum engine from test-pmd. The packet checksum preparation was moved from application to Tx preparation step placed before burst. We may expect some overhead costs caused by: 1) using additional callback before burst, 2) rescanning burst, 3) additional condition checking (packet validation), 4) worse optimization (e.g. packet data access, etc.) 
We tested it using ixgbe Tx preparation implementation with some parts disabled to have comparable information about the impact of different parts of implementation. IMPACT: 1) For unimplemented Tx preparation callback the performance impact is negligible, 2) For packet condition check without checksum modifications (nb_segs, available offloads, etc.) is 14626628/14252168 (~2.62% drop), 3) Full support in ixgbe driver (point 2 + packet checksum initialization) is 14060924/13588094 (~3.48% drop) Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com> Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com> Acked-by: Olivier Matz <olivier.matz@6wind.com> Acked-by: Thomas Monjalon <thomas.monjalon@6wind.com>
This commit is contained in:
parent
6d52d1d4af
commit
4fb7e803eb
@ -122,6 +122,14 @@ CONFIG_RTE_LIBRTE_IEEE1588=n
|
||||
CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
|
||||
CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
|
||||
|
||||
#
|
||||
# Turn off Tx preparation stage
|
||||
#
|
||||
# Warning: rte_ethdev_tx_prepare() can be safely disabled only if using a
|
||||
# driver which does not implement any Tx preparation.
|
||||
#
|
||||
CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n
|
||||
|
||||
#
|
||||
# Support NIC bypass logic
|
||||
#
|
||||
|
@ -182,6 +182,7 @@ extern "C" {
|
||||
#include <rte_pci.h>
|
||||
#include <rte_dev.h>
|
||||
#include <rte_devargs.h>
|
||||
#include <rte_errno.h>
|
||||
#include "rte_ether.h"
|
||||
#include "rte_eth_ctrl.h"
|
||||
#include "rte_dev_info.h"
|
||||
@ -702,6 +703,29 @@ struct rte_eth_desc_lim {
|
||||
uint16_t nb_max; /**< Max allowed number of descriptors. */
|
||||
uint16_t nb_min; /**< Min allowed number of descriptors. */
|
||||
uint16_t nb_align; /**< Number of descriptors should be aligned to. */
|
||||
|
||||
/**
|
||||
* Max allowed number of segments per whole packet.
|
||||
*
|
||||
* - For TSO packet this is the total number of data descriptors allowed
|
||||
* by device.
|
||||
*
|
||||
* @see nb_mtu_seg_max
|
||||
*/
|
||||
uint16_t nb_seg_max;
|
||||
|
||||
/**
|
||||
* Max number of segments per one MTU.
|
||||
*
|
||||
* - For non-TSO packet, this is the maximum allowed number of segments
|
||||
* in a single transmit packet.
|
||||
*
|
||||
* - For TSO packet each segment within the TSO may span up to this
|
||||
* value.
|
||||
*
|
||||
* @see nb_seg_max
|
||||
*/
|
||||
uint16_t nb_mtu_seg_max;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -1194,6 +1218,11 @@ typedef uint16_t (*eth_tx_burst_t)(void *txq,
|
||||
uint16_t nb_pkts);
|
||||
/**< @internal Send output packets on a transmit queue of an Ethernet device. */
|
||||
|
||||
typedef uint16_t (*eth_tx_prep_t)(void *txq,
|
||||
struct rte_mbuf **tx_pkts,
|
||||
uint16_t nb_pkts);
|
||||
/**< @internal Prepare output packets on a transmit queue of an Ethernet device. */
|
||||
|
||||
typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
|
||||
struct rte_eth_fc_conf *fc_conf);
|
||||
/**< @internal Get current flow control parameter on an Ethernet device */
|
||||
@ -1624,6 +1653,7 @@ struct rte_eth_rxtx_callback {
|
||||
struct rte_eth_dev {
|
||||
eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
|
||||
eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
|
||||
eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
|
||||
struct rte_eth_dev_data *data; /**< Pointer to device data */
|
||||
const struct eth_driver *driver;/**< Driver for this device */
|
||||
const struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */
|
||||
@ -2834,6 +2864,115 @@ rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
|
||||
return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
|
||||
}
|
||||
|
||||
/**
|
||||
* @warning
|
||||
* @b EXPERIMENTAL: this API may change without prior notice
|
||||
*
|
||||
* Process a burst of output packets on a transmit queue of an Ethernet device.
|
||||
*
|
||||
* The rte_eth_tx_prepare() function is invoked to prepare output packets to be
|
||||
* transmitted on the output queue *queue_id* of the Ethernet device designated
|
||||
* by its *port_id*.
|
||||
* The *nb_pkts* parameter is the number of packets to be prepared which are
|
||||
* supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
|
||||
* allocated from a pool created with rte_pktmbuf_pool_create().
|
||||
* For each packet to send, the rte_eth_tx_prepare() function performs
|
||||
* the following operations:
|
||||
*
|
||||
* - Check if packet meets devices requirements for tx offloads.
|
||||
*
|
||||
* - Check limitations about number of segments.
|
||||
*
|
||||
* - Check additional requirements when debug is enabled.
|
||||
*
|
||||
* - Update and/or reset required checksums when tx offload is set for packet.
|
||||
*
|
||||
* Since this function can modify packet data, provided mbufs must be safely
|
||||
* writable (e.g. modified data cannot be in shared segment).
|
||||
*
|
||||
* The rte_eth_tx_prepare() function returns the number of packets ready to be
|
||||
* sent. A return value equal to *nb_pkts* means that all packets are valid and
|
||||
* ready to be sent, otherwise stops processing on the first invalid packet and
|
||||
* leaves the rest packets untouched.
|
||||
*
|
||||
* When this functionality is not implemented in the driver, all packets are
|
||||
* are returned untouched.
|
||||
*
|
||||
* @param port_id
|
||||
* The port identifier of the Ethernet device.
|
||||
* The value must be a valid port id.
|
||||
* @param queue_id
|
||||
* The index of the transmit queue through which output packets must be
|
||||
* sent.
|
||||
* The value must be in the range [0, nb_tx_queue - 1] previously supplied
|
||||
* to rte_eth_dev_configure().
|
||||
* @param tx_pkts
|
||||
* The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
|
||||
* which contain the output packets.
|
||||
* @param nb_pkts
|
||||
* The maximum number of packets to process.
|
||||
* @return
|
||||
* The number of packets correct and ready to be sent. The return value can be
|
||||
* less than the value of the *tx_pkts* parameter when some packet doesn't
|
||||
* meet devices requirements with rte_errno set appropriately:
|
||||
* - -EINVAL: offload flags are not correctly set
|
||||
* - -ENOTSUP: the offload feature is not supported by the hardware
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
|
||||
|
||||
static inline uint16_t
|
||||
rte_eth_tx_prepare(uint8_t port_id, uint16_t queue_id,
|
||||
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
|
||||
{
|
||||
struct rte_eth_dev *dev;
|
||||
|
||||
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
|
||||
if (!rte_eth_dev_is_valid_port(port_id)) {
|
||||
RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
|
||||
rte_errno = -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
dev = &rte_eth_devices[port_id];
|
||||
|
||||
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
|
||||
if (queue_id >= dev->data->nb_tx_queues) {
|
||||
RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
|
||||
rte_errno = -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (!dev->tx_pkt_prepare)
|
||||
return nb_pkts;
|
||||
|
||||
return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
|
||||
tx_pkts, nb_pkts);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
/*
|
||||
* Native NOOP operation for compilation targets which doesn't require any
|
||||
* preparations steps, and functional NOOP may introduce unnecessary performance
|
||||
* drop.
|
||||
*
|
||||
* Generally this is not a good idea to turn it on globally and didn't should
|
||||
* be used if behavior of tx_preparation can change.
|
||||
*/
|
||||
|
||||
static inline uint16_t
|
||||
rte_eth_tx_prepare(__rte_unused uint8_t port_id, __rte_unused uint16_t queue_id,
|
||||
__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
|
||||
{
|
||||
return nb_pkts;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
|
||||
void *userdata);
|
||||
|
||||
|
@ -283,6 +283,19 @@ extern "C" {
|
||||
*/
|
||||
#define PKT_TX_OUTER_IPV6 (1ULL << 60)
|
||||
|
||||
/**
|
||||
* Bitmask of all supported packet Tx offload features flags,
|
||||
* which can be set for packet.
|
||||
*/
|
||||
#define PKT_TX_OFFLOAD_MASK ( \
|
||||
PKT_TX_IP_CKSUM | \
|
||||
PKT_TX_L4_MASK | \
|
||||
PKT_TX_OUTER_IP_CKSUM | \
|
||||
PKT_TX_TCP_SEG | \
|
||||
PKT_TX_QINQ_PKT | \
|
||||
PKT_TX_VLAN_PKT | \
|
||||
PKT_TX_TUNNEL_MASK)
|
||||
|
||||
#define __RESERVED (1ULL << 61) /**< reserved for future mbuf use */
|
||||
|
||||
#define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */
|
||||
@ -1646,6 +1659,57 @@ static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate general requirements for Tx offload in mbuf.
|
||||
*
|
||||
* This function checks correctness and completeness of Tx offload settings.
|
||||
*
|
||||
* @param m
|
||||
* The packet mbuf to be validated.
|
||||
* @return
|
||||
* 0 if packet is valid
|
||||
*/
|
||||
static inline int
|
||||
rte_validate_tx_offload(const struct rte_mbuf *m)
|
||||
{
|
||||
uint64_t ol_flags = m->ol_flags;
|
||||
uint64_t inner_l3_offset = m->l2_len;
|
||||
|
||||
/* Does packet set any of available offloads? */
|
||||
if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
|
||||
return 0;
|
||||
|
||||
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
|
||||
inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
|
||||
|
||||
/* Headers are fragmented */
|
||||
if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
|
||||
return -ENOTSUP;
|
||||
|
||||
/* IP checksum can be counted only for IPv4 packet */
|
||||
if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
|
||||
return -EINVAL;
|
||||
|
||||
/* IP type not set when required */
|
||||
if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
|
||||
if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
|
||||
return -EINVAL;
|
||||
|
||||
/* Check requirements for TSO packet */
|
||||
if (ol_flags & PKT_TX_TCP_SEG)
|
||||
if ((m->tso_segsz == 0) ||
|
||||
((ol_flags & PKT_TX_IPV4) &&
|
||||
!(ol_flags & PKT_TX_IP_CKSUM)))
|
||||
return -EINVAL;
|
||||
|
||||
/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
|
||||
if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
|
||||
!(ol_flags & PKT_TX_OUTER_IPV4))
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Dump an mbuf structure to a file.
|
||||
*
|
||||
|
@ -38,6 +38,11 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <rte_ip.h>
|
||||
#include <rte_udp.h>
|
||||
#include <rte_tcp.h>
|
||||
#include <rte_sctp.h>
|
||||
|
||||
/**
|
||||
* Structure containing header lengths associated to a packet, filled
|
||||
* by rte_net_get_ptype().
|
||||
@ -86,6 +91,111 @@ struct rte_net_hdr_lens {
|
||||
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
|
||||
struct rte_net_hdr_lens *hdr_lens, uint32_t layers);
|
||||
|
||||
/**
|
||||
* Prepare pseudo header checksum
|
||||
*
|
||||
* This function prepares pseudo header checksum for TSO and non-TSO tcp/udp in
|
||||
* provided mbufs packet data and based on the requested offload flags.
|
||||
*
|
||||
* - for non-TSO tcp/udp packets full pseudo-header checksum is counted and set
|
||||
* in packet data,
|
||||
* - for TSO the IP payload length is not included in pseudo header.
|
||||
*
|
||||
* This function expects that used headers are in the first data segment of
|
||||
* mbuf, are not fragmented and can be safely modified.
|
||||
*
|
||||
* @param m
|
||||
* The packet mbuf to be fixed.
|
||||
* @param ol_flags
|
||||
* TX offloads flags to use with this packet.
|
||||
* @return
|
||||
* 0 if checksum is initialized properly
|
||||
*/
|
||||
static inline int
|
||||
rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
|
||||
{
|
||||
struct ipv4_hdr *ipv4_hdr;
|
||||
struct ipv6_hdr *ipv6_hdr;
|
||||
struct tcp_hdr *tcp_hdr;
|
||||
struct udp_hdr *udp_hdr;
|
||||
uint64_t inner_l3_offset = m->l2_len;
|
||||
|
||||
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
|
||||
inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
|
||||
|
||||
if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
|
||||
if (ol_flags & PKT_TX_IPV4) {
|
||||
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
|
||||
inner_l3_offset);
|
||||
|
||||
if (ol_flags & PKT_TX_IP_CKSUM)
|
||||
ipv4_hdr->hdr_checksum = 0;
|
||||
|
||||
udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
|
||||
m->l3_len);
|
||||
udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
|
||||
ol_flags);
|
||||
} else {
|
||||
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
|
||||
inner_l3_offset);
|
||||
/* non-TSO udp */
|
||||
udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
|
||||
inner_l3_offset + m->l3_len);
|
||||
udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
|
||||
ol_flags);
|
||||
}
|
||||
} else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
|
||||
(ol_flags & PKT_TX_TCP_SEG)) {
|
||||
if (ol_flags & PKT_TX_IPV4) {
|
||||
ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
|
||||
inner_l3_offset);
|
||||
|
||||
if (ol_flags & PKT_TX_IP_CKSUM)
|
||||
ipv4_hdr->hdr_checksum = 0;
|
||||
|
||||
/* non-TSO tcp or TSO */
|
||||
tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
|
||||
m->l3_len);
|
||||
tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
|
||||
ol_flags);
|
||||
} else {
|
||||
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
|
||||
inner_l3_offset);
|
||||
/* non-TSO tcp or TSO */
|
||||
tcp_hdr = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *,
|
||||
inner_l3_offset + m->l3_len);
|
||||
tcp_hdr->cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
|
||||
ol_flags);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepare pseudo header checksum
|
||||
*
|
||||
* This function prepares pseudo header checksum for TSO and non-TSO tcp/udp in
|
||||
* provided mbufs packet data.
|
||||
*
|
||||
* - for non-TSO tcp/udp packets full pseudo-header checksum is counted and set
|
||||
* in packet data,
|
||||
* - for TSO the IP payload length is not included in pseudo header.
|
||||
*
|
||||
* This function expects that used headers are in the first data segment of
|
||||
* mbuf, are not fragmented and can be safely modified.
|
||||
*
|
||||
* @param m
|
||||
* The packet mbuf to be fixed.
|
||||
* @return
|
||||
* 0 if checksum is initialized properly
|
||||
*/
|
||||
static inline int
|
||||
rte_net_intel_cksum_prepare(struct rte_mbuf *m)
|
||||
{
|
||||
return rte_net_intel_cksum_flags_prepare(m, m->ol_flags);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
Loading…
Reference in New Issue
Block a user