/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* spinlock for eth device callbacks */
static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for shared data allocation */
static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned int offset;
};

/* Shared memory between primary and secondary processes. */
static struct {
        uint64_t next_owner_id;
        rte_spinlock_t ownership_lock;
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
} *eth_dev_shared_data;

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};

#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
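
/*
 * Illustrative sketch (an assumption about how the tables above are used,
 * not code taken from this excerpt): the name/offset pairs let generic
 * xstats code read any basic counter of struct rte_eth_stats by name.
 * Assuming "stats" was filled by rte_eth_stats_get(), entry i of
 * eth_dev_stats_strings could be resolved as:
 *
 *      uint64_t value = *(uint64_t *)(((char *)&stats) +
 *                      eth_dev_stats_strings[i].offset);
 */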

#define RTE_RX_OFFLOAD_BIT2STR(_name) \
        { DEV_RX_OFFLOAD_##_name, #_name }

#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name) \
        { RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_rx_offload_names[] = {
        RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
        RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
        RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
        RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
        RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
        RTE_RX_OFFLOAD_BIT2STR(SCATTER),
        RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
        RTE_RX_OFFLOAD_BIT2STR(SECURITY),
        RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
        RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
        RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name) \
        { DEV_TX_OFFLOAD_##_name, #_name }

static const struct {
        uint64_t offload;
        const char *name;
} eth_dev_tx_offload_names[] = {
        RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
        RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
        RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
        RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
        RTE_TX_OFFLOAD_BIT2STR(SECURITY),
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
        RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR
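
/*
 * Illustrative sketch (an assumption about how the offload/name tables
 * above are consumed later in this library, not code from this excerpt):
 * a bit-to-string lookup scans the table and returns the matching name:
 *
 *      for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i)
 *              if (offload == eth_dev_rx_offload_names[i].offload)
 *                      return eth_dev_rx_offload_names[i].name;
 */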

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * the pointer to the callback parameters, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        void *ret_param;                        /**< Return parameter */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
        int ret;
        struct rte_devargs devargs = {.args = NULL};
        const char *bus_param_key;
        char *bus_str = NULL;
        char *cls_str = NULL;
        int str_size;

        memset(iter, 0, sizeof(*iter));

        /*
         * The devargs string may use various syntaxes:
         *   - 0000:08:00.0,representor=[1-3]
         *   - pci:0000:06:00.0,representor=[0,5]
         *   - class=eth,mac=00:11:22:33:44:55
         * A new syntax is in development (not yet supported):
         *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
         */

        /*
         * Handle pure class filter (i.e. without any bus-level argument),
         * from future new syntax.
         * rte_devargs_parse() is not yet supporting the new syntax,
         * that's why this simple case is temporarily parsed here.
         */
#define iter_anybus_str "class=eth,"
        if (strncmp(devargs_str, iter_anybus_str,
                        strlen(iter_anybus_str)) == 0) {
                iter->cls_str = devargs_str + strlen(iter_anybus_str);
                goto end;
        }

        /* Split bus, device and parameters. */
        ret = rte_devargs_parse(&devargs, devargs_str);
        if (ret != 0)
                goto error;

        /*
         * Assume parameters of old syntax can match only at ethdev level.
         * Extra parameters will be ignored, thanks to "+" prefix.
         */
        str_size = strlen(devargs.args) + 2;
        cls_str = malloc(str_size);
        if (cls_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(cls_str, str_size, "+%s", devargs.args);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->cls_str = cls_str;
        free(devargs.args); /* allocated by rte_devargs_parse() */
        devargs.args = NULL;

        iter->bus = devargs.bus;
        if (iter->bus->dev_iterate == NULL) {
                ret = -ENOTSUP;
                goto error;
        }

        /* Convert bus args to new syntax for use with new API dev_iterate. */
        if (strcmp(iter->bus->name, "vdev") == 0) {
                bus_param_key = "name";
        } else if (strcmp(iter->bus->name, "pci") == 0) {
                bus_param_key = "addr";
        } else {
                ret = -ENOTSUP;
                goto error;
        }
        str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
        bus_str = malloc(str_size);
        if (bus_str == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        ret = snprintf(bus_str, str_size, "%s=%s",
                        bus_param_key, devargs.name);
        if (ret != str_size - 1) {
                ret = -EINVAL;
                goto error;
        }
        iter->bus_str = bus_str;

end:
        iter->cls = rte_class_find_by_name("eth");
        return 0;

error:
        if (ret == -ENOTSUP)
                RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
                                iter->bus->name);
        free(devargs.args);
        free(bus_str);
        free(cls_str);
        return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
        if (iter->cls == NULL) /* invalid ethdev iterator */
                return RTE_MAX_ETHPORTS;

        do { /* loop to try all matching rte_device */
                /* If not pure ethdev filter and */
                if (iter->bus != NULL &&
                                /* not in middle of rte_eth_dev iteration, */
                                iter->class_device == NULL) {
                        /* get next rte_device to try. */
                        iter->device = iter->bus->dev_iterate(
                                        iter->device, iter->bus_str, iter);
                        if (iter->device == NULL)
                                break; /* no more rte_device candidate */
                }
                /* A device is matching bus part, need to check ethdev part. */
                iter->class_device = iter->cls->dev_iterate(
                                iter->class_device, iter->cls_str, iter);
                if (iter->class_device != NULL)
                        return eth_dev_to_id(iter->class_device); /* match */
        } while (iter->bus != NULL); /* need to try next rte_device */

        /* No more ethdev port to iterate. */
        rte_eth_iterator_cleanup(iter);
        return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
        if (iter->bus_str == NULL)
                return; /* nothing to free in pure class filter */
        free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
        free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
        memset(iter, 0, sizeof(*iter));
}
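
/*
 * Illustrative usage sketch (hypothetical application code, with a
 * made-up devargs filter): walk all ethdev ports matching a devargs
 * string with the iterator API above.
 *
 *      struct rte_dev_iterator iterator;
 *      uint16_t port_id;
 *
 *      if (rte_eth_iterator_init(&iterator,
 *                      "class=eth,mac=00:11:22:33:44:55") == 0) {
 *              for (port_id = rte_eth_iterator_next(&iterator);
 *                              port_id != RTE_MAX_ETHPORTS;
 *                              port_id = rte_eth_iterator_next(&iterator))
 *                      printf("matched port %u\n", port_id);
 *              rte_eth_iterator_cleanup(&iterator);
 *      }
 *
 * The explicit cleanup only matters if the loop is left early;
 * rte_eth_iterator_next() already cleans up once iteration is exhausted,
 * and calling it again is harmless.
 */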

uint16_t
rte_eth_find_next(uint16_t port_id)
{
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;

        if (port_id >= RTE_MAX_ETHPORTS)
                return RTE_MAX_ETHPORTS;

        return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it also filters out
 * owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
        for (port_id = rte_eth_find_next(0); \
             port_id < RTE_MAX_ETHPORTS; \
             port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
        port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
                        rte_eth_devices[port_id].device != parent)
                port_id = rte_eth_find_next(port_id + 1);

        return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
        return rte_eth_find_next_of(port_id,
                        rte_eth_devices[ref_port_id].device);
}
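
/*
 * Illustrative sketch (hypothetical caller code): every port backed by
 * one rte_device ("dev" below) can be enumerated with
 * rte_eth_find_next_of(), mirroring RTE_ETH_FOREACH_VALID_DEV above.
 * handle_port() is a made-up helper.
 *
 *      uint16_t pid;
 *
 *      for (pid = rte_eth_find_next_of(0, dev);
 *                      pid < RTE_MAX_ETHPORTS;
 *                      pid = rte_eth_find_next_of(pid + 1, dev))
 *              handle_port(pid);
 */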

static void
eth_dev_shared_data_prepare(void)
{
        const unsigned int flags = 0;
        const struct rte_memzone *mz;

        rte_spinlock_lock(&eth_dev_shared_data_lock);

        if (eth_dev_shared_data == NULL) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        /* Allocate port data and ownership shared memory. */
                        mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                        sizeof(*eth_dev_shared_data),
                                        rte_socket_id(), flags);
                } else
                        mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
                if (mz == NULL)
                        rte_panic("Cannot allocate ethdev shared data\n");

                eth_dev_shared_data = mz->addr;
                if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        eth_dev_shared_data->next_owner_id =
                                        RTE_ETH_DEV_NO_OWNER + 1;
                        rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
                        memset(eth_dev_shared_data->data, 0,
                               sizeof(eth_dev_shared_data->data));
                }
        }

        rte_spinlock_unlock(&eth_dev_shared_data_lock);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
        return ethdev->data->name[0] != '\0';
}

static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
        uint16_t i;

        RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].data != NULL &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        struct rte_eth_dev *ethdev;

        eth_dev_shared_data_prepare();

        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        ethdev = eth_dev_allocated(name);

        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return ethdev;
}

static uint16_t
eth_dev_find_free_port(void)
{
        uint16_t i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                /* Using shared name field to find a free port. */
                if (eth_dev_shared_data->data[i].name[0] == '\0') {
                        RTE_ASSERT(rte_eth_devices[i].state ==
                                   RTE_ETH_DEV_UNUSED);
                        return i;
                }
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &eth_dev_shared_data->data[port_id];

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
        size_t name_len;

        name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
                return NULL;
        }

        if (name_len >= RTE_ETH_NAME_MAX_LEN) {
                RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
                return NULL;
        }

        eth_dev_shared_data_prepare();

        /* Synchronize port creation between primary and secondary processes. */
        rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

        if (eth_dev_allocated(name) != NULL) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethernet device with name %s already allocated\n",
                        name);
                goto unlock;
        }

        port_id = eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_ETHDEV_LOG(ERR,
                        "Reached maximum number of Ethernet ports\n");
                goto unlock;
        }

        eth_dev = eth_dev_get(port_id);
        strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
        eth_dev->data->mtu = RTE_ETHER_MTU;
        pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);

unlock:
        rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

        return eth_dev;
}
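
/*
 * Illustrative sketch (hypothetical driver probe path, with a made-up
 * device name): a PMD typically creates its port with
 * rte_eth_dev_allocate() in the primary process and re-attaches to the
 * same port in a secondary process:
 *
 *      struct rte_eth_dev *dev;
 *
 *      if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 *              dev = rte_eth_dev_allocate("net_example0");
 *      else
 *              dev = rte_eth_dev_attach_secondary("net_example0");
 *      if (dev == NULL)
 *              return -ENOMEM;
 */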
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Attach to a port already registered by the primary process, which
|
|
|
|
* makes sure that the same device would have the same port id both
|
|
|
|
* in the primary and secondary process.
|
|
|
|
*/
|
2017-03-02 11:00:41 +02:00
|
|
|
struct rte_eth_dev *
|
|
|
|
rte_eth_dev_attach_secondary(const char *name)
|
ethdev: fix port data mismatched in multiple process model
Assume we have two virtio ports, 00:03.0 and 00:04.0. The first one is
managed by the kernel driver, while the later one is managed by DPDK.
Now we start the primary process. 00:03.0 will be skipped by DPDK virtio
PMD driver (since it's being used by the kernel). 00:04.0 would be
successfully initiated by DPDK virtio PMD (if nothing abnormal happens).
After that, we would get a port id 0, and all the related info needed
by virtio (virtio_hw) is stored at rte_eth_dev_data[0].
Then we start the secondary process. As usual, 00:03.0 will be firstly
probed. It firstly tries to get a local eth_dev structure for it (by
rte_eth_dev_allocate):
port_id = rte_eth_dev_find_free_port();
...
eth_dev = &rte_eth_devices[port_id];
eth_dev->data = &rte_eth_dev_data[port_id];
...
return eth_dev;
Since it's a first PCI device, port_id will be 0. eth_dev->data would
then point to rte_eth_dev_data[0]. And here things start going wrong,
as rte_eth_dev_data[0] actually stores the virtio_hw for 00:04.0.
That said, in the secondary process, DPDK will continue to drive PCI
device 00.03.0 (despite the fact it's been managed by kernel), with
the info from PCI device 00:04.0. Which is wrong.
The fix is to attach the port already registered by the primary process.
That is, iterate the rte_eth_dev_data[], and get the port id who's PCI
ID matches the current PCI device.
This would let us maintain same port ID for the same PCI device, keeping
the chance of referencing to wrong data minimal.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Acked-by: Thomas Monjalon <thomas.monjalon@6wind.com>
2017-01-09 15:50:59 +08:00
|
|
|
{
|
2017-09-29 15:17:24 +08:00
|
|
|
uint16_t i;
|
2018-01-22 16:38:20 +00:00
|
|
|
struct rte_eth_dev *eth_dev = NULL;
|
ethdev: fix port data mismatched in multiple process model
Assume we have two virtio ports, 00:03.0 and 00:04.0. The first one is
managed by the kernel driver, while the later one is managed by DPDK.
Now we start the primary process. 00:03.0 will be skipped by DPDK virtio
PMD driver (since it's being used by the kernel). 00:04.0 would be
successfully initiated by DPDK virtio PMD (if nothing abnormal happens).
After that, we would get a port id 0, and all the related info needed
by virtio (virtio_hw) is stored at rte_eth_dev_data[0].
Then we start the secondary process. As usual, 00:03.0 will be firstly
probed. It firstly tries to get a local eth_dev structure for it (by
rte_eth_dev_allocate):
port_id = rte_eth_dev_find_free_port();
...
eth_dev = &rte_eth_devices[port_id];
eth_dev->data = &rte_eth_dev_data[port_id];
...
return eth_dev;
Since it's a first PCI device, port_id will be 0. eth_dev->data would
then point to rte_eth_dev_data[0]. And here things start going wrong,
as rte_eth_dev_data[0] actually stores the virtio_hw for 00:04.0.
That said, in the secondary process, DPDK will continue to drive PCI
device 00.03.0 (despite the fact it's been managed by kernel), with
the info from PCI device 00:04.0. Which is wrong.
The fix is to attach the port already registered by the primary process.
That is, iterate the rte_eth_dev_data[], and get the port id who's PCI
ID matches the current PCI device.
This would let us maintain same port ID for the same PCI device, keeping
the chance of referencing to wrong data minimal.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Acked-by: Thomas Monjalon <thomas.monjalon@6wind.com>
2017-01-09 15:50:59 +08:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_shared_data_prepare();
|
2017-01-09 15:50:59 +08:00
|
|
|
|
2018-01-22 16:38:20 +00:00
|
|
|
/* Synchronize port attachment to primary port creation and release. */
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:20 +00:00
|
|
|
|
2017-01-09 15:50:59 +08:00
|
|
|
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
|
2020-10-13 17:56:58 +01:00
|
|
|
if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
|
2017-01-09 15:50:59 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (i == RTE_MAX_ETHPORTS) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Device %s is not driven by the primary process\n",
|
2017-01-09 15:50:59 +08:00
|
|
|
name);
|
2018-01-22 16:38:20 +00:00
|
|
|
} else {
|
|
|
|
eth_dev = eth_dev_get(i);
|
|
|
|
RTE_ASSERT(eth_dev->data->port_id == i);
|
2017-01-09 15:50:59 +08:00
|
|
|
}
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
|
2012-09-04 13:54:00 +01:00
|
|
|
return eth_dev;
|
|
|
|
}
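/*
 * Illustrative sketch, not part of this file: how a PMD probe path might use
 * the secondary-process attach helper above, so that a secondary process
 * re-uses the port already registered by the primary process (keeping the
 * same port ID), as described in the commit message above. "example_probe"
 * and "struct example_private" are hypothetical names.
 */
struct example_private {
	uint64_t dummy;	/* hypothetical per-device private data */
};

static int
example_probe(const char *name)
{
	struct rte_eth_dev *eth_dev;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		/* Look up the port created by the primary process by name. */
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (eth_dev == NULL)
			return -ENODEV;
	} else {
		eth_dev = rte_eth_dev_allocate(name);
		if (eth_dev == NULL)
			return -ENOMEM;
		eth_dev->data->dev_private = rte_zmalloc(name,
				sizeof(struct example_private), 0);
		if (eth_dev->data->dev_private == NULL)
			return -ENOMEM;
	}

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}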
|
|
|
|
|
2015-02-26 04:32:20 +09:00
|
|
|
int
|
|
|
|
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
|
|
|
|
{
|
|
|
|
if (eth_dev == NULL)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_shared_data_prepare();
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2018-10-24 15:12:32 +02:00
|
|
|
if (eth_dev->state != RTE_ETH_DEV_UNUSED)
|
2020-09-09 14:01:48 +01:00
|
|
|
rte_eth_dev_callback_process(eth_dev,
|
2018-10-24 15:12:32 +02:00
|
|
|
RTE_ETH_EVENT_DESTROY, NULL);
|
2018-05-11 01:58:36 +02:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2017-03-31 14:04:37 +02:00
|
|
|
eth_dev->state = RTE_ETH_DEV_UNUSED;
|
2020-09-29 01:14:09 +02:00
|
|
|
eth_dev->device = NULL;
|
2020-10-16 15:32:58 +02:00
|
|
|
eth_dev->process_private = NULL;
|
2020-09-29 01:14:09 +02:00
|
|
|
eth_dev->intr_handle = NULL;
|
2020-10-16 15:32:58 +02:00
|
|
|
eth_dev->rx_pkt_burst = NULL;
|
|
|
|
eth_dev->tx_pkt_burst = NULL;
|
|
|
|
eth_dev->tx_pkt_prepare = NULL;
|
|
|
|
eth_dev->rx_queue_count = NULL;
|
|
|
|
eth_dev->rx_descriptor_done = NULL;
|
|
|
|
eth_dev->rx_descriptor_status = NULL;
|
|
|
|
eth_dev->tx_descriptor_status = NULL;
|
|
|
|
eth_dev->dev_ops = NULL;
|
2018-01-04 17:01:11 +01:00
|
|
|
|
2018-10-19 04:07:55 +02:00
|
|
|
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
|
|
|
|
rte_free(eth_dev->data->rx_queues);
|
|
|
|
rte_free(eth_dev->data->tx_queues);
|
|
|
|
rte_free(eth_dev->data->mac_addrs);
|
|
|
|
rte_free(eth_dev->data->hash_mac_addrs);
|
|
|
|
rte_free(eth_dev->data->dev_private);
|
ethdev: make flow API thread safe
Currently, the rte_flow functions are not defined as thread safe.
DPDK applications either call the functions from a single thread or
protect any concurrent calls to the rte_flow operations with a lock.
For PMDs whose flow operations are natively thread safe, this
redundant protection in the application hurts the performance of the
rte_flow operation functions.
The fact that thread safety is not guaranteed for the rte_flow
functions also limits what applications can expect from them.
This feature changes the rte_flow functions to be thread safe.
As different PMDs have different flow operations, some may already
be thread safe and others may not. For PMDs that do not support
thread-safe flow operations, a new lock is defined in ethdev
to protect thread-unsafe PMDs at the rte_flow level.
A new RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE device flag is added to
indicate whether the PMD supports thread-safe flow operations.
PMDs that support thread-safe flow operations set the
RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE flag, and the rte_flow level functions
skip the thread-safe helper lock for these PMDs. Again, the rte_flow
level thread-safe lock only takes effect when the PMD operation
functions are not thread safe.
PMDs that do not want the default mutex lock can simply set the
flag and add their preferred type of lock inside the PMD; the
default mutex lock is then easily replaced by the PMD level lock.
The change has no effect on current DPDK applications and requires
no changes from them. For the standard posix pthread_mutex, if there
is no lock contention on the added rte_flow level mutex, the mutex
only does the atomic increment in pthread_mutex_lock() and decrement
in pthread_mutex_unlock(). No futex() syscall is involved.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Ori Kam <orika@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
2020-10-15 09:07:47 +08:00
|
|
|
pthread_mutex_destroy(ð_dev->data->flow_ops_mutex);
|
2018-10-19 04:07:55 +02:00
|
|
|
memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
|
|
|
|
}
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2015-02-26 04:32:20 +09:00
|
|
|
return 0;
|
|
|
|
}
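/*
 * Illustrative sketch, not part of this file: per the "make flow API thread
 * safe" commit message above, a PMD whose flow operations are already thread
 * safe can opt out of the ethdev-level flow_ops_mutex by setting the
 * RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE device flag at init time.
 * "example_dev_init" is a hypothetical PMD init callback.
 */
static int
example_dev_init(struct rte_eth_dev *eth_dev)
{
	/* The flow ops of this PMD take their own locks internally, so the
	 * generic rte_flow level lock can be skipped for this device.
	 */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
	return 0;
}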
|
|
|
|
|
2015-06-10 15:06:24 -07:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_is_valid_port(uint16_t port_id)
|
2015-02-26 04:32:18 +09:00
|
|
|
{
|
|
|
|
if (port_id >= RTE_MAX_ETHPORTS ||
|
2018-01-20 21:12:19 +00:00
|
|
|
(rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
|
2015-02-26 04:32:18 +09:00
|
|
|
return 0;
|
|
|
|
else
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2018-01-22 16:38:19 +00:00
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_is_valid_owner_id(uint64_t owner_id)
|
2018-01-22 16:38:19 +00:00
|
|
|
{
|
|
|
|
if (owner_id == RTE_ETH_DEV_NO_OWNER ||
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_shared_data->next_owner_id <= owner_id)
|
2018-01-22 16:38:19 +00:00
|
|
|
return 0;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2018-04-24 04:15:11 +02:00
|
|
|
uint64_t
|
2018-01-22 16:38:19 +00:00
|
|
|
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
|
|
|
|
{
|
2019-04-18 00:59:27 +02:00
|
|
|
port_id = rte_eth_find_next(port_id);
|
2018-01-22 16:38:19 +00:00
|
|
|
while (port_id < RTE_MAX_ETHPORTS &&
|
2019-04-18 00:59:27 +02:00
|
|
|
rte_eth_devices[port_id].data->owner.id != owner_id)
|
|
|
|
port_id = rte_eth_find_next(port_id + 1);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
|
|
|
return port_id;
|
|
|
|
}
|
|
|
|
|
2019-06-29 13:58:52 +02:00
|
|
|
int
|
2018-01-22 16:38:19 +00:00
|
|
|
rte_eth_dev_owner_new(uint64_t *owner_id)
|
|
|
|
{
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_shared_data_prepare();
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
*owner_id = eth_dev_shared_data->next_owner_id++;
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:19 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
|
2018-01-22 16:38:19 +00:00
|
|
|
const struct rte_eth_dev_owner *new_owner)
|
|
|
|
{
|
2018-05-11 01:58:31 +02:00
|
|
|
struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
|
2018-01-22 16:38:19 +00:00
|
|
|
struct rte_eth_dev_owner *port_owner;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
|
|
|
|
port_id);
|
2018-05-11 01:58:31 +02:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
if (!eth_is_valid_owner_id(new_owner->id) &&
|
|
|
|
!eth_is_valid_owner_id(old_owner_id)) {
|
2018-08-16 15:37:14 -07:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
|
|
|
|
old_owner_id, new_owner->id);
|
2018-01-22 16:38:19 +00:00
|
|
|
return -EINVAL;
|
2018-08-16 15:37:14 -07:00
|
|
|
}
|
2018-01-22 16:38:19 +00:00
|
|
|
|
|
|
|
port_owner = &rte_eth_devices[port_id].data->owner;
|
|
|
|
if (port_owner->id != old_owner_id) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
|
|
|
|
port_id, port_owner->name, port_owner->id);
|
2018-01-22 16:38:19 +00:00
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
|
2019-02-28 14:47:53 -08:00
|
|
|
/* can not truncate (same structure) */
|
|
|
|
strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
|
|
|
port_owner->id = new_owner->id;
|
|
|
|
|
2018-08-01 10:43:56 -07:00
|
|
|
RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
|
2018-06-19 02:04:56 +01:00
|
|
|
port_id, new_owner->name, new_owner->id);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-06-29 13:58:52 +02:00
|
|
|
int
|
2018-01-22 16:38:19 +00:00
|
|
|
rte_eth_dev_owner_set(const uint16_t port_id,
|
|
|
|
const struct rte_eth_dev_owner *owner)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_shared_data_prepare();
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:19 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-06-29 13:58:52 +02:00
|
|
|
int
|
2018-01-22 16:38:19 +00:00
|
|
|
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
|
|
|
|
{
|
|
|
|
const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
|
|
|
|
{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
|
|
|
|
int ret;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_shared_data_prepare();
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:19 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-09-10 10:02:16 +01:00
|
|
|
int
|
2018-01-22 16:38:19 +00:00
|
|
|
rte_eth_dev_owner_delete(const uint64_t owner_id)
|
|
|
|
{
|
|
|
|
uint16_t port_id;
|
2019-09-10 10:02:16 +01:00
|
|
|
int ret = 0;
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_shared_data_prepare();
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
if (eth_is_valid_owner_id(owner_id)) {
|
2018-05-11 01:58:31 +02:00
|
|
|
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
|
|
|
|
if (rte_eth_devices[port_id].data->owner.id == owner_id)
|
|
|
|
memset(&rte_eth_devices[port_id].data->owner, 0,
|
|
|
|
sizeof(struct rte_eth_dev_owner));
|
2018-08-16 15:37:14 -07:00
|
|
|
RTE_ETHDEV_LOG(NOTICE,
|
2018-06-19 02:04:56 +01:00
|
|
|
"All port owners owned by %016"PRIx64" identifier have removed\n",
|
|
|
|
owner_id);
|
2018-08-16 15:37:14 -07:00
|
|
|
} else {
|
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Invalid owner id=%016"PRIx64"\n",
|
|
|
|
owner_id);
|
2019-09-10 10:02:16 +01:00
|
|
|
ret = -EINVAL;
|
2018-01-22 16:38:19 +00:00
|
|
|
}
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
|
2019-09-10 10:02:16 +01:00
|
|
|
|
|
|
|
return ret;
|
2018-01-22 16:38:19 +00:00
|
|
|
}
|
|
|
|
|
2019-06-29 13:58:52 +02:00
|
|
|
int
|
2018-01-22 16:38:19 +00:00
|
|
|
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
2018-05-11 01:58:31 +02:00
|
|
|
struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_shared_data_prepare();
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
|
|
|
|
port_id);
|
2018-01-22 16:38:19 +00:00
|
|
|
ret = -ENODEV;
|
|
|
|
} else {
|
2018-05-11 01:58:31 +02:00
|
|
|
rte_memcpy(owner, ðdev->data->owner, sizeof(*owner));
|
2018-01-22 16:38:19 +00:00
|
|
|
}
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
|
2018-01-22 16:38:19 +00:00
|
|
|
return ret;
|
|
|
|
}
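/*
 * Illustrative sketch, not part of this file: typical use of the port
 * ownership API above by an application or a failsafe-style PMD. The owner
 * name "example-app" is arbitrary.
 */
static int
example_take_ownership(uint16_t port_id)
{
	struct rte_eth_dev_owner owner;
	int ret;

	ret = rte_eth_dev_owner_new(&owner.id);
	if (ret != 0)
		return ret;
	strlcpy(owner.name, "example-app", RTE_ETH_MAX_OWNER_NAME_LEN);

	/* Fails with -EPERM if the port is already owned by someone else. */
	return rte_eth_dev_owner_set(port_id, &owner);
}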
|
|
|
|
|
2013-06-03 00:00:00 +00:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_socket_id(uint16_t port_id)
|
2013-06-03 00:00:00 +00:00
|
|
|
{
|
2016-05-18 21:15:11 +02:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
|
2015-11-03 13:01:58 +00:00
|
|
|
return rte_eth_devices[port_id].data->numa_node;
|
2013-06-03 00:00:00 +00:00
|
|
|
}
|
|
|
|
|
2017-10-25 20:37:21 +05:30
|
|
|
void *
|
2018-03-09 11:27:48 +00:00
|
|
|
rte_eth_dev_get_sec_ctx(uint16_t port_id)
|
2017-10-25 20:37:21 +05:30
|
|
|
{
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
|
|
|
|
return rte_eth_devices[port_id].security_ctx;
|
|
|
|
}
|
|
|
|
|
2018-04-05 17:33:22 +02:00
|
|
|
uint16_t
|
|
|
|
rte_eth_dev_count_avail(void)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
2017-09-29 15:17:24 +08:00
|
|
|
uint16_t p;
|
|
|
|
uint16_t count;
|
2017-07-18 14:48:13 +02:00
|
|
|
|
|
|
|
count = 0;
|
|
|
|
|
|
|
|
RTE_ETH_FOREACH_DEV(p)
|
|
|
|
count++;
|
|
|
|
|
|
|
|
return count;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2019-04-18 00:59:28 +02:00
|
|
|
uint16_t
|
2018-04-05 17:33:22 +02:00
|
|
|
rte_eth_dev_count_total(void)
|
|
|
|
{
|
|
|
|
uint16_t port, count = 0;
|
|
|
|
|
2019-04-18 00:59:27 +02:00
|
|
|
RTE_ETH_FOREACH_VALID_DEV(port)
|
|
|
|
count++;
|
2018-04-05 17:33:22 +02:00
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2016-06-15 15:06:21 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
|
2015-02-26 04:32:26 +09:00
|
|
|
{
|
2017-09-22 12:30:07 +01:00
|
|
|
char *tmp;
|
2015-02-26 04:32:26 +09:00
|
|
|
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-02-26 04:32:26 +09:00
|
|
|
|
|
|
|
if (name == NULL) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
|
2015-02-26 04:32:26 +09:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* shouldn't check 'rte_eth_devices[i].data',
|
|
|
|
* because it might be overwritten by VDEV PMD */
|
2020-10-13 17:56:58 +01:00
|
|
|
tmp = eth_dev_shared_data->data[port_id].name;
|
2015-02-26 04:32:26 +09:00
|
|
|
strcpy(name, tmp);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-06-15 15:06:21 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
|
2015-09-23 14:16:17 -07:00
|
|
|
{
|
2020-11-04 10:57:57 +08:00
|
|
|
uint16_t pid;
|
2015-09-23 14:16:17 -07:00
|
|
|
|
|
|
|
if (name == NULL) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
|
2015-09-23 14:16:17 -07:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-04-18 00:59:27 +02:00
|
|
|
RTE_ETH_FOREACH_VALID_DEV(pid)
|
2020-10-13 17:56:58 +01:00
|
|
|
if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
|
2018-01-22 16:38:19 +00:00
|
|
|
*port_id = pid;
|
2015-09-23 14:16:17 -07:00
|
|
|
return 0;
|
|
|
|
}
|
2018-01-22 16:38:19 +00:00
|
|
|
|
2015-09-23 14:16:17 -07:00
|
|
|
return -ENODEV;
|
|
|
|
}
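/*
 * Illustrative sketch, not part of this file: resolving a device name (for
 * example a PCI address passed on the command line) to a port ID with the
 * lookup helper above. The address "0000:04:00.0" is only an example.
 */
static int
example_port_from_name(uint16_t *port_id)
{
	if (rte_eth_dev_get_port_by_name("0000:04:00.0", port_id) != 0)
		return -ENODEV;
	return 0;
}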
|
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
static int
|
|
|
|
eth_err(uint16_t port_id, int ret)
|
|
|
|
{
|
|
|
|
if (ret == 0)
|
|
|
|
return 0;
|
|
|
|
if (rte_eth_dev_is_removed(port_id))
|
|
|
|
return -EIO;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
|
2012-12-20 00:00:00 +01:00
|
|
|
{
|
|
|
|
uint16_t old_nb_queues = dev->data->nb_rx_queues;
|
|
|
|
void **rxq;
|
|
|
|
unsigned i;
|
|
|
|
|
2016-01-05 16:34:58 +00:00
|
|
|
if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
|
2012-12-20 00:00:00 +01:00
|
|
|
dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
|
|
|
|
sizeof(dev->data->rx_queues[0]) * nb_queues,
|
2014-11-19 12:26:06 +00:00
|
|
|
RTE_CACHE_LINE_SIZE);
|
2012-12-20 00:00:00 +01:00
|
|
|
if (dev->data->rx_queues == NULL) {
|
|
|
|
dev->data->nb_rx_queues = 0;
|
|
|
|
return -(ENOMEM);
|
|
|
|
}
|
2016-01-05 16:34:58 +00:00
|
|
|
} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
|
2013-09-13 14:14:02 +02:00
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
rxq = dev->data->rx_queues;
|
|
|
|
|
|
|
|
for (i = nb_queues; i < old_nb_queues; i++)
|
|
|
|
(*dev->dev_ops->rx_queue_release)(rxq[i]);
|
|
|
|
rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
|
2014-11-19 12:26:06 +00:00
|
|
|
RTE_CACHE_LINE_SIZE);
|
2012-12-20 00:00:00 +01:00
|
|
|
if (rxq == NULL)
|
|
|
|
return -(ENOMEM);
|
2015-02-23 18:30:09 +00:00
|
|
|
if (nb_queues > old_nb_queues) {
|
|
|
|
uint16_t new_qs = nb_queues - old_nb_queues;
|
2015-06-26 17:01:44 -07:00
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
memset(rxq + old_nb_queues, 0,
|
2015-02-23 18:30:09 +00:00
|
|
|
sizeof(rxq[0]) * new_qs);
|
|
|
|
}
|
2012-12-20 00:00:00 +01:00
|
|
|
|
|
|
|
dev->data->rx_queues = rxq;
|
|
|
|
|
2016-01-05 16:34:58 +00:00
|
|
|
} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
|
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
|
|
|
|
|
|
|
|
rxq = dev->data->rx_queues;
|
|
|
|
|
|
|
|
for (i = nb_queues; i < old_nb_queues; i++)
|
|
|
|
(*dev->dev_ops->rx_queue_release)(rxq[i]);
|
2016-11-24 12:26:46 +01:00
|
|
|
|
|
|
|
rte_free(dev->data->rx_queues);
|
|
|
|
dev->data->rx_queues = NULL;
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
|
|
|
dev->data->nb_rx_queues = nb_queues;
|
2015-04-09 14:29:42 -07:00
|
|
|
return 0;
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
|
|
|
|
2020-10-13 19:50:53 +08:00
|
|
|
static int
|
|
|
|
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
|
|
|
|
{
|
|
|
|
uint16_t port_id;
|
|
|
|
|
|
|
|
if (rx_queue_id >= dev->data->nb_rx_queues) {
|
|
|
|
port_id = dev->data->port_id;
|
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Invalid Rx queue_id=%u of device with port_id=%u\n",
|
|
|
|
rx_queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-10-13 19:50:54 +08:00
|
|
|
if (dev->data->rx_queues[rx_queue_id] == NULL) {
|
|
|
|
port_id = dev->data->port_id;
|
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Queue %u of device with port_id=%u has not been setup\n",
|
|
|
|
rx_queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-10-13 19:50:53 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
|
|
|
|
{
|
|
|
|
uint16_t port_id;
|
|
|
|
|
|
|
|
if (tx_queue_id >= dev->data->nb_tx_queues) {
|
|
|
|
port_id = dev->data->port_id;
|
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Invalid Tx queue_id=%u of device with port_id=%u\n",
|
|
|
|
tx_queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-10-13 19:50:54 +08:00
|
|
|
if (dev->data->tx_queues[tx_queue_id] == NULL) {
|
|
|
|
port_id = dev->data->port_id;
|
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Queue %u of device with port_id=%u has not been setup\n",
|
|
|
|
tx_queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-10-13 19:50:53 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-05-28 16:06:36 +08:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
|
2014-05-28 16:06:36 +08:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2020-10-13 19:50:53 +08:00
|
|
|
int ret;
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-05-28 16:06:36 +08:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2018-03-22 20:59:01 +08:00
|
|
|
if (!dev->data->dev_started) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Port %u must be started before start any queue\n",
|
|
|
|
port_id);
|
2018-03-22 20:59:01 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-10-13 19:50:53 +08:00
|
|
|
ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2019-10-30 23:53:11 +00:00
|
|
|
if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
|
|
|
|
RTE_ETHDEV_LOG(INFO,
|
|
|
|
"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
|
|
|
|
rx_queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-09-16 22:51:24 +01:00
|
|
|
if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
|
2018-08-02 19:39:41 +01:00
|
|
|
RTE_ETHDEV_LOG(INFO,
|
2018-06-19 02:04:56 +01:00
|
|
|
"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
|
2015-09-16 22:51:24 +01:00
|
|
|
rx_queue_id, port_id);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
|
|
|
|
rx_queue_id));
|
2014-05-28 16:06:36 +08:00
|
|
|
|
|
|
|
}
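/*
 * Illustrative sketch, not part of this file: the deferred-start pattern the
 * per-queue start/stop helpers above are meant for. A queue set up with
 * rx_deferred_start = 1 is left stopped by rte_eth_dev_start() and is started
 * explicitly afterwards. Assumes the port was already configured with
 * rte_eth_dev_configure() and that "mp" is an existing mempool.
 */
static int
example_deferred_rx_start(uint16_t port_id, uint16_t queue_id,
		struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rxconf = dev_info.default_rxconf;
	rxconf.rx_deferred_start = 1;	/* keep this queue stopped at dev start */

	ret = rte_eth_rx_queue_setup(port_id, queue_id, 512,
			rte_eth_dev_socket_id(port_id), &rxconf, mp);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_start(port_id);
	if (ret != 0)
		return ret;

	/* The port is started; now bring up the deferred queue. */
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}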
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
|
2014-05-28 16:06:36 +08:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2020-10-13 19:50:53 +08:00
|
|
|
int ret;
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-05-28 16:06:36 +08:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2020-10-13 19:50:53 +08:00
|
|
|
|
|
|
|
ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2019-10-30 23:53:11 +00:00
|
|
|
if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
|
|
|
|
RTE_ETHDEV_LOG(INFO,
|
|
|
|
"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
|
|
|
|
rx_queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-09-16 22:51:24 +01:00
|
|
|
if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
|
2018-08-02 19:39:41 +01:00
|
|
|
RTE_ETHDEV_LOG(INFO,
|
2018-06-19 02:04:56 +01:00
|
|
|
"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
|
2015-09-16 22:51:24 +01:00
|
|
|
rx_queue_id, port_id);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
|
2014-05-28 16:06:36 +08:00
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
|
2014-05-28 16:06:36 +08:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2020-10-13 19:50:53 +08:00
|
|
|
int ret;
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-05-28 16:06:36 +08:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2018-03-22 20:59:01 +08:00
|
|
|
if (!dev->data->dev_started) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Port %u must be started before start any queue\n",
|
|
|
|
port_id);
|
2018-03-22 20:59:01 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-10-13 19:50:53 +08:00
|
|
|
ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2019-10-30 23:53:11 +00:00
|
|
|
if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
|
|
|
|
RTE_ETHDEV_LOG(INFO,
|
|
|
|
"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
|
|
|
|
tx_queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-09-16 22:51:24 +01:00
|
|
|
if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
|
2018-08-02 19:39:41 +01:00
|
|
|
RTE_ETHDEV_LOG(INFO,
|
2018-06-19 02:04:56 +01:00
|
|
|
"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
|
2015-09-16 22:51:24 +01:00
|
|
|
tx_queue_id, port_id);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-06-19 02:04:56 +01:00
|
|
|
return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
|
2014-05-28 16:06:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
|
2014-05-28 16:06:36 +08:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2020-10-13 19:50:53 +08:00
|
|
|
int ret;
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-05-28 16:06:36 +08:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2020-10-13 19:50:53 +08:00
|
|
|
|
|
|
|
ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
|
2014-05-28 16:06:36 +08:00
|
|
|
|
2019-10-30 23:53:11 +00:00
|
|
|
if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
|
|
|
|
RTE_ETHDEV_LOG(INFO,
|
|
|
|
"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
|
|
|
|
tx_queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-09-16 22:51:24 +01:00
|
|
|
if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
|
2018-08-02 19:39:41 +01:00
|
|
|
RTE_ETHDEV_LOG(INFO,
|
2018-06-19 02:04:56 +01:00
|
|
|
"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
|
2015-09-16 22:51:24 +01:00
|
|
|
tx_queue_id, port_id);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
|
2014-05-28 16:06:36 +08:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
|
2012-12-20 00:00:00 +01:00
|
|
|
{
|
|
|
|
uint16_t old_nb_queues = dev->data->nb_tx_queues;
|
|
|
|
void **txq;
|
|
|
|
unsigned i;
|
|
|
|
|
2016-01-05 16:34:58 +00:00
|
|
|
if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
|
2012-12-20 00:00:00 +01:00
|
|
|
dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
|
2015-06-26 17:01:44 -07:00
|
|
|
sizeof(dev->data->tx_queues[0]) * nb_queues,
|
|
|
|
RTE_CACHE_LINE_SIZE);
|
2012-12-20 00:00:00 +01:00
|
|
|
if (dev->data->tx_queues == NULL) {
|
|
|
|
dev->data->nb_tx_queues = 0;
|
|
|
|
return -(ENOMEM);
|
|
|
|
}
|
2016-01-05 16:34:58 +00:00
|
|
|
} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
|
2013-09-13 14:14:02 +02:00
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
txq = dev->data->tx_queues;
|
|
|
|
|
|
|
|
for (i = nb_queues; i < old_nb_queues; i++)
|
|
|
|
(*dev->dev_ops->tx_queue_release)(txq[i]);
|
|
|
|
txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
|
2015-06-26 17:01:44 -07:00
|
|
|
RTE_CACHE_LINE_SIZE);
|
2012-12-20 00:00:00 +01:00
|
|
|
if (txq == NULL)
|
2015-02-23 18:30:09 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
if (nb_queues > old_nb_queues) {
|
|
|
|
uint16_t new_qs = nb_queues - old_nb_queues;
|
2015-06-26 17:01:44 -07:00
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
memset(txq + old_nb_queues, 0,
|
2015-06-26 17:01:44 -07:00
|
|
|
sizeof(txq[0]) * new_qs);
|
2015-02-23 18:30:09 +00:00
|
|
|
}
|
2012-12-20 00:00:00 +01:00
|
|
|
|
|
|
|
dev->data->tx_queues = txq;
|
|
|
|
|
2016-01-05 16:34:58 +00:00
|
|
|
} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
|
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
|
|
|
|
|
|
|
|
txq = dev->data->tx_queues;
|
|
|
|
|
|
|
|
for (i = nb_queues; i < old_nb_queues; i++)
|
|
|
|
(*dev->dev_ops->tx_queue_release)(txq[i]);
|
2016-11-24 12:26:46 +01:00
|
|
|
|
|
|
|
rte_free(dev->data->tx_queues);
|
|
|
|
dev->data->tx_queues = NULL;
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
|
|
|
dev->data->nb_tx_queues = nb_queues;
|
2015-04-09 14:29:42 -07:00
|
|
|
return 0;
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
|
|
|
|
2016-04-01 00:12:30 +02:00
|
|
|
uint32_t
|
|
|
|
rte_eth_speed_bitflag(uint32_t speed, int duplex)
|
|
|
|
{
|
|
|
|
switch (speed) {
|
|
|
|
case ETH_SPEED_NUM_10M:
|
|
|
|
return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
|
|
|
|
case ETH_SPEED_NUM_100M:
|
|
|
|
return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
|
|
|
|
case ETH_SPEED_NUM_1G:
|
|
|
|
return ETH_LINK_SPEED_1G;
|
|
|
|
case ETH_SPEED_NUM_2_5G:
|
|
|
|
return ETH_LINK_SPEED_2_5G;
|
|
|
|
case ETH_SPEED_NUM_5G:
|
|
|
|
return ETH_LINK_SPEED_5G;
|
|
|
|
case ETH_SPEED_NUM_10G:
|
|
|
|
return ETH_LINK_SPEED_10G;
|
|
|
|
case ETH_SPEED_NUM_20G:
|
|
|
|
return ETH_LINK_SPEED_20G;
|
|
|
|
case ETH_SPEED_NUM_25G:
|
|
|
|
return ETH_LINK_SPEED_25G;
|
|
|
|
case ETH_SPEED_NUM_40G:
|
|
|
|
return ETH_LINK_SPEED_40G;
|
|
|
|
case ETH_SPEED_NUM_50G:
|
|
|
|
return ETH_LINK_SPEED_50G;
|
|
|
|
case ETH_SPEED_NUM_56G:
|
|
|
|
return ETH_LINK_SPEED_56G;
|
2016-04-01 00:12:31 +02:00
|
|
|
case ETH_SPEED_NUM_100G:
|
|
|
|
return ETH_LINK_SPEED_100G;
|
2020-05-06 12:22:08 +00:00
|
|
|
case ETH_SPEED_NUM_200G:
|
|
|
|
return ETH_LINK_SPEED_200G;
|
2016-04-01 00:12:30 +02:00
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
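/*
 * Illustrative sketch, not part of this file: rte_eth_speed_bitflag() is
 * meant for composing rte_eth_conf.link_speeds, e.g. to force a fixed 10G
 * full-duplex link instead of autonegotiation.
 */
static void
example_fixed_10g(struct rte_eth_conf *conf)
{
	conf->link_speeds = ETH_LINK_SPEED_FIXED |
		rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
}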
|
|
|
|
|
2018-10-19 10:35:32 -07:00
|
|
|
const char *
|
2018-01-18 09:44:26 +00:00
|
|
|
rte_eth_dev_rx_offload_name(uint64_t offload)
|
|
|
|
{
|
|
|
|
const char *name = "UNKNOWN";
|
|
|
|
unsigned int i;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
|
|
|
|
if (offload == eth_dev_rx_offload_names[i].offload) {
|
|
|
|
name = eth_dev_rx_offload_names[i].name;
|
2018-01-18 09:44:26 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return name;
|
|
|
|
}
|
|
|
|
|
2018-10-19 10:35:32 -07:00
|
|
|
const char *
|
2018-01-18 09:44:27 +00:00
|
|
|
rte_eth_dev_tx_offload_name(uint64_t offload)
|
|
|
|
{
|
|
|
|
const char *name = "UNKNOWN";
|
|
|
|
unsigned int i;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
|
|
|
|
if (offload == eth_dev_tx_offload_names[i].offload) {
|
|
|
|
name = eth_dev_tx_offload_names[i].name;
|
2018-01-18 09:44:27 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return name;
|
|
|
|
}
|
|
|
|
|
2019-11-11 19:47:33 +02:00
|
|
|
static inline int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
|
2019-11-11 19:47:33 +02:00
|
|
|
uint32_t max_rx_pkt_len, uint32_t dev_info_size)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (dev_info_size == 0) {
|
|
|
|
if (config_size != max_rx_pkt_len) {
|
|
|
|
RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
|
|
|
|
" %u != %u is not allowed\n",
|
|
|
|
port_id, config_size, max_rx_pkt_len);
|
|
|
|
ret = -EINVAL;
|
|
|
|
}
|
|
|
|
} else if (config_size > dev_info_size) {
|
|
|
|
RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
|
|
|
|
"> max allowed value %u\n", port_id, config_size,
|
|
|
|
dev_info_size);
|
|
|
|
ret = -EINVAL;
|
|
|
|
} else if (config_size < RTE_ETHER_MIN_LEN) {
|
|
|
|
RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
|
|
|
|
"< min allowed value %u\n", port_id, config_size,
|
|
|
|
(unsigned int)RTE_ETHER_MIN_LEN);
|
|
|
|
ret = -EINVAL;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-11-11 18:49:07 +05:30
|
|
|
/*
|
|
|
|
* Validate offloads that are requested through rte_eth_dev_configure against
|
2020-03-10 09:24:05 -07:00
|
|
|
* the offloads successfully set by the ethernet device.
|
2019-11-11 18:49:07 +05:30
|
|
|
*
|
|
|
|
* @param port_id
|
|
|
|
* The port identifier of the Ethernet device.
|
|
|
|
* @param req_offloads
|
|
|
|
* The offloads that have been requested through `rte_eth_dev_configure`.
|
|
|
|
* @param set_offloads
|
2020-03-10 09:24:05 -07:00
|
|
|
* The offloads successfully set by the ethernet device.
|
2019-11-11 18:49:07 +05:30
|
|
|
* @param offload_type
|
|
|
|
* The offload type i.e. Rx/Tx string.
|
|
|
|
* @param offload_name
|
|
|
|
* The function that prints the offload name.
|
|
|
|
* @return
|
|
|
|
* - (0) if validation successful.
|
|
|
|
* - (-EINVAL) if requested offload has been silently disabled.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
|
2019-11-11 18:49:07 +05:30
|
|
|
uint64_t set_offloads, const char *offload_type,
|
|
|
|
const char *(*offload_name)(uint64_t))
|
|
|
|
{
|
|
|
|
uint64_t offloads_diff = req_offloads ^ set_offloads;
|
|
|
|
uint64_t offload;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
while (offloads_diff != 0) {
|
|
|
|
/* Check if any offload is requested but not enabled. */
|
|
|
|
offload = 1ULL << __builtin_ctzll(offloads_diff);
|
|
|
|
if (offload & req_offloads) {
|
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Port %u failed to enable %s offload %s\n",
|
|
|
|
port_id, offload_type, offload_name(offload));
|
|
|
|
ret = -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-03-10 09:24:05 -07:00
|
|
|
/* Check if offload couldn't be disabled. */
|
2019-11-11 18:49:07 +05:30
|
|
|
if (offload & set_offloads) {
|
2019-11-14 16:40:51 +00:00
|
|
|
RTE_ETHDEV_LOG(DEBUG,
|
2019-11-14 16:40:52 +00:00
|
|
|
"Port %u %s offload %s is not requested but enabled\n",
|
2019-11-11 18:49:07 +05:30
|
|
|
port_id, offload_type, offload_name(offload));
|
|
|
|
}
|
|
|
|
|
|
|
|
offloads_diff &= ~offload;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-09-04 13:54:00 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
|
2012-09-04 13:54:00 +01:00
|
|
|
const struct rte_eth_conf *dev_conf)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
struct rte_eth_dev_info dev_info;
|
2018-11-13 11:12:36 +00:00
|
|
|
struct rte_eth_conf orig_conf;
|
2021-01-18 07:04:07 +00:00
|
|
|
uint16_t overhead_len;
|
2012-09-04 13:54:00 +01:00
|
|
|
int diag;
|
2018-11-13 11:12:36 +00:00
|
|
|
int ret;
|
2021-01-18 07:04:07 +00:00
|
|
|
uint16_t old_mtu;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-02-26 04:32:18 +09:00
|
|
|
|
2018-04-10 10:43:16 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2018-05-09 23:16:49 +01:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
|
|
|
|
|
2018-11-13 11:12:37 +00:00
|
|
|
if (dev->data->dev_started) {
|
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Port %u must be stopped to allow configuration\n",
|
|
|
|
port_id);
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Store original config, as rollback required on failure */
|
|
|
|
memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy the dev_conf parameter into the dev structure.
|
|
|
|
* rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
|
|
|
|
*/
|
2019-11-19 08:22:50 +00:00
|
|
|
if (dev_conf != &dev->data->dev_conf)
|
|
|
|
memcpy(&dev->data->dev_conf, dev_conf,
|
|
|
|
sizeof(dev->data->dev_conf));
|
2018-11-13 11:12:37 +00:00
|
|
|
|
2021-01-18 07:04:07 +00:00
|
|
|
/* Backup mtu for rollback */
|
|
|
|
old_mtu = dev->data->mtu;
|
|
|
|
|
2019-09-12 17:42:13 +01:00
|
|
|
ret = rte_eth_dev_info_get(port_id, &dev_info);
|
|
|
|
if (ret != 0)
|
|
|
|
goto rollback;
|
2018-04-10 10:43:16 +01:00
|
|
|
|
2021-01-18 07:04:07 +00:00
|
|
|
/* Get the real Ethernet overhead length */
|
|
|
|
if (dev_info.max_mtu != UINT16_MAX &&
|
|
|
|
dev_info.max_rx_pktlen > dev_info.max_mtu)
|
|
|
|
overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
|
|
|
|
else
|
|
|
|
overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
|
|
|
|
|
2018-04-10 10:43:16 +01:00
|
|
|
/* If number of queues specified by application for both Rx and Tx is
|
|
|
|
* zero, use driver preferred values. This cannot be done individually
|
|
|
|
* as it is valid for either Tx or Rx (but not both) to be zero.
|
|
|
|
* If driver does not provide any preferred valued, fall back on
|
|
|
|
* EAL defaults.
|
|
|
|
*/
|
|
|
|
if (nb_rx_q == 0 && nb_tx_q == 0) {
|
|
|
|
nb_rx_q = dev_info.default_rxportconf.nb_queues;
|
|
|
|
if (nb_rx_q == 0)
|
|
|
|
nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
|
|
|
|
nb_tx_q = dev_info.default_txportconf.nb_queues;
|
|
|
|
if (nb_tx_q == 0)
|
|
|
|
nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
|
|
|
|
}
|
|
|
|
|
2015-03-26 17:02:45 +00:00
|
|
|
if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
2015-03-26 17:02:45 +00:00
|
|
|
"Number of RX queues requested (%u) is greater than max supported(%d)\n",
|
|
|
|
nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
|
2018-11-13 11:12:37 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto rollback;
|
2015-03-26 17:02:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
2015-03-26 17:02:45 +00:00
|
|
|
"Number of TX queues requested (%u) is greater than max supported(%d)\n",
|
|
|
|
nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
|
2018-11-13 11:12:37 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto rollback;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check that the numbers of RX and TX queues are not greater
|
|
|
|
* than the maximum number of RX and TX queues supported by the
|
|
|
|
* configured device.
|
|
|
|
*/
|
|
|
|
if (nb_rx_q > dev_info.max_rx_queues) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
|
|
|
|
port_id, nb_rx_q, dev_info.max_rx_queues);
|
2018-11-13 11:12:36 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto rollback;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (nb_tx_q > dev_info.max_tx_queues) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
|
|
|
|
port_id, nb_tx_q, dev_info.max_tx_queues);
|
2018-11-13 11:12:36 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto rollback;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2017-04-18 14:17:38 +02:00
|
|
|
/* Check that the device supports requested interrupts */
|
2015-11-03 13:01:58 +00:00
|
|
|
if ((dev_conf->intr_conf.lsc == 1) &&
|
2018-06-19 02:04:56 +01:00
|
|
|
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
|
|
|
|
RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
|
|
|
|
dev->device->driver->name);
|
2018-11-13 11:12:36 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto rollback;
|
2014-06-19 15:12:38 -07:00
|
|
|
}
|
2017-04-18 14:17:38 +02:00
|
|
|
if ((dev_conf->intr_conf.rmv == 1) &&
|
2018-06-19 02:04:56 +01:00
|
|
|
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
|
|
|
|
RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
|
|
|
|
dev->device->driver->name);
|
2018-11-13 11:12:36 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto rollback;
|
2017-04-18 14:17:38 +02:00
|
|
|
}
|
2014-06-19 15:12:38 -07:00
|
|
|
|
2012-09-04 13:54:00 +01:00
|
|
|
/*
|
|
|
|
* If jumbo frames are enabled, check that the maximum RX packet
|
|
|
|
* length is supported by the configured device.
|
|
|
|
*/
|
2018-11-13 11:12:38 +00:00
|
|
|
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
|
2018-06-19 02:04:56 +01:00
|
|
|
if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
|
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
|
|
|
|
port_id, dev_conf->rxmode.max_rx_pkt_len,
|
|
|
|
dev_info.max_rx_pktlen);
|
2018-11-13 11:12:36 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto rollback;
|
2019-05-21 18:13:05 +02:00
|
|
|
} else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
|
|
|
|
port_id, dev_conf->rxmode.max_rx_pkt_len,
|
2019-05-21 18:13:05 +02:00
|
|
|
(unsigned int)RTE_ETHER_MIN_LEN);
|
2018-11-13 11:12:36 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto rollback;
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
2021-01-18 07:04:07 +00:00
|
|
|
|
|
|
|
/* Scale the MTU size to adapt max_rx_pkt_len */
|
|
|
|
dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
|
|
|
|
overhead_len;
|
2014-06-05 13:08:51 +08:00
|
|
|
} else {
|
2021-01-18 07:04:07 +00:00
|
|
|
uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
|
|
|
|
if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
|
|
|
|
pktlen > RTE_ETHER_MTU + overhead_len)
|
2014-06-05 13:08:51 +08:00
|
|
|
/* Use default value */
|
|
|
|
dev->data->dev_conf.rxmode.max_rx_pkt_len =
|
2021-01-18 07:04:07 +00:00
|
|
|
RTE_ETHER_MTU + overhead_len;
|
2014-06-05 13:08:51 +08:00
|
|
|
}
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2019-11-11 19:47:33 +02:00
|
|
|
/*
|
|
|
|
* If LRO is enabled, check that the maximum aggregated packet
|
|
|
|
* size is supported by the configured device.
|
|
|
|
*/
|
|
|
|
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
|
|
|
|
if (dev_conf->rxmode.max_lro_pkt_size == 0)
|
|
|
|
dev->data->dev_conf.rxmode.max_lro_pkt_size =
|
|
|
|
dev->data->dev_conf.rxmode.max_rx_pkt_len;
|
2020-10-13 17:56:58 +01:00
|
|
|
ret = eth_dev_check_lro_pkt_size(port_id,
|
2019-11-11 19:47:33 +02:00
|
|
|
dev->data->dev_conf.rxmode.max_lro_pkt_size,
|
|
|
|
dev->data->dev_conf.rxmode.max_rx_pkt_len,
|
|
|
|
dev_info.max_lro_pkt_size);
|
|
|
|
if (ret != 0)
|
|
|
|
goto rollback;
|
|
|
|
}
|
|
|
|
|
2018-05-10 19:56:55 +08:00
|
|
|
/* Any requested offloading must be within its device capabilities */
|
2018-11-13 11:12:38 +00:00
|
|
|
if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
|
|
|
|
dev_conf->rxmode.offloads) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
|
|
|
|
"capabilities 0x%"PRIx64" in %s()\n",
|
2018-11-13 11:12:38 +00:00
|
|
|
port_id, dev_conf->rxmode.offloads,
|
2018-06-19 02:04:56 +01:00
|
|
|
dev_info.rx_offload_capa,
|
|
|
|
__func__);
|
2018-11-13 11:12:36 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto rollback;
|
2018-05-10 19:56:55 +08:00
|
|
|
}
|
2018-11-13 11:12:38 +00:00
|
|
|
if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
|
|
|
|
dev_conf->txmode.offloads) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
|
|
|
|
"capabilities 0x%"PRIx64" in %s()\n",
|
2018-11-13 11:12:38 +00:00
|
|
|
port_id, dev_conf->txmode.offloads,
|
2018-06-19 02:04:56 +01:00
|
|
|
dev_info.tx_offload_capa,
|
|
|
|
__func__);
|
2018-11-13 11:12:36 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto rollback;
|
2018-05-10 19:56:55 +08:00
|
|
|
}
|
|
|
|
|
2019-10-15 23:09:48 +08:00
|
|
|
dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
|
|
|
|
rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
|
|
|
|
|
2018-04-20 22:30:22 +08:00
|
|
|
/* Check that device supports requested rss hash functions. */
|
|
|
|
if ((dev_info.flow_type_rss_offloads |
|
|
|
|
dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
|
|
|
|
dev_info.flow_type_rss_offloads) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
|
|
|
|
port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
|
|
|
|
dev_info.flow_type_rss_offloads);
|
2018-11-13 11:12:36 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
goto rollback;
|
2018-04-20 22:30:22 +08:00
|
|
|
}
|
|
|
|
|
2019-11-11 18:49:06 +05:30
|
|
|
/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
|
|
|
|
	if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

void
rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

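/*
 * Worked example for the pool_mask replay loop above (illustrative reading of
 * the existing code, not extra functionality): if the application earlier
 * called, say, rte_eth_dev_mac_addr_add(port_id, &addr, 2) so that
 * dev->data->mac_pool_sel[i] ends up as 0x5, the do/while loop above calls
 * mac_addr_add for that address once with pool 0 and once with pool 2,
 * shifting the mask right on every iteration until it reaches zero.
 */
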
static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need port_id check and
	 * would like to bypass the same value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	rte_ethdev_trace_start(port_id);
	return 0;
}

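/*
 * Illustrative sketch (not part of this file): the usual bring-up order an
 * application follows with the APIs implemented above. The port id, queue
 * counts, descriptor counts and mempool are assumptions of the example, not
 * requirements of the ethdev API.
 *
 *	static int
 *	example_port_init(uint16_t port_id, struct rte_mempool *mb_pool)
 *	{
 *		struct rte_eth_conf port_conf = {0};
 *		int ret;
 *
 *		ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *		if (ret != 0)
 *			return ret;
 *		ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *				rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *		if (ret < 0)
 *			return ret;
 *		ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
 *				rte_eth_dev_socket_id(port_id), NULL);
 *		if (ret < 0)
 *			return ret;
 *		return rte_eth_dev_start(port_id);
 *	}
 *
 * rte_eth_dev_start() then replays the MAC, promiscuous and allmulticast
 * configuration through eth_dev_config_restore() as shown above.
 */
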
int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	dev->data->dev_started = 0;
	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

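/*
 * Illustrative sketch (not part of this file): orderly shutdown of a port
 * using the functions above. Error handling is elided; a real application
 * checks both return values.
 *
 *	ret = rte_eth_dev_stop(port_id);
 *	ret = rte_eth_dev_close(port_id);
 *
 * rte_eth_dev_close() records only the first error it encounters (the
 * firsterr/lasterr pair above) so a failing dev_close does not hide the
 * result of rte_eth_dev_release_port().
 */
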
int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

/*
 * Rx buffer split (RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT):
 * instead of a single mempool, an Rx queue may be configured with an array of
 * rte_eth_rxseg_split descriptions (rx_conf->rx_seg / rx_conf->rx_nseg). Each
 * element selects the mempool, the maximal data length (the "split point")
 * and the data offset for one segment of the received packet, so headers and
 * payload can be placed in different pools or at different offsets. When the
 * extended configuration is used, the mp argument of
 * rte_eth_rx_queue_setup() must be NULL to avoid ambiguity, the
 * RTE_ETH_RX_OFFLOAD_SCATTER offload must be configured, and the split
 * limitations imposed by the PMD are reported in
 * rte_eth_dev_info->rx_seg_capa. The helper below validates such a
 * configuration against those limits.
 */
static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = (1u << seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					       offset,
					       seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				       "%s private_data_size %u < %u\n",
				       mpl->name, mpl->private_data_size,
				       (unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				       mpl->name, *mbp_buf_size,
				       length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}

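/*
 * Illustrative sketch (not part of this file): how an application might
 * describe a two-segment split that the helper above validates. The pool
 * names, lengths and queue parameters are assumptions of the example, and the
 * rx_seg container is assumed to be the union type introduced together with
 * this feature.
 *
 *	union rte_eth_rxseg segs[2];
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *	int ret;
 *
 *	segs[0].split.mp = hdr_pool;       headers, first 128 bytes
 *	segs[0].split.length = 128;
 *	segs[0].split.offset = 0;
 *	segs[1].split.mp = payload_pool;   rest of the packet
 *	segs[1].split.length = 0;          0 means "use the pool buffer size"
 *	segs[1].split.offset = 0;
 *
 *	rxconf.rx_seg = segs;
 *	rxconf.rx_nseg = 2;
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, socket_id,
 *				     &rxconf, NULL);    mp must be NULL here
 *
 * The scatter offload must also be enabled at the port level, per the
 * description above.
 */
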
int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mp != NULL) {
		/* Single pool configuration check. */
		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
			RTE_ETHDEV_LOG(ERR,
				       "Ambiguous segment configuration\n");
			return -EINVAL;
		}
		/*
		 * Check the size of the mbuf data buffer, this value
		 * must be provided in the private data of the memory pool.
		 * First check that the memory pool(s) has a valid private data.
		 */
		if (mp->private_data_size <
				sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
				mp->name, mp->private_data_size,
				(unsigned int)
				sizeof(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
		if (mbp_buf_size < dev_info.min_rx_bufsize +
				   RTE_PKTMBUF_HEADROOM) {
			RTE_ETHDEV_LOG(ERR,
				       "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
				       mp->name, mbp_buf_size,
				       RTE_PKTMBUF_HEADROOM +
				       dev_info.min_rx_bufsize,
				       RTE_PKTMBUF_HEADROOM,
				       dev_info.min_rx_bufsize);
			return -EINVAL;
		}
	} else {
		const struct rte_eth_rxseg_split *rx_seg;
		uint16_t n_seg;

		/* Extended multi-segment configuration check. */
		if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
			RTE_ETHDEV_LOG(ERR,
				       "Memory pool is null and no extended configuration provided\n");
			return -EINVAL;
		}

		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
		n_seg = rx_conf->rx_nseg;

		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
			ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
							   &mbp_buf_size,
							   &dev_info);
			if (ret != 0)
				return ret;
		} else {
			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
			return -EINVAL;
		}
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->rx_queue_state[rx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, rx_queue_id, local_conf.offloads,
			dev_info.rx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size =
				dev->data->dev_conf.rxmode.max_rx_pkt_len;
		int ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				dev->data->dev_conf.rxmode.max_rx_pkt_len,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			return ret;
	}

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, &local_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
		rx_conf, ret);
	return eth_err(port_id, ret);
}

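/*
 * Worked example for the per-queue offload handling above (an illustrative
 * reading of the existing logic, not extra functionality): if
 * rte_eth_dev_configure() enabled DEV_RX_OFFLOAD_CHECKSUM port-wide and this
 * queue then requests DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_VLAN_STRIP,
 * local_conf.offloads is reduced to DEV_RX_OFFLOAD_VLAN_STRIP before the PMD
 * sees it, and the setup fails with -EINVAL unless VLAN stripping is listed
 * in dev_info.rx_queue_offload_capa as a per-queue capable offload.
 */
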
int
rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
			       uint16_t nb_rx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	void **rxq;
	int i;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}
	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
				-ENOTSUP);
	/* if nb_rx_desc is zero use max number of desc from the driver. */
	if (nb_rx_desc == 0)
		nb_rx_desc = cap.max_nb_desc;
	if (nb_rx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
			nb_rx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_rx_2_tx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
			conf->peer_count, cap.max_rx_2_tx);
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues max is %d",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id] != NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}
	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
						      nb_rx_desc, conf);
	if (ret == 0)
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	return eth_err(port_id, ret);
}

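/*
 * Illustrative sketch (not part of this file): one way an application might
 * set up a single Rx hairpin queue whose peer is Tx queue 0 of the same port.
 * The peer field names are taken from the hairpin API as recalled here and
 * should be treated as assumptions, as are the queue ids.
 *
 *	struct rte_eth_hairpin_conf hairpin_conf = {
 *		.peer_count = 1,
 *	};
 *	int ret;
 *
 *	hairpin_conf.peers[0].port = port_id;
 *	hairpin_conf.peers[0].queue = 0;
 *	ret = rte_eth_rx_hairpin_queue_setup(port_id, rx_queue_id,
 *					     0, &hairpin_conf);
 *
 * Passing 0 descriptors lets the function above substitute the PMD maximum
 * reported in rte_eth_hairpin_cap.max_nb_desc.
 */
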
int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf local_conf;
	void **txq;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Use default specified by driver, if nb_tx_desc is zero */
	if (nb_tx_desc == 0) {
		nb_tx_desc = dev_info.default_txportconf.ring_size;
		/* If driver default is zero, fall back on EAL default */
		if (nb_tx_desc == 0)
			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
	}
	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
			dev_info.tx_desc_lim.nb_min,
			dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->tx_queue_state[tx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	txq = dev->data->tx_queues;
	if (txq[tx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
		txq[tx_queue_id] = NULL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	local_conf = *tx_conf;

	/*
	 * If an offloading has already been enabled in
	 * rte_eth_dev_configure(), it has been enabled on all queues,
	 * so there is no need to enable it in this queue again.
	 * The local_conf.offloads input to underlying PMD only carries
	 * those offloadings which are only enabled on this queue and
	 * not enabled on all queues.
	 */
	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;

	/*
	 * New added offloadings for this queue are those not enabled in
	 * rte_eth_dev_configure() and they must be per-queue type.
	 * A pure per-port offloading can't be enabled on a queue while
	 * disabled on another queue. A pure per-port offloading can't
	 * be enabled for any queue as new added one if it hasn't been
	 * enabled in rte_eth_dev_configure().
	 */
	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
	     local_conf.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
			port_id, tx_queue_id, local_conf.offloads,
			dev_info.tx_queue_offload_capa,
			__func__);
		return -EINVAL;
	}

	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
}

int
rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
			       uint16_t nb_tx_desc,
			       const struct rte_eth_hairpin_conf *conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_hairpin_cap cap;
	void **txq;
	int i;
	int count;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
		return -EINVAL;
	}
	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
				-ENOTSUP);
	/* if nb_tx_desc is zero use max number of desc from the driver. */
	if (nb_tx_desc == 0)
		nb_tx_desc = cap.max_nb_desc;
	if (nb_tx_desc > cap.max_nb_desc) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
			nb_tx_desc, cap.max_nb_desc);
		return -EINVAL;
	}
	if (conf->peer_count > cap.max_tx_2_rx) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
			conf->peer_count, cap.max_tx_2_rx);
		return -EINVAL;
	}
	if (conf->peer_count == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
			conf->peer_count);
		return -EINVAL;
	}
	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
	     cap.max_nb_queues != UINT16_MAX; i++) {
		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
			count++;
	}
	if (count > cap.max_nb_queues) {
		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues max is %d",
			       cap.max_nb_queues);
		return -EINVAL;
	}
	if (dev->data->dev_started)
		return -EBUSY;
	txq = dev->data->tx_queues;
	if (txq[tx_queue_id] != NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
		txq[tx_queue_id] = NULL;
	}
	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
		(dev, tx_queue_id, nb_tx_desc, conf);
	if (ret == 0)
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_HAIRPIN;
	return eth_err(port_id, ret);
}

/*
 * Hairpin bind/unbind: in single port hairpin mode the Tx and Rx peer queues
 * belong to the same device and are bound automatically when the device
 * starts. In two ports hairpin mode the ports are usually initialized
 * serially, so the earlier port cannot bind while its peer is not yet
 * configured; only the hairpin queues are set up at start time unless they
 * carry the auto-bind attribute. Once both ports of a pair are started,
 * rte_eth_hairpin_bind() connects all Tx queues of the egress port to the
 * Rx queues of the peer port, and rte_eth_hairpin_unbind() disconnects them
 * again; the latter should only be called before the device is closed, with
 * all egress/ingress pairs of a port taken into account (notably on hot
 * unplug).
 */
int
rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];
	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
			       " to Rx %d (%d - all ports)\n",
			       tx_port, rx_port, RTE_MAX_ETHPORTS);

	return ret;
}

int
rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
	dev = &rte_eth_devices[tx_port];
	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
	if (ret != 0)
		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
			       " from Rx %d (%d - all ports)\n",
			       tx_port, rx_port, RTE_MAX_ETHPORTS);

	return ret;
}

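/*
 * Illustrative sketch (not part of this file): two ports hairpin bring-up
 * with manual binding, assuming both ports already have hairpin queues set up
 * with manual-bind peers. Port numbers are assumptions of the example and the
 * return codes are checked in a real application.
 *
 *	ret = rte_eth_dev_start(port0);
 *	ret = rte_eth_dev_start(port1);
 *
 *	bind Tx of port0 to Rx of port1, then the reverse direction
 *	ret = rte_eth_hairpin_bind(port0, port1);
 *	ret = rte_eth_hairpin_bind(port1, port0);
 *
 *	before closing either port, undo both directions
 *	rte_eth_hairpin_unbind(port0, port1);
 *	rte_eth_hairpin_unbind(port1, port0);
 */
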
int
rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
			       size_t len, uint32_t direction)
{
	struct rte_eth_dev *dev;
	int ret;

	if (peer_ports == NULL || len == 0)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
				-ENOTSUP);

	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
						      len, direction);
	if (ret < 0)
		RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n",
			       port_id, direction ? "Rx" : "Tx");

	return ret;
}

2016-03-10 18:19:34 +01:00
|
|
|
void
|
|
|
|
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
|
|
|
|
void *userdata __rte_unused)
|
|
|
|
{
|
2020-09-30 14:27:18 -07:00
|
|
|
rte_pktmbuf_free_bulk(pkts, unsent);
|
2016-03-10 18:19:34 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
|
|
|
|
void *userdata)
|
|
|
|
{
|
|
|
|
uint64_t *count = userdata;
|
|
|
|
|
2020-09-30 14:27:18 -07:00
|
|
|
rte_pktmbuf_free_bulk(pkts, unsent);
|
2016-03-10 18:19:34 +01:00
|
|
|
*count += unsent;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
|
|
|
|
buffer_tx_error_fn cbfn, void *userdata)
|
|
|
|
{
|
|
|
|
buffer->error_callback = cbfn;
|
|
|
|
buffer->error_userdata = userdata;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
|
|
|
|
{
|
2016-04-07 13:46:32 +02:00
|
|
|
int ret = 0;
|
|
|
|
|
2016-03-10 18:19:34 +01:00
|
|
|
if (buffer == NULL)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
buffer->size = size;
|
2016-04-07 13:46:32 +02:00
|
|
|
if (buffer->error_callback == NULL) {
|
|
|
|
ret = rte_eth_tx_buffer_set_err_callback(
|
|
|
|
buffer, rte_eth_tx_buffer_drop_callback, NULL);
|
|
|
|
}
|
2016-03-10 18:19:34 +01:00
|
|
|
|
2016-04-07 13:46:32 +02:00
|
|
|
return ret;
|
2016-03-10 18:19:34 +01:00
|
|
|
}
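/*
 * Editorial note: a minimal sketch (not part of the library) of setting up
 * a Tx buffer with the count callback defined above. The example_* names
 * are hypothetical; the buffer is sized with RTE_ETH_TX_BUFFER_SIZE() and
 * allocated from the DPDK heap.
 */
static __rte_unused struct rte_eth_dev_tx_buffer *
example_tx_buffer_setup(uint16_t burst_size, uint64_t *drop_counter)
{
	struct rte_eth_dev_tx_buffer *buffer;

	buffer = rte_zmalloc("example_tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(burst_size), 0);
	if (buffer == NULL)
		return NULL;

	if (rte_eth_tx_buffer_init(buffer, burst_size) != 0 ||
	    rte_eth_tx_buffer_set_err_callback(buffer,
			rte_eth_tx_buffer_count_callback, drop_counter) != 0) {
		rte_free(buffer);
		return NULL;
	}
	return buffer;
}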
|
|
|
|
|
2017-03-24 14:55:53 -04:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
|
2017-03-24 14:55:53 -04:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
|
2018-01-20 21:12:22 +00:00
|
|
|
int ret;
|
2017-03-24 14:55:53 -04:00
|
|
|
|
|
|
|
	/* Validate input data; bail if not valid or not supported. */
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
|
|
|
|
|
|
|
|
/* Call driver to free pending mbufs. */
|
2018-01-20 21:12:22 +00:00
|
|
|
ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
|
|
|
|
free_cnt);
|
|
|
|
return eth_err(port_id, ret);
|
2017-03-24 14:55:53 -04:00
|
|
|
}
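/*
 * Editorial note: an illustrative call sequence (not part of the library)
 * for reclaiming transmitted mbufs on a queue; passing 0 as free_cnt asks
 * the driver to free as many as possible.
 */
static __rte_unused void
example_tx_cleanup(uint16_t port_id, uint16_t queue_id)
{
	int ret = rte_eth_tx_done_cleanup(port_id, queue_id, 0);

	if (ret < 0)
		printf("tx_done_cleanup on port %u queue %u failed: %d\n",
				port_id, queue_id, ret);
}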
|
|
|
|
|
2019-09-14 12:37:21 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_promiscuous_enable(uint16_t port_id)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2019-09-14 12:37:25 +01:00
|
|
|
int diag = 0;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2019-09-14 12:37:21 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2019-10-21 13:22:37 +01:00
|
|
|
if (dev->data->promiscuous == 1)
|
|
|
|
return 0;
|
|
|
|
|
2019-09-14 12:37:21 +01:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
|
2019-09-14 12:37:25 +01:00
|
|
|
|
2019-10-21 13:22:37 +01:00
|
|
|
diag = (*dev->dev_ops->promiscuous_enable)(dev);
|
|
|
|
dev->data->promiscuous = (diag == 0) ? 1 : 0;
|
2019-09-14 12:37:21 +01:00
|
|
|
|
2019-09-14 12:37:24 +01:00
|
|
|
return eth_err(port_id, diag);
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2019-09-14 12:37:21 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_promiscuous_disable(uint16_t port_id)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2019-09-14 12:37:25 +01:00
|
|
|
int diag = 0;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2019-09-14 12:37:21 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2019-10-21 13:22:37 +01:00
|
|
|
if (dev->data->promiscuous == 0)
|
|
|
|
return 0;
|
|
|
|
|
2019-09-14 12:37:21 +01:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
|
2019-09-14 12:37:25 +01:00
|
|
|
|
2019-10-21 13:22:37 +01:00
|
|
|
dev->data->promiscuous = 0;
|
|
|
|
diag = (*dev->dev_ops->promiscuous_disable)(dev);
|
|
|
|
if (diag != 0)
|
|
|
|
dev->data->promiscuous = 1;
|
2019-09-14 12:37:21 +01:00
|
|
|
|
2019-09-14 12:37:24 +01:00
|
|
|
return eth_err(port_id, diag);
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_promiscuous_get(uint16_t port_id)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
return dev->data->promiscuous;
|
|
|
|
}
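/*
 * Editorial note: a small sketch (not part of the library) toggling
 * promiscuous mode and reading the result back; helper name hypothetical.
 */
static __rte_unused void
example_promiscuous_toggle(uint16_t port_id, int enable)
{
	int ret = enable ? rte_eth_promiscuous_enable(port_id) :
			rte_eth_promiscuous_disable(port_id);

	if (ret != 0)
		printf("promiscuous update on port %u failed: %d\n",
				port_id, ret);
	else
		printf("port %u promiscuous mode: %d\n", port_id,
				rte_eth_promiscuous_get(port_id));
}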
|
|
|
|
|
2019-09-24 13:56:07 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_allmulticast_enable(uint16_t port_id)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2019-09-24 13:56:10 +01:00
|
|
|
int diag;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2019-09-24 13:56:07 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2019-09-24 13:56:11 +01:00
|
|
|
if (dev->data->all_multicast == 1)
|
|
|
|
return 0;
|
|
|
|
|
2019-09-24 13:56:07 +01:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
|
2019-09-24 13:56:10 +01:00
|
|
|
diag = (*dev->dev_ops->allmulticast_enable)(dev);
|
2019-09-24 13:56:11 +01:00
|
|
|
dev->data->all_multicast = (diag == 0) ? 1 : 0;
|
2019-09-24 13:56:07 +01:00
|
|
|
|
2019-09-24 13:56:10 +01:00
|
|
|
return eth_err(port_id, diag);
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2019-09-24 13:56:07 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_allmulticast_disable(uint16_t port_id)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2019-09-24 13:56:10 +01:00
|
|
|
int diag;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2019-09-24 13:56:07 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2019-09-24 13:56:11 +01:00
|
|
|
if (dev->data->all_multicast == 0)
|
|
|
|
return 0;
|
|
|
|
|
2019-09-24 13:56:07 +01:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev->data->all_multicast = 0;
|
2019-09-24 13:56:10 +01:00
|
|
|
diag = (*dev->dev_ops->allmulticast_disable)(dev);
|
|
|
|
if (diag != 0)
|
2019-09-24 13:56:11 +01:00
|
|
|
dev->data->all_multicast = 1;
|
2019-09-24 13:56:07 +01:00
|
|
|
|
2019-09-24 13:56:10 +01:00
|
|
|
return eth_err(port_id, diag);
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_allmulticast_get(uint16_t port_id)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
return dev->data->all_multicast;
|
|
|
|
}
|
|
|
|
|
2019-09-10 09:25:42 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2019-09-10 09:25:42 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2018-04-10 09:16:31 +03:00
|
|
|
if (dev->data->dev_conf.intr_conf.lsc &&
|
|
|
|
dev->data->dev_started)
|
2018-01-25 18:01:38 -08:00
|
|
|
rte_eth_linkstatus_get(dev, eth_link);
|
2012-09-04 13:54:00 +01:00
|
|
|
else {
|
2019-09-10 09:25:42 +01:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
|
2012-09-04 13:54:00 +01:00
|
|
|
(*dev->dev_ops->link_update)(dev, 1);
|
|
|
|
*eth_link = dev->data->dev_link;
|
|
|
|
}
|
2019-09-10 09:25:42 +01:00
|
|
|
|
|
|
|
return 0;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2019-09-10 09:25:42 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2019-09-10 09:25:42 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2018-04-10 09:16:31 +03:00
|
|
|
if (dev->data->dev_conf.intr_conf.lsc &&
|
|
|
|
dev->data->dev_started)
|
2018-01-25 18:01:38 -08:00
|
|
|
rte_eth_linkstatus_get(dev, eth_link);
|
2012-09-04 13:54:00 +01:00
|
|
|
else {
|
2019-09-10 09:25:42 +01:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
|
2012-09-04 13:54:00 +01:00
|
|
|
(*dev->dev_ops->link_update)(dev, 0);
|
|
|
|
*eth_link = dev->data->dev_link;
|
|
|
|
}
|
2019-09-10 09:25:42 +01:00
|
|
|
|
|
|
|
return 0;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2020-09-15 22:06:57 +03:00
|
|
|
const char *
|
|
|
|
rte_eth_link_speed_to_str(uint32_t link_speed)
|
|
|
|
{
|
|
|
|
switch (link_speed) {
|
|
|
|
case ETH_SPEED_NUM_NONE: return "None";
|
|
|
|
case ETH_SPEED_NUM_10M: return "10 Mbps";
|
|
|
|
case ETH_SPEED_NUM_100M: return "100 Mbps";
|
|
|
|
case ETH_SPEED_NUM_1G: return "1 Gbps";
|
|
|
|
case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
|
|
|
|
case ETH_SPEED_NUM_5G: return "5 Gbps";
|
|
|
|
case ETH_SPEED_NUM_10G: return "10 Gbps";
|
|
|
|
case ETH_SPEED_NUM_20G: return "20 Gbps";
|
|
|
|
case ETH_SPEED_NUM_25G: return "25 Gbps";
|
|
|
|
case ETH_SPEED_NUM_40G: return "40 Gbps";
|
|
|
|
case ETH_SPEED_NUM_50G: return "50 Gbps";
|
|
|
|
case ETH_SPEED_NUM_56G: return "56 Gbps";
|
|
|
|
case ETH_SPEED_NUM_100G: return "100 Gbps";
|
|
|
|
case ETH_SPEED_NUM_200G: return "200 Gbps";
|
|
|
|
case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
|
|
|
|
default: return "Invalid";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
|
|
|
|
{
|
|
|
|
if (eth_link->link_status == ETH_LINK_DOWN)
|
|
|
|
return snprintf(str, len, "Link down");
|
|
|
|
else
|
|
|
|
return snprintf(str, len, "Link up at %s %s %s",
|
|
|
|
rte_eth_link_speed_to_str(eth_link->link_speed),
|
|
|
|
(eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
|
|
|
|
"FDX" : "HDX",
|
|
|
|
(eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
|
|
|
|
"Autoneg" : "Fixed");
|
|
|
|
}
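/*
 * Editorial note: an application-side sketch (not part of the library)
 * combining the non-blocking link query with the string helpers above.
 * The 60-byte buffer size is an arbitrary choice for the example.
 */
static __rte_unused void
example_print_link(uint16_t port_id)
{
	struct rte_eth_link link;
	char text[60];

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	rte_eth_link_to_str(text, sizeof(text), &link);
	printf("port %u: %s\n", port_id, text);
}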
|
|
|
|
|
2014-11-07 09:31:51 -08:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-02-26 04:32:18 +09:00
|
|
|
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2013-09-18 12:00:00 +02:00
|
|
|
memset(stats, 0, sizeof(*stats));
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
|
2012-09-04 13:54:00 +01:00
|
|
|
stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2017-09-20 10:11:30 -04:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_stats_reset(uint16_t port_id)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2019-09-06 15:34:54 +01:00
|
|
|
int ret;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2017-09-20 10:11:30 -04:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2017-09-20 10:11:30 -04:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
|
2019-09-06 15:34:54 +01:00
|
|
|
ret = (*dev->dev_ops->stats_reset)(dev);
|
|
|
|
if (ret != 0)
|
|
|
|
return eth_err(port_id, ret);
|
|
|
|
|
2015-11-27 13:31:06 +03:00
|
|
|
dev->data->rx_mbuf_alloc_failed = 0;
|
2017-09-20 10:11:30 -04:00
|
|
|
|
|
|
|
return 0;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
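/*
 * Editorial note: a minimal sketch (not part of the library) of reading the
 * basic counters and clearing them afterwards; error handling abbreviated.
 */
static __rte_unused void
example_dump_and_reset_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;
	printf("port %u: rx %" PRIu64 " tx %" PRIu64 " rx-missed %" PRIu64
			" rx-nombuf %" PRIu64 "\n",
			port_id, stats.ipackets, stats.opackets,
			stats.imissed, stats.rx_nombuf);
	rte_eth_stats_reset(port_id);
}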
|
|
|
|
|
2017-10-19 23:39:55 +01:00
|
|
|
static inline int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
|
2017-10-19 23:39:55 +01:00
|
|
|
{
|
|
|
|
uint16_t nb_rxqs, nb_txqs;
|
|
|
|
int count;
|
|
|
|
|
|
|
|
nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
|
|
|
|
nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
|
|
|
|
|
|
|
|
count = RTE_NB_STATS;
|
2020-10-14 03:26:47 +01:00
|
|
|
if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
|
|
|
|
count += nb_rxqs * RTE_NB_RXQ_STATS;
|
|
|
|
count += nb_txqs * RTE_NB_TXQ_STATS;
|
|
|
|
}
|
2017-10-19 23:39:55 +01:00
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2016-06-15 16:25:27 +01:00
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_get_xstats_count(uint16_t port_id)
|
2016-06-15 16:25:27 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
int count;
|
|
|
|
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2016-06-15 16:25:27 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2017-04-27 16:42:36 +02:00
|
|
|
if (dev->dev_ops->xstats_get_names_by_id != NULL) {
|
|
|
|
count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
|
|
|
|
NULL, 0);
|
|
|
|
if (count < 0)
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, count);
|
2017-04-27 16:42:36 +02:00
|
|
|
}
|
2016-06-15 16:25:27 +01:00
|
|
|
if (dev->dev_ops->xstats_get_names != NULL) {
|
|
|
|
count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
|
|
|
|
if (count < 0)
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, count);
|
2016-06-15 16:25:27 +01:00
|
|
|
} else
|
|
|
|
count = 0;
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-10-19 23:39:55 +01:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
count += eth_dev_get_xstats_basic_count(dev);
|
2017-10-19 23:39:55 +01:00
|
|
|
|
2016-06-15 16:25:27 +01:00
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2017-04-27 16:42:37 +02:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
|
2017-04-27 16:42:37 +02:00
|
|
|
uint64_t *id)
|
|
|
|
{
|
|
|
|
int cnt_xstats, idx_xstat;
|
|
|
|
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
|
|
|
|
|
|
|
if (!id) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
|
2017-04-27 16:42:37 +02:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!xstat_name) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
|
2017-04-27 16:42:37 +02:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get count */
|
|
|
|
cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
|
|
|
|
if (cnt_xstats < 0) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
|
2017-04-27 16:42:37 +02:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get id-name lookup table */
|
|
|
|
struct rte_eth_xstat_name xstats_names[cnt_xstats];
|
|
|
|
|
|
|
|
if (cnt_xstats != rte_eth_xstats_get_names_by_id(
|
|
|
|
port_id, xstats_names, cnt_xstats, NULL)) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
|
2017-04-27 16:42:37 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
|
|
|
|
if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
|
|
|
|
*id = idx_xstat;
|
|
|
|
return 0;
|
|
|
|
		}
|
|
|
|
}
|
|
|
|
|
|
|
|
return -EINVAL;
|
|
|
|
}
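/*
 * Editorial note: an illustrative lookup (not part of the library) of a
 * single extended statistic by name, using the id/value pair of calls
 * above and below; the name passed in is whatever the caller wants,
 * e.g. "rx_good_packets".
 */
static __rte_unused int
example_read_xstat_by_name(uint16_t port_id, const char *name,
		uint64_t *value)
{
	uint64_t id;
	int ret;

	ret = rte_eth_xstats_get_id_by_name(port_id, name, &id);
	if (ret != 0)
		return ret;
	ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
	return ret == 1 ? 0 : -1;
}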
|
|
|
|
|
2017-12-05 16:25:06 -08:00
|
|
|
/* retrieve basic stats names */
|
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_basic_stats_get_names(struct rte_eth_dev *dev,
|
2017-12-05 16:25:06 -08:00
|
|
|
struct rte_eth_xstat_name *xstats_names)
|
|
|
|
{
|
|
|
|
int cnt_used_entries = 0;
|
|
|
|
uint32_t idx, id_queue;
|
|
|
|
uint16_t num_q;
|
|
|
|
|
|
|
|
for (idx = 0; idx < RTE_NB_STATS; idx++) {
|
2019-04-03 15:45:04 +01:00
|
|
|
strlcpy(xstats_names[cnt_used_entries].name,
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_stats_strings[idx].name,
|
2019-04-03 15:45:04 +01:00
|
|
|
sizeof(xstats_names[0].name));
|
2017-12-05 16:25:06 -08:00
|
|
|
cnt_used_entries++;
|
|
|
|
}
|
2020-10-14 03:26:47 +01:00
|
|
|
|
|
|
|
if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
|
|
|
|
return cnt_used_entries;
|
|
|
|
|
2017-12-05 16:25:06 -08:00
|
|
|
num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
|
|
|
|
for (id_queue = 0; id_queue < num_q; id_queue++) {
|
|
|
|
for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
|
|
|
|
snprintf(xstats_names[cnt_used_entries].name,
|
|
|
|
sizeof(xstats_names[0].name),
|
2020-10-07 23:48:48 +02:00
|
|
|
"rx_q%u_%s",
|
2020-10-13 17:56:58 +01:00
|
|
|
id_queue, eth_dev_rxq_stats_strings[idx].name);
|
2017-12-05 16:25:06 -08:00
|
|
|
cnt_used_entries++;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
|
|
|
|
for (id_queue = 0; id_queue < num_q; id_queue++) {
|
|
|
|
for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
|
|
|
|
snprintf(xstats_names[cnt_used_entries].name,
|
|
|
|
sizeof(xstats_names[0].name),
|
2020-10-07 23:48:48 +02:00
|
|
|
"tx_q%u_%s",
|
2020-10-13 17:56:58 +01:00
|
|
|
id_queue, eth_dev_txq_stats_strings[idx].name);
|
2017-12-05 16:25:06 -08:00
|
|
|
cnt_used_entries++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return cnt_used_entries;
|
|
|
|
}
|
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
/* retrieve ethdev extended statistics names */
|
2017-04-27 16:42:36 +02:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_xstats_get_names_by_id(uint16_t port_id,
|
2017-04-27 16:42:36 +02:00
|
|
|
struct rte_eth_xstat_name *xstats_names, unsigned int size,
|
|
|
|
uint64_t *ids)
|
|
|
|
{
|
2017-10-12 14:31:28 +01:00
|
|
|
struct rte_eth_xstat_name *xstats_names_copy;
|
2017-10-18 23:51:43 +01:00
|
|
|
unsigned int no_basic_stat_requested = 1;
|
2017-12-05 16:25:07 -08:00
|
|
|
unsigned int no_ext_stat_requested = 1;
|
2017-10-12 14:31:28 +01:00
|
|
|
unsigned int expected_entries;
|
2017-12-05 16:25:07 -08:00
|
|
|
unsigned int basic_count;
|
2017-10-12 14:31:28 +01:00
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
unsigned int i;
|
2017-10-20 00:39:52 +01:00
|
|
|
int ret;
|
2017-10-12 14:31:28 +01:00
|
|
|
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
basic_count = eth_dev_get_xstats_basic_count(dev);
|
|
|
|
ret = eth_dev_get_xstats_count(port_id);
|
2017-10-20 00:39:52 +01:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
expected_entries = (unsigned int)ret;
|
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
/* Return max number of stats if no ids given */
|
2017-04-27 16:42:36 +02:00
|
|
|
if (!ids) {
|
2017-10-12 14:31:28 +01:00
|
|
|
if (!xstats_names)
|
|
|
|
return expected_entries;
|
|
|
|
else if (xstats_names && size < expected_entries)
|
|
|
|
return expected_entries;
|
|
|
|
}
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
if (ids && !xstats_names)
|
|
|
|
return -EINVAL;
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-10-18 23:51:43 +01:00
|
|
|
if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
|
|
|
|
uint64_t ids_copy[size];
|
|
|
|
|
|
|
|
for (i = 0; i < size; i++) {
|
|
|
|
if (ids[i] < basic_count) {
|
|
|
|
no_basic_stat_requested = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert ids to xstats ids that PMD knows.
|
|
|
|
* ids known by user are basic + extended stats.
|
|
|
|
*/
|
|
|
|
ids_copy[i] = ids[i] - basic_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (no_basic_stat_requested)
|
|
|
|
return (*dev->dev_ops->xstats_get_names_by_id)(dev,
|
|
|
|
xstats_names, ids_copy, size);
|
|
|
|
}
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
/* Retrieve all stats */
|
|
|
|
if (!ids) {
|
|
|
|
int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
|
|
|
|
expected_entries);
|
|
|
|
if (num_stats < 0 || num_stats > (int)expected_entries)
|
|
|
|
return num_stats;
|
|
|
|
else
|
|
|
|
return expected_entries;
|
2017-04-27 16:42:36 +02:00
|
|
|
}
|
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
xstats_names_copy = calloc(expected_entries,
|
|
|
|
sizeof(struct rte_eth_xstat_name));
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
if (!xstats_names_copy) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
|
2017-10-12 14:31:28 +01:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2017-12-05 16:25:07 -08:00
|
|
|
if (ids) {
|
|
|
|
for (i = 0; i < size; i++) {
|
2018-02-06 16:06:59 +00:00
|
|
|
if (ids[i] >= basic_count) {
|
2017-12-05 16:25:07 -08:00
|
|
|
no_ext_stat_requested = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
/* Fill xstats_names_copy structure */
|
2017-12-05 16:25:07 -08:00
|
|
|
if (ids && no_ext_stat_requested) {
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_basic_stats_get_names(dev, xstats_names_copy);
|
2017-12-05 16:25:07 -08:00
|
|
|
} else {
|
2018-01-20 21:12:22 +00:00
|
|
|
ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
|
2017-12-05 16:25:07 -08:00
|
|
|
expected_entries);
|
2018-01-20 21:12:22 +00:00
|
|
|
if (ret < 0) {
|
|
|
|
free(xstats_names_copy);
|
|
|
|
return ret;
|
|
|
|
}
|
2017-12-05 16:25:07 -08:00
|
|
|
}
|
2017-10-12 14:31:28 +01:00
|
|
|
|
|
|
|
/* Filter stats */
|
|
|
|
for (i = 0; i < size; i++) {
|
|
|
|
if (ids[i] >= expected_entries) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
|
2017-04-27 16:42:36 +02:00
|
|
|
free(xstats_names_copy);
|
|
|
|
return -1;
|
|
|
|
}
|
2017-10-12 14:31:28 +01:00
|
|
|
xstats_names[i] = xstats_names_copy[ids[i]];
|
2017-04-27 16:42:36 +02:00
|
|
|
}
|
2017-10-12 14:31:28 +01:00
|
|
|
|
|
|
|
free(xstats_names_copy);
|
|
|
|
return size;
|
2017-04-27 16:42:36 +02:00
|
|
|
}
|
|
|
|
|
2017-04-13 16:59:25 +02:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_xstats_get_names(uint16_t port_id,
|
2016-06-15 16:25:27 +01:00
|
|
|
struct rte_eth_xstat_name *xstats_names,
|
2017-04-13 16:59:24 +02:00
|
|
|
unsigned int size)
|
2016-06-15 16:25:27 +01:00
|
|
|
{
|
2017-04-27 16:42:35 +02:00
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
int cnt_used_entries;
|
|
|
|
int cnt_expected_entries;
|
|
|
|
int cnt_driver_entries;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
cnt_expected_entries = eth_dev_get_xstats_count(port_id);
|
2017-04-27 16:42:35 +02:00
|
|
|
if (xstats_names == NULL || cnt_expected_entries < 0 ||
|
|
|
|
(int)size < cnt_expected_entries)
|
|
|
|
return cnt_expected_entries;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
/* port_id checked in eth_dev_get_xstats_count() */
|
2017-04-27 16:42:35 +02:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2016-06-15 16:25:27 +01:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
|
2016-07-08 16:44:24 +01:00
|
|
|
|
2017-04-27 16:42:35 +02:00
|
|
|
if (dev->dev_ops->xstats_get_names != NULL) {
|
|
|
|
/* If there are any driver-specific xstats, append them
|
|
|
|
		 * to the end of the list.
|
|
|
|
*/
|
|
|
|
cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
|
|
|
|
dev,
|
|
|
|
xstats_names + cnt_used_entries,
|
|
|
|
size - cnt_used_entries);
|
|
|
|
if (cnt_driver_entries < 0)
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, cnt_driver_entries);
|
2017-04-27 16:42:35 +02:00
|
|
|
cnt_used_entries += cnt_driver_entries;
|
|
|
|
}
|
|
|
|
|
|
|
|
return cnt_used_entries;
|
2016-06-15 16:25:27 +01:00
|
|
|
}
|
|
|
|
|
2017-12-05 16:25:06 -08:00
|
|
|
|
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
|
2017-12-05 16:25:06 -08:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
struct rte_eth_stats eth_stats;
|
|
|
|
unsigned int count = 0, i, q;
|
|
|
|
uint64_t val, *stats_ptr;
|
|
|
|
uint16_t nb_rxqs, nb_txqs;
|
2018-01-20 21:12:22 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = rte_eth_stats_get(port_id, ð_stats);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2017-12-05 16:25:06 -08:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
|
|
|
nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
|
|
|
|
nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
|
|
|
|
|
|
|
|
/* global stats */
|
|
|
|
for (i = 0; i < RTE_NB_STATS; i++) {
|
|
|
|
stats_ptr = RTE_PTR_ADD(ð_stats,
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_stats_strings[i].offset);
|
2017-12-05 16:25:06 -08:00
|
|
|
val = *stats_ptr;
|
|
|
|
xstats[count++].value = val;
|
|
|
|
}
|
|
|
|
|
2020-10-14 03:26:47 +01:00
|
|
|
if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
|
|
|
|
return count;
|
|
|
|
|
2017-12-05 16:25:06 -08:00
|
|
|
/* per-rxq stats */
|
|
|
|
for (q = 0; q < nb_rxqs; q++) {
|
|
|
|
for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
|
|
|
|
stats_ptr = RTE_PTR_ADD(ð_stats,
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_rxq_stats_strings[i].offset +
|
2017-12-05 16:25:06 -08:00
|
|
|
q * sizeof(uint64_t));
|
|
|
|
val = *stats_ptr;
|
|
|
|
xstats[count++].value = val;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* per-txq stats */
|
|
|
|
for (q = 0; q < nb_txqs; q++) {
|
|
|
|
for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
|
|
|
|
stats_ptr = RTE_PTR_ADD(ð_stats,
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_txq_stats_strings[i].offset +
|
2017-12-05 16:25:06 -08:00
|
|
|
q * sizeof(uint64_t));
|
|
|
|
val = *stats_ptr;
|
|
|
|
xstats[count++].value = val;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2014-07-23 14:28:53 +02:00
|
|
|
/* retrieve ethdev extended statistics */
|
2017-04-27 16:42:36 +02:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
|
2017-10-12 14:31:28 +01:00
|
|
|
uint64_t *values, unsigned int size)
|
2017-04-27 16:42:36 +02:00
|
|
|
{
|
2017-10-18 23:51:43 +01:00
|
|
|
unsigned int no_basic_stat_requested = 1;
|
2017-12-05 16:25:07 -08:00
|
|
|
unsigned int no_ext_stat_requested = 1;
|
2017-10-12 14:31:28 +01:00
|
|
|
unsigned int num_xstats_filled;
|
2017-12-05 16:25:07 -08:00
|
|
|
unsigned int basic_count;
|
2017-10-12 14:31:28 +01:00
|
|
|
uint16_t expected_entries;
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
unsigned int i;
|
2017-10-20 00:39:52 +01:00
|
|
|
int ret;
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2020-10-13 17:56:58 +01:00
|
|
|
ret = eth_dev_get_xstats_count(port_id);
|
2018-01-20 21:12:22 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
expected_entries = (uint16_t)ret;
|
2017-10-12 14:31:28 +01:00
|
|
|
struct rte_eth_xstat xstats[expected_entries];
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2020-10-13 17:56:58 +01:00
|
|
|
basic_count = eth_dev_get_xstats_basic_count(dev);
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
/* Return max number of stats if no ids given */
|
|
|
|
if (!ids) {
|
|
|
|
if (!values)
|
|
|
|
return expected_entries;
|
|
|
|
else if (values && size < expected_entries)
|
|
|
|
return expected_entries;
|
|
|
|
}
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
if (ids && !values)
|
|
|
|
return -EINVAL;
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-10-18 23:51:43 +01:00
|
|
|
if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
|
2020-10-13 17:56:58 +01:00
|
|
|
unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
|
2017-10-18 23:51:43 +01:00
|
|
|
uint64_t ids_copy[size];
|
|
|
|
|
|
|
|
for (i = 0; i < size; i++) {
|
|
|
|
if (ids[i] < basic_count) {
|
|
|
|
no_basic_stat_requested = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert ids to xstats ids that PMD knows.
|
|
|
|
* ids known by user are basic + extended stats.
|
|
|
|
*/
|
|
|
|
ids_copy[i] = ids[i] - basic_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (no_basic_stat_requested)
|
|
|
|
return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
|
|
|
|
values, size);
|
|
|
|
}
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-12-05 16:25:07 -08:00
|
|
|
if (ids) {
|
|
|
|
for (i = 0; i < size; i++) {
|
2018-02-06 16:06:59 +00:00
|
|
|
if (ids[i] >= basic_count) {
|
2017-12-05 16:25:07 -08:00
|
|
|
no_ext_stat_requested = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
/* Fill the xstats structure */
|
2017-12-05 16:25:07 -08:00
|
|
|
if (ids && no_ext_stat_requested)
|
2020-10-13 17:56:58 +01:00
|
|
|
ret = eth_basic_stats_get(port_id, xstats);
|
2017-12-05 16:25:07 -08:00
|
|
|
else
|
|
|
|
ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
|
|
|
|
|
2017-10-20 00:39:52 +01:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
num_xstats_filled = (unsigned int)ret;
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
/* Return all stats */
|
|
|
|
if (!ids) {
|
|
|
|
for (i = 0; i < num_xstats_filled; i++)
|
|
|
|
values[i] = xstats[i].value;
|
|
|
|
return expected_entries;
|
|
|
|
}
|
2017-04-27 16:42:36 +02:00
|
|
|
|
2017-10-12 14:31:28 +01:00
|
|
|
/* Filter stats */
|
|
|
|
for (i = 0; i < size; i++) {
|
|
|
|
if (ids[i] >= expected_entries) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
|
2017-04-27 16:42:36 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2017-10-12 14:31:28 +01:00
|
|
|
values[i] = xstats[ids[i]].value;
|
2017-04-27 16:42:36 +02:00
|
|
|
}
|
2017-10-12 14:31:28 +01:00
|
|
|
return size;
|
2017-04-27 16:42:36 +02:00
|
|
|
}
|
|
|
|
|
2014-07-23 14:28:53 +02:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
|
2017-04-13 16:59:24 +02:00
|
|
|
unsigned int n)
|
2014-07-23 14:28:53 +02:00
|
|
|
{
|
2017-04-27 16:42:35 +02:00
|
|
|
struct rte_eth_dev *dev;
|
2017-12-05 16:25:06 -08:00
|
|
|
unsigned int count = 0, i;
|
2017-04-27 16:42:35 +02:00
|
|
|
signed int xcount = 0;
|
|
|
|
uint16_t nb_rxqs, nb_txqs;
|
2018-01-20 21:12:22 +00:00
|
|
|
int ret;
|
2014-07-23 14:28:53 +02:00
|
|
|
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-02-26 04:32:18 +09:00
|
|
|
|
2017-04-27 16:42:35 +02:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2014-07-23 14:28:53 +02:00
|
|
|
|
2017-04-27 16:42:35 +02:00
|
|
|
nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
|
|
|
|
nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
|
2016-11-21 09:59:38 +00:00
|
|
|
|
2017-04-27 16:42:35 +02:00
|
|
|
/* Return generic statistics */
|
2020-10-14 03:26:47 +01:00
|
|
|
count = RTE_NB_STATS;
|
|
|
|
if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
|
|
|
|
count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
|
2014-07-23 14:28:53 +02:00
|
|
|
|
2017-04-27 16:42:35 +02:00
|
|
|
/* implemented by the driver */
|
|
|
|
if (dev->dev_ops->xstats_get != NULL) {
|
|
|
|
/* Retrieve the xstats from the driver at the end of the
|
|
|
|
* xstats struct.
|
|
|
|
*/
|
|
|
|
xcount = (*dev->dev_ops->xstats_get)(dev,
|
|
|
|
xstats ? xstats + count : NULL,
|
|
|
|
(n > count) ? n - count : 0);
|
2015-07-15 14:11:28 +01:00
|
|
|
|
2017-04-27 16:42:35 +02:00
|
|
|
if (xcount < 0)
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, xcount);
|
2015-07-15 14:11:28 +01:00
|
|
|
}
|
|
|
|
|
2017-04-27 16:42:35 +02:00
|
|
|
if (n < count + xcount || xstats == NULL)
|
|
|
|
return count + xcount;
|
2014-07-23 14:28:53 +02:00
|
|
|
|
2017-04-27 16:42:35 +02:00
|
|
|
/* now fill the xstats structure */
|
2020-10-13 17:56:58 +01:00
|
|
|
ret = eth_basic_stats_get(port_id, xstats);
|
2018-01-20 21:12:22 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
count = ret;
|
2014-07-23 14:28:53 +02:00
|
|
|
|
2017-04-27 16:42:35 +02:00
|
|
|
for (i = 0; i < count; i++)
|
2016-07-08 16:44:24 +01:00
|
|
|
xstats[i].id = i;
|
2017-04-27 16:42:35 +02:00
|
|
|
/* add an offset to driver-specific stats */
|
|
|
|
for ( ; i < count + xcount; i++)
|
|
|
|
xstats[i].id += count;
|
2016-07-08 16:44:24 +01:00
|
|
|
|
2017-04-27 16:42:35 +02:00
|
|
|
return count + xcount;
|
2014-07-23 14:28:53 +02:00
|
|
|
}
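/*
 * Editorial note: a sketch (not part of the library) of the usual two-pass
 * pattern for dumping all extended statistics: query the count first, then
 * fetch names and values into caller-allocated arrays.
 */
static __rte_unused void
example_dump_all_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;
	int i, n;

	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;
	names = calloc(n, sizeof(*names));
	values = calloc(n, sizeof(*values));
	if (names != NULL && values != NULL &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, values, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
					names[values[i].id].name,
					values[i].value);
	}
	free(names);
	free(values);
}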
|
|
|
|
|
|
|
|
/* reset ethdev extended statistics */
|
2019-09-06 15:34:53 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_xstats_reset(uint16_t port_id)
|
2014-07-23 14:28:53 +02:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2019-09-06 15:34:53 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-07-23 14:28:53 +02:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
|
|
|
/* implemented by the driver */
|
2019-09-06 15:34:54 +01:00
|
|
|
if (dev->dev_ops->xstats_reset != NULL)
|
|
|
|
return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
|
2014-07-23 14:28:53 +02:00
|
|
|
|
|
|
|
/* fallback to default */
|
2019-09-06 15:34:53 +01:00
|
|
|
return rte_eth_stats_reset(port_id);
|
2014-07-23 14:28:53 +02:00
|
|
|
}
|
2012-12-20 00:00:00 +01:00
|
|
|
|
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
|
|
|
|
uint8_t stat_idx, uint8_t is_rx)
|
2012-12-20 00:00:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-02-26 04:32:18 +09:00
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
|
2018-07-11 14:11:59 +05:30
|
|
|
|
|
|
|
if (is_rx && (queue_id >= dev->data->nb_rx_queues))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
return (*dev->dev_ops->queue_stats_mapping_set)
|
|
|
|
(dev, queue_id, stat_idx, is_rx);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
|
2012-12-20 00:00:00 +01:00
|
|
|
uint8_t stat_idx)
|
|
|
|
{
|
2020-10-13 17:56:58 +01:00
|
|
|
return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
|
|
|
|
tx_queue_id,
|
2018-01-20 21:12:22 +00:00
|
|
|
stat_idx, STAT_QMAP_TX));
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
|
2012-12-20 00:00:00 +01:00
|
|
|
uint8_t stat_idx)
|
|
|
|
{
|
2020-10-13 17:56:58 +01:00
|
|
|
return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
|
|
|
|
rx_queue_id,
|
2018-01-20 21:12:22 +00:00
|
|
|
stat_idx, STAT_QMAP_RX));
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
|
|
|
|
2017-01-16 18:48:27 +08:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
|
2017-01-16 18:48:27 +08:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
|
|
|
|
fw_version, fw_size));
|
2017-01-16 18:48:27 +08:00
|
|
|
}
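/*
 * Editorial note: an illustrative query (not part of the library) of the
 * firmware version string; the buffer size is an arbitrary example value.
 */
static __rte_unused void
example_print_fw_version(uint16_t port_id)
{
	char fw_version[128];

	if (rte_eth_dev_fw_version_get(port_id, fw_version,
			sizeof(fw_version)) == 0)
		printf("port %u firmware: %s\n", port_id, fw_version);
}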
|
|
|
|
|
2019-09-12 17:42:13 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2015-10-27 12:51:43 +00:00
|
|
|
const struct rte_eth_desc_lim lim = {
|
|
|
|
.nb_max = UINT16_MAX,
|
|
|
|
.nb_min = 0,
|
|
|
|
.nb_align = 1,
|
2019-05-27 14:05:28 +05:30
|
|
|
.nb_seg_max = UINT16_MAX,
|
|
|
|
.nb_mtu_seg_max = UINT16_MAX,
|
2015-10-27 12:51:43 +00:00
|
|
|
};
|
2019-09-12 17:42:28 +01:00
|
|
|
int diag;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2019-07-23 13:11:21 +01:00
|
|
|
/*
|
|
|
|
* Init dev_info before port_id check since caller does not have
|
|
|
|
* return status and does not know if get is successful or not.
|
|
|
|
*/
|
|
|
|
memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
|
2020-01-16 16:19:54 +00:00
|
|
|
dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
|
2019-07-23 13:11:21 +01:00
|
|
|
|
2019-09-12 17:42:13 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2015-10-27 12:51:43 +00:00
|
|
|
dev_info->rx_desc_lim = lim;
|
|
|
|
dev_info->tx_desc_lim = lim;
|
2018-04-09 13:09:38 +01:00
|
|
|
dev_info->device = dev->device;
|
2019-05-21 18:13:05 +02:00
|
|
|
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
|
2019-03-29 17:52:13 +00:00
|
|
|
dev_info->max_mtu = UINT16_MAX;
|
2014-10-01 10:49:03 +01:00
|
|
|
|
2019-09-12 17:42:13 +01:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
|
2019-09-12 17:42:28 +01:00
|
|
|
diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
|
|
|
|
if (diag != 0) {
|
|
|
|
/* Cleanup already filled in device information */
|
|
|
|
memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
|
|
|
|
return eth_err(port_id, diag);
|
|
|
|
}
|
|
|
|
|
2019-11-27 13:22:56 +01:00
|
|
|
/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
|
|
|
|
dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
|
|
|
|
RTE_MAX_QUEUES_PER_PORT);
|
|
|
|
dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
|
|
|
|
RTE_MAX_QUEUES_PER_PORT);
|
|
|
|
|
2017-06-12 16:25:12 +01:00
|
|
|
dev_info->driver_name = dev->device->driver->name;
|
2016-06-15 15:06:20 +01:00
|
|
|
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
|
|
|
|
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
|
2018-04-26 11:41:00 +01:00
|
|
|
|
|
|
|
dev_info->dev_flags = &dev->data->dev_flags;
|
2019-09-12 17:42:13 +01:00
|
|
|
|
|
|
|
return 0;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
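/*
 * Editorial note: a small sketch (not part of the library) of reading the
 * device information filled in above and using a few of its fields.
 */
static __rte_unused void
example_show_dev_limits(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;
	printf("port %u (%s): up to %u Rx / %u Tx queues, MTU %u..%u\n",
			port_id, dev_info.driver_name,
			dev_info.max_rx_queues, dev_info.max_tx_queues,
			dev_info.min_mtu, dev_info.max_mtu);
}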
|
|
|
|
|
2016-03-15 04:50:50 +08:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
|
2016-03-15 04:50:50 +08:00
|
|
|
uint32_t *ptypes, int num)
|
|
|
|
{
|
|
|
|
int i, j;
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
const uint32_t *all_ptypes;
|
|
|
|
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2016-04-06 11:51:13 +08:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
|
2016-03-15 04:50:50 +08:00
|
|
|
all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
|
|
|
|
|
|
|
|
if (!all_ptypes)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
|
|
|
|
if (all_ptypes[i] & ptype_mask) {
|
|
|
|
if (j < num)
|
|
|
|
ptypes[j] = all_ptypes[i];
|
|
|
|
j++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return j;
|
|
|
|
}
|
|
|
|
|
2019-11-11 18:49:05 +05:30
|
|
|
int
|
|
|
|
rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
|
|
|
|
uint32_t *set_ptypes, unsigned int num)
|
|
|
|
{
|
|
|
|
const uint32_t valid_ptype_masks[] = {
|
|
|
|
RTE_PTYPE_L2_MASK,
|
|
|
|
RTE_PTYPE_L3_MASK,
|
|
|
|
RTE_PTYPE_L4_MASK,
|
|
|
|
RTE_PTYPE_TUNNEL_MASK,
|
|
|
|
RTE_PTYPE_INNER_L2_MASK,
|
|
|
|
RTE_PTYPE_INNER_L3_MASK,
|
|
|
|
RTE_PTYPE_INNER_L4_MASK,
|
|
|
|
};
|
|
|
|
const uint32_t *all_ptypes;
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
uint32_t unused_mask;
|
|
|
|
unsigned int i, j;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
|
|
|
if (num > 0 && set_ptypes == NULL)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
|
|
|
|
*dev->dev_ops->dev_ptypes_set == NULL) {
|
|
|
|
ret = 0;
|
|
|
|
goto ptype_unknown;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ptype_mask == 0) {
|
|
|
|
ret = (*dev->dev_ops->dev_ptypes_set)(dev,
|
|
|
|
ptype_mask);
|
|
|
|
goto ptype_unknown;
|
|
|
|
}
|
|
|
|
|
|
|
|
unused_mask = ptype_mask;
|
|
|
|
for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
|
|
|
|
uint32_t mask = ptype_mask & valid_ptype_masks[i];
|
|
|
|
if (mask && mask != valid_ptype_masks[i]) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto ptype_unknown;
|
|
|
|
}
|
|
|
|
unused_mask &= ~valid_ptype_masks[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (unused_mask) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto ptype_unknown;
|
|
|
|
}
|
|
|
|
|
|
|
|
all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
|
|
|
|
if (all_ptypes == NULL) {
|
|
|
|
ret = 0;
|
|
|
|
goto ptype_unknown;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Accommodate as many set_ptypes as possible. If the supplied
|
|
|
|
	 * set_ptypes array is insufficient, fill it partially.
|
|
|
|
*/
|
|
|
|
for (i = 0, j = 0; set_ptypes != NULL &&
|
|
|
|
(all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
|
|
|
|
if (ptype_mask & all_ptypes[i]) {
|
|
|
|
if (j < num - 1) {
|
|
|
|
set_ptypes[j] = all_ptypes[i];
|
|
|
|
j++;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (set_ptypes != NULL && j < num)
|
|
|
|
set_ptypes[j] = RTE_PTYPE_UNKNOWN;
|
|
|
|
|
|
|
|
return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
|
|
|
|
|
|
|
|
ptype_unknown:
|
|
|
|
if (num > 0)
|
|
|
|
set_ptypes[0] = RTE_PTYPE_UNKNOWN;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-09-10 09:52:15 +01:00
|
|
|
int
|
2019-05-21 18:13:03 +02:00
|
|
|
rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2019-09-10 09:52:15 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2019-05-21 18:13:04 +02:00
|
|
|
rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
|
2019-09-10 09:52:15 +01:00
|
|
|
|
|
|
|
return 0;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2014-06-17 20:09:30 +02:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
|
2014-06-17 20:09:30 +02:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-06-17 20:09:30 +02:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
*mtu = dev->data->mtu;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
|
2014-06-17 20:09:30 +02:00
|
|
|
{
|
|
|
|
int ret;
|
2019-03-29 17:52:13 +00:00
|
|
|
struct rte_eth_dev_info dev_info;
|
2014-06-17 20:09:30 +02:00
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-06-17 20:09:30 +02:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
|
2014-06-17 20:09:30 +02:00
|
|
|
|
2019-03-29 17:52:13 +00:00
|
|
|
/*
|
|
|
|
	 * Check if the device supports dev_infos_get; if it does not,
|
|
|
|
* skip min_mtu/max_mtu validation here as this requires values
|
|
|
|
* that are populated within the call to rte_eth_dev_info_get()
|
|
|
|
* which relies on dev->dev_ops->dev_infos_get.
|
|
|
|
*/
|
|
|
|
if (*dev->dev_ops->dev_infos_get != NULL) {
|
2019-09-12 17:42:13 +01:00
|
|
|
ret = rte_eth_dev_info_get(port_id, &dev_info);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
|
2019-03-29 17:52:13 +00:00
|
|
|
if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2014-06-17 20:09:30 +02:00
|
|
|
ret = (*dev->dev_ops->mtu_set)(dev, mtu);
|
|
|
|
if (!ret)
|
|
|
|
dev->data->mtu = mtu;
|
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, ret);
|
2014-06-17 20:09:30 +02:00
|
|
|
}
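/*
 * Editorial note: an illustrative sketch (not part of the library) of
 * reading the current MTU and requesting a new one, relying on the
 * min_mtu/max_mtu validation performed above.
 */
static __rte_unused void
example_update_mtu(uint16_t port_id, uint16_t new_mtu)
{
	uint16_t cur_mtu;

	if (rte_eth_dev_get_mtu(port_id, &cur_mtu) == 0 &&
	    cur_mtu != new_mtu) {
		if (rte_eth_dev_set_mtu(port_id, new_mtu) != 0)
			printf("port %u: MTU %u rejected\n",
					port_id, new_mtu);
	}
}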
|
|
|
|
|
2012-09-04 13:54:00 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2017-07-09 03:44:45 +02:00
|
|
|
int ret;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2017-10-04 11:17:58 +03:00
|
|
|
if (!(dev->data->dev_conf.rxmode.offloads &
|
|
|
|
DEV_RX_OFFLOAD_VLAN_FILTER)) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
|
|
|
|
port_id);
|
2015-04-09 14:29:42 -07:00
|
|
|
return -ENOSYS;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
2012-12-20 00:00:00 +01:00
|
|
|
|
2012-09-04 13:54:00 +01:00
|
|
|
if (vlan_id > 4095) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
|
|
|
|
port_id, vlan_id);
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
|
2015-02-20 11:26:12 +01:00
|
|
|
|
2017-07-09 03:44:45 +02:00
|
|
|
ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
|
|
|
|
if (ret == 0) {
|
|
|
|
struct rte_vlan_filter_conf *vfc;
|
|
|
|
int vidx;
|
|
|
|
int vbit;
|
|
|
|
|
|
|
|
vfc = &dev->data->vlan_filter_conf;
|
|
|
|
vidx = vlan_id / 64;
|
|
|
|
vbit = vlan_id % 64;
|
|
|
|
|
|
|
|
if (on)
|
|
|
|
vfc->ids[vidx] |= UINT64_C(1) << vbit;
|
|
|
|
else
|
|
|
|
vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
|
|
|
|
}
|
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, ret);
|
2012-09-04 13:54:00 +01:00
|
|
|
}
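/*
 * Editorial note: a minimal sketch (not part of the library) of adding one
 * VLAN ID to the filter table; DEV_RX_OFFLOAD_VLAN_FILTER must already be
 * enabled in the port configuration, as checked above.
 */
static __rte_unused void
example_allow_vlan(uint16_t port_id, uint16_t vlan_id)
{
	int ret = rte_eth_dev_vlan_filter(port_id, vlan_id, 1);

	if (ret != 0)
		printf("port %u: cannot add VLAN %u filter (%d)\n",
				port_id, vlan_id, ret);
}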
|
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
|
|
|
|
int on)
|
2012-12-20 00:00:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-12-20 00:00:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
if (rx_queue_id >= dev->data->nb_rx_queues) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
|
2012-12-20 00:00:00 +01:00
|
|
|
(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
|
|
|
|
|
2015-04-09 14:29:42 -07:00
|
|
|
return 0;
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
|
2016-03-12 00:50:57 +08:00
|
|
|
enum rte_vlan_type vlan_type,
|
|
|
|
uint16_t tpid)
|
2012-12-20 00:00:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-12-20 00:00:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
|
2012-12-20 00:00:00 +01:00
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
|
|
|
|
tpid));
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
|
2012-12-20 00:00:00 +01:00
|
|
|
{
|
2020-07-09 18:43:14 +08:00
|
|
|
struct rte_eth_dev_info dev_info;
|
2012-12-20 00:00:00 +01:00
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
int ret = 0;
|
|
|
|
int mask = 0;
|
|
|
|
int cur, org = 0;
|
2017-08-31 22:36:28 -04:00
|
|
|
uint64_t orig_offloads;
|
2020-01-17 19:49:14 +08:00
|
|
|
uint64_t dev_offloads;
|
2020-07-09 18:43:14 +08:00
|
|
|
uint64_t new_offloads;
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-12-20 00:00:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2017-08-31 22:36:28 -04:00
|
|
|
/* save original values in case of failure */
|
|
|
|
orig_offloads = dev->data->dev_conf.rxmode.offloads;
|
2020-01-17 19:49:14 +08:00
|
|
|
dev_offloads = orig_offloads;
|
2017-08-31 22:36:28 -04:00
|
|
|
|
2020-03-10 09:24:05 -07:00
|
|
|
/* check which option changed by application */
|
2012-12-20 00:00:00 +01:00
|
|
|
cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
|
2020-01-17 19:49:14 +08:00
|
|
|
org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
|
2015-06-26 17:01:44 -07:00
|
|
|
if (cur != org) {
|
2017-10-04 11:17:58 +03:00
|
|
|
if (cur)
|
2020-01-17 19:49:14 +08:00
|
|
|
dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
|
2017-10-04 11:17:58 +03:00
|
|
|
else
|
2020-01-17 19:49:14 +08:00
|
|
|
dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
|
2012-12-20 00:00:00 +01:00
|
|
|
mask |= ETH_VLAN_STRIP_MASK;
|
|
|
|
}
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
|
2020-01-17 19:49:14 +08:00
|
|
|
org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
|
2015-06-26 17:01:44 -07:00
|
|
|
if (cur != org) {
|
2017-10-04 11:17:58 +03:00
|
|
|
if (cur)
|
2020-01-17 19:49:14 +08:00
|
|
|
dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
|
2017-10-04 11:17:58 +03:00
|
|
|
else
|
2020-01-17 19:49:14 +08:00
|
|
|
dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
|
2012-12-20 00:00:00 +01:00
|
|
|
mask |= ETH_VLAN_FILTER_MASK;
|
|
|
|
}
|
|
|
|
|
|
|
|
cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
|
2020-01-17 19:49:14 +08:00
|
|
|
org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
|
2015-06-26 17:01:44 -07:00
|
|
|
if (cur != org) {
|
2017-10-04 11:17:58 +03:00
|
|
|
if (cur)
|
2020-01-17 19:49:14 +08:00
|
|
|
dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
|
2017-10-04 11:17:58 +03:00
|
|
|
else
|
2020-01-17 19:49:14 +08:00
|
|
|
dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
|
2012-12-20 00:00:00 +01:00
|
|
|
mask |= ETH_VLAN_EXTEND_MASK;
|
|
|
|
}
|
|
|
|
|
2019-07-02 09:07:07 +05:30
|
|
|
cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
|
2020-01-17 19:49:14 +08:00
|
|
|
org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
|
2019-07-02 09:07:07 +05:30
|
|
|
if (cur != org) {
|
|
|
|
if (cur)
|
2020-01-17 19:49:14 +08:00
|
|
|
dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
|
2019-07-02 09:07:07 +05:30
|
|
|
else
|
2020-01-17 19:49:14 +08:00
|
|
|
dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
|
2019-07-02 09:07:07 +05:30
|
|
|
mask |= ETH_QINQ_STRIP_MASK;
|
|
|
|
}
|
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
	/* no change */
|
2015-06-26 17:01:44 -07:00
|
|
|
if (mask == 0)
|
2012-12-20 00:00:00 +01:00
|
|
|
return ret;
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2020-07-09 18:43:14 +08:00
|
|
|
ret = rte_eth_dev_info_get(port_id, &dev_info);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* Rx VLAN offloading must be within its device capabilities */
|
|
|
|
if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
|
|
|
|
new_offloads = dev_offloads & ~orig_offloads;
|
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Ethdev port_id=%u requested new added VLAN offloads "
|
|
|
|
"0x%" PRIx64 " must be within Rx offloads capabilities "
|
|
|
|
"0x%" PRIx64 " in %s()\n",
|
|
|
|
port_id, new_offloads, dev_info.rx_offload_capa,
|
|
|
|
__func__);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
|
2020-01-17 19:49:14 +08:00
|
|
|
dev->data->dev_conf.rxmode.offloads = dev_offloads;
|
2017-08-31 22:36:28 -04:00
|
|
|
ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
|
|
|
|
if (ret) {
|
|
|
|
		/* hit an error, restore original values */
|
2020-01-17 19:49:14 +08:00
|
|
|
dev->data->dev_conf.rxmode.offloads = orig_offloads;
|
2017-08-31 22:36:28 -04:00
|
|
|
}
|
2012-12-20 00:00:00 +01:00
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, ret);
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_get_vlan_offload(uint16_t port_id)
|
2012-12-20 00:00:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2019-07-02 09:07:07 +05:30
|
|
|
uint64_t *dev_offloads;
|
2012-12-20 00:00:00 +01:00
|
|
|
int ret = 0;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-12-20 00:00:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2019-07-02 09:07:07 +05:30
|
|
|
dev_offloads = &dev->data->dev_conf.rxmode.offloads;
|
2012-12-20 00:00:00 +01:00
|
|
|
|
2019-07-02 09:07:07 +05:30
|
|
|
if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
|
2015-06-26 17:01:44 -07:00
|
|
|
ret |= ETH_VLAN_STRIP_OFFLOAD;
|
2012-12-20 00:00:00 +01:00
|
|
|
|
2019-07-02 09:07:07 +05:30
|
|
|
if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
|
2015-06-26 17:01:44 -07:00
|
|
|
ret |= ETH_VLAN_FILTER_OFFLOAD;
|
2012-12-20 00:00:00 +01:00
|
|
|
|
2019-07-02 09:07:07 +05:30
|
|
|
if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
|
2015-06-26 17:01:44 -07:00
|
|
|
ret |= ETH_VLAN_EXTEND_OFFLOAD;
|
2012-12-20 00:00:00 +01:00
|
|
|
|
2019-07-02 09:07:07 +05:30
|
|
|
if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
|
2019-09-13 16:44:49 +05:30
|
|
|
ret |= ETH_QINQ_STRIP_OFFLOAD;
|
2019-07-02 09:07:07 +05:30
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
return ret;
|
|
|
|
}
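/*
 * Editorial note: an illustrative runtime toggle (not part of the library)
 * of the VLAN strip offload, built on the get/set pair above.
 */
static __rte_unused void
example_enable_vlan_strip(uint16_t port_id)
{
	int offloads = rte_eth_dev_get_vlan_offload(port_id);

	if (offloads < 0)
		return;
	if ((offloads & ETH_VLAN_STRIP_OFFLOAD) == 0 &&
	    rte_eth_dev_set_vlan_offload(port_id,
			offloads | ETH_VLAN_STRIP_OFFLOAD) != 0)
		printf("port %u: cannot enable VLAN stripping\n", port_id);
}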
|
|
|
|
|
2014-06-05 13:08:50 +08:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
|
2014-06-05 13:08:50 +08:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-06-05 13:08:50 +08:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
|
2014-06-05 13:08:50 +08:00
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
|
2014-06-05 13:08:50 +08:00
|
|
|
}
|
2012-12-20 00:00:00 +01:00
|
|
|
|
2014-06-17 20:09:26 +02:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
|
2014-06-17 20:09:26 +02:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-06-17 20:09:26 +02:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
|
2014-06-17 20:09:26 +02:00
|
|
|
memset(fc_conf, 0, sizeof(*fc_conf));
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
|
2014-06-17 20:09:26 +02:00
|
|
|
}
|
|
|
|
|
2012-09-04 13:54:00 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2012-12-20 00:00:00 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
|
|
|
|
struct rte_eth_pfc_conf *pfc_conf)
|
2012-12-20 00:00:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-12-20 00:00:00 +01:00
|
|
|
if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2012-12-20 00:00:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
/* High water, low water validation are device specific */
|
|
|
|
if (*dev->dev_ops->priority_flow_ctrl_set)
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
|
|
|
|
(dev, pfc_conf));
|
2015-04-09 14:29:42 -07:00
|
|
|
return -ENOTSUP;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2015-04-09 14:29:39 -07:00
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
|
2014-11-16 00:03:43 +08:00
|
|
|
uint16_t reta_size)
|
2013-06-03 00:00:00 +00:00
|
|
|
{
|
2014-11-16 00:03:43 +08:00
|
|
|
uint16_t i, num;
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2014-11-16 00:03:43 +08:00
|
|
|
if (!reta_conf)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2017-03-20 16:04:33 -07:00
|
|
|
num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
|
2014-11-16 00:03:43 +08:00
|
|
|
for (i = 0; i < num; i++) {
|
|
|
|
if (reta_conf[i].mask)
|
|
|
|
return 0;
|
2013-06-03 00:00:00 +00:00
|
|
|
}
|
|
|
|
|
2014-11-16 00:03:43 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2015-04-09 14:29:39 -07:00
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
|
2014-11-16 00:03:43 +08:00
|
|
|
uint16_t reta_size,
|
2016-01-12 11:49:08 +01:00
|
|
|
uint16_t max_rxq)
|
2014-11-16 00:03:43 +08:00
|
|
|
{
|
|
|
|
uint16_t i, idx, shift;
|
|
|
|
|
|
|
|
if (!reta_conf)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (max_rxq == 0) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
|
2014-11-16 00:03:43 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < reta_size; i++) {
|
|
|
|
idx = i / RTE_RETA_GROUP_SIZE;
|
|
|
|
shift = i % RTE_RETA_GROUP_SIZE;
|
|
|
|
if ((reta_conf[idx].mask & (1ULL << shift)) &&
|
|
|
|
(reta_conf[idx].reta[shift] >= max_rxq)) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
|
|
|
|
idx, shift,
|
2014-11-16 00:03:43 +08:00
|
|
|
reta_conf[idx].reta[shift], max_rxq);
|
|
|
|
return -EINVAL;
|
2013-06-03 00:00:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-16 00:03:43 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2014-11-16 00:03:43 +08:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_rss_reta_update(uint16_t port_id,
|
2014-11-16 00:03:43 +08:00
|
|
|
struct rte_eth_rss_reta_entry64 *reta_conf,
|
|
|
|
uint16_t reta_size)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
int ret;
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-11-16 00:03:43 +08:00
|
|
|
/* Check mask bits */
|
2020-10-13 17:56:58 +01:00
|
|
|
ret = eth_check_reta_mask(reta_conf, reta_size);
|
2014-11-16 00:03:43 +08:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
|
|
|
/* Check entry value */
|
2020-10-13 17:56:58 +01:00
|
|
|
ret = eth_check_reta_entry(reta_conf, reta_size,
|
2014-11-16 00:03:43 +08:00
|
|
|
dev->data->nb_rx_queues);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
|
|
|
|
reta_size));
|
2013-06-03 00:00:00 +00:00
|
|
|
}
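
/*
 * Illustrative usage sketch, not part of the library: fill a redirection
 * table that spreads traffic evenly over nb_queues Rx queues and apply it
 * with rte_eth_dev_rss_reta_update(). The helper name and the 512-entry
 * upper bound are assumptions made for this example only.
 */
static int
example_spread_reta(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
	struct rte_eth_dev_info dev_info;
	uint16_t i, idx, shift;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (nb_queues == 0 || dev_info.reta_size > 512)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		reta_conf[idx].mask |= 1ULL << shift;	/* mark entry as valid */
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
			dev_info.reta_size);
}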
|
|
|
|
|
2014-06-04 00:42:50 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_rss_reta_query(uint16_t port_id,
|
2014-11-16 00:03:43 +08:00
|
|
|
struct rte_eth_rss_reta_entry64 *reta_conf,
|
|
|
|
uint16_t reta_size)
|
2013-06-03 00:00:00 +00:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2014-11-16 00:03:43 +08:00
|
|
|
int ret;
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2016-05-18 21:15:11 +02:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2014-11-16 00:03:43 +08:00
|
|
|
/* Check mask bits */
|
2020-10-13 17:56:58 +01:00
|
|
|
ret = eth_check_reta_mask(reta_conf, reta_size);
|
2014-11-16 00:03:43 +08:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2013-06-03 00:00:00 +00:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
|
|
|
|
reta_size));
|
2013-06-03 00:00:00 +00:00
|
|
|
}
|
|
|
|
|
2014-05-16 10:58:40 +02:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_rss_hash_update(uint16_t port_id,
|
|
|
|
struct rte_eth_rss_conf *rss_conf)
|
2014-05-16 10:58:40 +02:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2018-04-20 22:30:22 +08:00
|
|
|
struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
|
2019-09-12 17:42:13 +01:00
|
|
|
int ret;
|
2014-05-16 10:58:40 +02:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2019-09-12 17:42:13 +01:00
|
|
|
|
|
|
|
ret = rte_eth_dev_info_get(port_id, &dev_info);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
|
2019-10-15 23:09:48 +08:00
|
|
|
rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
|
|
|
|
|
2014-05-16 10:58:40 +02:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2018-04-20 22:30:22 +08:00
|
|
|
if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
|
|
|
|
dev_info.flow_type_rss_offloads) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
|
|
|
|
port_id, rss_conf->rss_hf,
|
|
|
|
dev_info.flow_type_rss_offloads);
|
2018-05-31 14:22:45 +01:00
|
|
|
return -EINVAL;
|
2018-04-20 22:30:22 +08:00
|
|
|
}
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
|
|
|
|
rss_conf));
|
2014-05-16 10:58:40 +02:00
|
|
|
}
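
/*
 * Illustrative usage sketch, not part of the library: restrict the RSS hash
 * to IP-only hashing while staying within the offloads the device reports,
 * so the rss_hf check above passes. Leaving rss_key as NULL keeps the
 * current hash key. The helper name is an assumption for this example.
 */
static int
example_set_rss_ip_only(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_conf rss_conf = { .rss_key = NULL };
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Request IP hashing, masked by what the device supports. */
	rss_conf.rss_hf = ETH_RSS_IP & dev_info.flow_type_rss_offloads;

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}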
|
|
|
|
|
2014-05-16 10:58:42 +02:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
|
2014-05-16 10:58:42 +02:00
|
|
|
struct rte_eth_rss_conf *rss_conf)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-05-16 10:58:42 +02:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
|
|
|
|
rss_conf));
|
2014-05-16 10:58:42 +02:00
|
|
|
}
|
|
|
|
|
2014-10-23 21:18:53 +08:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
|
2016-03-10 10:42:10 +08:00
|
|
|
struct rte_eth_udp_tunnel *udp_tunnel)
|
2014-10-23 21:18:53 +08:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-10-23 21:18:53 +08:00
|
|
|
if (udp_tunnel == NULL) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
|
2014-10-23 21:18:53 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
|
2014-10-23 21:18:53 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2016-03-10 10:42:10 +08:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
|
|
|
|
udp_tunnel));
|
2014-10-23 21:18:53 +08:00
|
|
|
}
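
/*
 * Illustrative usage sketch, not part of the library: declare that UDP
 * destination port 4789 (the IANA-assigned VXLAN port) carries VXLAN
 * traffic so the device can parse the tunnel. The helper name is an
 * assumption for this example.
 */
static int
example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}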
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
|
2016-03-10 10:42:10 +08:00
|
|
|
struct rte_eth_udp_tunnel *udp_tunnel)
|
2014-10-23 21:18:53 +08:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-10-23 21:18:53 +08:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
|
|
|
if (udp_tunnel == NULL) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
|
2014-10-23 21:18:53 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
|
2014-10-23 21:18:53 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2016-03-10 10:42:10 +08:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
|
|
|
|
udp_tunnel));
|
2014-10-23 21:18:53 +08:00
|
|
|
}
|
|
|
|
|
2012-09-04 13:54:00 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_led_on(uint16_t port_id)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_led_off(uint16_t port_id)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2020-10-08 18:02:54 +08:00
|
|
|
int
|
|
|
|
rte_eth_fec_get_capability(uint16_t port_id,
|
|
|
|
struct rte_eth_fec_capa *speed_fec_capa,
|
|
|
|
unsigned int num)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (speed_fec_capa == NULL && num > 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
|
|
|
|
ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
|
|
|
if (fec_capa == NULL)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
|
|
|
|
return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
|
|
|
|
return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
|
|
|
|
}
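
/*
 * Illustrative usage sketch, not part of the library: read back the FEC
 * mode currently in use and then ask the driver to negotiate FEC
 * automatically. The helper name is an assumption for this example.
 */
static int
example_enable_auto_fec(uint16_t port_id)
{
	uint32_t current_fec;
	int ret;

	/* current_fec receives the active mode(s) as RTE_ETH_FEC_MODE_CAPA_MASK() bits. */
	ret = rte_eth_fec_get(port_id, &current_fec);
	if (ret != 0)
		return ret;

	return rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO));
}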
|
|
|
|
|
2012-09-04 13:54:00 +01:00
|
|
|
/*
|
|
|
|
* Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
|
|
|
|
* an empty spot.
|
|
|
|
*/
|
2015-04-09 14:29:39 -07:00
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev_info dev_info;
|
|
|
|
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
|
|
|
|
unsigned i;
|
2019-09-12 17:42:13 +01:00
|
|
|
int ret;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2019-09-12 17:42:13 +01:00
|
|
|
ret = rte_eth_dev_info_get(port_id, &dev_info);
|
|
|
|
if (ret != 0)
|
|
|
|
return -1;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
|
|
|
for (i = 0; i < dev_info.max_mac_addrs; i++)
|
2019-05-21 18:13:05 +02:00
|
|
|
if (memcmp(addr, &dev->data->mac_addrs[i],
|
|
|
|
RTE_ETHER_ADDR_LEN) == 0)
|
2012-09-04 13:54:00 +01:00
|
|
|
return i;
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-05-21 18:13:03 +02:00
|
|
|
static const struct rte_ether_addr null_mac_addr;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
|
|
|
int
|
2019-05-21 18:13:03 +02:00
|
|
|
rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
|
2013-09-18 12:00:00 +02:00
|
|
|
uint32_t pool)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
int index;
|
2013-09-18 12:00:00 +02:00
|
|
|
uint64_t pool_mask;
|
2017-05-05 08:40:00 +08:00
|
|
|
int ret;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
|
2012-12-20 00:00:00 +01:00
|
|
|
|
2019-05-21 18:13:04 +02:00
|
|
|
if (rte_is_zero_ether_addr(addr)) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
|
2013-09-18 12:00:00 +02:00
|
|
|
port_id);
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
2013-09-18 12:00:00 +02:00
|
|
|
if (pool >= ETH_64_POOLS) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
index = eth_dev_get_mac_addr_index(port_id, addr);
|
2012-09-04 13:54:00 +01:00
|
|
|
if (index < 0) {
|
2020-10-13 17:56:58 +01:00
|
|
|
index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
|
2013-09-18 12:00:00 +02:00
|
|
|
if (index < 0) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
|
2013-09-18 12:00:00 +02:00
|
|
|
port_id);
|
2015-04-09 14:29:42 -07:00
|
|
|
return -ENOSPC;
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
pool_mask = dev->data->mac_pool_sel[index];
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2015-06-26 17:01:43 -07:00
|
|
|
/* Check if both MAC address and pool are already there, and do nothing */
|
2013-09-18 12:00:00 +02:00
|
|
|
if (pool_mask & (1ULL << pool))
|
|
|
|
return 0;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Update NIC */
|
2017-05-05 08:40:00 +08:00
|
|
|
ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2017-05-05 08:40:00 +08:00
|
|
|
if (ret == 0) {
|
|
|
|
/* Update address in NIC data structure */
|
2019-05-21 18:13:04 +02:00
|
|
|
rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2017-05-05 08:40:00 +08:00
|
|
|
/* Update pool bitmap in NIC data structure */
|
|
|
|
dev->data->mac_pool_sel[index] |= (1ULL << pool);
|
|
|
|
}
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, ret);
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2019-05-21 18:13:03 +02:00
|
|
|
rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
int index;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2012-09-04 13:54:00 +01:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
|
2012-12-20 00:00:00 +01:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
index = eth_dev_get_mac_addr_index(port_id, addr);
|
2012-09-04 13:54:00 +01:00
|
|
|
if (index == 0) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Port %u: Cannot remove default MAC address\n",
|
|
|
|
port_id);
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EADDRINUSE;
|
2012-09-04 13:54:00 +01:00
|
|
|
} else if (index < 0)
|
|
|
|
return 0; /* Do nothing if address wasn't found */
|
|
|
|
|
|
|
|
/* Update NIC */
|
|
|
|
(*dev->dev_ops->mac_addr_remove)(dev, index);
|
|
|
|
|
|
|
|
/* Update address in NIC data structure */
|
2019-05-21 18:13:04 +02:00
|
|
|
rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2014-11-04 18:01:24 +08:00
|
|
|
/* reset pool bitmap */
|
|
|
|
dev->data->mac_pool_sel[index] = 0;
|
|
|
|
|
2012-09-04 13:54:00 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-07-16 09:25:33 -04:00
|
|
|
int
|
2019-05-21 18:13:03 +02:00
|
|
|
rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
|
2015-07-16 09:25:33 -04:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2018-04-11 18:32:51 +02:00
|
|
|
int ret;
|
2015-07-16 09:25:33 -04:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-07-16 09:25:33 -04:00
|
|
|
|
2019-05-21 18:13:04 +02:00
|
|
|
if (!rte_is_valid_assigned_ether_addr(addr))
|
2015-07-16 09:25:33 -04:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
|
2015-07-16 09:25:33 -04:00
|
|
|
|
2018-04-11 18:32:51 +02:00
|
|
|
ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2015-07-16 09:25:33 -04:00
|
|
|
/* Update default address in NIC data structure */
|
2019-05-21 18:13:04 +02:00
|
|
|
rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
|
2015-07-16 09:25:33 -04:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
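
/*
 * Illustrative usage sketch, not part of the library: install a locally
 * administered unicast address as the port's default MAC address. The
 * address bytes and the helper name are arbitrary example values.
 */
static int
example_set_default_mac(uint16_t port_id)
{
	struct rte_ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	return rte_eth_dev_default_mac_addr_set(port_id, &addr);
}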
|
|
|
|
|
2013-09-18 12:00:00 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
|
|
|
|
* an empty spot.
|
|
|
|
*/
|
2015-04-09 14:29:39 -07:00
|
|
|
static int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_get_hash_mac_addr_index(uint16_t port_id,
|
|
|
|
const struct rte_ether_addr *addr)
|
2013-09-18 12:00:00 +02:00
|
|
|
{
|
|
|
|
struct rte_eth_dev_info dev_info;
|
|
|
|
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
|
|
|
|
unsigned i;
|
2019-09-12 17:42:13 +01:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = rte_eth_dev_info_get(port_id, &dev_info);
|
|
|
|
if (ret != 0)
|
|
|
|
return -1;
|
2013-09-18 12:00:00 +02:00
|
|
|
|
|
|
|
if (!dev->data->hash_mac_addrs)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
|
|
|
|
if (memcmp(addr, &dev->data->hash_mac_addrs[i],
|
2019-05-21 18:13:05 +02:00
|
|
|
RTE_ETHER_ADDR_LEN) == 0)
|
2013-09-18 12:00:00 +02:00
|
|
|
return i;
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2019-05-21 18:13:03 +02:00
|
|
|
rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
|
2013-09-18 12:00:00 +02:00
|
|
|
uint8_t on)
|
|
|
|
{
|
|
|
|
int index;
|
|
|
|
int ret;
|
|
|
|
struct rte_eth_dev *dev;
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2013-09-18 12:00:00 +02:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2019-05-21 18:13:04 +02:00
|
|
|
if (rte_is_zero_ether_addr(addr)) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
|
2013-09-18 12:00:00 +02:00
|
|
|
port_id);
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
index = eth_dev_get_hash_mac_addr_index(port_id, addr);
|
2013-09-18 12:00:00 +02:00
|
|
|
/* Check if it's already there, and do nothing */
|
2017-12-14 15:32:19 -08:00
|
|
|
if ((index >= 0) && on)
|
2013-09-18 12:00:00 +02:00
|
|
|
return 0;
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2013-09-18 12:00:00 +02:00
|
|
|
if (index < 0) {
|
|
|
|
if (!on) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Port %u: the MAC address was not set in UTA\n",
|
|
|
|
port_id);
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
|
2013-09-18 12:00:00 +02:00
|
|
|
if (index < 0) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
|
|
|
|
port_id);
|
2015-04-09 14:29:42 -07:00
|
|
|
return -ENOSPC;
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
2014-06-04 00:42:50 +01:00
|
|
|
}
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
|
2013-09-18 12:00:00 +02:00
|
|
|
ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
|
|
|
|
if (ret == 0) {
|
|
|
|
/* Update address in NIC data structure */
|
|
|
|
if (on)
|
2019-05-21 18:13:04 +02:00
|
|
|
rte_ether_addr_copy(addr,
|
2013-09-18 12:00:00 +02:00
|
|
|
&dev->data->hash_mac_addrs[index]);
|
2014-06-04 00:42:50 +01:00
|
|
|
else
|
2019-05-21 18:13:04 +02:00
|
|
|
rte_ether_addr_copy(&null_mac_addr,
|
2013-09-18 12:00:00 +02:00
|
|
|
&dev->data->hash_mac_addrs[index]);
|
|
|
|
}
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, ret);
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
|
2013-09-18 12:00:00 +02:00
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2013-09-18 12:00:00 +02:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
|
|
|
|
on));
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
|
|
|
|
2017-09-29 15:17:24 +08:00
|
|
|
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
|
2014-05-26 15:45:29 +08:00
|
|
|
uint16_t tx_rate)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
struct rte_eth_dev_info dev_info;
|
|
|
|
struct rte_eth_link link;
|
2019-09-12 17:42:13 +01:00
|
|
|
int ret;
|
2014-05-26 15:45:29 +08:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2014-05-26 15:45:29 +08:00
|
|
|
|
2019-09-12 17:42:13 +01:00
|
|
|
ret = rte_eth_dev_info_get(port_id, &dev_info);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
|
2014-05-26 15:45:29 +08:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
link = dev->data->dev_link;
|
|
|
|
|
|
|
|
if (queue_idx > dev_info.max_tx_queues) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Set queue rate limit: port %u: invalid queue_id=%u\n",
|
|
|
|
port_id, queue_idx);
|
2014-05-26 15:45:29 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (tx_rate > link.link_speed) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
|
2014-07-02 15:10:30 +02:00
|
|
|
tx_rate, link.link_speed);
|
2014-05-26 15:45:29 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
|
|
|
|
queue_idx, tx_rate));
|
2014-05-26 15:45:29 +08:00
|
|
|
}
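
/*
 * Illustrative usage sketch, not part of the library: cap the transmit rate
 * of Tx queue 0 at 1000 Mbps. Both the queue index and the rate are
 * arbitrary example values; the rate must not exceed the link speed, as
 * checked above.
 */
static int
example_cap_tx_queue(uint16_t port_id)
{
	return rte_eth_set_queue_rate_limit(port_id, 0, 1000);
}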
|
|
|
|
|
2013-09-18 12:00:00 +02:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_mirror_rule_set(uint16_t port_id,
|
2015-06-10 14:24:30 +08:00
|
|
|
struct rte_eth_mirror_conf *mirror_conf,
|
2013-09-18 12:00:00 +02:00
|
|
|
uint8_t rule_id, uint8_t on)
|
|
|
|
{
|
2017-01-24 21:28:35 +01:00
|
|
|
struct rte_eth_dev *dev;
|
2013-09-18 12:00:00 +02:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-06-10 14:24:31 +08:00
|
|
|
if (mirror_conf->rule_type == 0) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2013-09-18 12:00:00 +02:00
|
|
|
if (mirror_conf->dst_pool >= ETH_64_POOLS) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
|
|
|
|
ETH_64_POOLS - 1);
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2015-06-10 14:24:31 +08:00
|
|
|
if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
|
|
|
|
ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
|
|
|
|
(mirror_conf->pool_mask == 0)) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Invalid mirror pool, pool mask cannot be 0\n");
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
2014-06-04 00:42:50 +01:00
|
|
|
|
2015-06-10 14:24:31 +08:00
|
|
|
if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
|
|
|
|
mirror_conf->vlan.vlan_mask == 0) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Invalid VLAN mask, VLAN mask cannot be 0\n");
|
2015-06-10 14:24:31 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2013-09-18 12:00:00 +02:00
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
|
2013-09-18 12:00:00 +02:00
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
|
|
|
|
mirror_conf, rule_id, on));
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
|
2013-09-18 12:00:00 +02:00
|
|
|
{
|
2017-01-24 21:28:35 +01:00
|
|
|
struct rte_eth_dev *dev;
|
2013-09-18 12:00:00 +02:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2013-09-18 12:00:00 +02:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
|
2013-09-18 12:00:00 +02:00
|
|
|
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
|
|
|
|
rule_id));
|
2013-09-18 12:00:00 +02:00
|
|
|
}
|
|
|
|
|
2018-01-04 17:01:09 +01:00
|
|
|
RTE_INIT(eth_dev_init_cb_lists)
|
|
|
|
{
|
2020-11-04 10:57:57 +08:00
|
|
|
uint16_t i;
|
2018-01-04 17:01:09 +01:00
|
|
|
|
|
|
|
for (i = 0; i < RTE_MAX_ETHPORTS; i++)
|
|
|
|
TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
|
|
|
|
}
|
|
|
|
|
2012-09-04 13:54:00 +01:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_callback_register(uint16_t port_id,
|
2012-09-04 13:54:00 +01:00
|
|
|
enum rte_eth_event_type event,
|
|
|
|
rte_eth_dev_cb_fn cb_fn, void *cb_arg)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2013-06-03 00:00:00 +00:00
|
|
|
struct rte_eth_dev_callback *user_cb;
|
2020-11-04 10:57:57 +08:00
|
|
|
uint16_t next_port;
|
2018-01-04 17:01:09 +01:00
|
|
|
uint16_t last_port;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
|
|
|
if (!cb_fn)
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2015-02-26 04:32:18 +09:00
|
|
|
|
2018-01-04 17:01:09 +01:00
|
|
|
if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
|
2018-06-19 02:04:55 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
|
2018-01-04 17:01:09 +01:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (port_id == RTE_ETH_ALL) {
|
|
|
|
next_port = 0;
|
|
|
|
last_port = RTE_MAX_ETHPORTS - 1;
|
|
|
|
} else {
|
|
|
|
next_port = last_port = port_id;
|
|
|
|
}
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_cb_lock);
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2018-01-04 17:01:09 +01:00
|
|
|
do {
|
|
|
|
dev = &rte_eth_devices[next_port];
|
|
|
|
|
|
|
|
TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
|
|
|
|
if (user_cb->cb_fn == cb_fn &&
|
|
|
|
user_cb->cb_arg == cb_arg &&
|
|
|
|
user_cb->event == event) {
|
|
|
|
break;
|
|
|
|
}
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2018-01-04 17:01:09 +01:00
|
|
|
/* create a new callback. */
|
|
|
|
if (user_cb == NULL) {
|
|
|
|
user_cb = rte_zmalloc("INTR_USER_CALLBACK",
|
|
|
|
sizeof(struct rte_eth_dev_callback), 0);
|
|
|
|
if (user_cb != NULL) {
|
|
|
|
user_cb->cb_fn = cb_fn;
|
|
|
|
user_cb->cb_arg = cb_arg;
|
|
|
|
user_cb->event = event;
|
|
|
|
TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
|
|
|
|
user_cb, next);
|
|
|
|
} else {
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_cb_lock);
|
2018-01-04 17:01:09 +01:00
|
|
|
rte_eth_dev_callback_unregister(port_id, event,
|
|
|
|
cb_fn, cb_arg);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2016-10-20 09:34:41 -04:00
|
|
|
}
|
2018-01-04 17:01:09 +01:00
|
|
|
} while (++next_port <= last_port);
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_cb_lock);
|
2018-01-04 17:01:09 +01:00
|
|
|
return 0;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
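
/*
 * Illustrative usage sketch, not part of the library: a hypothetical link
 * status callback matching rte_eth_dev_cb_fn, registered on every port via
 * RTE_ETH_ALL as handled above. Both function names are assumptions made
 * for this example.
 */
static int
example_on_link_change(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (event == RTE_ETH_EVENT_INTR_LSC)
		printf("port %u reported a link status change\n", port_id);

	return 0;
}

static int
example_register_link_callback(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC,
			example_on_link_change, NULL);
}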
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_callback_unregister(uint16_t port_id,
|
2012-09-04 13:54:00 +01:00
|
|
|
enum rte_eth_event_type event,
|
|
|
|
rte_eth_dev_cb_fn cb_fn, void *cb_arg)
|
|
|
|
{
|
2013-06-03 00:00:00 +00:00
|
|
|
int ret;
|
2012-09-04 13:54:00 +01:00
|
|
|
struct rte_eth_dev *dev;
|
2013-06-03 00:00:00 +00:00
|
|
|
struct rte_eth_dev_callback *cb, *next;
|
2020-11-04 10:57:57 +08:00
|
|
|
uint16_t next_port;
|
2018-01-04 17:01:09 +01:00
|
|
|
uint16_t last_port;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
|
|
|
if (!cb_fn)
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2015-02-26 04:32:18 +09:00
|
|
|
|
2018-01-04 17:01:09 +01:00
|
|
|
if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
|
2018-06-19 02:04:55 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
|
2018-01-04 17:01:09 +01:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (port_id == RTE_ETH_ALL) {
|
|
|
|
next_port = 0;
|
|
|
|
last_port = RTE_MAX_ETHPORTS - 1;
|
|
|
|
} else {
|
|
|
|
next_port = last_port = port_id;
|
|
|
|
}
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_cb_lock);
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2018-01-04 17:01:09 +01:00
|
|
|
do {
|
|
|
|
dev = &rte_eth_devices[next_port];
|
|
|
|
ret = 0;
|
|
|
|
for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
|
|
|
|
cb = next) {
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2018-01-04 17:01:09 +01:00
|
|
|
next = TAILQ_NEXT(cb, next);
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2018-01-04 17:01:09 +01:00
|
|
|
if (cb->cb_fn != cb_fn || cb->event != event ||
|
2020-01-07 16:51:36 +01:00
|
|
|
(cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
|
2018-01-04 17:01:09 +01:00
|
|
|
continue;
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2018-01-04 17:01:09 +01:00
|
|
|
/*
|
|
|
|
* if this callback is not executing right now,
|
|
|
|
* then remove it.
|
|
|
|
*/
|
|
|
|
if (cb->active == 0) {
|
|
|
|
TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
|
|
|
|
rte_free(cb);
|
|
|
|
} else {
|
|
|
|
ret = -EAGAIN;
|
|
|
|
}
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
2018-01-04 17:01:09 +01:00
|
|
|
} while (++next_port <= last_port);
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_cb_lock);
|
2015-04-09 14:29:42 -07:00
|
|
|
return ret;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
|
|
|
|
2017-06-15 13:29:50 +01:00
|
|
|
int
|
2020-09-09 14:01:48 +01:00
|
|
|
rte_eth_dev_callback_process(struct rte_eth_dev *dev,
|
2018-01-04 17:01:08 +01:00
|
|
|
enum rte_eth_event_type event, void *ret_param)
|
2012-09-04 13:54:00 +01:00
|
|
|
{
|
2013-06-03 00:00:00 +00:00
|
|
|
struct rte_eth_dev_callback *cb_lst;
|
2012-09-04 13:54:00 +01:00
|
|
|
struct rte_eth_dev_callback dev_cb;
|
2017-06-15 13:29:50 +01:00
|
|
|
int rc = 0;
|
2012-09-04 13:54:00 +01:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_cb_lock);
|
2015-02-23 18:30:08 +00:00
|
|
|
TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
|
2012-09-04 13:54:00 +01:00
|
|
|
if (cb_lst->cb_fn == NULL || cb_lst->event != event)
|
|
|
|
continue;
|
|
|
|
dev_cb = *cb_lst;
|
2013-06-03 00:00:00 +00:00
|
|
|
cb_lst->active = 1;
|
2017-06-15 13:29:50 +01:00
|
|
|
if (ret_param != NULL)
|
|
|
|
dev_cb.ret_param = ret_param;
|
2016-10-10 15:34:14 +01:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_cb_lock);
|
2017-06-15 13:29:50 +01:00
|
|
|
rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
|
|
|
|
dev_cb.cb_arg, dev_cb.ret_param);
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_cb_lock);
|
2013-06-03 00:00:00 +00:00
|
|
|
cb_lst->active = 0;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_cb_lock);
|
2017-06-15 13:29:50 +01:00
|
|
|
return rc;
|
2012-09-04 13:54:00 +01:00
|
|
|
}
|
2015-07-20 11:02:26 +08:00
|
|
|
|
2018-05-11 01:58:30 +02:00
|
|
|
void
|
|
|
|
rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
if (dev == NULL)
|
|
|
|
return;
|
2018-05-11 01:58:33 +02:00
|
|
|
|
2020-09-09 14:01:48 +01:00
|
|
|
rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
|
2018-05-11 01:58:34 +02:00
|
|
|
|
2018-05-11 01:58:33 +02:00
|
|
|
dev->state = RTE_ETH_DEV_ATTACHED;
|
2018-05-11 01:58:30 +02:00
|
|
|
}
|
|
|
|
|
2015-07-20 11:02:26 +08:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
|
2015-07-20 11:02:26 +08:00
|
|
|
{
|
|
|
|
uint32_t vec;
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
struct rte_intr_handle *intr_handle;
|
|
|
|
uint16_t qid;
|
|
|
|
int rc;
|
|
|
|
|
2016-05-18 21:15:11 +02:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-07-20 11:02:26 +08:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2016-12-23 16:58:09 +01:00
|
|
|
|
|
|
|
if (!dev->intr_handle) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
|
2016-12-23 16:58:09 +01:00
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
|
|
|
intr_handle = dev->intr_handle;
|
2015-07-20 11:02:26 +08:00
|
|
|
if (!intr_handle->intr_vec) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
|
2015-07-20 11:02:26 +08:00
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
|
|
|
|
vec = intr_handle->intr_vec[qid];
|
|
|
|
rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
|
|
|
|
if (rc && rc != -EEXIST) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"p %u q %u rx ctl error op %d epfd %d vec %u\n",
|
|
|
|
port_id, qid, op, epfd, vec);
|
2015-07-20 11:02:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-06-29 13:58:52 +02:00
|
|
|
int
|
2018-09-29 10:12:04 +08:00
|
|
|
rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
|
|
|
|
{
|
|
|
|
struct rte_intr_handle *intr_handle;
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
unsigned int efd_idx;
|
|
|
|
uint32_t vec;
|
|
|
|
int fd;
|
|
|
|
|
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
|
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
|
|
|
if (queue_id >= dev->data->nb_rx_queues) {
|
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!dev->intr_handle) {
|
|
|
|
RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
intr_handle = dev->intr_handle;
|
|
|
|
if (!intr_handle->intr_vec) {
|
|
|
|
RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
vec = intr_handle->intr_vec[queue_id];
|
|
|
|
efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
|
|
|
|
(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
|
|
|
|
fd = intr_handle->efds[efd_idx];
|
|
|
|
|
|
|
|
return fd;
|
|
|
|
}
|
|
|
|
|
2020-07-10 22:43:42 +01:00
|
|
|
static inline int
|
2020-10-13 17:56:58 +01:00
|
|
|
eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
|
2020-07-10 22:43:42 +01:00
|
|
|
const char *ring_name)
|
|
|
|
{
|
|
|
|
return snprintf(name, len, "eth_p%d_q%d_%s",
|
|
|
|
port_id, queue_id, ring_name);
|
|
|
|
}
|
|
|
|
|
2015-11-05 16:09:30 -08:00
|
|
|
const struct rte_memzone *
|
|
|
|
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
|
|
|
|
uint16_t queue_id, size_t size, unsigned align,
|
|
|
|
int socket_id)
|
|
|
|
{
|
|
|
|
char z_name[RTE_MEMZONE_NAMESIZE];
|
|
|
|
const struct rte_memzone *mz;
|
2019-01-17 14:13:54 +00:00
|
|
|
int rc;
|
2015-11-05 16:09:30 -08:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
|
2020-07-10 22:43:42 +01:00
|
|
|
queue_id, ring_name);
|
2019-01-17 14:13:54 +00:00
|
|
|
if (rc >= RTE_MEMZONE_NAMESIZE) {
|
|
|
|
RTE_ETHDEV_LOG(ERR, "ring name too long\n");
|
|
|
|
rte_errno = ENAMETOOLONG;
|
|
|
|
return NULL;
|
|
|
|
}
|
2015-11-05 16:09:30 -08:00
|
|
|
|
|
|
|
mz = rte_memzone_lookup(z_name);
|
2020-06-24 10:35:20 +01:00
|
|
|
if (mz) {
|
|
|
|
if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
|
|
|
|
size > mz->len ||
|
|
|
|
((uintptr_t)mz->addr & (align - 1)) != 0) {
|
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"memzone %s does not satisfy the requested attributes\n",
|
|
|
|
mz->name);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-11-05 16:09:30 -08:00
|
|
|
return mz;
|
2020-06-24 10:35:20 +01:00
|
|
|
}
|
2015-11-05 16:09:30 -08:00
|
|
|
|
2018-04-11 13:29:47 +01:00
|
|
|
return rte_memzone_reserve_aligned(z_name, size, socket_id,
|
|
|
|
RTE_MEMZONE_IOVA_CONTIG, align);
|
2015-11-05 16:09:30 -08:00
|
|
|
}
|
|
|
|
|
2020-07-10 22:43:42 +01:00
|
|
|
int
|
|
|
|
rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
|
|
|
|
uint16_t queue_id)
|
|
|
|
{
|
|
|
|
char z_name[RTE_MEMZONE_NAMESIZE];
|
|
|
|
const struct rte_memzone *mz;
|
|
|
|
int rc = 0;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
|
2020-07-10 22:43:42 +01:00
|
|
|
queue_id, ring_name);
|
|
|
|
if (rc >= RTE_MEMZONE_NAMESIZE) {
|
|
|
|
RTE_ETHDEV_LOG(ERR, "ring name too long\n");
|
|
|
|
return -ENAMETOOLONG;
|
|
|
|
}
|
|
|
|
|
|
|
|
mz = rte_memzone_lookup(z_name);
|
|
|
|
if (mz)
|
|
|
|
rc = rte_memzone_free(mz);
|
|
|
|
else
|
|
|
|
rc = -ENOENT;
|
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2019-06-29 13:58:52 +02:00
|
|
|
int
|
2018-04-26 11:40:59 +01:00
|
|
|
rte_eth_dev_create(struct rte_device *device, const char *name,
|
|
|
|
size_t priv_data_size,
|
|
|
|
ethdev_bus_specific_init ethdev_bus_specific_init,
|
|
|
|
void *bus_init_params,
|
|
|
|
ethdev_init_t ethdev_init, void *init_params)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *ethdev;
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
|
|
|
|
|
|
|
|
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
|
|
|
|
ethdev = rte_eth_dev_allocate(name);
|
2018-09-24 14:43:24 +01:00
|
|
|
if (!ethdev)
|
|
|
|
return -ENODEV;
|
2018-04-26 11:40:59 +01:00
|
|
|
|
|
|
|
if (priv_data_size) {
|
|
|
|
ethdev->data->dev_private = rte_zmalloc_socket(
|
|
|
|
name, priv_data_size, RTE_CACHE_LINE_SIZE,
|
|
|
|
device->numa_node);
|
|
|
|
|
|
|
|
if (!ethdev->data->dev_private) {
|
2020-07-02 12:17:05 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"failed to allocate private data\n");
|
2018-04-26 11:40:59 +01:00
|
|
|
retval = -ENOMEM;
|
2018-10-19 04:07:55 +02:00
|
|
|
goto probe_failed;
|
2018-04-26 11:40:59 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ethdev = rte_eth_dev_attach_secondary(name);
|
|
|
|
if (!ethdev) {
|
2020-07-02 12:17:05 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"secondary process attach failed, ethdev doesn't exist\n");
|
2018-09-24 14:43:24 +01:00
|
|
|
return -ENODEV;
|
2018-04-26 11:40:59 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ethdev->device = device;
|
|
|
|
|
|
|
|
if (ethdev_bus_specific_init) {
|
|
|
|
retval = ethdev_bus_specific_init(ethdev, bus_init_params);
|
|
|
|
if (retval) {
|
2020-07-02 12:17:05 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"ethdev bus specific initialisation failed\n");
|
2018-04-26 11:40:59 +01:00
|
|
|
goto probe_failed;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
retval = ethdev_init(ethdev, init_params);
|
|
|
|
if (retval) {
|
2020-07-02 12:17:05 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
|
2018-04-26 11:40:59 +01:00
|
|
|
goto probe_failed;
|
|
|
|
}
|
|
|
|
|
2018-05-11 01:58:30 +02:00
|
|
|
rte_eth_dev_probing_finish(ethdev);
|
|
|
|
|
2018-04-26 11:40:59 +01:00
|
|
|
return retval;
|
|
|
|
|
2018-10-19 04:07:55 +02:00
|
|
|
probe_failed:
|
2018-04-26 11:40:59 +01:00
|
|
|
rte_eth_dev_release_port(ethdev);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2019-06-29 13:58:52 +02:00
|
|
|
int
|
2018-04-26 11:40:59 +01:00
|
|
|
rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
|
|
|
|
ethdev_uninit_t ethdev_uninit)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ethdev = rte_eth_dev_allocated(ethdev->data->name);
|
|
|
|
if (!ethdev)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
|
2018-10-28 01:46:50 +00:00
|
|
|
|
|
|
|
ret = ethdev_uninit(ethdev);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2018-04-26 11:40:59 +01:00
|
|
|
|
|
|
|
return rte_eth_dev_release_port(ethdev);
|
|
|
|
}
|
|
|
|
|
2015-07-20 11:02:26 +08:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
|
2015-07-20 11:02:26 +08:00
|
|
|
int epfd, int op, void *data)
|
|
|
|
{
|
|
|
|
uint32_t vec;
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
struct rte_intr_handle *intr_handle;
|
|
|
|
int rc;
|
|
|
|
|
2016-05-18 21:15:11 +02:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-07-20 11:02:26 +08:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
if (queue_id >= dev->data->nb_rx_queues) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
|
2015-07-20 11:02:26 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2016-12-23 16:58:09 +01:00
|
|
|
if (!dev->intr_handle) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
|
2016-12-23 16:58:09 +01:00
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
|
|
|
intr_handle = dev->intr_handle;
|
2015-07-20 11:02:26 +08:00
|
|
|
if (!intr_handle->intr_vec) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
|
2015-07-20 11:02:26 +08:00
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
|
|
|
|
vec = intr_handle->intr_vec[queue_id];
|
|
|
|
rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
|
|
|
|
if (rc && rc != -EEXIST) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"p %u q %u rx ctl error op %d epfd %d vec %u\n",
|
|
|
|
port_id, queue_id, op, epfd, vec);
|
2015-07-20 11:02:26 +08:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_rx_intr_enable(uint16_t port_id,
|
2015-07-20 11:02:26 +08:00
|
|
|
uint16_t queue_id)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2020-10-13 19:50:55 +08:00
|
|
|
int ret;
|
2015-07-20 11:02:26 +08:00
|
|
|
|
2016-05-18 21:15:11 +02:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-07-20 11:02:26 +08:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2020-10-13 19:50:55 +08:00
|
|
|
ret = eth_dev_validate_rx_queue(dev, queue_id);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
|
|
|
|
queue_id));
|
2015-07-20 11:02:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_dev_rx_intr_disable(uint16_t port_id,
|
2015-07-20 11:02:26 +08:00
|
|
|
uint16_t queue_id)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2020-10-13 19:50:55 +08:00
|
|
|
int ret;
|
2015-07-20 11:02:26 +08:00
|
|
|
|
2016-05-18 21:15:11 +02:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-07-20 11:02:26 +08:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
|
2020-10-13 19:50:55 +08:00
|
|
|
ret = eth_dev_validate_rx_queue(dev, queue_id);
|
|
|
|
if (ret != 0)
|
|
|
|
return ret;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
|
2018-01-20 21:12:22 +00:00
|
|
|
return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
|
|
|
|
queue_id));
|
2015-07-20 11:02:26 +08:00
|
|
|
}
|
|
|
|
|
2014-06-16 15:31:43 +08:00
|
|
|
|
2018-03-20 16:34:04 +00:00
|
|
|
const struct rte_eth_rxtx_callback *
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
|
2015-03-12 16:54:28 +00:00
|
|
|
rte_rx_callback_fn fn, void *user_param)
|
2015-02-23 18:30:09 +00:00
|
|
|
{
|
|
|
|
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
|
|
|
|
rte_errno = ENOTSUP;
|
|
|
|
return NULL;
|
|
|
|
#endif
|
2019-10-30 23:53:11 +00:00
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-02-23 18:30:09 +00:00
|
|
|
/* check input parameters */
|
2015-02-26 08:00:32 -06:00
|
|
|
if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
|
2015-02-23 18:30:09 +00:00
|
|
|
queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
|
|
|
|
rte_errno = EINVAL;
|
|
|
|
return NULL;
|
|
|
|
}
|
2019-10-30 23:53:11 +00:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
|
|
|
|
rte_errno = EINVAL;
|
|
|
|
return NULL;
|
|
|
|
}
|
2015-02-23 18:30:09 +00:00
|
|
|
struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
|
|
|
|
|
|
|
|
if (cb == NULL) {
|
|
|
|
rte_errno = ENOMEM;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-03-12 16:54:28 +00:00
|
|
|
cb->fn.rx = fn;
|
2015-02-23 18:30:09 +00:00
|
|
|
cb->param = user_param;
|
2015-07-10 14:08:13 +01:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_rx_cb_lock);
|
2015-07-10 14:08:13 +01:00
|
|
|
/* Add the callbacks in FIFO order. */
|
|
|
|
struct rte_eth_rxtx_callback *tail =
|
|
|
|
rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
|
|
|
|
|
|
|
|
if (!tail) {
|
2020-10-13 11:25:37 -05:00
|
|
|
/* Stores to cb->fn and cb->param should complete before
|
|
|
|
* cb is visible to data plane.
|
|
|
|
*/
|
|
|
|
__atomic_store_n(
|
|
|
|
&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
|
|
|
|
cb, __ATOMIC_RELEASE);
|
2015-07-10 14:08:13 +01:00
|
|
|
|
|
|
|
} else {
|
|
|
|
while (tail->next)
|
|
|
|
tail = tail->next;
|
2020-10-13 11:25:37 -05:00
|
|
|
/* Stores to cb->fn and cb->param should complete before
|
|
|
|
* cb is visible to data plane.
|
|
|
|
*/
|
|
|
|
__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
|
2015-07-10 14:08:13 +01:00
|
|
|
}
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_rx_cb_lock);
|
2016-06-15 15:06:19 +01:00
|
|
|
|
|
|
|
return cb;
|
|
|
|
}
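
/*
 * Illustrative usage sketch, not part of the library: a hypothetical Rx
 * callback that only counts packets, attached to queue 0 with
 * rte_eth_add_rx_callback(). The counter and both function names are
 * assumptions made for this example.
 */
static uint64_t example_rx_count;

static uint16_t
example_count_rx(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(user_param);

	example_rx_count += nb_pkts;
	return nb_pkts;	/* keep every packet in the burst */
}

static const struct rte_eth_rxtx_callback *
example_attach_rx_counter(uint16_t port_id)
{
	return rte_eth_add_rx_callback(port_id, 0, example_count_rx, NULL);
}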
|
|
|
|
|
2018-03-20 16:34:04 +00:00
|
|
|
const struct rte_eth_rxtx_callback *
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
|
2016-06-15 15:06:19 +01:00
|
|
|
rte_rx_callback_fn fn, void *user_param)
|
|
|
|
{
|
|
|
|
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
|
|
|
|
rte_errno = ENOTSUP;
|
|
|
|
return NULL;
|
|
|
|
#endif
|
|
|
|
/* check input parameters */
|
|
|
|
if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
|
|
|
|
queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
|
|
|
|
rte_errno = EINVAL;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
|
|
|
|
|
|
|
|
if (cb == NULL) {
|
|
|
|
rte_errno = ENOMEM;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
cb->fn.rx = fn;
|
|
|
|
cb->param = user_param;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_rx_cb_lock);
|
2020-03-10 09:24:05 -07:00
|
|
|
/* Add the callback at the first position */
|
2016-06-15 15:06:19 +01:00
|
|
|
cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
|
2020-10-13 11:25:36 -05:00
|
|
|
/* Stores to cb->fn, cb->param and cb->next should complete before
|
|
|
|
* cb is visible to data plane threads.
|
|
|
|
*/
|
|
|
|
__atomic_store_n(
|
|
|
|
&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
|
|
|
|
cb, __ATOMIC_RELEASE);
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_rx_cb_lock);
|
2015-07-10 14:08:13 +01:00
|
|
|
|
2015-02-23 18:30:09 +00:00
|
|
|
return cb;
|
|
|
|
}
|
|
|
|
|
2018-03-20 16:34:04 +00:00
|
|
|
const struct rte_eth_rxtx_callback *
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
|
2015-03-12 16:54:28 +00:00
|
|
|
rte_tx_callback_fn fn, void *user_param)
|
2015-02-23 18:30:09 +00:00
|
|
|
{
|
|
|
|
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
|
|
|
|
rte_errno = ENOTSUP;
|
|
|
|
return NULL;
|
|
|
|
#endif
|
2019-10-30 23:53:11 +00:00
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-02-23 18:30:09 +00:00
|
|
|
/* check input parameters */
|
2015-02-26 08:00:32 -06:00
|
|
|
if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
|
2015-02-23 18:30:09 +00:00
|
|
|
queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
|
|
|
|
rte_errno = EINVAL;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-10-30 23:53:11 +00:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
|
|
|
|
rte_errno = EINVAL;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-02-23 18:30:09 +00:00
|
|
|
struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
|
|
|
|
|
|
|
|
if (cb == NULL) {
|
|
|
|
rte_errno = ENOMEM;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-03-12 16:54:28 +00:00
|
|
|
cb->fn.tx = fn;
|
2015-02-23 18:30:09 +00:00
|
|
|
cb->param = user_param;
|
2015-07-10 14:08:13 +01:00
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_tx_cb_lock);
|
2015-07-10 14:08:13 +01:00
|
|
|
/* Add the callbacks in FIFO order. */
|
|
|
|
struct rte_eth_rxtx_callback *tail =
|
|
|
|
rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
|
|
|
|
|
|
|
|
if (!tail) {
|
2020-10-13 11:25:37 -05:00
|
|
|
/* Stores to cb->fn and cb->param should complete before
|
|
|
|
* cb is visible to data plane.
|
|
|
|
*/
|
|
|
|
__atomic_store_n(
|
|
|
|
&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
|
|
|
|
cb, __ATOMIC_RELEASE);
|
2015-07-10 14:08:13 +01:00
|
|
|
|
|
|
|
} else {
|
|
|
|
while (tail->next)
|
|
|
|
tail = tail->next;
|
2020-10-13 11:25:37 -05:00
|
|
|
/* Stores to cb->fn and cb->param should complete before
|
|
|
|
* cb is visible to data plane.
|
|
|
|
*/
|
|
|
|
__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
|
2015-07-10 14:08:13 +01:00
|
|
|
}
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_tx_cb_lock);
|
2015-07-10 14:08:13 +01:00
|
|
|
|
2015-02-23 18:30:09 +00:00
|
|
|
return cb;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
|
2018-03-20 16:34:04 +00:00
|
|
|
const struct rte_eth_rxtx_callback *user_cb)
|
2015-02-23 18:30:09 +00:00
|
|
|
{
|
|
|
|
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
|
2015-04-09 14:29:42 -07:00
|
|
|
return -ENOTSUP;
|
2015-02-23 18:30:09 +00:00
|
|
|
#endif
|
|
|
|
/* Check input parameters. */
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2016-05-18 21:15:11 +02:00
|
|
|
if (user_cb == NULL ||
|
|
|
|
queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2015-02-23 18:30:09 +00:00
|
|
|
|
|
|
|
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
|
2016-06-15 15:06:18 +01:00
|
|
|
struct rte_eth_rxtx_callback *cb;
|
|
|
|
struct rte_eth_rxtx_callback **prev_cb;
|
|
|
|
int ret = -EINVAL;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_rx_cb_lock);
|
2016-06-15 15:06:18 +01:00
|
|
|
prev_cb = &dev->post_rx_burst_cbs[queue_id];
|
|
|
|
for (; *prev_cb != NULL; prev_cb = &cb->next) {
|
|
|
|
cb = *prev_cb;
|
2015-02-23 18:30:09 +00:00
|
|
|
if (cb == user_cb) {
|
2016-06-15 15:06:18 +01:00
|
|
|
/* Remove the user cb from the callback list. */
|
2020-10-13 11:25:37 -05:00
|
|
|
__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
|
2016-06-15 15:06:18 +01:00
|
|
|
ret = 0;
|
|
|
|
break;
|
2015-02-23 18:30:09 +00:00
|
|
|
}
|
2016-06-15 15:06:18 +01:00
|
|
|
}
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_rx_cb_lock);
|
2015-02-23 18:30:09 +00:00
|
|
|
|
2016-06-15 15:06:18 +01:00
|
|
|
return ret;
|
2015-02-23 18:30:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
|
2018-03-20 16:34:04 +00:00
|
|
|
const struct rte_eth_rxtx_callback *user_cb)
|
2015-02-23 18:30:09 +00:00
|
|
|
{
|
|
|
|
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
|
2015-04-09 14:29:42 -07:00
|
|
|
return -ENOTSUP;
|
2015-02-23 18:30:09 +00:00
|
|
|
#endif
|
|
|
|
/* Check input parameters. */
|
2020-10-13 15:53:38 +01:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2016-05-18 21:15:11 +02:00
|
|
|
if (user_cb == NULL ||
|
|
|
|
queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
|
2015-04-09 14:29:42 -07:00
|
|
|
return -EINVAL;
|
2015-02-23 18:30:09 +00:00
|
|
|
|
|
|
|
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
|
2016-06-15 15:06:18 +01:00
|
|
|
int ret = -EINVAL;
|
|
|
|
struct rte_eth_rxtx_callback *cb;
|
|
|
|
struct rte_eth_rxtx_callback **prev_cb;
|
|
|
|
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_lock(ð_dev_tx_cb_lock);
|
2016-06-15 15:06:18 +01:00
|
|
|
prev_cb = &dev->pre_tx_burst_cbs[queue_id];
|
|
|
|
for (; *prev_cb != NULL; prev_cb = &cb->next) {
|
|
|
|
cb = *prev_cb;
|
2015-02-23 18:30:09 +00:00
|
|
|
if (cb == user_cb) {
|
2016-06-15 15:06:18 +01:00
|
|
|
/* Remove the user cb from the callback list. */
|
2020-10-13 11:25:37 -05:00
|
|
|
__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
|
2016-06-15 15:06:18 +01:00
|
|
|
ret = 0;
|
|
|
|
break;
|
2015-02-23 18:30:09 +00:00
|
|
|
}
|
2016-06-15 15:06:18 +01:00
|
|
|
}
|
2020-10-13 17:56:58 +01:00
|
|
|
rte_spinlock_unlock(ð_dev_tx_cb_lock);
|
2015-02-23 18:30:09 +00:00
|
|
|
|
2016-06-15 15:06:18 +01:00
|
|
|
return ret;
|
2015-02-23 18:30:09 +00:00
|
|
|
}
|
2015-05-29 10:56:25 +02:00
|
|
|
|
2015-10-27 12:51:43 +00:00
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
|
2015-10-27 12:51:43 +00:00
|
|
|
struct rte_eth_rxq_info *qinfo)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-10-27 12:51:43 +00:00
|
|
|
|
|
|
|
if (qinfo == NULL)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
if (queue_id >= dev->data->nb_rx_queues) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
|
2015-10-27 12:51:43 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-10-01 19:14:02 +01:00
|
|
|
if (dev->data->rx_queues == NULL ||
|
|
|
|
dev->data->rx_queues[queue_id] == NULL) {
|
2020-08-24 19:01:30 +08:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Rx queue %"PRIu16" of device with port_id=%"
|
|
|
|
PRIu16" has not been set up\n",
|
|
|
|
queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-10-30 23:53:11 +00:00
|
|
|
if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
|
|
|
|
RTE_ETHDEV_LOG(INFO,
|
|
|
|
"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
|
|
|
|
queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
|
2015-10-27 12:51:43 +00:00
|
|
|
|
|
|
|
memset(qinfo, 0, sizeof(*qinfo));
|
|
|
|
dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2017-09-29 15:17:24 +08:00
|
|
|
rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
|
2015-10-27 12:51:43 +00:00
|
|
|
struct rte_eth_txq_info *qinfo)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2015-10-27 12:51:43 +00:00
|
|
|
|
|
|
|
if (qinfo == NULL)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
if (queue_id >= dev->data->nb_tx_queues) {
|
2018-06-19 02:04:56 +01:00
|
|
|
RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
|
2015-10-27 12:51:43 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-10-01 19:14:02 +01:00
|
|
|
if (dev->data->tx_queues == NULL ||
|
|
|
|
dev->data->tx_queues[queue_id] == NULL) {
|
2020-08-24 19:01:30 +08:00
|
|
|
RTE_ETHDEV_LOG(ERR,
|
|
|
|
"Tx queue %"PRIu16" of device with port_id=%"
|
|
|
|
PRIu16" has not been set up\n",
|
|
|
|
queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-10-30 23:53:11 +00:00
|
|
|
if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
|
|
|
|
RTE_ETHDEV_LOG(INFO,
|
|
|
|
"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
|
|
|
|
queue_id, port_id);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
|
2015-10-27 12:51:43 +00:00
|
|
|
|
|
|
|
memset(qinfo, 0, sizeof(*qinfo));
|
|
|
|
dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
|
2018-05-03 14:03:25 +08:00
|
|
|
|
2015-10-27 12:51:43 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-10-15 15:51:30 +08:00
|
|
|
int
rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_burst_mode *mode)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (mode == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
	memset(mode, 0, sizeof(*mode));
	return eth_err(port_id,
		       dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
}

int
rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
	struct rte_eth_burst_mode *mode)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (mode == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
	memset(mode, 0, sizeof(*mode));
	return eth_err(port_id,
		       dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
}

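/*
 * Illustrative usage sketch, not part of this file: the burst mode reported
 * here is a human-readable description of the Rx/Tx path the PMD selected
 * (for example a scalar versus a vectorized routine). A monitoring tool
 * might dump it as follows (assumes a started port, error handling trimmed):
 *
 *	struct rte_eth_burst_mode bmode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, 0, &bmode) == 0)
 *		printf("Rx burst mode: %s\n", bmode.info);
 */
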
int
rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
		struct rte_power_monitor_cond *pmc)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (pmc == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid power monitor condition=%p\n",
				pmc);
		return -EINVAL;
	}

	return eth_err(port_id,
		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id],
			pmc));
}

int
rte_eth_dev_set_mc_addr_list(uint16_t port_id,
			     struct rte_ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
						mc_addr_set, nb_mc_addr));
}

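/*
 * Illustrative usage sketch, not part of this file: the multicast filter is
 * programmed as a whole list, so an application rebuilds the full set on
 * every change. A minimal example installing one group address might look
 * like this (hypothetical address, error handling trimmed):
 *
 *	struct rte_ether_addr mc_addrs[1] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *	};
 *
 *	rte_eth_dev_set_mc_addr_list(port_id, mc_addrs, 1);
 */
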
int
rte_eth_timesync_enable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
}

int
rte_eth_timesync_disable(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
}

int
rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
				(dev, timestamp, flags));
}

int
rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
				   struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
				(dev, timestamp));
}

int
rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
								      delta));
}

int
rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
								    timestamp));
}

int
rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
								     timestamp));
}

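/*
 * Illustrative usage sketch, not part of this file: a simple IEEE 1588 style
 * flow enables timesync, polls the device clock, and applies a correction
 * computed by the application (the 1000 ns offset is made up, error handling
 * trimmed):
 *
 *	struct timespec now;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_read_time(port_id, &now);
 *	rte_eth_timesync_adjust_time(port_id, 1000);	// shift clock by +1 us
 *	rte_eth_timesync_disable(port_id);
 */
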
int
rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
}

int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
}

int
rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
}

int
rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
}

int
rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}

int
rte_eth_dev_get_module_info(uint16_t port_id,
			    struct rte_eth_dev_module_info *modinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
	return (*dev->dev_ops->get_module_info)(dev, modinfo);
}

int
rte_eth_dev_get_module_eeprom(uint16_t port_id,
			      struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_module_eeprom)(dev, info);
}

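/*
 * Illustrative usage sketch, not part of this file: reading a transceiver
 * module EEPROM is a two step operation, first query the module type and
 * EEPROM size, then read the contents into a caller supplied buffer
 * (error handling trimmed):
 *
 *	struct rte_eth_dev_module_info modinfo;
 *	struct rte_dev_eeprom_info eeprom_info;
 *	uint8_t buf[1024];
 *
 *	rte_eth_dev_get_module_info(port_id, &modinfo);
 *	memset(&eeprom_info, 0, sizeof(eeprom_info));
 *	eeprom_info.data = buf;
 *	eeprom_info.offset = 0;
 *	eeprom_info.length = RTE_MIN(modinfo.eeprom_len, (uint32_t)sizeof(buf));
 *	rte_eth_dev_get_module_eeprom(port_id, &eeprom_info);
 */
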
int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}

static void
eth_dev_adjust_nb_desc(uint16_t *nb_desc,
		const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (nb_rx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	return 0;
}

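/*
 * Illustrative usage sketch, not part of this file: applications typically
 * clamp their preferred ring sizes to the device limits before setting up
 * queues, as most DPDK examples do (error handling trimmed):
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	// nb_rxd and nb_txd are now aligned and bounded per rx/tx_desc_lim
 *	// and can be passed to rte_eth_rx_queue_setup()/tx_queue_setup().
 */
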
int
rte_eth_dev_hairpin_capability_get(uint16_t port_id,
				   struct rte_eth_hairpin_cap *cap)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
	memset(cap, 0, sizeof(*cap));
	return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
}

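/*
 * Illustrative usage sketch, not part of this file: before configuring
 * hairpin (Rx-to-Tx loopback) queues, an application probes whether the
 * device supports them at all. The field name below follows the
 * rte_eth_hairpin_cap definition as understood here and should be checked
 * against the header (error handling trimmed):
 *
 *	struct rte_eth_hairpin_cap hp_cap;
 *
 *	if (rte_eth_dev_hairpin_capability_get(port_id, &hp_cap) == 0 &&
 *			hp_cap.max_nb_queues > 0)
 *		printf("hairpin supported, up to %u queues\n",
 *			hp_cap.max_nb_queues);
 */
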
int
rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
	if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
		return 1;
	return 0;
}

int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (pool == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}

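/*
 * Illustrative usage sketch, not part of this file: an application choosing
 * a mempool ops name (here the common "ring_mp_mc" ops, used purely as an
 * example) can ask the port whether that handler is usable before creating
 * its packet pool:
 *
 *	int ret = rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc");
 *
 *	if (ret < 0)
 *		printf("pool ops not supported by this port\n");
 *	else
 *		printf("pool ops supported%s\n",
 *			ret == 1 ? " (best choice or no preference)" : "");
 */
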
/**
 * A set of values to describe the possible states of a switch domain.
 */
enum rte_eth_switch_domain_state {
	RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
	RTE_ETH_SWITCH_DOMAIN_ALLOCATED
};

/**
 * Array of switch domains available for allocation. Array is sized to
 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
 * ethdev ports in a single process.
 */
static struct rte_eth_dev_switch {
	enum rte_eth_switch_domain_state state;
} eth_dev_switch_domains[RTE_MAX_ETHPORTS];

int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
	uint16_t i;

	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (eth_dev_switch_domains[i].state ==
			RTE_ETH_SWITCH_DOMAIN_UNUSED) {
			eth_dev_switch_domains[i].state =
				RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
			*domain_id = i;
			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_eth_switch_domain_free(uint16_t domain_id)
{
	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
		domain_id >= RTE_MAX_ETHPORTS)
		return -EINVAL;

	if (eth_dev_switch_domains[domain_id].state !=
		RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
		return -EINVAL;

	eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;

	return 0;
}

static int
eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
{
	int state;
	struct rte_kvargs_pair *pair;
	char *letter;

	arglist->str = strdup(str_in);
	if (arglist->str == NULL)
		return -ENOMEM;

	letter = arglist->str;
	state = 0;
	arglist->count = 0;
	pair = &arglist->pairs[0];
	while (1) {
		switch (state) {
		case 0: /* Initial */
			if (*letter == '=')
				return -EINVAL;
			else if (*letter == '\0')
				return 0;

			state = 1;
			pair->key = letter;
			/* fall-thru */

		case 1: /* Parsing key */
			if (*letter == '=') {
				*letter = '\0';
				pair->value = letter + 1;
				state = 2;
			} else if (*letter == ',' || *letter == '\0')
				return -EINVAL;
			break;

		case 2: /* Parsing value */
			if (*letter == '[')
				state = 3;
			else if (*letter == ',') {
				*letter = '\0';
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			} else if (*letter == '\0') {
				letter--;
				arglist->count++;
				pair = &arglist->pairs[arglist->count];
				state = 0;
			}
			break;

		case 3: /* Parsing list */
			if (*letter == ']')
				state = 2;
			else if (*letter == '\0')
				return -EINVAL;
			break;
		}
		letter++;
	}
}

int
rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
{
	struct rte_kvargs args;
	struct rte_kvargs_pair *pair;
	unsigned int i;
	int result = 0;

	memset(eth_da, 0, sizeof(*eth_da));

	result = eth_dev_devargs_tokenise(&args, dargs);
	if (result < 0)
		goto parse_cleanup;

	for (i = 0; i < args.count; i++) {
		pair = &args.pairs[i];
		if (strcmp("representor", pair->key) == 0) {
			result = rte_eth_devargs_parse_representor_ports(
					pair->value, eth_da);
			if (result < 0)
				goto parse_cleanup;
		}
	}

parse_cleanup:
	if (args.str)
		free(args.str);

	return result;
}

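/*
 * Illustrative usage sketch, not part of this file: a PMD probing a device
 * whose EAL device arguments carry a representor list would parse them
 * roughly as follows (the devargs string is a made-up example, and the
 * rte_eth_devargs field name should be checked against ethdev_driver.h;
 * error handling trimmed):
 *
 *	struct rte_eth_devargs eth_da;
 *
 *	if (rte_eth_devargs_parse("representor=[0-3]", &eth_da) == 0)
 *		printf("%u representor ports requested\n",
 *			eth_da.nb_representor_ports);
 */
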
static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int port_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	RTE_ETH_FOREACH_DEV(port_id)
		rte_tel_data_add_array_int(d, port_id);
	return 0;
}

static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
		const char *stat_name)
{
	int q;
	struct rte_tel_data *q_data = rte_tel_data_alloc();
	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		rte_tel_data_add_array_u64(q_data, q_stats[q]);
	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	/* names and values were copied into the telemetry data, so the
	 * shared allocation can be released on the success path too.
	 */
	free(eth_xstats);
	return 0;
}

static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	static const char *status_str = "status";
	int ret, port_id;
	struct rte_eth_link link;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	if (!link.link_status) {
		rte_tel_data_add_dict_string(d, status_str, "DOWN");
		return 0;
	}
	rte_tel_data_add_dict_string(d, status_str, "UP");
	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
	rte_tel_data_add_dict_string(d, "duplex",
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
				"full-duplex" : "half-duplex");
	return 0;
}

int
rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
				  struct rte_hairpin_peer_info *cur_info,
				  struct rte_hairpin_peer_info *peer_info,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* Current queue information is not mandatory. */
	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[peer_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
					cur_info, peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
				struct rte_hairpin_peer_info *peer_info,
				uint32_t direction)
{
	struct rte_eth_dev *dev;

	if (peer_info == NULL)
		return -EINVAL;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
							peer_info, direction);
}

int
rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
				  uint32_t direction)
{
	struct rte_eth_dev *dev;

	/* No need to check the validity again. */
	dev = &rte_eth_devices[cur_port];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
				-ENOTSUP);

	return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
							  direction);
}

RTE_LOG_REGISTER(rte_eth_dev_logtype, lib.ethdev, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
}

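/*
 * Illustrative usage sketch, not part of this file: other libraries or
 * applications can expose their own state over the same telemetry socket by
 * registering a command the same way. The "/myapp/status" command and its
 * handler below are hypothetical; a constructor is used here just as this
 * file does:
 *
 *	static int
 *	myapp_handle_status(const char *cmd __rte_unused,
 *			const char *params __rte_unused, struct rte_tel_data *d)
 *	{
 *		rte_tel_data_start_dict(d);
 *		rte_tel_data_add_dict_string(d, "state", "running");
 *		return 0;
 *	}
 *
 *	RTE_INIT(myapp_init_telemetry)
 *	{
 *		rte_telemetry_register_cmd("/myapp/status", myapp_handle_status,
 *				"Returns application status. Takes no parameters");
 *	}
 */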