/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
		sizeof(rte_txq_stats_strings[0]))


/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, the pointer to the parameters for the callback,
 * and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

uint8_t
rte_eth_find_next(uint8_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
	       rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}
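
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * rte_eth_find_next() is the building block for iterating over all
 * attached ports, in the style of the RTE_ETH_FOREACH_DEV() loop:
 *
 *	uint8_t pid;
 *	for (pid = rte_eth_find_next(0);
 *	     pid < RTE_MAX_ETHPORTS;
 *	     pid = rte_eth_find_next(pid + 1))
 *		printf("port %u is attached\n", pid);
 */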

static void
rte_eth_dev_data_alloc(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
	if (mz == NULL)
		rte_panic("Cannot allocate memzone for ethernet port data\n");

	rte_eth_dev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_eth_dev_data, 0,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}
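
/*
 * Design note: rte_eth_devices[] is a per-process array (it holds
 * function pointers, which are only valid within one process), while the
 * rte_eth_dev_data array lives in a shared memzone. The primary process
 * reserves and zeroes it; secondary processes merely look it up, so all
 * processes see the same per-port data.
 */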

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED)
			return i;
	}
	return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint8_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

	eth_dev->data = &rte_eth_dev_data[port_id];
	eth_dev->state = RTE_ETH_DEV_ATTACHED;
	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	eth_dev_last_created_port = port_id;
	nb_ports++;

	return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint8_t port_id;
	struct rte_eth_dev *eth_dev;

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
		return NULL;
	}

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	if (rte_eth_dev_allocated(name) != NULL) {
		RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
				name);
		return NULL;
	}

	memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data));
	eth_dev = eth_dev_get(port_id);
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->data->mtu = ETHER_MTU;

	return eth_dev;
}
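
/*
 * Usage sketch (hypothetical PMD code, not part of this file): a driver's
 * probe path typically allocates a port and then fills in its ops and
 * private data; the "foo" names below are made up for illustration:
 *
 *	struct rte_eth_dev *dev = rte_eth_dev_allocate("foo0");
 *	if (dev == NULL)
 *		return -ENOMEM;
 *	dev->dev_ops = &foo_eth_dev_ops;
 *	dev->data->dev_private = foo_private;
 */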

/*
 * Attach to a port already registered by the primary process, which
 * makes sure that the same device would have the same port id both
 * in the primary and secondary process.
 */
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
	uint8_t i;
	struct rte_eth_dev *eth_dev;

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (strcmp(rte_eth_dev_data[i].name, name) == 0)
			break;
	}
	if (i == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE(
			"device %s is not driven by the primary process\n",
			name);
		return NULL;
	}

	eth_dev = eth_dev_get(i);
	RTE_ASSERT(eth_dev->data->port_id == i);

	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev->state = RTE_ETH_DEV_UNUSED;
	nb_ports--;
	return 0;
}

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
	return nb_ports;
}

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = rte_eth_dev_data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
	int i;

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	if (!nb_ports)
		return -ENODEV;

	*port_id = RTE_MAX_ETHPORTS;
	RTE_ETH_FOREACH_DEV(i) {
		if (!strncmp(name,
			rte_eth_dev_data[i].name, strlen(name))) {

			*port_id = i;

			return 0;
		}
	}
	return -ENODEV;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
	uint32_t dev_flags;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	switch (rte_eth_devices[port_id].data->kdrv) {
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
	case RTE_KDRV_NIC_UIO:
	case RTE_KDRV_NONE:
	case RTE_KDRV_VFIO:
		break;
	default:
		return -ENOTSUP;
	}
	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
		(!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
		return 0;
	else
		return 1;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
	int ret = -1;
	int current = rte_eth_dev_count();
	char *name = NULL;
	char *args = NULL;

	if ((devargs == NULL) || (port_id == NULL)) {
		ret = -EINVAL;
		goto err;
	}

	/* parse devargs, then retrieve device name and args */
	if (rte_eal_parse_devargs_str(devargs, &name, &args))
		goto err;

	ret = rte_eal_dev_attach(name, args);
	if (ret < 0)
		goto err;

	/* no point looking at the port count if no port exists */
	if (!rte_eth_dev_count()) {
		RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
		ret = -1;
		goto err;
	}

	/* if nothing happened, there is a bug here, since some driver told us
	 * it did attach a device, but did not create a port.
	 */
	if (current == rte_eth_dev_count()) {
		ret = -1;
		goto err;
	}

	*port_id = eth_dev_last_created_port;
	ret = 0;

err:
	free(name);
	free(args);
	return ret;
}
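
/*
 * Usage sketch (hypothetical application code): hot-plugging a virtual
 * device from a devargs string; the device name and argument below are
 * made up for illustration.
 *
 *	uint8_t port_id;
 *	if (rte_eth_dev_attach("net_pcap0,iface=eth0", &port_id) == 0)
 *		printf("attached as port %u\n", port_id);
 */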

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
	int ret = -1;

	if (name == NULL) {
		ret = -EINVAL;
		goto err;
	}

	/* FIXME: move this to eal, once device flags are relocated there */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
		 "%s", rte_eth_devices[port_id].data->name);
	ret = rte_eal_dev_detach(name);
	if (ret < 0)
		goto err;

	return 0;

err:
	return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);

		rte_free(dev->data->rx_queues);
		dev->data->rx_queues = NULL;
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}
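
/*
 * Note on the queue-config helpers (the RX one above, the TX twin further
 * below): both distinguish three cases: first-time configuration
 * (allocate the queue-pointer array), re-configuration (release queues
 * beyond the new count and realloc the array), and teardown to zero
 * queues (release every queue and free the array).
 */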

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
				sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);

		rte_free(dev->data->tx_queues);
		dev->data->tx_queues = NULL;
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}
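
/*
 * Usage sketch (hypothetical application code): building a link_speeds
 * mask for struct rte_eth_conf from numeric speeds, e.g. advertising
 * 1G and 10G full duplex:
 *
 *	uint32_t speeds = rte_eth_speed_bitflag(ETH_SPEED_NUM_1G, 1) |
 *			  rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, 1);
 */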

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_rx_q == 0 && nb_tx_q == 0) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
		return -EINVAL;
	}

	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
		(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					dev->data->drv_name);
		return -EINVAL;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
		(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
					dev->data->drv_name);
		return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
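
/*
 * Usage sketch (hypothetical application code): a typical single-queue
 * configuration with an all-defaults rte_eth_conf:
 *
 *	struct rte_eth_conf conf;
 *	memset(&conf, 0, sizeof(conf));
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */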

void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
			"port %d must be stopped to allow reset\n",
			dev->data->port_id);
		return;
	}

	rte_eth_dev_rx_queue_config(dev, 0);
	rte_eth_dev_tx_queue_config(dev, 0);

	memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
}

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info.max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & 1ULL)
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}
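
/*
 * Usage sketch (hypothetical application code): the usual bring-up order
 * is configure, set up each queue, then start. The mempool and conf are
 * assumed to have been created elsewhere; NULL queue configs select the
 * driver defaults, as the setup functions below show.
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 128,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL);
 *	rte_eth_dev_start(port_id);
 */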

void
rte_eth_dev_stop(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	dev->data->nb_rx_queues = 0;
	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	dev->data->nb_tx_queues = 0;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	void **rxq;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
			"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
			"=%d)\n",
			mp->name,
			(int)mbp_buf_size,
			(int)(RTE_PKTMBUF_HEADROOM +
			      dev_info.min_rx_bufsize),
			(int)RTE_PKTMBUF_HEADROOM,
			(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
	    nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
	    nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	rxq = dev->data->rx_queues;
	if (rxq[rx_queue_id]) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
					-ENOTSUP);
		(*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
		rxq[rx_queue_id] = NULL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}
|
|
|
int
|
|
|
|
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
|
|
|
|
uint16_t nb_tx_desc, unsigned int socket_id,
|
|
|
|
const struct rte_eth_txconf *tx_conf)
|
|
|
|
{
|
|
|
|
struct rte_eth_dev *dev;
|
2014-10-01 09:49:04 +00:00
|
|
|
struct rte_eth_dev_info dev_info;
|
2016-11-24 11:26:45 +00:00
|
|
|
void **txq;
|
2012-09-04 12:54:00 +00:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
|
2015-02-25 19:32:18 +00:00
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
dev = &rte_eth_devices[port_id];
|
|
|
|
if (tx_queue_id >= dev->data->nb_tx_queues) {
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
|
2015-04-09 21:29:42 +00:00
|
|
|
return -EINVAL;
|
2012-09-04 12:54:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (dev->data->dev_started) {
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_PMD_DEBUG_TRACE(
|
2012-12-19 23:00:00 +00:00
|
|
|
"port %d must be stopped to allow configuration\n", port_id);
|
2012-09-04 12:54:00 +00:00
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
|
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
|
2014-10-01 09:49:04 +00:00
|
|
|
|
2014-10-20 17:26:35 +00:00
|
|
|
rte_eth_dev_info_get(port_id, &dev_info);
|
2014-10-01 09:49:04 +00:00
|
|
|
|
2015-11-20 10:26:37 +00:00
|
|
|
if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
|
|
|
|
nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
|
|
|
|
nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
|
2015-11-20 10:26:37 +00:00
|
|
|
"should be: <= %hu, = %hu, and a product of %hu\n",
|
|
|
|
nb_tx_desc,
|
|
|
|
dev_info.tx_desc_lim.nb_max,
|
|
|
|
dev_info.tx_desc_lim.nb_min,
|
|
|
|
dev_info.tx_desc_lim.nb_align);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2016-11-24 11:26:45 +00:00
|
|
|
txq = dev->data->tx_queues;
|
|
|
|
if (txq[tx_queue_id]) {
|
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
|
|
|
|
-ENOTSUP);
|
|
|
|
(*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
|
|
|
|
txq[tx_queue_id] = NULL;
|
|
|
|
}
|
|
|
|
|
2014-10-01 09:49:04 +00:00
|
|
|
if (tx_conf == NULL)
|
|
|
|
tx_conf = &dev_info.default_txconf;
|
|
|
|
|
2012-09-04 12:54:00 +00:00
|
|
|
return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
|
|
|
|
socket_id, tx_conf);
|
|
|
|
}
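
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * a minimal single-queue bring-up around the two setup calls above,
 * assuming "port_id" is a valid port and "mbuf_pool" an existing
 * mempool.  Passing NULL for the per-queue conf selects the driver
 * defaults reported by rte_eth_dev_info_get().
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port\n");
 *	if (rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *			NULL, mbuf_pool) < 0 ||
 *	    rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *			NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot set up queues\n");
 */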

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	/* Guard against a NULL buffer, mirroring the check done in
	 * rte_eth_tx_buffer_init().
	 */
	if (buffer == NULL)
		return -EINVAL;

	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}
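
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * allocate a buffer for up to 32 packets and count, rather than just
 * free, the packets that could not be sent.
 *
 *	static uint64_t tx_drops;
 *	struct rte_eth_dev_tx_buffer *buffer;
 *
 *	buffer = rte_zmalloc("tx_buffer",
 *			RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *	if (buffer == NULL || rte_eth_tx_buffer_init(buffer, 32) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot init TX buffer\n");
 *	rte_eth_tx_buffer_set_err_callback(buffer,
 *			rte_eth_tx_buffer_count_callback, &tx_drops);
 */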

int
rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	/* Validate input data. Bail if not valid or not supported. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);

	/* Call driver to free pending mbufs. */
	return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
			free_cnt);
}

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}
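
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * the enable call returns void, so reading back the cached state is the
 * only feedback available to the caller.
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	if (rte_eth_promiscuous_get(port_id) != 1)
 *		printf("port %u: promiscuous mode not active\n", port_id);
 */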

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}

static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}
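
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * poll the link without blocking until it comes up, e.g. after
 * rte_eth_dev_start().  Timeout handling is omitted for brevity.
 *
 *	struct rte_eth_link link;
 *
 *	do {
 *		rte_eth_link_get_nowait(port_id, &link);
 *		rte_delay_ms(100);
 *	} while (link.link_status == ETH_LINK_DOWN);
 *	printf("port %u: link up at %u Mbps\n", port_id, link.link_speed);
 */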

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}
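
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * dump the basic counters for one port.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx=%"PRIu64" tx=%"PRIu64" missed=%"PRIu64"\n",
 *			stats.ipackets, stats.opackets, stats.imissed);
 */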

void
rte_eth_stats_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
	dev->data->rx_mbuf_alloc_failed = 0;
}

static int
get_xstats_count(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names_by_id != NULL) {
		count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
				NULL, 0);
		if (count < 0)
			return count;
	}
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return count;
	} else
		count = 0;

	count += RTE_NB_STATS;
	count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
		 RTE_NB_RXQ_STATS;
	count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
		 RTE_NB_TXQ_STATS;
	return count;
}

int
rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
		uint64_t *id)
{
	int cnt_xstats, idx_xstat;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!id) {
		RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
		return -EINVAL;
	}

	if (!xstat_name) {
		RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
		return -EINVAL;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
	if (cnt_xstats < 0) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
		return -ENODEV;
	}

	/* Get id-name lookup table */
	struct rte_eth_xstat_name xstats_names[cnt_xstats];

	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
			port_id, xstats_names, cnt_xstats, NULL)) {
		RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
		return -1;
	}

	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
			*id = idx_xstat;
			return 0;
		}
	}

	return -EINVAL;
}
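
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * resolve one statistic by name, then fetch just that value through the
 * by-id interface.  "rx_good_packets" is one of the generic stat names
 * registered in rte_stats_strings.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *			&id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %"PRIu64"\n", value);
 */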

int
rte_eth_xstats_get_names_by_id(uint8_t port_id,
	struct rte_eth_xstat_name *xstats_names, unsigned int size,
	uint64_t *ids)
{
	/* Get all xstats */
	if (!ids) {
		struct rte_eth_dev *dev;
		int cnt_used_entries;
		int cnt_expected_entries;
		int cnt_driver_entries;
		uint32_t idx, id_queue;
		uint16_t num_q;

		cnt_expected_entries = get_xstats_count(port_id);
		if (xstats_names == NULL || cnt_expected_entries < 0 ||
				(int)size < cnt_expected_entries)
			return cnt_expected_entries;

		/* port_id checked in get_xstats_count() */
		dev = &rte_eth_devices[port_id];
		cnt_used_entries = 0;

		for (idx = 0; idx < RTE_NB_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"%s", rte_stats_strings[idx].name);
			cnt_used_entries++;
		}
		num_q = RTE_MIN(dev->data->nb_rx_queues,
				RTE_ETHDEV_QUEUE_STAT_CNTRS);
		for (id_queue = 0; id_queue < num_q; id_queue++) {
			for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
				snprintf(xstats_names[cnt_used_entries].name,
					sizeof(xstats_names[0].name),
					"rx_q%u%s",
					id_queue,
					rte_rxq_stats_strings[idx].name);
				cnt_used_entries++;
			}
		}
		num_q = RTE_MIN(dev->data->nb_tx_queues,
				RTE_ETHDEV_QUEUE_STAT_CNTRS);
		for (id_queue = 0; id_queue < num_q; id_queue++) {
			for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
				snprintf(xstats_names[cnt_used_entries].name,
					sizeof(xstats_names[0].name),
					"tx_q%u%s",
					id_queue,
					rte_txq_stats_strings[idx].name);
				cnt_used_entries++;
			}
		}

		if (dev->dev_ops->xstats_get_names_by_id != NULL) {
			/* If there are any driver-specific xstats, append
			 * them to the end of the list.
			 */
			cnt_driver_entries =
				(*dev->dev_ops->xstats_get_names_by_id)(
				dev,
				xstats_names + cnt_used_entries,
				NULL,
				size - cnt_used_entries);
			if (cnt_driver_entries < 0)
				return cnt_driver_entries;
			cnt_used_entries += cnt_driver_entries;

		} else if (dev->dev_ops->xstats_get_names != NULL) {
			/* If there are any driver-specific xstats, append
			 * them to the end of the list.
			 */
			cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
				dev,
				xstats_names + cnt_used_entries,
				size - cnt_used_entries);
			if (cnt_driver_entries < 0)
				return cnt_driver_entries;
			cnt_used_entries += cnt_driver_entries;
		}

		return cnt_used_entries;
	}
	/* Get only xstats given by IDS */
	else {
		uint16_t len, i;
		struct rte_eth_xstat_name *xstats_names_copy;

		len = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);

		xstats_names_copy =
			malloc(sizeof(struct rte_eth_xstat_name) * len);
		if (!xstats_names_copy) {
			RTE_PMD_DEBUG_TRACE(
			     "ERROR: can't allocate memory for xstats_names_copy\n");
			return -1;
		}

		rte_eth_xstats_get_names_by_id(port_id, xstats_names_copy,
				len, NULL);

		for (i = 0; i < size; i++) {
			if (ids[i] >= len) {
				RTE_PMD_DEBUG_TRACE(
					"ERROR: id value isn't valid\n");
				free(xstats_names_copy);
				return -1;
			}
			strcpy(xstats_names[i].name,
					xstats_names_copy[ids[i]].name);
		}
		free(xstats_names_copy);
		return size;
	}
}

int
rte_eth_xstats_get_names(uint8_t port_id,
	struct rte_eth_xstat_name *xstats_names,
	unsigned int size)
{
	struct rte_eth_dev *dev;
	int cnt_used_entries;
	int cnt_expected_entries;
	int cnt_driver_entries;
	uint32_t idx, id_queue;
	uint16_t num_q;

	cnt_expected_entries = get_xstats_count(port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* port_id checked in get_xstats_count() */
	dev = &rte_eth_devices[port_id];
	cnt_used_entries = 0;

	for (idx = 0; idx < RTE_NB_STATS; idx++) {
		snprintf(xstats_names[cnt_used_entries].name,
			sizeof(xstats_names[0].name),
			"%s", rte_stats_strings[idx].name);
		cnt_used_entries++;
	}
	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"rx_q%u%s",
				id_queue, rte_rxq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (id_queue = 0; id_queue < num_q; id_queue++) {
		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"tx_q%u%s",
				id_queue, rte_txq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}

	if (dev->dev_ops->xstats_get_names != NULL) {
		/* If there are any driver-specific xstats, append them
		 * to the end of the list.
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return cnt_driver_entries;
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids, uint64_t *values,
	unsigned int n)
{
	/* If need all xstats */
	if (!ids) {
		struct rte_eth_stats eth_stats;
		struct rte_eth_dev *dev;
		unsigned int count = 0, i, q;
		signed int xcount = 0;
		uint64_t val, *stats_ptr;
		uint16_t nb_rxqs, nb_txqs;

		RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
		dev = &rte_eth_devices[port_id];

		nb_rxqs = RTE_MIN(dev->data->nb_rx_queues,
				RTE_ETHDEV_QUEUE_STAT_CNTRS);
		nb_txqs = RTE_MIN(dev->data->nb_tx_queues,
				RTE_ETHDEV_QUEUE_STAT_CNTRS);

		/* Return generic statistics */
		count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
			(nb_txqs * RTE_NB_TXQ_STATS);

		/* implemented by the driver */
		if (dev->dev_ops->xstats_get_by_id != NULL) {
			/* Retrieve the xstats from the driver at the end of
			 * the xstats struct. Retrieve all xstats.
			 */
			xcount = (*dev->dev_ops->xstats_get_by_id)(dev,
					NULL,
					values ? values + count : NULL,
					(n > count) ? n - count : 0);

			if (xcount < 0)
				return xcount;
		/* implemented by the driver */
		} else if (dev->dev_ops->xstats_get != NULL) {
			/* Retrieve the xstats from the driver at the end of
			 * the xstats struct. Retrieve all xstats.
			 * Compatibility for PMDs without xstats_get_by_id.
			 */
			unsigned int size = (n > count) ? n - count : 1;
			struct rte_eth_xstat xstats[size];

			xcount = (*dev->dev_ops->xstats_get)(dev,
					values ? xstats : NULL, size);

			if (xcount < 0)
				return xcount;

			if (values != NULL)
				for (i = 0 ; i < (unsigned int)xcount; i++)
					values[i + count] = xstats[i].value;
		}

		if (n < count + xcount || values == NULL)
			return count + xcount;

		/* now fill the xstats structure */
		count = 0;
		rte_eth_stats_get(port_id, &eth_stats);

		/* global stats */
		for (i = 0; i < RTE_NB_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
						rte_stats_strings[i].offset);
			val = *stats_ptr;
			values[count++] = val;
		}

		/* per-rxq stats */
		for (q = 0; q < nb_rxqs; q++) {
			for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
				stats_ptr = RTE_PTR_ADD(&eth_stats,
					    rte_rxq_stats_strings[i].offset +
					    q * sizeof(uint64_t));
				val = *stats_ptr;
				values[count++] = val;
			}
		}

		/* per-txq stats */
		for (q = 0; q < nb_txqs; q++) {
			for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
				stats_ptr = RTE_PTR_ADD(&eth_stats,
					    rte_txq_stats_strings[i].offset +
					    q * sizeof(uint64_t));
				val = *stats_ptr;
				values[count++] = val;
			}
		}

		return count + xcount;
	}
	/* Need only xstats given by IDS array */
	else {
		uint16_t i, size;
		uint64_t *values_copy;

		size = rte_eth_xstats_get_by_id(port_id, NULL, NULL, 0);

		values_copy = malloc(sizeof(*values_copy) * size);
		if (!values_copy) {
			RTE_PMD_DEBUG_TRACE(
			    "ERROR: can't allocate memory for values_copy\n");
			return -1;
		}

		rte_eth_xstats_get_by_id(port_id, NULL, values_copy, size);

		for (i = 0; i < n; i++) {
			if (ids[i] >= size) {
				RTE_PMD_DEBUG_TRACE(
					"ERROR: id value isn't valid\n");
				free(values_copy);
				return -1;
			}
			values[i] = values_copy[ids[i]];
		}
		free(values_copy);
		return n;
	}
}

int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
	unsigned int n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned int count = 0, i, q;
	signed int xcount = 0;
	uint64_t val, *stats_ptr;
	uint16_t nb_rxqs, nb_txqs;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	/* Return generic statistics */
	count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
		(nb_txqs * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev,
				     xstats ? xstats + count : NULL,
				     (n > count) ? n - count : 0);

		if (xcount < 0)
			return xcount;
	}

	if (n < count + xcount || xstats == NULL)
		return count + xcount;

	/* now fill the xstats structure */
	count = 0;
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < nb_rxqs; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < nb_txqs; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	for (i = 0; i < count; i++)
		xstats[i].id = i;
	/* add an offset to driver-specific stats */
	for ( ; i < count + xcount; i++)
		xstats[i].id += count;

	return count + xcount;
}
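
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * the usual two-call pattern: query the required array size with a NULL
 * buffer, allocate, then fetch names and values together.  Error paths
 * are shortened for brevity.
 *
 *	int i, len;
 *	struct rte_eth_xstat *xstats;
 *	struct rte_eth_xstat_name *names;
 *
 *	len = rte_eth_xstats_get(port_id, NULL, 0);
 *	if (len <= 0)
 *		return;
 *	xstats = malloc(sizeof(*xstats) * len);
 *	names = malloc(sizeof(*names) * len);
 *	if (xstats && names &&
 *	    rte_eth_xstats_get_names(port_id, names, len) == len &&
 *	    rte_eth_xstats_get(port_id, xstats, len) == len)
 *		for (i = 0; i < len; i++)
 *			printf("%s: %"PRIu64"\n",
 *				names[xstats[i].id].name, xstats[i].value);
 *	free(xstats);
 *	free(names);
 */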

/* reset ethdev extended statistics */
void
rte_eth_xstats_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_reset != NULL) {
		(*dev->dev_ops->xstats_reset)(dev);
		return;
	}

	/* fallback to default */
	rte_eth_stats_reset(port_id);
}

static int
set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
		uint8_t is_rx)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
	return (*dev->dev_ops->queue_stats_mapping_set)
			(dev, queue_id, stat_idx, is_rx);
}

int
rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
			STAT_QMAP_TX);
}

int
rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
			STAT_QMAP_RX);
}

int
rte_eth_dev_fw_version_get(uint8_t port_id, char *fw_version, size_t fw_size)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
	return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size);
}

void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;
	const struct rte_eth_desc_lim lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
	};

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
	dev_info->rx_desc_lim = lim;
	dev_info->tx_desc_lim = lim;

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->driver_name = dev->data->drv_name;
	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}

int
rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
				 uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_ptypes;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);

	if (!all_ptypes)
		return 0;

	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
		if (all_ptypes[i] & ptype_mask) {
			if (j < num)
				ptypes[j] = all_ptypes[i];
			j++;
		}

	return j;
}

void
rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}

int
rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	*mtu = dev->data->mtu;
	return 0;
}

int
rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
{
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);

	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
	if (!ret)
		dev->data->mtu = mtu;

	return ret;
}
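
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * raise the MTU for jumbo frames and fall back to reading the current
 * value when the driver rejects the request.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0) {
 *		rte_eth_dev_get_mtu(port_id, &mtu);
 *		printf("port %u: MTU unchanged at %u\n", port_id, mtu);
 *	}
 */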

int
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
		RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
		return -ENOSYS;
	}

	if (vlan_id > 4095) {
		RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
				port_id, (unsigned) vlan_id);
		return -EINVAL;
	}
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);

	return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
}

int
rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);

	return 0;
}

int
rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
				enum rte_vlan_type vlan_type,
				uint16_t tpid)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);

	return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
}

int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* check which option changed by application */
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/* no change */
	if (mask == 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	(*dev->dev_ops->vlan_offload_set)(dev, mask);

	return ret;
}

int
rte_eth_dev_get_vlan_offload(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int ret = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
		ret |= ETH_VLAN_STRIP_OFFLOAD;

	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
		ret |= ETH_VLAN_FILTER_OFFLOAD;

	if (dev->data->dev_conf.rxmode.hw_vlan_extend)
		ret |= ETH_VLAN_EXTEND_OFFLOAD;

	return ret;
}
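
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * turn on VLAN stripping while preserving the other offload bits, using
 * the read-modify-write pattern these two functions are designed for.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0)
 *		rte_eth_dev_set_vlan_offload(port_id,
 *				mask | ETH_VLAN_STRIP_OFFLOAD);
 */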

int
rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
	(*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);

	return 0;
}

int
rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
	memset(fc_conf, 0, sizeof(*fc_conf));
	return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
}

int
rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
	return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
}

int
rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	/* High water, low water validation are device-specific */
	if (*dev->dev_ops->priority_flow_ctrl_set)
		return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
	return -ENOTSUP;
}

static int
rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint16_t i, num;

	if (!reta_conf)
		return -EINVAL;

	num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
	for (i = 0; i < num; i++) {
		if (reta_conf[i].mask)
			return 0;
	}

	return -EINVAL;
}

static int
rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size,
			 uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	if (!reta_conf)
		return -EINVAL;

	if (max_rxq == 0) {
		RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) &&
			(reta_conf[idx].reta[shift] >= max_rxq)) {
			RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
				"the maximum rxq index: %u\n", idx, shift,
				reta_conf[idx].reta[shift], max_rxq);
			return -EINVAL;
		}
	}

	return 0;
}

int
rte_eth_dev_rss_reta_update(uint8_t port_id,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	/* Check entry value */
	ret = rte_eth_check_reta_entry(reta_conf, reta_size,
				dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
}

int
rte_eth_dev_rss_reta_query(uint8_t port_id,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
	return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
}
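
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * spread a redirection table of dev_info.reta_size entries round-robin
 * over nb_queues RX queues.  "dev_info" comes from rte_eth_dev_info_get()
 * and "nb_queues" is the configured RX queue count (both hypothetical
 * locals here); a reta_size of at most 512 is assumed.  Each
 * rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64) entries
 * selected by its mask.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[512 / RTE_RETA_GROUP_SIZE];
 *	uint16_t i, idx, shift;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < dev_info.reta_size; i++) {
 *		idx = i / RTE_RETA_GROUP_SIZE;
 *		shift = i % RTE_RETA_GROUP_SIZE;
 *		reta_conf[idx].mask |= 1ULL << shift;
 *		reta_conf[idx].reta[shift] = i % nb_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size);
 */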

int
rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;
	uint64_t rss_hash_protos;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	rss_hash_protos = rss_conf->rss_hf;
	if ((rss_hash_protos != 0) &&
	    ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
		RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%"PRIx64"\n",
				rss_hash_protos);
		return -EINVAL;
	}
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
	return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
}

int
rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
	return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
}

int
rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
	return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
}

int
rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
				   struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
	return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
}

int
rte_eth_led_on(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
	return (*dev->dev_ops->dev_led_on)(dev);
}

int
rte_eth_led_off(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
	return (*dev->dev_ops->dev_led_off)(dev);
}

/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);

	for (i = 0; i < dev_info.max_mac_addrs; i++)
		if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

static const struct ether_addr null_mac_addr;

int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	if (pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
		return -EINVAL;
	}

	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* If both the MAC address and the pool are already there,
		 * do nothing.
		 */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	/* Update address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[index]);

	/* Update pool bitmap in NIC data structure */
	dev->data->mac_pool_sel[index] |= (1ULL << pool);

	return 0;
}
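
/*
 * Usage sketch (hypothetical application code, not part of this file):
 * accept traffic for an extra unicast address on pool 0, then remove it
 * again.  "extra_addr" is a hypothetical ether_addr filled elsewhere.
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &extra_addr, 0) == 0) {
 *		... receive traffic ...
 *		rte_eth_dev_mac_addr_remove(port_id, &extra_addr);
 *	}
 */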

int
rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

	index = get_mac_addr_index(port_id, addr);
	if (index == 0) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
		return -EADDRINUSE;
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	/* reset pool bitmap */
	dev->data->mac_pool_sel[index] = 0;

	return 0;
}

int
rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!is_valid_assigned_ether_addr(addr))
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);

	/* Update default address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	(*dev->dev_ops->mac_addr_set)(dev, addr);

	return 0;
}

/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!dev->data->hash_mac_addrs)
		return -1;

	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
				ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

int
rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && (on))
		return 0;

	if (index < 0) {
		if (!on) {
			RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
				"set in UTA\n", port_id);
			return -EINVAL;
		}

		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return -ENOSPC;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return ret;
}

int
rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
	return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
}

int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
				uint16_t tx_rate)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	link = dev->data->dev_link;

	if (queue_idx >= dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
				"invalid queue id=%d\n", port_id, queue_idx);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
				"bigger than link speed=%d\n",
			tx_rate, link.link_speed);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
	return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
}

int
rte_eth_mirror_rule_set(uint8_t port_id,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t rule_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (mirror_conf->rule_type == 0) {
		RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
		return -EINVAL;
	}

	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
				ETH_64_POOLS - 1);
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
	    (mirror_conf->pool_mask == 0)) {
		RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
	    mirror_conf->vlan.vlan_mask == 0) {
		RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);

	return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
}
|
|
|
|
|
|
|
|
int
|
|
|
|
rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
|
|
|
|
{
|
2017-01-24 20:28:35 +00:00
|
|
|
struct rte_eth_dev *dev;
|
2013-09-18 10:00:00 +00:00
|
|
|
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
dev = &rte_eth_devices[port_id];
|
2015-11-25 13:25:08 +00:00
|
|
|
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
|
2013-09-18 10:00:00 +00:00
|
|
|
|
|
|
|
return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
|
|
|
|
}
|
|
|
|
|
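
/*
 * Illustrative sketch (field values hypothetical): mirror traffic of
 * VLAN 10 to pool 1 using rule slot 0. Per the validation above, the
 * ETH_MIRROR_VLAN rule type requires a non-zero vlan_mask.
 */
static int
example_mirror_vlan(uint8_t port_id)
{
	struct rte_eth_mirror_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rule_type = ETH_MIRROR_VLAN;
	conf.dst_pool = 1;
	conf.vlan.vlan_mask = 1ULL << 0;	/* use the first vlan_id slot */
	conf.vlan.vlan_id[0] = 10;

	return rte_eth_mirror_rule_set(port_id, &conf, 0, 1);
}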

int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
			sizeof(struct rte_eth_dev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}

int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return ret;
}

void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *cb_arg)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (cb_arg != NULL)
			dev_cb.cb_arg = cb_arg;

		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
				dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
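
/*
 * Illustrative sketch: react to link-state-change interrupts. The
 * callback signature matches rte_eth_dev_cb_fn as invoked by
 * _rte_eth_dev_callback_process() above; the example_* names are
 * hypothetical.
 */
static void
example_lsc_cb(uint8_t port_id, enum rte_eth_event_type event,
		void *cb_arg __rte_unused)
{
	if (event == RTE_ETH_EVENT_INTR_LSC)
		printf("port %u: link state changed\n", port_id);
}

static int
example_register_lsc(uint8_t port_id)
{
	return rte_eth_dev_callback_register(port_id,
			RTE_ETH_EVENT_INTR_LSC, example_lsc_cb, NULL);
}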

int
rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	uint16_t qid;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		vec = intr_handle->intr_vec[qid];
		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
		if (rc && rc != -EEXIST) {
			RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
					" op %d epfd %d vec %u\n",
					port_id, qid, op, epfd, vec);
		}
	}

	return 0;
}

const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->data->drv_name, ring_name,
		 dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	if (rte_xen_dom0_supported())
		return rte_memzone_reserve_bounded(z_name, size, socket_id,
						   0, align, RTE_PGSIZE_2M);
	else
		return rte_memzone_reserve_aligned(z_name, size, socket_id,
						   0, align);
}
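
/*
 * The memzone lookup above makes this helper safe to call again for the
 * same ring, for instance when a queue is reconfigured: the existing
 * zone is reused instead of reserving a second one. A PMD would
 * typically call it as in this hypothetical sketch:
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
 *				      ring_size, RTE_CACHE_LINE_SIZE,
 *				      socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */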

int
rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
			  int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (!dev->intr_handle) {
		RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
		return -ENOTSUP;
	}

	intr_handle = dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	vec = intr_handle->intr_vec[queue_id];
	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (rc && rc != -EEXIST) {
		RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
				" op %d epfd %d vec %u\n",
				port_id, queue_id, op, epfd, vec);
		return rc;
	}

	return 0;
}

int
rte_eth_dev_rx_intr_enable(uint8_t port_id,
			   uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
	return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
}

int
rte_eth_dev_rx_intr_disable(uint8_t port_id,
			    uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
	return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
}
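
/*
 * Illustrative sketch (hypothetical helper): sleep until queue 0 of a
 * port signals an RX interrupt, using the per-thread epoll instance.
 * Assumes the queue's interrupt vector was mapped at setup time.
 */
static int
example_wait_rx(uint8_t port_id)
{
	struct rte_epoll_event ev;
	int ret;

	ret = rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
					RTE_INTR_EVENT_ADD, NULL);
	if (ret < 0)
		return ret;

	rte_eth_dev_rx_intr_enable(port_id, 0);
	/* Block for up to 10 ms until the NIC raises the queue interrupt. */
	ret = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, 10);
	rte_eth_dev_rx_intr_disable(port_id, 0);
	return ret;
}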

#ifdef RTE_NIC_BYPASS
int rte_eth_dev_bypass_init(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
	(*dev->dev_ops->bypass_init)(dev);
	return 0;
}

int
rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
	(*dev->dev_ops->bypass_state_show)(dev, state);
	return 0;
}

int
rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
	(*dev->dev_ops->bypass_state_set)(dev, new_state);
	return 0;
}

int
rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	/* Validate the op that is actually invoked below. */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
	(*dev->dev_ops->bypass_event_show)(dev, event, state);
	return 0;
}

int
rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
	(*dev->dev_ops->bypass_event_set)(dev, event, state);
	return 0;
}

int
rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
	(*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
	return 0;
}

int
rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
	(*dev->dev_ops->bypass_ver_show)(dev, ver);
	return 0;
}

int
rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
	(*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
	return 0;
}

int
rte_eth_dev_bypass_wd_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
	(*dev->dev_ops->bypass_wd_reset)(dev);
	return 0;
}
#endif

int
rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
				RTE_ETH_FILTER_NOP, NULL);
}

int
rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
		       enum rte_filter_op filter_op, void *arg)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
}
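
/*
 * Illustrative sketch (hypothetical helper): probe for flow director
 * support before issuing filter operations. RTE_ETH_FILTER_NOP performs
 * the capability check without touching hardware state.
 */
static int
example_has_fdir(uint8_t port_id)
{
	return rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR) == 0;
}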

void *
rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}
	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}

void *
rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callback at the first position. */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	rte_smp_wmb();
	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}

void *
rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return cb;
}
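
/*
 * Illustrative sketch: count received packets with a post-RX callback.
 * The callback runs inline in rte_eth_rx_burst() on the caller's lcore,
 * so it must be cheap and lock-free. The example_* names are
 * hypothetical.
 */
static uint16_t
example_count_cb(uint8_t port_id __rte_unused, uint16_t queue_id __rte_unused,
		struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_pkts,
		uint16_t max_pkts __rte_unused, void *user_param)
{
	uint64_t *counter = user_param;

	*counter += nb_pkts;
	return nb_pkts;	/* no packets dropped or reordered */
}

static void *
example_attach_counter(uint8_t port_id, uint16_t queue_id, uint64_t *counter)
{
	return rte_eth_add_rx_callback(port_id, queue_id,
			example_count_cb, counter);
}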
int
rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return ret;
}

int
rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret = -EINVAL;
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return ret;
}
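
/*
 * Note on the two remove helpers above: they only unlink user_cb from
 * the queue's list; the memory is deliberately not rte_free()'d because
 * a data-plane thread may still be executing the callback. The caller
 * owns the structure and should release it only once no lcore can still
 * be inside the burst function, e.g. (hypothetical sketch):
 *
 *	if (rte_eth_remove_rx_callback(port_id, queue_id, cb) == 0) {
 *		rte_eal_mp_wait_lcore();	// quiesce workers first
 *		rte_free(cb);
 *	}
 */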

int
rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	return 0;
}

int
rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
	return 0;
}
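
/*
 * Illustrative sketch (hypothetical helper): report the descriptor ring
 * size and backing mempool of RX queue 0, assuming the PMD implements
 * rxq_info_get.
 */
static void
example_print_rxq(uint8_t port_id)
{
	struct rte_eth_rxq_info qinfo;

	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
		printf("port %u rxq 0: %u descriptors, mempool %s\n",
			port_id, qinfo.nb_desc, qinfo.mp->name);
}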

int
rte_eth_dev_set_mc_addr_list(uint8_t port_id,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}
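
/*
 * Illustrative sketch: subscribe a port to two multicast groups in one
 * call; the supplied list replaces any previously installed one. The
 * addresses and helper name are hypothetical.
 */
static int
example_set_mc(uint8_t port_id)
{
	struct ether_addr mc[2] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
}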

int
rte_eth_timesync_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return (*dev->dev_ops->timesync_enable)(dev);
}

int
rte_eth_timesync_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return (*dev->dev_ops->timesync_disable)(dev);
}

int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}

int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}

int
rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
}

int
rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
}

int
rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
}
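
/*
 * Illustrative sketch (hypothetical helper): start IEEE 1588 timesync
 * on a port and read the NIC's current hardware clock, assuming the PMD
 * implements the timesync ops.
 */
static int
example_read_phc(uint8_t port_id, struct timespec *ts)
{
	int ret = rte_eth_timesync_enable(port_id);

	if (ret < 0)
		return ret;
	return rte_eth_timesync_read_time(port_id, ts);
}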

int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return (*dev->dev_ops->get_reg)(dev, info);
}

int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom_length)(dev);
}

int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom)(dev, info);
}

int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return (*dev->dev_ops->set_eeprom)(dev, info);
}
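
/*
 * Illustrative sketch (hypothetical helper): dump a port's EEPROM. The
 * length query sizes the buffer; info.magic is left 0 here, since PMDs
 * typically validate it on set_eeprom rather than get_eeprom.
 */
static int
example_dump_eeprom(uint8_t port_id)
{
	struct rte_dev_eeprom_info info;
	int len = rte_eth_dev_get_eeprom_length(port_id);

	if (len <= 0)
		return len;

	memset(&info, 0, sizeof(info));
	info.data = rte_malloc(NULL, len, 0);
	if (info.data == NULL)
		return -ENOMEM;
	info.offset = 0;
	info.length = len;

	len = rte_eth_dev_get_eeprom(port_id, &info);
	rte_free(info.data);
	return len;
}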

int
rte_eth_dev_get_dcb_info(uint8_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
}

int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}
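
/*
 * Illustrative sketch (values hypothetical): enable E-Tag offload
 * handling on a port. The validation above rejects a NULL config, an
 * out-of-range tunnel type, and a zero mask.
 */
static int
example_enable_etag(uint8_t port_id)
{
	struct rte_eth_l2_tunnel_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;

	return rte_eth_dev_l2_tunnel_offload_set(port_id, &conf,
			ETH_L2_TUNNEL_ENABLE_MASK, 1);
}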