/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_cycles.h>

#include "failsafe_private.h"

/** Print a message out of a flow error. */
static int
fs_flow_complain(struct rte_flow_error *error)
{
	static const char *const errstrlist[] = {
		[RTE_FLOW_ERROR_TYPE_NONE] = "no error",
		[RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
		[RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
		[RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
		[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
		[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
		[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
		[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
		[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
		[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
		[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
		[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
	};
	const char *errstr;
	char buf[32];
	int err = rte_errno;

	if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
			!errstrlist[error->type])
		errstr = "unknown type";
	else
		errstr = errstrlist[error->type];
	ERROR("Caught error type %d (%s): %s%s\n",
	      error->type, errstr,
	      error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
				       error->cause), buf) : "",
	      error->message ? error->message : "(no stated reason)");
	return -err;
}
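
/*
 * Propagate the fail-safe port's flow isolation mode to a sub-device.
 * Only an enabled mode needs applying: a freshly probed port starts
 * with isolation disabled, so nothing is done in that case.
 */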
static int
eth_dev_flow_isolate_set(struct rte_eth_dev *dev,
			 struct sub_device *sdev)
{
	struct rte_flow_error ferror;
	int ret;

	if (!PRIV(dev)->flow_isolated) {
		DEBUG("Flow isolation already disabled");
	} else {
		DEBUG("Enabling flow isolation");
		ret = rte_flow_isolate(PORT_ID(sdev),
				       PRIV(dev)->flow_isolated,
				       &ferror);
		if (ret) {
			fs_flow_complain(&ferror);
			return ret;
		}
	}
	return 0;
}
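
/*
 * Replay the fail-safe port configuration on a freshly probed
 * sub-device: Rx/Tx queues, link status, promiscuous and all-multicast
 * modes, MTU, MAC addresses, VLAN filters and flow rules.
 */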
static int
fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
		struct sub_device *sdev)
{
	struct rte_eth_dev *edev;
	struct rte_vlan_filter_conf *vfc1;
	struct rte_vlan_filter_conf *vfc2;
	struct rte_flow *flow;
	struct rte_flow_error ferror;
	uint32_t i;
	int ret;

	edev = ETH(sdev);
	/* RX queue setup */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct rxq *rxq;

		rxq = dev->data->rx_queues[i];
		ret = rte_eth_rx_queue_setup(PORT_ID(sdev), i,
				rxq->info.nb_desc, rxq->socket_id,
				&rxq->info.conf, rxq->info.mp);
		if (ret) {
			ERROR("rx_queue_setup failed");
			return ret;
		}
	}
	/* TX queue setup */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct txq *txq;

		txq = dev->data->tx_queues[i];
		ret = rte_eth_tx_queue_setup(PORT_ID(sdev), i,
				txq->info.nb_desc, txq->socket_id,
				&txq->info.conf);
		if (ret) {
			ERROR("tx_queue_setup failed");
			return ret;
		}
	}
	/* dev_link.link_status */
	if (dev->data->dev_link.link_status !=
	    edev->data->dev_link.link_status) {
		DEBUG("Configuring link_status");
		if (dev->data->dev_link.link_status)
			ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
		else
			ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
		if (ret) {
			ERROR("Failed to apply link_status");
			return ret;
		}
	} else {
		DEBUG("link_status already set");
	}
	/* promiscuous */
	if (dev->data->promiscuous != edev->data->promiscuous) {
		DEBUG("Configuring promiscuous");
		if (dev->data->promiscuous)
			ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
		else
			ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
		if (ret != 0) {
			ERROR("Failed to apply promiscuous mode");
			return ret;
		}
	} else {
		DEBUG("promiscuous already set");
	}
	/* all_multicast */
	if (dev->data->all_multicast != edev->data->all_multicast) {
		DEBUG("Configuring all_multicast");
		if (dev->data->all_multicast)
			ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
		else
			ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
		if (ret != 0) {
			ERROR("Failed to apply allmulticast mode");
			return ret;
		}
	} else {
		DEBUG("all_multicast already set");
	}
	/* MTU */
	if (dev->data->mtu != edev->data->mtu) {
		DEBUG("Configuring MTU");
		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), dev->data->mtu);
		if (ret) {
			ERROR("Failed to apply MTU");
			return ret;
		}
	} else {
		DEBUG("MTU already set");
	}
	/* default MAC */
	DEBUG("Configuring default MAC address");
	ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev),
			&dev->data->mac_addrs[0]);
	if (ret) {
		ERROR("Setting default MAC address failed");
		return ret;
	}
	/* additional MAC */
	if (PRIV(dev)->nb_mac_addr > 1)
		DEBUG("Configuring additional MAC address%s",
			(PRIV(dev)->nb_mac_addr > 2 ? "es" : ""));
	for (i = 1; i < PRIV(dev)->nb_mac_addr; i++) {
		struct rte_ether_addr *ea;

		ea = &dev->data->mac_addrs[i];
		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), ea,
				PRIV(dev)->mac_addr_pool[i]);
		if (ret) {
			char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];

			rte_ether_format_addr(ea_fmt,
					RTE_ETHER_ADDR_FMT_SIZE, ea);
			ERROR("Adding MAC address %s failed", ea_fmt);
			return ret;
		}
	}
	/*
	 * Propagate multicast MAC addresses to sub-devices only if a
	 * non-zero number of addresses is set, so that sub-devices that
	 * do not support the operation keep working when the feature is
	 * not actually used.
	 */
	if (PRIV(dev)->nb_mcast_addr > 0) {
		DEBUG("Configuring multicast MAC addresses");
		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
						   PRIV(dev)->mcast_addrs,
						   PRIV(dev)->nb_mcast_addr);
		if (ret) {
			ERROR("Failed to apply multicast MAC addresses");
			return ret;
		}
	}
	/* VLAN filter */
	vfc1 = &dev->data->vlan_filter_conf;
	vfc2 = &edev->data->vlan_filter_conf;
	if (memcmp(vfc1, vfc2, sizeof(struct rte_vlan_filter_conf))) {
		uint64_t vbit;
		uint64_t ids;
		size_t i;
		uint16_t vlan_id;

		DEBUG("Configuring VLAN filter");
		for (i = 0; i < RTE_DIM(vfc1->ids); i++) {
			if (vfc1->ids[i] == 0)
				continue;
			ids = vfc1->ids[i];
			while (ids) {
				vlan_id = 64 * i;
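				/*
				 * Worked example (illustrative): ids = 0x6.
				 * vbit = ~ids & (ids - 1) = 0x1 masks the
				 * zeroes below the lowest set bit; XORing
				 * ids with (ids ^ (ids - 1)) ^ vbit clears
				 * that bit (ids becomes 0x4); shifting vbit
				 * down while bumping vlan_id recovers the
				 * bit position, giving VLAN ID 64 * i + 1.
				 */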
				/* count trailing zeroes */
				vbit = ~ids & (ids - 1);
				/* clear least significant bit set */
				ids ^= (ids ^ (ids - 1)) ^ vbit;
				for (; vbit; vlan_id++)
					vbit >>= 1;
				ret = rte_eth_dev_vlan_filter(
					PORT_ID(sdev), vlan_id, 1);
				if (ret) {
					ERROR("Failed to apply VLAN filter %hu",
						vlan_id);
					return ret;
				}
			}
		}
	} else {
		DEBUG("VLAN filter already set");
	}
	/* rte_flow */
	if (TAILQ_EMPTY(&PRIV(dev)->flow_list)) {
		DEBUG("rte_flow already set");
	} else {
		DEBUG("Resetting rte_flow configuration");
		ret = rte_flow_flush(PORT_ID(sdev), &ferror);
		if (ret) {
			fs_flow_complain(&ferror);
			return ret;
		}
		i = 0;
		rte_errno = 0;
		DEBUG("Configuring rte_flow");
		TAILQ_FOREACH(flow, &PRIV(dev)->flow_list, next) {
			DEBUG("Creating flow #%" PRIu32, i++);
			flow->flows[SUB_ID(sdev)] =
				rte_flow_create(PORT_ID(sdev),
						flow->rule.attr,
						flow->rule.pattern,
						flow->rule.actions,
						&ferror);
			ret = rte_errno;
			if (ret)
				break;
		}
		if (ret) {
			fs_flow_complain(&ferror);
			return ret;
		}
	}
	return 0;
}
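
/*
 * Tear a sub-device down from its current state: stop, close and
 * detach it as needed, falling through the state machine down to
 * DEV_UNDEFINED, then re-arm the hotplug alarm so that the device
 * can come back.
 */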
static void
fs_dev_remove(struct sub_device *sdev)
{
	int ret;

	if (sdev == NULL)
		return;
	switch (sdev->state) {
	case DEV_STARTED:
		failsafe_rx_intr_uninstall_subdevice(sdev);
		rte_eth_dev_stop(PORT_ID(sdev));
		sdev->state = DEV_ACTIVE;
		/* fallthrough */
	case DEV_ACTIVE:
		failsafe_eth_dev_unregister_callbacks(sdev);
		rte_eth_dev_close(PORT_ID(sdev));
		sdev->state = DEV_PROBED;
		/* fallthrough */
	case DEV_PROBED:
		ret = rte_dev_remove(sdev->dev);
		if (ret < 0) {
			ERROR("Bus detach failed for sub_device %u",
			      SUB_ID(sdev));
		} else {
			rte_eth_dev_release_port(ETH(sdev));
		}
		sdev->state = DEV_PARSED;
		/* fallthrough */
	case DEV_PARSED:
	case DEV_UNDEFINED:
		sdev->state = DEV_UNDEFINED;
		sdev->sdev_port_id = RTE_MAX_ETHPORTS;
		/* the end */
		break;
	}
	sdev->remove = 0;
	failsafe_hotplug_alarm_install(fs_dev(sdev));
}
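
/*
 * Fold a departing sub-device's statistics into the fail-safe
 * accumulator. If the current counters cannot be read, fall back to
 * the last periodic snapshot.
 */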
static void
fs_dev_stats_save(struct sub_device *sdev)
{
	struct rte_eth_stats stats;
	int err;

	/* Attempt to read current stats. */
	err = rte_eth_stats_get(PORT_ID(sdev), &stats);
	if (err) {
		uint64_t timestamp = sdev->stats_snapshot.timestamp;

		WARN("Could not access latest statistics from sub-device %d.\n",
		     SUB_ID(sdev));
		if (timestamp != 0)
			WARN("Using latest snapshot taken %"PRIu64" seconds ago.\n",
			     (rte_rdtsc() - timestamp) / rte_get_tsc_hz());
	}
	failsafe_stats_increment
		(&PRIV(fs_dev(sdev))->stats_accumulator,
		 err ? &sdev->stats_snapshot.stats : &stats);
	memset(&sdev->stats_snapshot, 0, sizeof(sdev->stats_snapshot));
}
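
/*
 * Check that no Rx or Tx burst is in flight on a sub-device's queues.
 * Return 1 when the datapath is quiescent, 0 otherwise.
 */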
static inline int
fs_rxtx_clean(struct sub_device *sdev)
{
	uint16_t i;

	for (i = 0; i < ETH(sdev)->data->nb_rx_queues; i++)
		if (FS_ATOMIC_RX(sdev, i))
			return 0;
	for (i = 0; i < ETH(sdev)->data->nb_tx_queues; i++)
		if (FS_ATOMIC_TX(sdev, i))
			return 0;
	return 1;
}
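
/* Unregister the RMV and LSC event callbacks hooked on a sub-device. */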
void
failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev)
{
	int ret;

	if (sdev == NULL)
		return;
	if (sdev->rmv_callback) {
		ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
						RTE_ETH_EVENT_INTR_RMV,
						failsafe_eth_rmv_event_callback,
						sdev);
		if (ret)
			WARN("Failed to unregister RMV callback for sub_device"
			     " %d", SUB_ID(sdev));
		sdev->rmv_callback = 0;
	}
	if (sdev->lsc_callback) {
		ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
						RTE_ETH_EVENT_INTR_LSC,
						failsafe_eth_lsc_event_callback,
						sdev);
		if (ret)
			WARN("Failed to unregister LSC callback for sub_device"
			     " %d", SUB_ID(sdev));
		sdev->lsc_callback = 0;
	}
}
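
/*
 * Remove each sub-device flagged for removal once its datapath is
 * quiescent, under the fail-safe hotplug lock.
 */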
void
failsafe_dev_remove(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
		if (sdev->remove && fs_rxtx_clean(sdev)) {
			if (fs_lock(dev, 1) != 0)
				return;
			fs_dev_stats_save(sdev);
			fs_dev_remove(sdev);
			fs_unlock(dev, 1);
		}
}
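
/*
 * Re-apply the recorded Rx queue states after a sub-device (re)start:
 * deferred-start queues that the application had started are started
 * again, and queues it had stopped are stopped, via the fail-safe
 * queue ops.
 */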
static int
failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev)
{
	struct rxq *rxq;
	int ret;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq->info.conf.rx_deferred_start &&
		    dev->data->rx_queue_state[i] ==
						RTE_ETH_QUEUE_STATE_STARTED) {
			/*
			 * The subdevice Rx queue does not launch on device
			 * start if deferred start flag is set. It needs to be
			 * started manually in case an appropriate failsafe Rx
			 * queue has been started earlier.
			 */
			ret = dev->dev_ops->rx_queue_start(dev, i);
			if (ret) {
				ERROR("Could not synchronize Rx queue %d", i);
				return ret;
			}
		} else if (dev->data->rx_queue_state[i] ==
					RTE_ETH_QUEUE_STATE_STOPPED) {
			/*
			 * The subdevice Rx queue needs to be stopped manually
			 * in case an appropriate failsafe Rx queue has been
			 * stopped earlier.
			 */
			ret = dev->dev_ops->rx_queue_stop(dev, i);
			if (ret) {
				ERROR("Could not synchronize Rx queue %d", i);
				return ret;
			}
		}
	}
	return 0;
}
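
/* Tx counterpart of failsafe_eth_dev_rx_queues_sync(). */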
static int
failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev)
{
	struct txq *txq;
	int ret;
	uint16_t i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		if (txq->info.conf.tx_deferred_start &&
		    dev->data->tx_queue_state[i] ==
						RTE_ETH_QUEUE_STATE_STARTED) {
			/*
			 * The subdevice Tx queue does not launch on device
			 * start if deferred start flag is set. It needs to be
			 * started manually in case an appropriate failsafe Tx
			 * queue has been started earlier.
			 */
			ret = dev->dev_ops->tx_queue_start(dev, i);
			if (ret) {
				ERROR("Could not synchronize Tx queue %d", i);
				return ret;
			}
		} else if (dev->data->tx_queue_state[i] ==
					RTE_ETH_QUEUE_STATE_STOPPED) {
			/*
			 * The subdevice Tx queue needs to be stopped manually
			 * in case an appropriate failsafe Tx queue has been
			 * stopped earlier.
			 */
			ret = dev->dev_ops->tx_queue_stop(dev, i);
			if (ret) {
				ERROR("Could not synchronize Tx queue %d", i);
				return ret;
			}
		}
	}
	return 0;
}
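
/*
 * Walk the fail-safe port through its state machine, bringing every
 * lagging sub-device up to the fail-safe port's own state: parsed,
 * probed, configured, then started. On failure, flag the out-of-sync
 * sub-devices for removal.
 */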
int
failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
{
	struct sub_device *sdev;
	uint32_t inactive;
	int ret;
	uint8_t i;

	if (PRIV(dev)->state < DEV_PARSED)
		return 0;

	ret = failsafe_args_parse_subs(dev);
	if (ret)
		goto err_remove;

	if (PRIV(dev)->state < DEV_PROBED)
		return 0;
	ret = failsafe_eal_init(dev);
	if (ret)
		goto err_remove;
	if (PRIV(dev)->state < DEV_ACTIVE)
		return 0;
	inactive = 0;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (sdev->state == DEV_PROBED) {
			inactive |= UINT32_C(1) << i;
			ret = eth_dev_flow_isolate_set(dev, sdev);
			if (ret) {
				ERROR("Could not apply configuration to sub_device %d",
				      i);
				goto err_remove;
			}
		}
	}
	ret = dev->dev_ops->dev_configure(dev);
	if (ret)
		goto err_remove;
	FOREACH_SUBDEV(sdev, i, dev) {
		if (inactive & (UINT32_C(1) << i)) {
			ret = fs_eth_dev_conf_apply(dev, sdev);
			if (ret) {
				ERROR("Could not apply configuration to sub_device %d",
				      i);
				goto err_remove;
			}
		}
	}
	/*
	 * If new devices have been configured, check if
	 * the link state has changed.
	 */
	if (inactive)
		dev->dev_ops->link_update(dev, 1);
	if (PRIV(dev)->state < DEV_STARTED)
		return 0;
	ret = dev->dev_ops->dev_start(dev);
	if (ret)
		goto err_remove;
	ret = failsafe_eth_dev_rx_queues_sync(dev);
	if (ret)
		goto err_remove;
	ret = failsafe_eth_dev_tx_queues_sync(dev);
	if (ret)
		goto err_remove;
	return 0;
err_remove:
	FOREACH_SUBDEV(sdev, i, dev)
		if (sdev->state != PRIV(dev)->state)
			sdev->remove = 1;
	return ret;
}
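
/* Add every counter of one rte_eth_stats structure into another. */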
void
failsafe_stats_increment(struct rte_eth_stats *to, struct rte_eth_stats *from)
{
	uint32_t i;

	RTE_ASSERT(to != NULL && from != NULL);
	to->ipackets += from->ipackets;
	to->opackets += from->opackets;
	to->ibytes += from->ibytes;
	to->obytes += from->obytes;
	to->imissed += from->imissed;
	to->ierrors += from->ierrors;
	to->oerrors += from->oerrors;
	to->rx_nombuf += from->rx_nombuf;
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		to->q_ipackets[i] += from->q_ipackets[i];
		to->q_opackets[i] += from->q_opackets[i];
		to->q_ibytes[i] += from->q_ibytes[i];
		to->q_obytes[i] += from->q_obytes[i];
		to->q_errors[i] += from->q_errors[i];
	}
}
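
/*
 * RMV event callback: divert bursts away from the removed sub-device
 * and flag it so that the hotplug alarm performs the actual removal
 * asynchronously.
 */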
int
failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
				enum rte_eth_event_type event __rte_unused,
				void *cb_arg, void *out __rte_unused)
{
	struct sub_device *sdev = cb_arg;

	fs_lock(fs_dev(sdev), 0);
	/* Switch tx_dev away from the removed sub-device as soon as possible. */
	fs_switch_dev(fs_dev(sdev), sdev);
	/* Use safe bursts in any case. */
	failsafe_set_burst_fn(fs_dev(sdev), 1);
	/*
	 * Removal is asynchronous: the callback cannot be unregistered
	 * from within its own invocation, so only flag the sub-device
	 * and let the hotplug alarm perform the actual removal.
	 */
	sdev->remove = 1;
	fs_unlock(fs_dev(sdev), 0);
	return 0;
}
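
/*
 * LSC event callback: refresh the fail-safe link status and forward
 * the event to the application if it changed.
 */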
int
failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused,
				enum rte_eth_event_type event __rte_unused,
				void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *dev = cb_arg;
	int ret;

	ret = dev->dev_ops->link_update(dev, 0);
	/* We must pass on the LSC event */
	if (ret)
		return _rte_eth_dev_callback_process(dev,
						     RTE_ETH_EVENT_INTR_LSC,
						     NULL);
	else
		return 0;
}

/* Take sub-device ownership before it becomes exposed to the application. */
int
failsafe_eth_new_event_callback(uint16_t port_id,
				enum rte_eth_event_type event __rte_unused,
				void *cb_arg, void *out __rte_unused)
{
	struct rte_eth_dev *fs_dev = cb_arg;
	struct sub_device *sdev;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	uint8_t i;

	FOREACH_SUBDEV_STATE(sdev, i, fs_dev, DEV_PARSED) {
		if (sdev->state >= DEV_PROBED)
			continue;
		if (dev->device == NULL) {
			WARN("Trying to probe malformed device %s.\n",
			     sdev->devargs.name);
			continue;
		}
		if (strcmp(sdev->devargs.name, dev->device->name) != 0)
			continue;
		rte_eth_dev_owner_set(port_id, &PRIV(fs_dev)->my_owner);
		/* The actual owner will be checked after the port probing. */
		break;
	}
	return 0;
}