/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#include <rte_atomic.h>
#include <rte_debug.h>
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>

#include "failsafe_private.h"

|
2017-07-18 12:48:19 +00:00
|
|
|
static inline int
|
|
|
|
fs_rx_unsafe(struct sub_device *sdev)
|
|
|
|
{
|
|
|
|
return (ETH(sdev) == NULL) ||
|
|
|
|
(ETH(sdev)->rx_pkt_burst == NULL) ||
|
2017-10-22 05:51:08 +00:00
|
|
|
(sdev->state != DEV_STARTED) ||
|
|
|
|
(sdev->remove != 0);
|
2017-07-18 12:48:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
fs_tx_unsafe(struct sub_device *sdev)
|
|
|
|
{
|
|
|
|
return (sdev == NULL) ||
|
|
|
|
(ETH(sdev) == NULL) ||
|
|
|
|
(ETH(sdev)->tx_pkt_burst == NULL) ||
|
|
|
|
(sdev->state != DEV_STARTED);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2018-10-08 22:31:40 +00:00
|
|
|
failsafe_set_burst_fn(struct rte_eth_dev *dev, int force_safe)
|
2017-07-18 12:48:19 +00:00
|
|
|
{
|
|
|
|
struct sub_device *sdev;
|
|
|
|
uint8_t i;
|
|
|
|
int need_safe;
|
|
|
|
int safe_set;
|
|
|
|
|
|
|
|
need_safe = force_safe;
|
|
|
|
FOREACH_SUBDEV(sdev, i, dev)
|
|
|
|
need_safe |= fs_rx_unsafe(sdev);
|
|
|
|
safe_set = (dev->rx_pkt_burst == &failsafe_rx_burst);
|
|
|
|
if (need_safe && !safe_set) {
|
|
|
|
DEBUG("Using safe RX bursts%s",
|
|
|
|
(force_safe ? " (forced)" : ""));
|
|
|
|
dev->rx_pkt_burst = &failsafe_rx_burst;
|
|
|
|
} else if (!need_safe && safe_set) {
|
|
|
|
DEBUG("Using fast RX bursts");
|
|
|
|
dev->rx_pkt_burst = &failsafe_rx_burst_fast;
|
|
|
|
}
|
|
|
|
need_safe = force_safe || fs_tx_unsafe(TX_SUBDEV(dev));
|
|
|
|
safe_set = (dev->tx_pkt_burst == &failsafe_tx_burst);
|
|
|
|
if (need_safe && !safe_set) {
|
|
|
|
DEBUG("Using safe TX bursts%s",
|
|
|
|
(force_safe ? " (forced)" : ""));
|
|
|
|
dev->tx_pkt_burst = &failsafe_tx_burst;
|
|
|
|
} else if (!need_safe && safe_set) {
|
|
|
|
DEBUG("Using fast TX bursts");
|
|
|
|
dev->tx_pkt_burst = &failsafe_tx_burst_fast;
|
|
|
|
}
|
|
|
|
rte_wmb();
|
|
|
|
}
|
|
|
|
|
2019-04-18 17:20:55 +00:00
|
|
|
/*
|
|
|
|
* Override source port in Rx packets.
|
|
|
|
*
|
|
|
|
* Make Rx packets originate from this PMD instance instead of one of its
|
|
|
|
* sub-devices. This is mandatory to avoid breaking applications.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
failsafe_rx_set_port(struct rte_mbuf **rx_pkts, uint16_t nb_pkts, uint16_t port)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i != nb_pkts; ++i)
|
|
|
|
rx_pkts[i]->port = port;
|
|
|
|
}
|
|
|
|
|
2017-07-18 12:48:14 +00:00
|
|
|
/*
 * Safe Rx burst: round-robin over the sub-devices attached to this queue,
 * re-checking each sub-device's usability on every call.
 *
 * Starting from the sub-device the queue last polled, try each sub-device
 * in turn until one returns packets or a full cycle completes. The
 * starting point is then advanced so successive calls spread polling
 * across sub-devices.
 *
 * @param queue    Fail-safe Rx queue (struct rxq *).
 * @param rx_pkts  Array to fill with received mbufs.
 * @param nb_pkts  Capacity of rx_pkts.
 * @return Number of packets received (0 if no sub-device had any).
 */
uint16_t
failsafe_rx_burst(void *queue,
		struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;

	rxq = queue;
	sdev = rxq->sdev;
	do {
		if (fs_rx_unsafe(sdev)) {
			/* Skip unusable sub-device, move on to the next. */
			nb_rx = 0;
			sdev = sdev->next;
			continue;
		}
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		/* Hold a reference across the sub-device burst call so the
		 * sub-device cannot be torn down while in use. */
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->
			rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		sdev = sdev->next;
	} while (nb_rx == 0 && sdev != rxq->sdev);
	/* Remember where to resume polling on the next call. */
	rxq->sdev = sdev;
	if (nb_rx)
		/* Rewrite the mbuf port so packets appear to come from the
		 * fail-safe device, not the sub-device. */
		failsafe_rx_set_port(rx_pkts, nb_rx,
				     rxq->priv->data->port_id);
	return nb_rx;
}
|
|
|
|
|
2017-07-18 12:48:19 +00:00
|
|
|
/*
 * Fast Rx burst: identical round-robin to failsafe_rx_burst(), but the
 * per-call usability check is reduced to a debug-build assertion.
 *
 * This variant is installed by failsafe_set_burst_fn() only when every
 * sub-device is known to be safe for Rx, so skipping the runtime check
 * is valid until the burst function is switched back.
 *
 * @param queue    Fail-safe Rx queue (struct rxq *).
 * @param rx_pkts  Array to fill with received mbufs.
 * @param nb_pkts  Capacity of rx_pkts.
 * @return Number of packets received (0 if no sub-device had any).
 */
uint16_t
failsafe_rx_burst_fast(void *queue,
		struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct sub_device *sdev;
	struct rxq *rxq;
	void *sub_rxq;
	uint16_t nb_rx;

	rxq = queue;
	sdev = rxq->sdev;
	do {
		/* Debug-only sanity check; no runtime cost in release builds. */
		RTE_ASSERT(!fs_rx_unsafe(sdev));
		sub_rxq = ETH(sdev)->data->rx_queues[rxq->qid];
		/* Reference held across the sub-device burst call. */
		FS_ATOMIC_P(rxq->refcnt[sdev->sid]);
		nb_rx = ETH(sdev)->
			rx_pkt_burst(sub_rxq, rx_pkts, nb_pkts);
		FS_ATOMIC_V(rxq->refcnt[sdev->sid]);
		sdev = sdev->next;
	} while (nb_rx == 0 && sdev != rxq->sdev);
	/* Resume from the next sub-device on the following call. */
	rxq->sdev = sdev;
	if (nb_rx)
		/* Packets must appear to come from the fail-safe port. */
		failsafe_rx_set_port(rx_pkts, nb_rx,
				     rxq->priv->data->port_id);
	return nb_rx;
}
|
|
|
|
|
2017-07-18 12:48:14 +00:00
|
|
|
uint16_t
|
|
|
|
failsafe_tx_burst(void *queue,
|
|
|
|
struct rte_mbuf **tx_pkts,
|
|
|
|
uint16_t nb_pkts)
|
|
|
|
{
|
|
|
|
struct sub_device *sdev;
|
|
|
|
struct txq *txq;
|
|
|
|
void *sub_txq;
|
2017-07-18 12:48:20 +00:00
|
|
|
uint16_t nb_tx;
|
2017-07-18 12:48:14 +00:00
|
|
|
|
|
|
|
txq = queue;
|
2019-03-18 16:05:25 +00:00
|
|
|
sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]);
|
2017-07-18 12:48:19 +00:00
|
|
|
if (unlikely(fs_tx_unsafe(sdev)))
|
2017-07-18 12:48:14 +00:00
|
|
|
return 0;
|
|
|
|
sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
|
2017-07-18 12:48:20 +00:00
|
|
|
FS_ATOMIC_P(txq->refcnt[sdev->sid]);
|
|
|
|
nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
|
|
|
|
FS_ATOMIC_V(txq->refcnt[sdev->sid]);
|
|
|
|
return nb_tx;
|
2017-07-18 12:48:14 +00:00
|
|
|
}
|
2017-07-18 12:48:19 +00:00
|
|
|
|
|
|
|
uint16_t
|
|
|
|
failsafe_tx_burst_fast(void *queue,
|
|
|
|
struct rte_mbuf **tx_pkts,
|
|
|
|
uint16_t nb_pkts)
|
|
|
|
{
|
|
|
|
struct sub_device *sdev;
|
|
|
|
struct txq *txq;
|
|
|
|
void *sub_txq;
|
2017-07-18 12:48:20 +00:00
|
|
|
uint16_t nb_tx;
|
2017-07-18 12:48:19 +00:00
|
|
|
|
|
|
|
txq = queue;
|
2019-03-18 16:05:25 +00:00
|
|
|
sdev = TX_SUBDEV(&rte_eth_devices[txq->priv->data->port_id]);
|
2017-07-18 12:48:19 +00:00
|
|
|
RTE_ASSERT(!fs_tx_unsafe(sdev));
|
|
|
|
sub_txq = ETH(sdev)->data->tx_queues[txq->qid];
|
2017-07-18 12:48:20 +00:00
|
|
|
FS_ATOMIC_P(txq->refcnt[sdev->sid]);
|
|
|
|
nb_tx = ETH(sdev)->tx_pkt_burst(sub_txq, tx_pkts, nb_pkts);
|
|
|
|
FS_ATOMIC_V(txq->refcnt[sdev->sid]);
|
|
|
|
return nb_tx;
|
2017-07-18 12:48:19 +00:00
|
|
|
}
|