net/sfc: add xstats for Rx/Tx doorbells

Rx/Tx doorbell statistics are collected in software and are
available per queue. These stats are useful for performance
investigation.

Signed-off-by: Ivan Ilchenko <ivan.ilchenko@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Andy Moreton <amoreton@xilinx.com>
This commit is contained in:
Ivan Ilchenko 2021-07-23 16:15:14 +03:00 committed by Thomas Monjalon
parent 21ca2629cf
commit fdd7719eb3
16 changed files with 772 additions and 36 deletions

View File

@ -78,6 +78,7 @@ sources = files(
'sfc.c',
'sfc_mcdi.c',
'sfc_sriov.c',
'sfc_sw_stats.c',
'sfc_intr.c',
'sfc_ev.c',
'sfc_port.c',

View File

@ -24,6 +24,7 @@
#include "sfc_tx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"
#include "sfc_sw_stats.h"
int
@ -636,10 +637,17 @@ sfc_configure(struct sfc_adapter *sa)
if (rc != 0)
goto fail_tx_configure;
rc = sfc_sw_xstats_configure(sa);
if (rc != 0)
goto fail_sw_xstats_configure;
sa->state = SFC_ADAPTER_CONFIGURED;
sfc_log_init(sa, "done");
return 0;
fail_sw_xstats_configure:
sfc_tx_close(sa);
fail_tx_configure:
sfc_rx_close(sa);
@ -666,6 +674,7 @@ sfc_close(struct sfc_adapter *sa)
SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
sa->state = SFC_ADAPTER_CLOSING;
sfc_sw_xstats_close(sa);
sfc_tx_close(sa);
sfc_rx_close(sa);
sfc_port_close(sa);
@ -891,6 +900,10 @@ sfc_attach(struct sfc_adapter *sa)
sfc_flow_init(sa);
rc = sfc_sw_xstats_init(sa);
if (rc != 0)
goto fail_sw_xstats_init;
/*
* Create vSwitch to be able to use VFs when PF is not started yet
* as DPDK port. VFs should be able to talk to each other even
@ -906,6 +919,9 @@ sfc_attach(struct sfc_adapter *sa)
return 0;
fail_sriov_vswitch_create:
sfc_sw_xstats_close(sa);
fail_sw_xstats_init:
sfc_flow_fini(sa);
sfc_mae_detach(sa);

View File

@ -217,6 +217,14 @@ struct sfc_counter_rxq {
struct rte_mempool *mp;
};
struct sfc_sw_xstats {
uint64_t *reset_vals;
rte_spinlock_t queues_bitmap_lock;
void *queues_bitmap_mem;
struct rte_bitmap *queues_bitmap;
};
/* Adapter private data */
struct sfc_adapter {
/*
@ -249,6 +257,7 @@ struct sfc_adapter {
struct sfc_sriov sriov;
struct sfc_intr intr;
struct sfc_port port;
struct sfc_sw_xstats sw_xstats;
struct sfc_filter filter;
struct sfc_mae mae;

View File

@ -42,6 +42,16 @@ enum sfc_dp_type {
/** Datapath queue run-time information */
struct sfc_dp_queue {
/*
* Typically the structure is located at the end of Rx/Tx queue
* data structure and not used on datapath. So, it is not a
* problem to have extra fields even if not used. However,
* put stats at top of the structure to be closer to fields
* used on datapath or reap to have more chances to be cache-hot.
*/
uint32_t rx_dbells;
uint32_t tx_dbells;
uint16_t port_id;
uint16_t queue_id;
struct rte_pci_addr pci_addr;

View File

@ -99,7 +99,7 @@ sfc_ef10_ev_present(const efx_qword_t ev)
static inline void
sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added,
unsigned int ptr_mask)
unsigned int ptr_mask, uint32_t *dbell_counter)
{
efx_dword_t dword;
@ -118,6 +118,7 @@ sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added,
* operations that follow it (i.e. doorbell write).
*/
rte_write32(dword.ed_u32[0], doorbell);
(*dbell_counter)++;
}
static inline void

View File

@ -119,6 +119,7 @@ sfc_ef100_rx_qpush(struct sfc_ef100_rxq *rxq, unsigned int added)
* operations that follow it (i.e. doorbell write).
*/
rte_write32(dword.ed_u32[0], rxq->doorbell);
rxq->dp.dpq.rx_dbells++;
sfc_ef100_rx_debug(rxq, "RxQ pushed doorbell at pidx %u (added=%u)",
EFX_DWORD_FIELD(dword, ERF_GZ_RX_RING_PIDX),

View File

@ -489,6 +489,7 @@ sfc_ef100_tx_qpush(struct sfc_ef100_txq *txq, unsigned int added)
* operations that follow it (i.e. doorbell write).
*/
rte_write32(dword.ed_u32[0], txq->doorbell);
txq->dp.dpq.tx_dbells++;
sfc_ef100_tx_debug(txq, "TxQ pushed doorbell at pidx %u (added=%u)",
EFX_DWORD_FIELD(dword, ERF_GZ_TX_RING_PIDX),

View File

@ -220,7 +220,8 @@ sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq)
SFC_ASSERT(rxq->added != added);
rxq->added = added;
sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask,
&rxq->dp.dpq.rx_dbells);
}
static bool

View File

@ -171,7 +171,8 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
SFC_ASSERT(rxq->added != added);
rxq->added = added;
sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask);
sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask,
&rxq->dp.dpq.rx_dbells);
}
static void

View File

@ -248,6 +248,7 @@ sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
rte_io_wmb();
*(volatile efsys_uint128_t *)txq->doorbell = oword.eo_u128[0];
txq->dp.dpq.tx_dbells++;
}
static unsigned int

View File

@ -28,6 +28,10 @@
#include "sfc_flow.h"
#include "sfc_dp.h"
#include "sfc_dp_rx.h"
#include "sfc_sw_stats.h"
#define SFC_XSTAT_ID_INVALID_VAL UINT64_MAX
#define SFC_XSTAT_ID_INVALID_NAME '\0'
uint32_t sfc_logtype_driver;
@ -714,29 +718,49 @@ sfc_stats_reset(struct rte_eth_dev *dev)
if (rc != 0)
sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
sfc_sw_xstats_reset(sa);
sfc_adapter_unlock(sa);
SFC_ASSERT(rc >= 0);
return -rc;
}
/*
 * Get the total number of supported xstats: MAC statistics reported by
 * the NIC plus software xstats (e.g. per-queue doorbell counters).
 * Takes the adapter lock since both counts are adapter state.
 */
static unsigned int
sfc_xstats_get_nb_supported(struct sfc_adapter *sa)
{
	struct sfc_port *port = &sa->port;
	unsigned int nb_supported;

	sfc_adapter_lock(sa);
	nb_supported = port->mac_stats_nb_supported +
		       sfc_sw_xstats_get_nb_supported(sa);
	sfc_adapter_unlock(sa);

	return nb_supported;
}
static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned int xstats_count)
{
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
unsigned int nb_written = 0;
unsigned int nb_supp;
unsigned int nb_supported = 0;
int rc;
if (unlikely(xstats == NULL)) {
sfc_adapter_lock(sa);
nb_supp = port->mac_stats_nb_supported;
sfc_adapter_unlock(sa);
return nb_supp;
}
if (unlikely(xstats == NULL))
return sfc_xstats_get_nb_supported(sa);
return sfc_port_get_mac_stats(sa, xstats, xstats_count, &nb_written);
rc = sfc_port_get_mac_stats(sa, xstats, xstats_count, &nb_written);
if (rc < 0)
return rc;
nb_supported = rc;
sfc_sw_xstats_get_vals(sa, xstats, xstats_count, &nb_written,
&nb_supported);
return nb_supported;
}
static int
@ -748,24 +772,31 @@ sfc_xstats_get_names(struct rte_eth_dev *dev,
struct sfc_port *port = &sa->port;
unsigned int i;
unsigned int nstats = 0;
unsigned int nb_written = 0;
int ret;
if (unlikely(xstats_names == NULL)) {
sfc_adapter_lock(sa);
nstats = port->mac_stats_nb_supported;
sfc_adapter_unlock(sa);
return nstats;
}
if (unlikely(xstats_names == NULL))
return sfc_xstats_get_nb_supported(sa);
for (i = 0; i < EFX_MAC_NSTATS; ++i) {
if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
if (nstats < xstats_count)
if (nstats < xstats_count) {
strlcpy(xstats_names[nstats].name,
efx_mac_stat_name(sa->nic, i),
sizeof(xstats_names[0].name));
nb_written++;
}
nstats++;
}
}
ret = sfc_sw_xstats_get_names(sa, xstats_names, xstats_count,
&nb_written, &nstats);
if (ret != 0) {
SFC_ASSERT(ret < 0);
return ret;
}
return nstats;
}
@ -774,11 +805,35 @@ sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
uint64_t *values, unsigned int n)
{
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct sfc_port *port = &sa->port;
unsigned int nb_supported;
unsigned int i;
int rc;
if (unlikely(ids == NULL || values == NULL))
return -EINVAL;
return sfc_port_get_mac_stats_by_id(sa, ids, values, n);
/*
* Values array could be filled in nonsequential order. Fill values with
* constant indicating invalid ID first.
*/
for (i = 0; i < n; i++)
values[i] = SFC_XSTAT_ID_INVALID_VAL;
rc = sfc_port_get_mac_stats_by_id(sa, ids, values, n);
if (rc != 0)
return rc;
nb_supported = port->mac_stats_nb_supported;
sfc_sw_xstats_get_vals_by_id(sa, ids, values, n, &nb_supported);
/* Return number of written stats before invalid ID is encountered. */
for (i = 0; i < n; i++) {
if (values[i] == SFC_XSTAT_ID_INVALID_VAL)
return i;
}
return n;
}
static int
@ -790,18 +845,23 @@ sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
struct sfc_port *port = &sa->port;
unsigned int nb_supported;
unsigned int i;
int ret;
if (unlikely(xstats_names == NULL && ids != NULL) ||
unlikely(xstats_names != NULL && ids == NULL))
return -EINVAL;
sfc_adapter_lock(sa);
if (unlikely(xstats_names == NULL && ids == NULL))
return sfc_xstats_get_nb_supported(sa);
if (unlikely(xstats_names == NULL && ids == NULL)) {
nb_supported = port->mac_stats_nb_supported;
sfc_adapter_unlock(sa);
return nb_supported;
}
/*
* Names array could be filled in nonsequential order. Fill names with
* string indicating invalid ID first.
*/
for (i = 0; i < size; i++)
xstats_names[i].name[0] = SFC_XSTAT_ID_INVALID_NAME;
sfc_adapter_lock(sa);
SFC_ASSERT(port->mac_stats_nb_supported <=
RTE_DIM(port->mac_stats_by_id));
@ -812,14 +872,26 @@ sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
efx_mac_stat_name(sa->nic,
port->mac_stats_by_id[ids[i]]),
sizeof(xstats_names[0].name));
} else {
sfc_adapter_unlock(sa);
return i;
}
}
nb_supported = port->mac_stats_nb_supported;
sfc_adapter_unlock(sa);
ret = sfc_sw_xstats_get_names_by_id(sa, ids, xstats_names, size,
&nb_supported);
if (ret != 0) {
SFC_ASSERT(ret < 0);
return ret;
}
/* Return number of written names before invalid ID is encountered. */
for (i = 0; i < size; i++) {
if (xstats_names[i].name[0] == SFC_XSTAT_ID_INVALID_NAME)
return i;
}
return size;
}

View File

@ -7,6 +7,8 @@
* for Solarflare) and Solarflare Communications, Inc.
*/
#include <rte_bitmap.h>
#include "efx.h"
#include "sfc.h"
@ -701,15 +703,11 @@ sfc_port_get_mac_stats_by_id(struct sfc_adapter *sa, const uint64_t *ids,
RTE_DIM(port->mac_stats_by_id));
for (i = 0; i < n; i++) {
if (ids[i] < port->mac_stats_nb_supported) {
if (ids[i] < port->mac_stats_nb_supported)
values[i] = mac_stats[port->mac_stats_by_id[ids[i]]];
} else {
ret = i;
goto unlock;
}
}
ret = n;
ret = 0;
unlock:
sfc_adapter_unlock(sa);

View File

@ -138,6 +138,7 @@ sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
SFC_ASSERT(added != rxq->added);
rxq->added = added;
efx_rx_qpush(rxq->common, added, &rxq->pushed);
rxq->dp.dpq.rx_dbells++;
}
static uint64_t

View File

@ -0,0 +1,572 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright(c) 2021 Xilinx, Inc.
*/
#include <rte_dev.h>
#include <rte_bitmap.h>
#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_sw_stats.h"
enum sfc_sw_stats_type {
SFC_SW_STATS_RX,
SFC_SW_STATS_TX,
};
typedef uint64_t sfc_get_sw_xstat_val_t(struct sfc_adapter *sa, uint16_t qid);
struct sfc_sw_xstat_descr {
const char *name;
enum sfc_sw_stats_type type;
sfc_get_sw_xstat_val_t *get_val;
};
static sfc_get_sw_xstat_val_t sfc_get_sw_xstat_val_rx_dbells;
/*
 * Fetch the Rx doorbell count of an ethdev Rx queue.
 * Returns 0 for a queue that is not initialized.
 */
static uint64_t
sfc_get_sw_xstat_val_rx_dbells(struct sfc_adapter *sa, uint16_t qid)
{
	const struct sfc_rxq_info *info =
		sfc_rxq_info_by_ethdev_qid(sfc_sa2shared(sa), qid);

	return (info->state & SFC_RXQ_INITIALIZED) ?
	       info->dp->dpq.rx_dbells : 0;
}
static sfc_get_sw_xstat_val_t sfc_get_sw_xstat_val_tx_dbells;
/*
 * Fetch the Tx doorbell count of an ethdev Tx queue.
 * Returns 0 for a queue that is not initialized.
 */
static uint64_t
sfc_get_sw_xstat_val_tx_dbells(struct sfc_adapter *sa, uint16_t qid)
{
	const struct sfc_txq_info *info =
		sfc_txq_info_by_ethdev_qid(sfc_sa2shared(sa), qid);

	return (info->state & SFC_TXQ_INITIALIZED) ?
	       info->dp->dpq.tx_dbells : 0;
}
/*
 * Table of all software xstats supported by the driver. The table order
 * defines the order in which the xstats are reported to the application.
 * The table is private to this file, so give it internal linkage.
 */
static struct sfc_sw_xstat_descr sfc_sw_xstats[] = {
	{
		/* Reported as "rx_dbells" / "rx_qN_dbells" */
		.name = "dbells",
		.type = SFC_SW_STATS_RX,
		.get_val = sfc_get_sw_xstat_val_rx_dbells,
	},
	{
		/* Reported as "tx_dbells" / "tx_qN_dbells" */
		.name = "dbells",
		.type = SFC_SW_STATS_TX,
		.get_val = sfc_get_sw_xstat_val_tx_dbells,
	}
};
/*
 * Compose the exported name of a software xstat into @name.
 *
 * @id_off selects which name within the xstat group is built:
 * 0 is the accumulative (all-queues) stat, e.g. "rx_dbells";
 * a non-zero value is the per-queue stat for queue (id_off - 1),
 * e.g. "rx_q3_dbells".
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int
sfc_sw_stat_get_name(struct sfc_adapter *sa,
		     const struct sfc_sw_xstat_descr *sw_xstat, char *name,
		     size_t name_size, unsigned int id_off)
{
	const char *prefix;
	int ret;

	switch (sw_xstat->type) {
	case SFC_SW_STATS_RX:
		prefix = "rx";
		break;
	case SFC_SW_STATS_TX:
		prefix = "tx";
		break;
	default:
		sfc_err(sa, "%s: unknown software statistics type %d",
			__func__, sw_xstat->type);
		return -EINVAL;
	}

	if (id_off == 0) {
		/* Accumulative xstat name: "<prefix>_<name>" */
		ret = snprintf(name, name_size, "%s_%s", prefix,
			       sw_xstat->name);
		if (ret < 0 || ret >= (int)name_size) {
			/* ret >= name_size means the name was truncated */
			sfc_err(sa, "%s: failed to fill xstat name %s_%s, err %d",
				__func__, prefix, sw_xstat->name, ret);
			return ret > 0 ? -EINVAL : ret;
		}
	} else {
		uint16_t qid = id_off - 1;

		/* Per-queue xstat name: "<prefix>_q<qid>_<name>" */
		ret = snprintf(name, name_size, "%s_q%u_%s", prefix, qid,
			       sw_xstat->name);
		if (ret < 0 || ret >= (int)name_size) {
			sfc_err(sa, "%s: failed to fill xstat name %s_q%u_%s, err %d",
				__func__, prefix, qid, sw_xstat->name, ret);
			return ret > 0 ? -EINVAL : ret;
		}
	}

	return 0;
}
/*
 * Number of ethdev queues the given software xstat covers
 * (Rx or Tx queue count depending on the xstat type).
 * Returns 0 and logs an error for an unknown type.
 */
static unsigned int
sfc_sw_stat_get_queue_count(struct sfc_adapter *sa,
			    const struct sfc_sw_xstat_descr *sw_xstat)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);

	if (sw_xstat->type == SFC_SW_STATS_RX)
		return sas->ethdev_rxq_count;
	if (sw_xstat->type == SFC_SW_STATS_TX)
		return sas->ethdev_txq_count;

	sfc_err(sa, "%s: unknown software statistics type %d",
		__func__, sw_xstat->type);
	return 0;
}
/*
 * Number of xstats a software stat type contributes for nb_queues
 * queues: one accumulative (all-queues) xstat plus one per queue,
 * or nothing at all when there are no queues.
 */
static unsigned int
sfc_sw_xstat_per_queue_get_count(unsigned int nb_queues)
{
	if (nb_queues == 0)
		return 0;

	return nb_queues + 1;
}
/*
 * Number of xstats contributed by one software stat descriptor:
 * the accumulative xstat plus one per covered queue.
 */
static unsigned int
sfc_sw_xstat_get_nb_supported(struct sfc_adapter *sa,
			      const struct sfc_sw_xstat_descr *sw_xstat)
{
	unsigned int nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);

	return sfc_sw_xstat_per_queue_get_count(nb_queues);
}
/*
 * Append the names of one software xstat type to xstats_names[].
 * The accumulative (all-queues) name goes first, followed by one
 * per-queue name.
 *
 * *nb_written is advanced only for names actually stored (bounded by
 * xstats_names_sz); *nb_supported is always advanced by the full count
 * so the caller learns the total even when the array is too small.
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int
sfc_sw_stat_get_names(struct sfc_adapter *sa,
		      const struct sfc_sw_xstat_descr *sw_xstat,
		      struct rte_eth_xstat_name *xstats_names,
		      unsigned int xstats_names_sz,
		      unsigned int *nb_written,
		      unsigned int *nb_supported)
{
	const size_t name_size = sizeof(xstats_names[0].name);
	/* IDs of this xstat type start at the current supported count */
	unsigned int id_base = *nb_supported;
	unsigned int nb_queues;
	unsigned int qid;
	int rc;

	nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
	if (nb_queues == 0)
		return 0;
	*nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);

	/*
	 * The order of each software xstat type is the accumulative xstat
	 * followed by per-queue xstats.
	 */
	if (*nb_written < xstats_names_sz) {
		/* id_off 0 selects the accumulative name */
		rc = sfc_sw_stat_get_name(sa, sw_xstat,
					  xstats_names[*nb_written].name,
					  name_size, *nb_written - id_base);
		if (rc != 0)
			return rc;
		(*nb_written)++;
	}

	for (qid = 0; qid < nb_queues; ++qid) {
		if (*nb_written < xstats_names_sz) {
			/* *nb_written - id_base is qid + 1 here */
			rc = sfc_sw_stat_get_name(sa, sw_xstat,
						  xstats_names[*nb_written].name,
						  name_size, *nb_written - id_base);
			if (rc != 0)
				return rc;
			(*nb_written)++;
		}
	}

	return 0;
}
/*
 * Fill in names for the requested IDs that fall within this software
 * xstat's ID range [id_base, id_base + nb_queues]: id_base is the
 * accumulative xstat, subsequent IDs are per-queue xstats. IDs outside
 * the range are left untouched (the caller pre-fills those entries).
 * *nb_supported is always advanced by the full count of this type.
 *
 * Returns 0 on success or a negative errno value on failure.
 */
static int
sfc_sw_xstat_get_names_by_id(struct sfc_adapter *sa,
			     const struct sfc_sw_xstat_descr *sw_xstat,
			     const uint64_t *ids,
			     struct rte_eth_xstat_name *xstats_names,
			     unsigned int size,
			     unsigned int *nb_supported)
{
	const size_t name_size = sizeof(xstats_names[0].name);
	unsigned int id_base = *nb_supported;
	unsigned int nb_queues;
	unsigned int i;
	int rc;

	nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
	if (nb_queues == 0)
		return 0;
	*nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);

	/*
	 * The order of each software xstat type is the accumulative xstat
	 * followed by per-queue xstats.
	 */
	for (i = 0; i < size; i++) {
		if (id_base <= ids[i] && ids[i] <= id_base + nb_queues) {
			/* id offset 0 is the accumulative name */
			rc = sfc_sw_stat_get_name(sa, sw_xstat,
						  xstats_names[i].name,
						  name_size, ids[i] - id_base);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}
/*
 * Append the values of one software xstat type to xstats[]: the
 * accumulative (all-queues) value first, then one value per queue.
 * The accumulative entry is summed up while the per-queue values are
 * collected, so it is written first and updated in place.
 *
 * *nb_written is advanced only for entries actually stored (bounded by
 * xstats_size); *nb_supported is always advanced by the full count.
 */
static void
sfc_sw_xstat_get_values(struct sfc_adapter *sa,
			const struct sfc_sw_xstat_descr *sw_xstat,
			struct rte_eth_xstat *xstats,
			unsigned int xstats_size,
			unsigned int *nb_written,
			unsigned int *nb_supported)
{
	unsigned int qid;
	uint64_t value;
	struct rte_eth_xstat *accum_xstat;
	/* True only when the accumulative entry fits into xstats[] */
	bool count_accum_value = false;
	unsigned int nb_queues;

	nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
	if (nb_queues == 0)
		return;
	*nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);

	/*
	 * The order of each software xstat type is the accumulative xstat
	 * followed by per-queue xstats.
	 */
	if (*nb_written < xstats_size) {
		count_accum_value = true;
		accum_xstat = &xstats[*nb_written];
		xstats[*nb_written].id = *nb_written;
		xstats[*nb_written].value = 0;
		(*nb_written)++;
	}

	for (qid = 0; qid < nb_queues; ++qid) {
		value = sw_xstat->get_val(sa, qid);

		if (*nb_written < xstats_size) {
			xstats[*nb_written].id = *nb_written;
			xstats[*nb_written].value = value;
			(*nb_written)++;
		}

		/* Fold every queue's value into the accumulative entry */
		if (count_accum_value)
			accum_xstat->value += value;
	}
}
/*
 * Fill in values for the requested IDs that fall within this software
 * xstat's ID range [id_base, id_base + nb_queues]: id_base is the
 * accumulative xstat, subsequent IDs are per-queue values. IDs outside
 * the range are left untouched (the caller pre-fills those entries).
 *
 * The accumulative value is the sum over all queues. Queues already
 * read for explicitly requested per-queue IDs are tracked in the
 * adapter's queues bitmap (under its spinlock) so each queue counter
 * is read only once.
 *
 * *nb_supported is always advanced by the full count of this type.
 */
static void
sfc_sw_xstat_get_values_by_id(struct sfc_adapter *sa,
			      const struct sfc_sw_xstat_descr *sw_xstat,
			      const uint64_t *ids,
			      uint64_t *values,
			      unsigned int ids_size,
			      unsigned int *nb_supported)
{
	rte_spinlock_t *bmp_lock = &sa->sw_xstats.queues_bitmap_lock;
	struct rte_bitmap *bmp = sa->sw_xstats.queues_bitmap;
	unsigned int id_base = *nb_supported;
	bool count_accum_value = false;
	unsigned int accum_value_idx = 0;
	uint64_t accum_value = 0;
	unsigned int i, qid;
	unsigned int nb_queues;

	rte_spinlock_lock(bmp_lock);
	rte_bitmap_reset(bmp);

	nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
	if (nb_queues == 0)
		goto unlock;
	*nb_supported += sfc_sw_xstat_per_queue_get_count(nb_queues);

	/*
	 * The order of each software xstat type is the accumulative xstat
	 * followed by per-queue xstats.
	 */
	for (i = 0; i < ids_size; i++) {
		if (id_base <= ids[i] && ids[i] <= (id_base + nb_queues)) {
			if (ids[i] == id_base) { /* Accumulative value */
				count_accum_value = true;
				accum_value_idx = i;
				continue;
			}
			qid = ids[i] - id_base - 1;
			values[i] = sw_xstat->get_val(sa, qid);
			accum_value += values[i];

			/* Remember queues already counted in accum_value */
			rte_bitmap_set(bmp, qid);
		}
	}

	if (count_accum_value) {
		/*
		 * The caller pre-fills values[] with an invalid-ID marker
		 * (UINT64_MAX); reset the slot before accumulating into it
		 * to avoid wrapping around the sentinel.
		 */
		values[accum_value_idx] = 0;
		/* Add the queues that were not requested explicitly */
		for (qid = 0; qid < nb_queues; ++qid) {
			if (rte_bitmap_get(bmp, qid) != 0)
				continue;
			values[accum_value_idx] += sw_xstat->get_val(sa, qid);
		}
		values[accum_value_idx] += accum_value;
	}

unlock:
	rte_spinlock_unlock(bmp_lock);
}
/*
 * Total number of software xstats supported by the adapter.
 * The adapter lock must be held by the caller.
 */
unsigned int
sfc_sw_xstats_get_nb_supported(struct sfc_adapter *sa)
{
	unsigned int total = 0;
	unsigned int i;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++)
		total += sfc_sw_xstat_get_nb_supported(sa, &sfc_sw_xstats[i]);

	return total;
}
/*
 * Append all software xstat values to xstats[], then subtract the reset
 * baselines recorded by sfc_sw_xstats_reset() so values appear to start
 * from zero at the last reset. Takes the adapter lock.
 */
void
sfc_sw_xstats_get_vals(struct sfc_adapter *sa,
		       struct rte_eth_xstat *xstats,
		       unsigned int xstats_count,
		       unsigned int *nb_written,
		       unsigned int *nb_supported)
{
	uint64_t *reset_vals = sa->sw_xstats.reset_vals;
	unsigned int sw_xstats_offset;
	unsigned int i;

	sfc_adapter_lock(sa);

	/* Software xstats start after whatever was already written */
	sw_xstats_offset = *nb_supported;

	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
		sfc_sw_xstat_get_values(sa, &sfc_sw_xstats[i], xstats,
					xstats_count, nb_written, nb_supported);
	}

	/* Apply reset baselines to the entries written above */
	for (i = sw_xstats_offset; i < *nb_written; i++)
		xstats[i].value -= reset_vals[i - sw_xstats_offset];

	sfc_adapter_unlock(sa);
}
/*
 * Append the names of all software xstats to xstats_names[].
 * Takes the adapter lock.
 * Returns 0 on success or a negative errno value on failure.
 */
int
sfc_sw_xstats_get_names(struct sfc_adapter *sa,
			struct rte_eth_xstat_name *xstats_names,
			unsigned int xstats_count,
			unsigned int *nb_written,
			unsigned int *nb_supported)
{
	int ret = 0;
	unsigned int i;

	sfc_adapter_lock(sa);

	/* Stop at the first descriptor that fails to produce its names */
	for (i = 0; ret == 0 && i < RTE_DIM(sfc_sw_xstats); i++) {
		ret = sfc_sw_stat_get_names(sa, &sfc_sw_xstats[i],
					    xstats_names, xstats_count,
					    nb_written, nb_supported);
	}

	sfc_adapter_unlock(sa);

	return ret;
}
/*
 * Fill in values for the requested software xstat IDs, then subtract
 * the reset baselines for every ID that belongs to the software xstats
 * range. Entries whose IDs are outside the range are left untouched.
 * Takes the adapter lock.
 */
void
sfc_sw_xstats_get_vals_by_id(struct sfc_adapter *sa,
			     const uint64_t *ids,
			     uint64_t *values,
			     unsigned int n,
			     unsigned int *nb_supported)
{
	uint64_t *reset_vals = sa->sw_xstats.reset_vals;
	unsigned int sw_xstats_offset;
	unsigned int i;

	sfc_adapter_lock(sa);

	/* Software xstat IDs start after whatever was already counted */
	sw_xstats_offset = *nb_supported;

	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
		sfc_sw_xstat_get_values_by_id(sa, &sfc_sw_xstats[i], ids,
					      values, n, nb_supported);
	}

	/* Apply reset baselines to software xstat IDs only */
	for (i = 0; i < n; i++) {
		if (sw_xstats_offset <= ids[i] && ids[i] < *nb_supported)
			values[i] -= reset_vals[ids[i] - sw_xstats_offset];
	}

	sfc_adapter_unlock(sa);
}
/*
 * Fill in names for the requested software xstat IDs.
 * Takes the adapter lock.
 * Returns 0 on success or a negative errno value on failure.
 */
int
sfc_sw_xstats_get_names_by_id(struct sfc_adapter *sa,
			      const uint64_t *ids,
			      struct rte_eth_xstat_name *xstats_names,
			      unsigned int size,
			      unsigned int *nb_supported)
{
	int ret = 0;
	unsigned int i;

	sfc_adapter_lock(sa);

	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
		ret = sfc_sw_xstat_get_names_by_id(sa, &sfc_sw_xstats[i], ids,
						   xstats_names, size,
						   nb_supported);
		if (ret != 0) {
			/* Helpers report failures as negative errno */
			SFC_ASSERT(ret < 0);
			break;
		}
	}

	sfc_adapter_unlock(sa);

	return ret;
}
/*
 * Record the current values of one software xstat type as its reset
 * baseline: reset_vals[0] receives the accumulative (sum over queues)
 * value, reset_vals[1..nb_queues] the per-queue values. Later reads
 * subtract these baselines so the stats appear reset to zero.
 * The adapter lock must be held by the caller.
 */
static void
sfc_sw_xstat_reset(struct sfc_adapter *sa, struct sfc_sw_xstat_descr *sw_xstat,
		   uint64_t *reset_vals)
{
	unsigned int nb_queues;
	unsigned int qid;
	uint64_t *accum_xstat_reset;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	nb_queues = sfc_sw_stat_get_queue_count(sa, sw_xstat);
	if (nb_queues == 0)
		return;

	/*
	 * The order of each software xstat type is the accumulative xstat
	 * followed by per-queue xstats.
	 */
	accum_xstat_reset = reset_vals;
	*accum_xstat_reset = 0;
	reset_vals++;

	for (qid = 0; qid < nb_queues; ++qid) {
		reset_vals[qid] = sw_xstat->get_val(sa, qid);
		/* The accumulative baseline is the sum of per-queue ones */
		*accum_xstat_reset += reset_vals[qid];
	}
}
void
sfc_sw_xstats_reset(struct sfc_adapter *sa)
{
uint64_t *reset_vals = sa->sw_xstats.reset_vals;
struct sfc_sw_xstat_descr *sw_xstat;
unsigned int i;
SFC_ASSERT(sfc_adapter_is_locked(sa));
for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++) {
sw_xstat = &sfc_sw_xstats[i];
sfc_sw_xstat_reset(sa, sw_xstat, reset_vals);
reset_vals += sfc_sw_xstat_get_nb_supported(sa, sw_xstat);
}
}
/*
 * (Re)allocate the array of reset baselines to match the number of
 * software xstats currently supported and zero-initialize it.
 * Returns 0 on success or a positive errno value on failure.
 */
int
sfc_sw_xstats_configure(struct sfc_adapter *sa)
{
	uint64_t *new_vals;
	size_t nb_supported = 0;
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_sw_xstats); i++)
		nb_supported += sfc_sw_xstat_get_nb_supported(sa,
							      &sfc_sw_xstats[i]);

	/*
	 * Do not assign the result of rte_realloc() directly to the stored
	 * pointer: on failure rte_realloc() returns NULL and the previous
	 * allocation would be both leaked and lost for
	 * sfc_sw_xstats_close() to free.
	 */
	new_vals = rte_realloc(sa->sw_xstats.reset_vals,
			       nb_supported * sizeof(*new_vals), 0);
	if (new_vals == NULL)
		return ENOMEM;
	sa->sw_xstats.reset_vals = new_vals;

	memset(new_vals, 0, nb_supported * sizeof(*new_vals));
	return 0;
}
static void
sfc_sw_xstats_free_queues_bitmap(struct sfc_adapter *sa)
{
rte_bitmap_free(sa->sw_xstats.queues_bitmap);
rte_free(sa->sw_xstats.queues_bitmap_mem);
}
/*
 * Allocate the bitmap used by sfc_sw_xstat_get_values_by_id() to track
 * queues already accounted in the accumulative value. Sized for the
 * maximum possible number of queues per port so it never needs resizing.
 * Returns 0 on success or a positive errno value on failure.
 */
static int
sfc_sw_xstats_alloc_queues_bitmap(struct sfc_adapter *sa)
{
	struct rte_bitmap **queues_bitmap = &sa->sw_xstats.queues_bitmap;
	void **queues_bitmap_mem = &sa->sw_xstats.queues_bitmap_mem;
	uint32_t bmp_size;
	int rc;

	bmp_size = rte_bitmap_get_memory_footprint(RTE_MAX_QUEUES_PER_PORT);
	*queues_bitmap_mem = NULL;
	*queues_bitmap = NULL;

	/* Allocate on the adapter's NUMA node to keep accesses local */
	*queues_bitmap_mem = rte_calloc_socket("bitmap_mem", bmp_size, 1, 0,
					       sa->socket_id);
	if (*queues_bitmap_mem == NULL)
		return ENOMEM;

	*queues_bitmap = rte_bitmap_init(RTE_MAX_QUEUES_PER_PORT,
					 *queues_bitmap_mem, bmp_size);
	if (*queues_bitmap == NULL) {
		rc = EINVAL;
		goto fail;
	}

	rte_spinlock_init(&sa->sw_xstats.queues_bitmap_lock);
	return 0;

fail:
	sfc_sw_xstats_free_queues_bitmap(sa);
	return rc;
}
/*
 * One-time initialization of the software xstats state at attach time.
 * Reset baselines are allocated later, in sfc_sw_xstats_configure().
 * Returns 0 on success or a positive errno value on failure.
 */
int
sfc_sw_xstats_init(struct sfc_adapter *sa)
{
	sa->sw_xstats.reset_vals = NULL;

	return sfc_sw_xstats_alloc_queues_bitmap(sa);
}
/*
 * Release all software xstats resources; counterpart of
 * sfc_sw_xstats_init() / sfc_sw_xstats_configure().
 */
void
sfc_sw_xstats_close(struct sfc_adapter *sa)
{
	rte_free(sa->sw_xstats.reset_vals);
	sa->sw_xstats.reset_vals = NULL;

	sfc_sw_xstats_free_queues_bitmap(sa);
}

View File

@ -0,0 +1,49 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright(c) 2021 Xilinx, Inc.
*/
#ifndef _SFC_SW_STATS_H
#define _SFC_SW_STATS_H

#include <rte_dev.h>

#include "sfc.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Append software xstat values (minus reset baselines) to xstats[] */
void sfc_sw_xstats_get_vals(struct sfc_adapter *sa,
			    struct rte_eth_xstat *xstats,
			    unsigned int xstats_count, unsigned int *nb_written,
			    unsigned int *nb_supported);

/* Append software xstat names; returns 0 or negative errno */
int sfc_sw_xstats_get_names(struct sfc_adapter *sa,
			    struct rte_eth_xstat_name *xstats_names,
			    unsigned int xstats_count, unsigned int *nb_written,
			    unsigned int *nb_supported);

/* Fill in values for requested software xstat IDs */
void sfc_sw_xstats_get_vals_by_id(struct sfc_adapter *sa, const uint64_t *ids,
				  uint64_t *values, unsigned int n,
				  unsigned int *nb_supported);

/* Fill in names for requested software xstat IDs; 0 or negative errno */
int sfc_sw_xstats_get_names_by_id(struct sfc_adapter *sa, const uint64_t *ids,
				  struct rte_eth_xstat_name *xstats_names,
				  unsigned int size,
				  unsigned int *nb_supported);

/* Total number of software xstats (adapter lock must be held) */
unsigned int sfc_sw_xstats_get_nb_supported(struct sfc_adapter *sa);

/* Lifecycle: init at attach, configure at device configure, close at detach */
int sfc_sw_xstats_configure(struct sfc_adapter *sa);

/* Record current values as reset baselines (adapter lock must be held) */
void sfc_sw_xstats_reset(struct sfc_adapter *sa);

int sfc_sw_xstats_init(struct sfc_adapter *sa);

void sfc_sw_xstats_close(struct sfc_adapter *sa);

#ifdef __cplusplus
}
#endif

#endif /* _SFC_SW_STATS_H */

View File

@ -980,8 +980,10 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->completed, &txq->added);
SFC_ASSERT(rc == 0);
if (likely(pushed != txq->added))
if (likely(pushed != txq->added)) {
efx_tx_qpush(txq->common, txq->added, pushed);
txq->dp.dpq.tx_dbells++;
}
}
#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE