net/mlx4: remove soft counters compilation option

Software counters are mandatory since hardware counters are not
implemented.

Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
This commit is contained in:
Adrien Mazarguil 2017-09-01 10:06:22 +02:00 committed by Ferruh Yigit
parent 863f34f710
commit 31a76ab0df
5 changed files with 0 additions and 60 deletions

View File

@@ -218,7 +218,6 @@ CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS=n
CONFIG_RTE_LIBRTE_MLX4_SGE_WR_N=4
CONFIG_RTE_LIBRTE_MLX4_MAX_INLINE=0
CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8
CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS=1
#
# Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD

View File

@@ -97,7 +97,6 @@ Limitations
- RSS RETA cannot be configured
- RSS always includes L3 (IPv4/IPv6) and L4 (UDP/TCP). They cannot be
dissociated.
- Hardware counters are not implemented (they are software counters).
Configuration
-------------
@@ -145,11 +144,6 @@ These options can be modified in the ``.config`` file.
This value is always 1 for RX queues since they use a single MP.
- ``CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS`` (default **1**)
Toggle software counters. No counters are available if this option is
disabled since hardware counters are not supported.
Environment variables
~~~~~~~~~~~~~~~~~~~~~

View File

@@ -80,10 +80,6 @@ ifdef CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE
CFLAGS += -DMLX4_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE)
endif
ifdef CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS
CFLAGS += -DMLX4_PMD_SOFT_COUNTERS=$(CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS)
endif
ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS),y)
CFLAGS += -DMLX4_PMD_DEBUG_BROKEN_VERBS
endif

View File

@@ -34,7 +34,6 @@
/*
* Known limitations:
* - RSS hash key and options cannot be modified.
* - Hardware counters aren't implemented.
*/
/* System headers. */
@@ -1372,9 +1371,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
struct txq_elt *elt = &(*txq->elts)[elts_head];
unsigned int segs = NB_SEGS(buf);
#ifdef MLX4_PMD_SOFT_COUNTERS
unsigned int sent_size = 0;
#endif
uint32_t send_flags = 0;
/* Clean up old buffer. */
@@ -1452,9 +1449,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
send_flags);
if (unlikely(err))
goto stop;
#ifdef MLX4_PMD_SOFT_COUNTERS
sent_size += length;
#endif
} else {
#if MLX4_PMD_SGE_WR_N > 1
struct ibv_sge sges[MLX4_PMD_SGE_WR_N];
@@ -1473,9 +1468,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
send_flags);
if (unlikely(err))
goto stop;
#ifdef MLX4_PMD_SOFT_COUNTERS
sent_size += ret.length;
#endif
#else /* MLX4_PMD_SGE_WR_N > 1 */
DEBUG("%p: TX scattered buffers support not"
" compiled in", (void *)txq);
@@ -1483,19 +1476,15 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
#endif /* MLX4_PMD_SGE_WR_N > 1 */
}
elts_head = elts_head_next;
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += sent_size;
#endif
}
stop:
/* Take a shortcut if nothing must be sent. */
if (unlikely(i == 0))
return 0;
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increment sent packets counter. */
txq->stats.opackets += i;
#endif
/* Ring QP doorbell. */
err = txq->if_qp->send_flush(txq->qp);
if (unlikely(err)) {
@@ -2786,10 +2775,8 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
" completion status (%d): %s",
(void *)rxq, wc.wr_id, wc.status,
ibv_wc_status_str(wc.status));
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increment dropped packets counter. */
++rxq->stats.idropped;
#endif
/* Link completed WRs together for repost. */
*next = wr;
next = &wr->next;
@@ -2901,10 +2888,8 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Return packet. */
*(pkts++) = pkt_buf;
++pkts_ret;
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase bytes counter. */
rxq->stats.ibytes += pkt_buf_len;
#endif
repost:
if (++elts_head >= elts_n)
elts_head = 0;
@@ -2924,10 +2909,8 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
abort();
}
rxq->elts_head = elts_head;
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase packets counter. */
rxq->stats.ipackets += pkts_ret;
#endif
return pkts_ret;
}
@@ -3008,10 +2991,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
" completion status (%d): %s",
(void *)rxq, wc.wr_id, wc.status,
ibv_wc_status_str(wc.status));
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increment dropped packets counter. */
++rxq->stats.idropped;
#endif
/* Add SGE to array for repost. */
sges[i] = elt->sge;
goto repost;
@@ -3062,10 +3043,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Return packet. */
*(pkts++) = seg;
++pkts_ret;
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase bytes counter. */
rxq->stats.ibytes += len;
#endif
repost:
if (++elts_head >= elts_n)
elts_head = 0;
@@ -3083,10 +3062,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
abort();
}
rxq->elts_head = elts_head;
#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase packets counter. */
rxq->stats.ipackets += pkts_ret;
#endif
return pkts_ret;
}
@@ -4270,17 +4247,13 @@ mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
continue;
idx = rxq->stats.idx;
if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
#ifdef MLX4_PMD_SOFT_COUNTERS
tmp.q_ipackets[idx] += rxq->stats.ipackets;
tmp.q_ibytes[idx] += rxq->stats.ibytes;
#endif
tmp.q_errors[idx] += (rxq->stats.idropped +
rxq->stats.rx_nombuf);
}
#ifdef MLX4_PMD_SOFT_COUNTERS
tmp.ipackets += rxq->stats.ipackets;
tmp.ibytes += rxq->stats.ibytes;
#endif
tmp.ierrors += rxq->stats.idropped;
tmp.rx_nombuf += rxq->stats.rx_nombuf;
}
@@ -4291,21 +4264,14 @@ mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
continue;
idx = txq->stats.idx;
if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
#ifdef MLX4_PMD_SOFT_COUNTERS
tmp.q_opackets[idx] += txq->stats.opackets;
tmp.q_obytes[idx] += txq->stats.obytes;
#endif
tmp.q_errors[idx] += txq->stats.odropped;
}
#ifdef MLX4_PMD_SOFT_COUNTERS
tmp.opackets += txq->stats.opackets;
tmp.obytes += txq->stats.obytes;
#endif
tmp.oerrors += txq->stats.odropped;
}
#ifndef MLX4_PMD_SOFT_COUNTERS
/* FIXME: retrieve and add hardware counters. */
#endif
*stats = tmp;
priv_unlock(priv);
}
@@ -4340,9 +4306,6 @@ mlx4_stats_reset(struct rte_eth_dev *dev)
(*priv->txqs)[i]->stats =
(struct mlx4_txq_stats){ .idx = idx };
}
#ifndef MLX4_PMD_SOFT_COUNTERS
/* FIXME: reset hardware counters. */
#endif
priv_unlock(priv);
}

View File

@@ -101,14 +101,6 @@
#define MLX4_PMD_TX_MP_CACHE 8
#endif
/*
* If defined, only use software counters. The PMD will never ask the hardware
* for these, and many of them won't be available.
*/
#ifndef MLX4_PMD_SOFT_COUNTERS
#define MLX4_PMD_SOFT_COUNTERS 1
#endif
/* Alarm timeout. */
#define MLX4_ALARM_TIMEOUT_US 100000
@@ -186,10 +178,8 @@ enum {
struct mlx4_rxq_stats {
unsigned int idx; /**< Mapping index. */
#ifdef MLX4_PMD_SOFT_COUNTERS
uint64_t ipackets; /**< Total of successfully received packets. */
uint64_t ibytes; /**< Total of successfully received bytes. */
#endif
uint64_t idropped; /**< Total of packets dropped when RX ring full. */
uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};
@@ -252,10 +242,8 @@ struct txq_elt {
struct mlx4_txq_stats {
unsigned int idx; /**< Mapping index. */
#ifdef MLX4_PMD_SOFT_COUNTERS
uint64_t opackets; /**< Total of successfully sent packets. */
uint64_t obytes; /**< Total of successfully sent bytes. */
#endif
uint64_t odropped; /**< Total of packets not sent when TX ring full. */
};