net/mlx5: remove redundant queue index

The queue index is stored redundantly in both the Rx and Tx structures,
e.g. txq_ctrl->idx and txq->stats.idx. Consolidate each into a single
field: rxq->idx and txq->idx.

Also, rxq and txq are moved to the beginning of their control structures
(rxq_ctrl and txq_ctrl) for cacheline alignment.
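
As a side note, here is a minimal sketch of what this layout buys, using
hypothetical trimmed-down stand-ins rather than the actual mlx5
definitions: with the data path struct as the first member,
container_of() back to the control structure is a zero-offset cast, and
the hot data path fields begin on the control structure's first
cacheline.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical stand-ins for mlx5_rxq_data / mlx5_rxq_ctrl. */
    struct rxq_data {
            uint16_t idx; /* The single remaining copy of the index. */
    };

    struct rxq_ctrl {
            struct rxq_data rxq; /* First member: zero offset. */
            unsigned int socket;
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
            struct rxq_ctrl ctrl = { .rxq = { .idx = 3 }, .socket = 0 };
            struct rxq_data *rxq = &ctrl.rxq;

            /* Recovering ctrl from the data path struct is free. */
            assert(offsetof(struct rxq_ctrl, rxq) == 0);
            assert(container_of(rxq, struct rxq_ctrl, rxq) == &ctrl);
            /* Both views read the same stored index. */
            assert(ctrl.rxq.idx == rxq->idx);
            return 0;
    }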

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
Yongseok Koh authored 2019-04-10 11:41:16 -07:00; committed by Ferruh Yigit
parent 227684feb8
commit d5c900d1dd
5 changed files with 35 additions and 42 deletions

drivers/net/mlx5/mlx5_rxq.c

@@ -156,7 +156,7 @@ rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	}
 	DRV_LOG(DEBUG,
 		"port %u Rx queue %u allocated and configured %u segments",
-		rxq->port_id, rxq_ctrl->idx, wqe_n);
+		rxq->port_id, rxq->idx, wqe_n);
 	return 0;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
@@ -168,7 +168,7 @@ error:
 		(*rxq->mprq_bufs)[i] = NULL;
 	}
 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
-		rxq->port_id, rxq_ctrl->idx);
+		rxq->port_id, rxq->idx);
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -241,7 +241,7 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	DRV_LOG(DEBUG,
 		"port %u Rx queue %u allocated and configured %u segments"
 		" (max %u packets)",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
 		elts_n / (1 << rxq_ctrl->rxq.sges_n));
 	return 0;
 error:
@@ -253,7 +253,7 @@ error:
 		(*rxq_ctrl->rxq.elts)[i] = NULL;
 	}
 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -287,7 +287,7 @@ rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	uint16_t i;
 
 	DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
-		rxq->port_id, rxq_ctrl->idx);
+		rxq->port_id, rxq->idx);
 	if (rxq->mprq_bufs == NULL)
 		return;
 	assert(mlx5_rxq_check_vec_support(rxq) < 0);
@@ -318,7 +318,7 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	uint16_t i;
 
 	DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq->idx);
 	if (rxq->elts == NULL)
 		return;
 	/**
@@ -364,7 +364,7 @@ void
 mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
-		PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
 	if (rxq_ctrl->ibv)
 		mlx5_rxq_ibv_release(rxq_ctrl->ibv);
 	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
@@ -495,11 +495,11 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 		return;
 	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	priv = rxq_ctrl->priv;
-	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
+	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
 		rte_panic("port %u Rx queue %u is still used by a flow and"
 			  " cannot be removed\n",
-			  PORT_ID(priv), rxq_ctrl->idx);
-	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
+			  PORT_ID(priv), rxq->idx);
+	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
 }
 
 /**
@@ -793,7 +793,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	if (!tmpl) {
 		DRV_LOG(ERR,
 			"port %u Rx queue %u cannot allocate verbs resources",
-			dev->data->port_id, rxq_ctrl->idx);
+			dev->data->port_id, rxq_data->idx);
 		rte_errno = ENOMEM;
 		goto error;
 	}
@@ -1104,7 +1104,7 @@ mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
 
 	LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
 		DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
-			dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
+			dev->data->port_id, rxq_ibv->rxq_ctrl->rxq.idx);
 		++ret;
 	}
 	return ret;
@@ -1470,7 +1470,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
 	tmpl->rxq.mp = mp;
-	tmpl->rxq.stats.idx = idx;
 	tmpl->rxq.elts_n = log2above(desc);
 	tmpl->rxq.rq_repl_thresh =
 		MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
@@ -1479,7 +1478,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 #ifndef RTE_ARCH_64
 	tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
 #endif
-	tmpl->idx = idx;
+	tmpl->rxq.idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
@@ -1592,7 +1591,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
 
 	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
 		DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
-			dev->data->port_id, rxq_ctrl->idx);
+			dev->data->port_id, rxq_ctrl->rxq.idx);
 		++ret;
 	}
 	return ret;

drivers/net/mlx5/mlx5_rxtx.h

@@ -41,7 +41,6 @@
 #define MLX5_FLOW_TUNNEL 5
 
 struct mlx5_rxq_stats {
-	unsigned int idx; /**< Mapping index. */
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	uint64_t ipackets; /**< Total of successfully received packets. */
 	uint64_t ibytes; /**< Total of successfully received bytes. */
@@ -51,7 +50,6 @@ struct mlx5_rxq_stats {
 };
 
 struct mlx5_txq_stats {
-	unsigned int idx; /**< Mapping index. */
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	uint64_t opackets; /**< Total of successfully sent packets. */
 	uint64_t obytes; /**< Total of successfully sent bytes. */
@@ -116,6 +114,7 @@ struct mlx5_rxq_data {
 	struct rte_mempool *mp;
 	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
 	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
+	uint16_t idx; /* Queue index. */
 	struct mlx5_rxq_stats stats;
 	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
 	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
@@ -141,14 +140,13 @@ struct mlx5_rxq_ibv {
 
 /* RX queue control descriptor. */
 struct mlx5_rxq_ctrl {
+	struct mlx5_rxq_data rxq; /* Data path structure. */
 	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
-	struct mlx5_rxq_data rxq; /* Data path structure. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	unsigned int irq:1; /* Whether IRQ is enabled. */
-	uint16_t idx; /* Queue index. */
 	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
 };
@@ -205,6 +203,7 @@ struct mlx5_txq_data {
 	volatile uint32_t *cq_db; /* Completion queue doorbell. */
 	volatile void *bf_reg; /* Blueflame register remapped. */
 	struct rte_mbuf *(*elts)[]; /* TX elements. */
+	uint16_t idx; /* Queue index. */
 	struct mlx5_txq_stats stats; /* TX queue counters. */
 #ifndef RTE_ARCH_64
 	rte_spinlock_t *uar_lock;
@@ -223,6 +222,7 @@ struct mlx5_txq_ibv {
 
 /* TX queue control descriptor. */
 struct mlx5_txq_ctrl {
+	struct mlx5_txq_data txq; /* Data path structure. */
 	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	unsigned int socket; /* CPU socket ID for allocations. */
@@ -230,10 +230,8 @@ struct mlx5_txq_ctrl {
 	unsigned int max_tso_header; /* Max TSO header size. */
 	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
-	struct mlx5_txq_data txq; /* Data path structure. */
 	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
 	volatile void *bf_reg_orig; /* Blueflame register from verbs. */
-	uint16_t idx; /* Queue index. */
 };
 
 /* mlx5_rxq.c */

drivers/net/mlx5/mlx5_stats.c

@@ -386,7 +386,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 		if (rxq == NULL)
 			continue;
-		idx = rxq->stats.idx;
+		idx = rxq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			tmp.q_ipackets[idx] += rxq->stats.ipackets;
@@ -407,7 +407,7 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 		if (txq == NULL)
 			continue;
-		idx = txq->stats.idx;
+		idx = txq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			tmp.q_opackets[idx] += txq->stats.opackets;
@@ -442,21 +442,18 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl;
 	unsigned int i;
-	unsigned int idx;
 
 	for (i = 0; (i != priv->rxqs_n); ++i) {
 		if ((*priv->rxqs)[i] == NULL)
 			continue;
-		idx = (*priv->rxqs)[i]->stats.idx;
-		(*priv->rxqs)[i]->stats =
-			(struct mlx5_rxq_stats){ .idx = idx };
+		memset(&(*priv->rxqs)[i]->stats, 0,
+		       sizeof(struct mlx5_rxq_stats));
 	}
 	for (i = 0; (i != priv->txqs_n); ++i) {
 		if ((*priv->txqs)[i] == NULL)
 			continue;
-		idx = (*priv->txqs)[i]->stats.idx;
-		(*priv->txqs)[i]->stats =
-			(struct mlx5_txq_stats){ .idx = idx };
+		memset(&(*priv->txqs)[i]->stats, 0,
+		       sizeof(struct mlx5_txq_stats));
 	}
 	mlx5_read_ib_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
 #ifndef MLX5_PMD_SOFT_COUNTERS

drivers/net/mlx5/mlx5_trigger.c

@@ -123,7 +123,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 		DRV_LOG(DEBUG,
 			"port %u Rx queue %u registering"
 			" mp %s having %u chunks",
-			dev->data->port_id, rxq_ctrl->idx,
+			dev->data->port_id, rxq_ctrl->rxq.idx,
 			mp->name, mp->nb_mem_chunks);
 		mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
 		ret = rxq_alloc_elts(rxq_ctrl);

drivers/net/mlx5/mlx5_txq.c

@@ -48,7 +48,7 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	for (i = 0; (i != elts_n); ++i)
 		(*txq_ctrl->txq.elts)[i] = NULL;
 	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
-		PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -70,7 +70,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
 
 	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
-		PORT_ID(txq_ctrl->priv), txq_ctrl->idx);
+		PORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -224,7 +224,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
 		if ((*priv->txqs)[i] == txq) {
 			mlx5_txq_release(ETH_DEV(priv), i);
 			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
-				PORT_ID(priv), txq_ctrl->idx);
+				PORT_ID(priv), txq->idx);
 			break;
 		}
 }
@@ -273,7 +273,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 			continue;
 		txq = (*priv->txqs)[i];
 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
-		assert(txq_ctrl->idx == (uint16_t)i);
+		assert(txq->idx == (uint16_t)i);
 		/* UAR addr form verbs used to find dup and offset in page. */
 		uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
 		off = uar_va & (page_size - 1); /* offset in page. */
@@ -301,7 +301,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 				DRV_LOG(ERR,
 					"port %u call to mmap failed on UAR"
 					" for txq %u",
-					dev->data->port_id, txq_ctrl->idx);
+					dev->data->port_id, txq->idx);
 				rte_errno = ENXIO;
 				return -rte_errno;
 			}
@@ -629,7 +629,7 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
 
 	LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
 		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
-			dev->data->port_id, txq_ibv->txq_ctrl->idx);
+			dev->data->port_id, txq_ibv->txq_ctrl->txq.idx);
 		++ret;
 	}
 	return ret;
@@ -778,7 +778,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
-	tmpl->idx = idx;
+	tmpl->txq.idx = idx;
 	txq_set_params(tmpl);
 	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
 		dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
@@ -786,7 +786,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
 	tmpl->txq.elts =
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
-	tmpl->txq.stats.idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
@@ -893,12 +892,12 @@ int
 mlx5_txq_verify(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_txq_ctrl *txq;
+	struct mlx5_txq_ctrl *txq_ctrl;
 	int ret = 0;
 
-	LIST_FOREACH(txq, &priv->txqsctrl, next) {
+	LIST_FOREACH(txq_ctrl, &priv->txqsctrl, next) {
 		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
-			dev->data->port_id, txq->idx);
+			dev->data->port_id, txq_ctrl->txq.idx);
 		++ret;
 	}
 	return ret;