net/ionic: cut down completion queue structure

Add Q_NEXT_TO_POST() and Q_NEXT_TO_SRVC() macros.
Use a precomputed size mask.

This will conserve memory: struct ionic_cq shrinks, its index fields
become uint16_t, and the unused lif, desc_size, and bound_q members
are removed.

Signed-off-by: Andrew Boyer <aboyer@pensando.io>
This commit is contained in:
Andrew Boyer 2021-02-16 12:35:26 -08:00 committed by Ferruh Yigit
parent 378cd4887d
commit 2aed98657a
5 changed files with 27 additions and 33 deletions

View File

@@ -358,14 +358,8 @@ ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq)
}
int
ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
uint32_t num_descs, size_t desc_size)
ionic_cq_init(struct ionic_cq *cq, uint16_t num_descs)
{
if (desc_size == 0) {
IONIC_PRINT(ERR, "Descriptor size is %zu", desc_size);
return -EINVAL;
}
if (!rte_is_power_of_2(num_descs) ||
num_descs < IONIC_MIN_RING_DESC ||
num_descs > IONIC_MAX_RING_DESC) {
@@ -374,9 +368,8 @@ ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
return -EINVAL;
}
cq->lif = lif;
cq->num_descs = num_descs;
cq->desc_size = desc_size;
cq->size_mask = num_descs - 1;
cq->tail_idx = 0;
cq->done_color = 1;
@@ -393,7 +386,6 @@ ionic_cq_map(struct ionic_cq *cq, void *base, rte_iova_t base_pa)
void
ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
{
cq->bound_q = q;
q->bound_cq = cq;
}
@@ -407,7 +399,7 @@ ionic_cq_service(struct ionic_cq *cq, uint32_t work_to_do,
return 0;
while (cb(cq, cq->tail_idx, cb_arg)) {
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
if (cq->tail_idx == 0)
cq->done_color = !cq->done_color;

View File

@@ -142,6 +142,9 @@ struct ionic_desc_info {
void *cb_arg;
};
#define Q_NEXT_TO_POST(_q, _n) (((_q)->head_idx + (_n)) & ((_q)->size_mask))
#define Q_NEXT_TO_SRVC(_q, _n) (((_q)->tail_idx + (_n)) & ((_q)->size_mask))
struct ionic_queue {
struct ionic_dev *idev;
struct ionic_lif *lif;
@@ -174,11 +177,9 @@ struct ionic_intr_info {
};
struct ionic_cq {
struct ionic_lif *lif;
struct ionic_queue *bound_q;
uint32_t tail_idx;
uint32_t num_descs;
uint32_t desc_size;
uint16_t tail_idx;
uint16_t num_descs;
uint16_t size_mask;
bool done_color;
void *base;
rte_iova_t base_pa;
@@ -240,8 +241,7 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq);
struct ionic_doorbell __iomem *ionic_db_map(struct ionic_lif *lif,
struct ionic_queue *q);
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
uint32_t num_descs, size_t desc_size);
int ionic_cq_init(struct ionic_cq *cq, uint16_t num_descs);
void ionic_cq_map(struct ionic_cq *cq, void *base, rte_iova_t base_pa);
void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q);
typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, uint32_t cq_desc_index,

View File

@@ -656,7 +656,7 @@ ionic_qcq_alloc(struct ionic_lif *lif, uint8_t type,
goto err_out_free_info;
}
err = ionic_cq_init(lif, &new->cq, num_descs, cq_desc_size);
err = ionic_cq_init(&new->cq, num_descs);
if (err) {
IONIC_PRINT(ERR, "Completion queue initialization failed");
goto err_out_free_info;
@@ -1169,11 +1169,12 @@ ionic_adminq_service(struct ionic_cq *cq, uint32_t cq_desc_index,
{
struct ionic_admin_comp *cq_desc_base = cq->base;
struct ionic_admin_comp *cq_desc = &cq_desc_base[cq_desc_index];
struct ionic_qcq *qcq = IONIC_CQ_TO_QCQ(cq);
if (!color_match(cq_desc->color, cq->done_color))
return false;
ionic_q_service(cq->bound_q, cq_desc_index, cq_desc->comp_index, NULL);
ionic_q_service(&qcq->q, cq_desc_index, cq_desc->comp_index, NULL);
return true;
}

View File

@@ -71,7 +71,8 @@ struct ionic_qcq {
struct ionic_intr_info intr;
};
#define IONIC_Q_TO_QCQ(q) container_of(q, struct ionic_qcq, q)
#define IONIC_Q_TO_QCQ(_q) container_of(_q, struct ionic_qcq, q)
#define IONIC_CQ_TO_QCQ(_cq) container_of(_cq, struct ionic_qcq, cq)
#define IONIC_Q_TO_TX_STATS(q) (&IONIC_Q_TO_QCQ(q)->stats.tx)
#define IONIC_Q_TO_RX_STATS(q) (&IONIC_Q_TO_QCQ(q)->stats.rx)

View File

@@ -68,9 +68,10 @@ ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
}
static __rte_always_inline void
ionic_tx_flush(struct ionic_cq *cq)
ionic_tx_flush(struct ionic_qcq *txq)
{
struct ionic_queue *q = cq->bound_q;
struct ionic_cq *cq = &txq->cq;
struct ionic_queue *q = &txq->q;
struct ionic_desc_info *q_desc_info;
struct rte_mbuf *txm, *next;
struct ionic_txq_comp *cq_desc_base = cq->base;
@@ -79,7 +80,7 @@ ionic_tx_flush(struct ionic_cq *cq)
cq_desc = &cq_desc_base[cq->tail_idx];
while (color_match(cq_desc->color, cq->done_color)) {
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
/* Prefetch the next 4 descriptors (not really useful here) */
if ((cq->tail_idx & 0x3) == 0)
@@ -149,7 +150,7 @@ ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
ionic_qcq_disable(txq);
ionic_tx_flush(&txq->cq);
ionic_tx_flush(txq);
return 0;
}
@@ -521,7 +522,6 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
struct ionic_queue *q = &txq->q;
struct ionic_cq *cq = &txq->cq;
struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
uint32_t next_q_head_idx;
uint32_t bytes_tx = 0;
@@ -530,7 +530,7 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
bool last;
/* Cleaning old buffers */
ionic_tx_flush(cq);
ionic_tx_flush(txq);
if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
stats->stop += nb_pkts;
@@ -1018,10 +1018,11 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
}
static __rte_always_inline void
ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
ionic_rxq_service(struct ionic_qcq *rxq, uint32_t work_to_do,
void *service_cb_arg)
{
struct ionic_queue *q = cq->bound_q;
struct ionic_cq *cq = &rxq->cq;
struct ionic_queue *q = &rxq->q;
struct ionic_desc_info *q_desc_info;
struct ionic_rxq_comp *cq_desc_base = cq->base;
struct ionic_rxq_comp *cq_desc;
@@ -1035,7 +1036,7 @@ ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
cq_desc = &cq_desc_base[cq->tail_idx];
while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
curr_cq_tail_idx = cq->tail_idx;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
if (cq->tail_idx == 0)
cq->done_color = !cq->done_color;
@@ -1087,7 +1088,7 @@ ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
ionic_qcq_disable(rxq);
/* Flush */
ionic_rxq_service(&rxq->cq, -1, NULL);
ionic_rxq_service(rxq, -1, NULL);
return 0;
}
@@ -1099,14 +1100,13 @@ ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
uint32_t frame_size =
rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
struct ionic_cq *cq = &rxq->cq;
struct ionic_rx_service service_cb_arg;
service_cb_arg.rx_pkts = rx_pkts;
service_cb_arg.nb_pkts = nb_pkts;
service_cb_arg.nb_rx = 0;
ionic_rxq_service(cq, nb_pkts, &service_cb_arg);
ionic_rxq_service(rxq, nb_pkts, &service_cb_arg);
ionic_rx_fill(rxq, frame_size);