baseband/acc: fix double MSI interrupt in TB mode

Fix a logical bug in software that caused the MSI interrupt to be
issued twice when running in transport block mode.

Fixes: f404dfe35c ("baseband/acc100: support 4G processing")
Fixes: bec597b78a ("baseband/acc200: add LTE processing")
Cc: stable@dpdk.org

Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
This commit is contained in:
Hernan Vargas 2022-11-01 16:04:58 -07:00 committed by Akhil Goyal
parent 31f79cb518
commit beaf1f876c
3 changed files with 6 additions and 64 deletions

View File

@ -908,6 +908,7 @@ acc_dma_enqueue(struct acc_queue *q, uint16_t n,
struct rte_bbdev_stats *queue_stats)
{
union acc_enqueue_reg_fmt enq_req;
union acc_dma_desc *desc;
#ifdef RTE_BBDEV_OFFLOAD_COST
uint64_t start_time = 0;
queue_stats->acc_offload_cycles = 0;
@ -915,13 +916,17 @@ acc_dma_enqueue(struct acc_queue *q, uint16_t n,
RTE_SET_USED(queue_stats);
#endif
/* Set Sdone and IRQ enable bit on last descriptor. */
desc = acc_desc(q, n - 1);
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
enq_req.val = 0;
/* Setting offset, 100b for 256 DMA Desc */
enq_req.addr_offset = ACC_DESC_OFFSET;
/* Split ops into batches */
do {
union acc_dma_desc *desc;
uint16_t enq_batch_size;
uint64_t offset;
rte_iova_t req_elem_addr;

View File

@ -2671,7 +2671,6 @@ enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,
/* Set SDone on last CB descriptor for TB mode. */
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
return current_enqueued_cbs;
}
@ -2741,7 +2740,6 @@ enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,
desc_idx = ((q->sw_ring_head + enq_descs - 1) & q->sw_ring_wrap_mask);
desc = q->ring_addr + desc_idx;
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
desc->req.op_addr = op;
return return_descs;
}
@ -3303,7 +3301,6 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
#endif
/* Set SDone on last CB descriptor for TB mode */
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
return current_enqueued_cbs;
}
@ -3408,7 +3405,6 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
#endif
/* Set SDone on last CB descriptor for TB mode */
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
return current_enqueued_cbs;
}
@ -3421,7 +3417,6 @@ acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
struct acc_queue *q = q_data->queue_private;
int32_t avail = acc_ring_avail_enq(q);
uint16_t i;
union acc_dma_desc *desc;
int ret;
for (i = 0; i < num; ++i) {
@ -3442,11 +3437,6 @@ acc100_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
if (unlikely(i == 0))
return 0; /* Nothing to enqueue */
/* Set SDone in last CB in enqueued ops for CB mode*/
desc = acc_desc(q, i - 1);
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
acc_dma_enqueue(q, i, &q_data->queue_stats);
/* Update stats */
@ -3463,7 +3453,6 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
struct acc_queue *q = q_data->queue_private;
int32_t avail = acc_ring_avail_enq(q);
uint16_t i = 0;
union acc_dma_desc *desc;
int ret, desc_idx = 0;
int16_t enq, left = num;
@ -3497,11 +3486,6 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
if (unlikely(i == 0))
return 0; /* Nothing to enqueue */
/* Set SDone in last CB in enqueued ops for CB mode*/
desc = acc_desc(q, desc_idx - 1);
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
acc_dma_enqueue(q, desc_idx, &q_data->queue_stats);
/* Update stats */
@ -3625,7 +3609,6 @@ acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,
struct acc_queue *q = q_data->queue_private;
int32_t avail = acc_ring_avail_enq(q);
uint16_t i;
union acc_dma_desc *desc;
int ret;
for (i = 0; i < num; ++i) {
@ -3646,11 +3629,6 @@ acc100_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,
if (unlikely(i == 0))
return 0; /* Nothing to enqueue */
/* Set SDone in last CB in enqueued ops for CB mode*/
desc = acc_desc(q, i - 1);
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
acc_dma_enqueue(q, i, &q_data->queue_stats);
/* Update stats */
@ -3705,7 +3683,6 @@ acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
struct acc_queue *q = q_data->queue_private;
int32_t avail = acc_ring_avail_enq(q);
uint16_t i;
union acc_dma_desc *desc;
int ret;
bool same_op = false;
for (i = 0; i < num; ++i) {
@ -3735,12 +3712,6 @@ acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
if (unlikely(i == 0))
return 0; /* Nothing to enqueue */
/* Set SDone in last CB in enqueued ops for CB mode*/
desc = acc_desc(q, i - 1);
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
acc_dma_enqueue(q, i, &q_data->queue_stats);
/* Update stats */

View File

@ -1884,7 +1884,6 @@ enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,
/* Set SDone on last CB descriptor for TB mode. */
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
return current_enqueued_cbs;
}
@ -1945,7 +1944,6 @@ enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op,
/* Set SDone on last CB descriptor for TB mode. */
desc = acc_desc(q, enq_descs - 1);
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
desc->req.op_addr = op;
return return_descs;
}
@ -2181,7 +2179,6 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
#endif
/* Set SDone on last CB descriptor for TB mode. */
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
return current_enqueued_cbs;
}
@ -2270,7 +2267,6 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op,
/* Set SDone on last CB descriptor for TB mode */
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
return current_enqueued_cbs;
}
@ -2283,7 +2279,6 @@ acc200_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
struct acc_queue *q = q_data->queue_private;
int32_t avail = acc_ring_avail_enq(q);
uint16_t i;
union acc_dma_desc *desc;
int ret;
for (i = 0; i < num; ++i) {
@ -2304,11 +2299,6 @@ acc200_enqueue_enc_cb(struct rte_bbdev_queue_data *q_data,
if (unlikely(i == 0))
return 0; /* Nothing to enqueue */
/* Set SDone in last CB in enqueued ops for CB mode*/
desc = acc_desc(q, i - 1);
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
acc_dma_enqueue(q, i, &q_data->queue_stats);
/* Update stats */
@ -2325,7 +2315,6 @@ acc200_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
struct acc_queue *q = q_data->queue_private;
int32_t avail = acc_ring_avail_enq(q);
uint16_t i = 0;
union acc_dma_desc *desc;
int ret, desc_idx = 0;
int16_t enq, left = num;
@ -2350,11 +2339,6 @@ acc200_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data,
if (unlikely(i == 0))
return 0; /* Nothing to enqueue. */
/* Set SDone in last CB in enqueued ops for CB mode. */
desc = acc_desc(q, desc_idx - 1);
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
acc_dma_enqueue(q, desc_idx, &q_data->queue_stats);
/* Update stats. */
@ -2479,7 +2463,6 @@ acc200_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,
struct acc_queue *q = q_data->queue_private;
int32_t avail = acc_ring_avail_enq(q);
uint16_t i;
union acc_dma_desc *desc;
int ret;
for (i = 0; i < num; ++i) {
@ -2496,11 +2479,6 @@ acc200_enqueue_dec_cb(struct rte_bbdev_queue_data *q_data,
if (unlikely(i == 0))
return 0; /* Nothing to enqueue. */
/* Set SDone in last CB in enqueued ops for CB mode. */
desc = acc_desc(q, i - 1);
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
acc_dma_enqueue(q, i, &q_data->queue_stats);
/* Update stats. */
@ -2552,7 +2530,6 @@ acc200_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
struct acc_queue *q = q_data->queue_private;
int32_t avail = acc_ring_avail_enq(q);
uint16_t i;
union acc_dma_desc *desc;
int ret;
bool same_op = false;
@ -2581,11 +2558,6 @@ acc200_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data,
if (unlikely(i == 0))
return 0; /* Nothing to enqueue. */
/* Set SDone in last CB in enqueued ops for CB mode. */
desc = acc_desc(q, i - 1);
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
acc_dma_enqueue(q, i, &q_data->queue_stats);
/* Update stats. */
@ -3234,7 +3206,6 @@ acc200_enqueue_fft(struct rte_bbdev_queue_data *q_data,
struct acc_queue *q;
int32_t aq_avail, avail;
uint16_t i;
union acc_dma_desc *desc;
int ret;
aq_avail = acc_aq_avail(q_data, num);
@ -3256,11 +3227,6 @@ acc200_enqueue_fft(struct rte_bbdev_queue_data *q_data,
if (unlikely(i == 0))
return 0; /* Nothing to enqueue. */
/* Set SDone in last CB in enqueued ops for CB mode. */
desc = acc_desc(q, i - 1);
desc->req.sdone_enable = 1;
desc->req.irq_enable = q->irq_enable;
acc_dma_enqueue(q, i, &q_data->queue_stats);
/* Update stats */