net/liquidio: support queue re-configuration

Add support for re-configuring the number of queues per port and the
descriptor ring size at runtime. The variable representing the number of
descriptors is renamed from max_count to nb_desc.

Signed-off-by: Shijith Thotton <shijith.thotton@caviumnetworks.com>
Shijith Thotton, 2017-11-20 17:29:51 +05:30 (committed by Ferruh Yigit)
parent 7e9834276b
commit 37a725d66d
6 changed files with 132 additions and 172 deletions
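
For context, the application-side flow that this change enables looks roughly as follows. This is a minimal sketch against the 17.11-era ethdev API; the reconfigure_port() helper and its parameters are illustrative and not part of the patch:

#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Illustrative: re-configure an already-configured port with new queue
 * counts. Before this patch the liquidio PMD rejected a changed count
 * with -ENOTSUP; with it, the firmware is informed of the new count and
 * the queues are freed and recreated.
 */
static int
reconfigure_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq,
		 uint16_t nb_desc, struct rte_mempool *mp)
{
	struct rte_eth_conf conf;
	uint16_t q;
	int ret;

	memset(&conf, 0, sizeof(conf));
	rte_eth_dev_stop(port_id);	/* stop before re-configuring */

	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	if (ret < 0)
		return ret;

	for (q = 0; q < nb_rxq; q++) {	/* recreate Rx rings */
		ret = rte_eth_rx_queue_setup(port_id, q, nb_desc,
					     rte_socket_id(), NULL, mp);
		if (ret < 0)
			return ret;
	}
	for (q = 0; q < nb_txq; q++) {	/* recreate Tx rings */
		ret = rte_eth_tx_queue_setup(port_id, q, nb_desc,
					     rte_socket_id(), NULL);
		if (ret < 0)
			return ret;
	}
	return rte_eth_dev_start(port_id);
}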

File 1 of 6

@@ -121,6 +121,8 @@ cn23xx_vf_setup_global_output_regs(struct lio_device *lio_dev)
reg_val &= 0xEFFFFFFFFFFFFFFFL;
lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no), reg_val);
reg_val =
lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
@@ -182,7 +184,7 @@ cn23xx_vf_setup_iq_regs(struct lio_device *lio_dev, uint32_t iq_no)
/* Write the start of the input queue's ring and its size */
lio_write_csr64(lio_dev, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
iq->base_addr_dma);
lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->nb_desc);
/* Remember the doorbell & instruction count register addr
* for this queue
@@ -214,7 +216,7 @@ cn23xx_vf_setup_oq_regs(struct lio_device *lio_dev, uint32_t oq_no)
lio_write_csr64(lio_dev, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
droq->desc_ring_dma);
lio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
lio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->nb_desc);
lio_write_csr(lio_dev, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
(droq->buffer_size | (OCTEON_RH_SIZE << 16)));
@@ -509,51 +511,3 @@ cn23xx_vf_setup_device(struct lio_device *lio_dev)
return 0;
}
int
cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev)
{
uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
uint64_t q_no;
/* Disable the input and output queues for this Octeon.
* IOQs will already be in reset.
* If the RST bit is set, wait for the Quiet bit to be set.
* Once the Quiet bit is set, clear the RST bit.
*/
PMD_INIT_FUNC_TRACE();
for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
volatile uint64_t reg_val;
reg_val = lio_read_csr64(lio_dev,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) && !(reg_val &
CN23XX_PKT_INPUT_CTL_QUIET) && loop) {
reg_val = lio_read_csr64(
lio_dev,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
loop = loop - 1;
}
if (loop == 0) {
lio_dev_err(lio_dev,
"clearing the reset reg failed or setting the quiet reg failed for qno %lu\n",
(unsigned long)q_no);
return -1;
}
reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
reg_val);
reg_val = lio_read_csr64(lio_dev,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
lio_dev_err(lio_dev, "unable to reset qno %lu\n",
(unsigned long)q_no);
return -1;
}
}
return 0;
}

File 2 of 6

@@ -51,11 +51,6 @@ lio_get_conf(struct lio_device *lio_dev)
return default_lio_conf;
}
/** Turns off the input and output queues for the device
* @param lio_dev device whose IO queues are to be disabled
*/
int cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev);
#define CN23XX_VF_BUSY_READING_REG_LOOP_COUNT 100000
void cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev);

File 3 of 6

@@ -84,6 +84,7 @@ enum lio_card_type {
#define LIO_FW_VERSION_LENGTH 32
#define LIO_Q_RECONF_MIN_VERSION "1.7.0"
#define LIO_VF_TRUST_MIN_VERSION "1.7.1"
/** Tag types used by Octeon cores in their work. */
@@ -127,6 +128,7 @@ enum octeon_tag_type {
#define LIO_CMD_ADD_VLAN_FILTER 0x17
#define LIO_CMD_DEL_VLAN_FILTER 0x18
#define LIO_CMD_VXLAN_PORT_CONFIG 0x19
#define LIO_CMD_QUEUE_COUNT_CTL 0x1f
#define LIO_CMD_VXLAN_PORT_ADD 0x0
#define LIO_CMD_VXLAN_PORT_DEL 0x1

File 4 of 6

@@ -1199,12 +1199,10 @@ lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
if ((lio_dev->droq[fw_mapped_oq]) &&
(num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
lio_dev_err(lio_dev,
"Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
lio_dev->droq[fw_mapped_oq]->max_count);
return -ENOTSUP;
/* Free previous allocation if any */
if (eth_dev->data->rx_queues[q_no] != NULL) {
lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
eth_dev->data->rx_queues[q_no] = NULL;
}
mbp_priv = rte_mempool_get_priv(mp);
@@ -1238,10 +1236,6 @@ lio_dev_rx_queue_release(void *rxq)
int oq_no;
if (droq) {
/* Run time queue deletion not supported */
if (droq->lio_dev->port_configured)
return;
oq_no = droq->q_no;
lio_delete_droq_queue(droq->lio_dev, oq_no);
}
@@ -1285,12 +1279,10 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
(num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
lio_dev_err(lio_dev,
"Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
lio_dev->instr_queue[fw_mapped_iq]->max_count);
return -ENOTSUP;
/* Free previous allocation if any */
if (eth_dev->data->tx_queues[q_no] != NULL) {
lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
eth_dev->data->tx_queues[q_no] = NULL;
}
retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
@@ -1302,7 +1294,7 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
}
retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
lio_dev->instr_queue[fw_mapped_iq]->max_count,
lio_dev->instr_queue[fw_mapped_iq]->nb_desc,
socket_id);
if (retval) {
@@ -1333,10 +1325,6 @@ lio_dev_tx_queue_release(void *txq)
if (tq) {
/* Run time queue deletion not supported */
if (tq->lio_dev->port_configured)
return;
/* Free sg_list */
lio_delete_sglist(tq);
@@ -1505,6 +1493,8 @@ lio_dev_stop(struct rte_eth_dev *eth_dev)
lio_send_rx_ctrl_cmd(eth_dev, 0);
lio_wait_for_instr_fetch(lio_dev);
/* Clear recorded link status */
lio_dev->linfo.link.link_status64 = 0;
}
@@ -1578,34 +1568,14 @@ static void
lio_dev_close(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
uint32_t i;
lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
if (lio_dev->intf_open)
lio_dev_stop(eth_dev);
lio_wait_for_instr_fetch(lio_dev);
lio_dev->fn_list.disable_io_queues(lio_dev);
cn23xx_vf_set_io_queues_off(lio_dev);
/* Reset iq regs (IQ_DBELL).
* Clear sli_pktx_cnts (OQ_PKTS_SENT).
*/
for (i = 0; i < lio_dev->nb_rx_queues; i++) {
struct lio_droq *droq = lio_dev->droq[i];
if (droq == NULL)
break;
uint32_t pkt_count = rte_read32(droq->pkts_sent_reg);
lio_dev_dbg(lio_dev,
"pending oq count %u\n", pkt_count);
rte_write32(pkt_count, droq->pkts_sent_reg);
}
/* Reset ioq regs */
lio_dev->fn_list.setup_device_regs(lio_dev);
if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) {
cn23xx_vf_ask_pf_to_do_flr(lio_dev);
@@ -1695,7 +1665,76 @@ lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
}
static int lio_dev_configure(struct rte_eth_dev *eth_dev)
static int
lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,
int num_rxq)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
struct lio_dev_ctrl_cmd ctrl_cmd;
struct lio_ctrl_pkt ctrl_pkt;
if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {
lio_dev_err(lio_dev, "Require firmware version >= %s\n",
LIO_Q_RECONF_MIN_VERSION);
return -ENOTSUP;
}
/* Flush added to prevent command failure
* in case the queue is full.
*/
lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
ctrl_cmd.eth_dev = eth_dev;
ctrl_cmd.cond = 0;
ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;
ctrl_pkt.ncmd.s.param1 = num_txq;
ctrl_pkt.ncmd.s.param2 = num_rxq;
ctrl_pkt.ctrl_cmd = &ctrl_cmd;
if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
lio_dev_err(lio_dev, "Failed to send queue count control command\n");
return -1;
}
if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
lio_dev_err(lio_dev, "Queue count control command timed out\n");
return -1;
}
return 0;
}
static int
lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
if (lio_dev->nb_rx_queues != num_rxq ||
lio_dev->nb_tx_queues != num_txq) {
if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))
return -1;
lio_dev->nb_rx_queues = num_rxq;
lio_dev->nb_tx_queues = num_txq;
}
if (lio_dev->intf_open)
lio_dev_stop(eth_dev);
/* Reset ioq registers */
if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
lio_dev_err(lio_dev, "Failed to configure device registers\n");
return -1;
}
return 0;
}
static int
lio_dev_configure(struct rte_eth_dev *eth_dev)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
@@ -1708,22 +1747,21 @@ static int lio_dev_configure(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
/* Re-configuring firmware not supported.
* Can't change tx/rx queues per port from initial value.
/* Inform firmware about change in number of queues to use.
* Disable IO queues and reset registers for re-configuration.
*/
if (lio_dev->port_configured) {
if ((lio_dev->nb_rx_queues != eth_dev->data->nb_rx_queues) ||
(lio_dev->nb_tx_queues != eth_dev->data->nb_tx_queues)) {
lio_dev_err(lio_dev,
"rxq/txq re-conf not supported. Restart application with new value.\n");
return -ENOTSUP;
}
return 0;
}
if (lio_dev->port_configured)
return lio_reconf_queues(eth_dev,
eth_dev->data->nb_tx_queues,
eth_dev->data->nb_rx_queues);
lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;
/* Set max number of queues which can be re-configured. */
lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;
lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;
resp_size = sizeof(struct lio_if_cfg_resp);
sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
if (sc == NULL)
@@ -1850,9 +1888,6 @@ static int lio_dev_configure(struct rte_eth_dev *eth_dev)
lio_free_soft_command(sc);
/* Disable iq_0 for reconf */
lio_dev->fn_list.disable_io_queues(lio_dev);
/* Reset ioq regs */
lio_dev->fn_list.setup_device_regs(lio_dev);
@@ -1992,11 +2027,6 @@ lio_first_time_init(struct lio_device *lio_dev,
rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
}
if (cn23xx_vf_set_io_queues_off(lio_dev)) {
lio_dev_err(lio_dev, "Setting io queues off failed\n");
goto error;
}
if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
lio_dev_err(lio_dev, "Failed to configure device registers\n");
goto error;
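
One caveat in lio_send_queue_count_update() above is worth flagging: the firmware-version gate relies on strcmp(), which orders version strings lexicographically. That holds for the driver's single-digit "x.y.z" scheme (e.g. "1.7.0"), but would misorder a hypothetical "1.10.0" against "1.7.0". A numeric, component-wise comparison avoids this; a sketch, where lio_version_cmp() is a hypothetical helper and not part of this patch:

#include <stdio.h>

/* Hypothetical: compare dotted "major.minor.patch" strings numerically.
 * Returns <0, 0 or >0 like strcmp(); sketch only, no error handling.
 */
static int
lio_version_cmp(const char *a, const char *b)
{
	int amaj = 0, amin = 0, apat = 0;
	int bmaj = 0, bmin = 0, bpat = 0;

	sscanf(a, "%d.%d.%d", &amaj, &amin, &apat);
	sscanf(b, "%d.%d.%d", &bmaj, &bmin, &bpat);
	if (amaj != bmaj)
		return amaj - bmaj;
	if (amin != bmin)
		return amin - bmin;
	return apat - bpat;
}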

File 5 of 6

@@ -13,7 +13,7 @@
#define LIO_MAX_SG 12
/* Flush iq if available tx_desc fall below LIO_FLUSH_WM */
#define LIO_FLUSH_WM(_iq) ((_iq)->max_count / 2)
#define LIO_FLUSH_WM(_iq) ((_iq)->nb_desc / 2)
#define LIO_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL
static void
@@ -41,7 +41,7 @@ lio_droq_destroy_ring_buffers(struct lio_droq *droq)
{
uint32_t i;
for (i = 0; i < droq->max_count; i++) {
for (i = 0; i < droq->nb_desc; i++) {
if (droq->recv_buf_list[i].buffer) {
rte_pktmbuf_free((struct rte_mbuf *)
droq->recv_buf_list[i].buffer);
@@ -60,7 +60,7 @@ lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
uint32_t i;
void *buf;
for (i = 0; i < droq->max_count; i++) {
for (i = 0; i < droq->nb_desc; i++) {
buf = rte_pktmbuf_alloc(droq->mpool);
if (buf == NULL) {
lio_dev_err(lio_dev, "buffer alloc failed\n");
@@ -135,7 +135,7 @@ lio_alloc_info_buffer(struct lio_device *lio_dev,
{
droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
"info_list", droq->q_no,
(droq->max_count *
(droq->nb_desc *
LIO_DROQ_INFO_SIZE),
RTE_CACHE_LINE_SIZE,
socket_id);
@@ -177,10 +177,10 @@ lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);
droq->max_count = num_descs;
droq->nb_desc = num_descs;
droq->buffer_size = desc_size;
desc_ring_size = droq->max_count * LIO_DROQ_DESC_SIZE;
desc_ring_size = droq->nb_desc * LIO_DROQ_DESC_SIZE;
droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
"droq", q_no,
desc_ring_size,
@@ -199,7 +199,7 @@ lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
droq->max_count);
droq->nb_desc);
droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
if (droq->info_list == NULL) {
@@ -208,7 +208,7 @@ lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
}
droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
(droq->max_count *
(droq->nb_desc *
LIO_DROQ_RECVBUF_SIZE),
RTE_CACHE_LINE_SIZE,
socket_id);
@@ -245,11 +245,6 @@ lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
PMD_INIT_FUNC_TRACE();
if (lio_dev->droq[oq_no]) {
lio_dev_dbg(lio_dev, "Droq %d in use\n", oq_no);
return 0;
}
/* Allocate the DS for the new droq. */
droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
RTE_CACHE_LINE_SIZE, socket_id);
@@ -274,7 +269,7 @@ lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
/* Send credit for Octeon output queues. Credits are always
* sent after the output queue is enabled.
*/
rte_write32(lio_dev->droq[oq_no]->max_count,
rte_write32(lio_dev->droq[oq_no]->nb_desc,
lio_dev->droq[oq_no]->pkts_credit_reg);
rte_wmb();
@@ -313,13 +308,13 @@ lio_droq_refill_pullup_descs(struct lio_droq *droq,
do {
droq->refill_idx = lio_incr_index(
droq->refill_idx, 1,
droq->max_count);
droq->nb_desc);
desc_refilled++;
droq->refill_count--;
} while (droq->recv_buf_list[droq->refill_idx].buffer);
}
refill_index = lio_incr_index(refill_index, 1,
droq->max_count);
droq->nb_desc);
} /* while */
return desc_refilled;
@@ -350,7 +345,7 @@ lio_droq_refill(struct lio_droq *droq)
desc_ring = droq->desc_ring;
while (droq->refill_count && (desc_refilled < droq->max_count)) {
while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
/* If a valid buffer exists (happens if there is no dispatch),
* reuse the buffer, else allocate.
*/
@@ -373,7 +368,7 @@ lio_droq_refill(struct lio_droq *droq)
droq->info_list[droq->refill_idx].length = 0;
droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
droq->max_count);
droq->nb_desc);
desc_refilled++;
droq->refill_count--;
}
@@ -420,7 +415,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
buf_cnt = lio_droq_get_bufcount(droq->buffer_size,
(uint32_t)info->length);
droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,
droq->max_count);
droq->nb_desc);
droq->refill_count += buf_cnt;
} else {
if (info->length <= droq->buffer_size) {
@@ -433,7 +428,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
droq->recv_buf_list[droq->read_idx].buffer = NULL;
droq->read_idx = lio_incr_index(
droq->read_idx, 1,
droq->max_count);
droq->nb_desc);
droq->refill_count++;
if (likely(nicbuf != NULL)) {
@@ -527,7 +522,7 @@ lio_droq_fast_process_packet(struct lio_device *lio_dev,
pkt_len += cpy_len;
droq->read_idx = lio_incr_index(
droq->read_idx,
1, droq->max_count);
1, droq->nb_desc);
droq->refill_count++;
/* Prefetch buffer pointers when on a
@@ -708,7 +703,7 @@ lio_init_instr_queue(struct lio_device *lio_dev,
iq->base_addr_dma = iq->iq_mz->iova;
iq->base_addr = (uint8_t *)iq->iq_mz->addr;
iq->max_count = num_descs;
iq->nb_desc = num_descs;
/* Initialize a list to hold requests that have been posted to Octeon
* but have yet to be fetched by Octeon
@@ -727,7 +722,7 @@ lio_init_instr_queue(struct lio_device *lio_dev,
lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
iq->max_count);
iq->nb_desc);
iq->lio_dev = lio_dev;
iq->txpciq.txpciq64 = txpciq.txpciq64;
@@ -824,14 +819,6 @@ lio_setup_iq(struct lio_device *lio_dev, int q_index,
{
uint32_t iq_no = (uint32_t)txpciq.s.q_no;
if (lio_dev->instr_queue[iq_no]) {
lio_dev_dbg(lio_dev, "IQ is in use. Cannot create the IQ: %d again\n",
iq_no);
lio_dev->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
return 0;
}
lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue",
sizeof(struct lio_instr_queue),
RTE_CACHE_LINE_SIZE, socket_id);
@@ -841,23 +828,15 @@ lio_setup_iq(struct lio_device *lio_dev, int q_index,
lio_dev->instr_queue[iq_no]->q_index = q_index;
lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id))
goto release_lio_iq;
if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id)) {
rte_free(lio_dev->instr_queue[iq_no]);
lio_dev->instr_queue[iq_no] = NULL;
return -1;
}
lio_dev->num_iqs++;
if (lio_dev->fn_list.enable_io_queues(lio_dev))
goto delete_lio_iq;
return 0;
delete_lio_iq:
lio_delete_instr_queue(lio_dev, iq_no);
lio_dev->num_iqs--;
release_lio_iq:
rte_free(lio_dev->instr_queue[iq_no]);
lio_dev->instr_queue[iq_no] = NULL;
return -1;
}
int
@@ -928,14 +907,14 @@ post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
* position if queue gets full before Octeon could fetch any instr.
*/
if (rte_atomic64_read(&iq->instr_pending) >=
(int32_t)(iq->max_count - 1)) {
(int32_t)(iq->nb_desc - 1)) {
st.status = LIO_IQ_SEND_FAILED;
st.index = -1;
return st;
}
if (rte_atomic64_read(&iq->instr_pending) >=
(int32_t)(iq->max_count - 2))
(int32_t)(iq->nb_desc - 2))
st.status = LIO_IQ_SEND_STOP;
copy_cmd_into_iq(iq, cmd);
@@ -943,7 +922,7 @@ post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
/* "index" is returned, host_write_index is modified. */
st.index = iq->host_write_index;
iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
iq->max_count);
iq->nb_desc);
iq->fill_cnt++;
/* Flush the command into memory. We need to be sure the data is in
@@ -1045,7 +1024,7 @@ lio_process_iq_request_list(struct lio_device *lio_dev,
skip_this:
inst_count++;
old = lio_incr_index(old, 1, iq->max_count);
old = lio_incr_index(old, 1, iq->nb_desc);
}
iq->flush_index = old;
@@ -1065,7 +1044,7 @@ lio_update_read_index(struct lio_instr_queue *iq)
/* Add last_done and modulo with the IQ size to get new index */
iq->lio_read_index = (iq->lio_read_index +
(uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) %
iq->max_count;
iq->nb_desc;
}
int
@@ -1523,7 +1502,7 @@ lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)
static inline uint32_t
lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no)
{
return ((lio_dev->instr_queue[q_no]->max_count - 1) -
return ((lio_dev->instr_queue[q_no]->nb_desc - 1) -
(uint32_t)rte_atomic64_read(
&lio_dev->instr_queue[q_no]->instr_pending));
}
@@ -1533,7 +1512,7 @@ lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no)
{
return ((uint32_t)rte_atomic64_read(
&lio_dev->instr_queue[q_no]->instr_pending) >=
(lio_dev->instr_queue[q_no]->max_count - 2));
(lio_dev->instr_queue[q_no]->nb_desc - 2));
}
static int
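
All of the renamed call sites above pass nb_desc as the wrap limit to lio_incr_index(). For reference, that helper amounts to ring-index arithmetic along the following lines; this is a sketch inferred from the call sites, not the verbatim driver source:

/* Advance a ring index by count, wrapping at the ring size (nb_desc).
 * Assumes count never exceeds the ring size.
 */
static inline uint32_t
lio_incr_index(uint32_t index, uint32_t count, uint32_t max)
{
	if ((index + count) >= max)
		return index + count - max;
	return index + count;
}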

File 6 of 6

@@ -102,7 +102,7 @@ struct lio_droq {
rte_atomic64_t pkts_pending;
/** Number of descriptors in this ring. */
uint32_t max_count;
uint32_t nb_desc;
/** The number of descriptors pending refill. */
uint32_t refill_count;
@@ -269,8 +269,8 @@ struct lio_instr_queue {
uint32_t status:8;
/** Maximum no. of instructions in this queue. */
uint32_t max_count;
/** Number of descriptors in this ring. */
uint32_t nb_desc;
/** Index in input ring where the driver should write the next packet */
uint32_t host_write_index;
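
A closing note on the nb_desc thresholds seen in post_command2() and lio_iq_is_full(): posts are throttled at nb_desc - 2 pending instructions (LIO_IQ_SEND_STOP) and refused outright at nb_desc - 1 (LIO_IQ_SEND_FAILED), so host_write_index can never catch up with the read side and a full ring stays distinguishable from an empty one. A condensed sketch of those guards, with illustrative names:

/* Mirrors the occupancy checks above; pending is instr_pending. */
static inline int
iq_should_stop(uint32_t pending, uint32_t nb_desc)
{
	return pending >= nb_desc - 2;	/* caller should stop posting */
}

static inline int
iq_is_full(uint32_t pending, uint32_t nb_desc)
{
	return pending >= nb_desc - 1;	/* post would be refused */
}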