i40e: full VMDQ pools support

1. Rename the i40e_vsi_* functions to i40e_dev_*, since the PF can
   contain more than one VSI once VMDQ is enabled.
2. Change i40e_dev_rx/tx_queue_setup to be capable of setting up
   queues that belong to VMDQ pools.
3. Add queue mapping, which converts between the queue index the
   application uses and the real NIC queue index (see the sketch
   below).
4. Change i40e_dev_start/stop to be capable of switching VMDQ queues
   on and off.
5. Change i40e_pf_config_rss to calculate the actual number of main
   VSI queues after VMDQ pools are introduced.
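
For illustration, here is a minimal standalone sketch of the index
conversion described in item 3; MAIN_VSI_NB_QPS, VMDQ_NB_QPS and
NB_VMDQ_VSI are assumed example values for this sketch, not actual
driver fields:

    #include <stdio.h>

    /* Assumed example pool sizes, not actual driver fields. */
    #define MAIN_VSI_NB_QPS 64u /* queues owned by the main (PF) VSI */
    #define VMDQ_NB_QPS      4u /* queues per VMDQ pool */
    #define NB_VMDQ_VSI      8u /* number of configured VMDQ pools */

    int main(void)
    {
        unsigned int queue_idx = 70; /* index as the application sees it */

        if (queue_idx < MAIN_VSI_NB_QPS) {
            /* The queue lives in the main VSI; offset equals index. */
            printf("queue %u -> main VSI, offset %u\n",
                   queue_idx, queue_idx);
        } else {
            /* The queue lives in one of the VMDQ pools. */
            unsigned int off = queue_idx - MAIN_VSI_NB_QPS;

            if (off >= NB_VMDQ_VSI * VMDQ_NB_QPS) {
                printf("queue %u out of range\n", queue_idx);
                return 1;
            }
            printf("queue %u -> VMDQ VSI %u, offset %u\n",
                   queue_idx, off / VMDQ_NB_QPS, off % VMDQ_NB_QPS);
        }
        return 0;
    }

With these values, queue 70 maps to VMDQ VSI 1 at offset 2, mirroring
the split that i40e_pf_get_vsi_by_qindex and
i40e_get_queue_offset_by_qindex perform in the patch.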

Signed-off-by: Chen Jing D(Mark) <jing.d.chen@intel.com>
Tested-by: Min Cao <min.cao@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Author:    Chen Jing D(Mark)
Date:      2014-11-04 18:01:29 +08:00
Committer: Thomas Monjalon
parent 4805ed59e9
commit b6583ee402
3 changed files with 226 additions and 77 deletions


@@ -162,7 +162,7 @@ static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
static int i40e_get_cap(struct i40e_hw *hw);
static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
static int i40e_pf_setup(struct i40e_pf *pf);
static int i40e_vsi_init(struct i40e_vsi *vsi);
static int i40e_dev_rxtx_init(struct i40e_pf *pf);
static int i40e_vmdq_setup(struct rte_eth_dev *dev);
static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
bool offset_loaded, uint64_t *offset, uint64_t *stat);
@@ -783,8 +783,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
int ret;
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
@@ -795,26 +795,37 @@ i40e_dev_start(struct rte_eth_dev *dev)
}
/* Initialize VSI */
ret = i40e_vsi_init(vsi);
ret = i40e_dev_rxtx_init(pf);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to init VSI");
PMD_DRV_LOG(ERR, "Failed to init rx/tx queues");
goto err_up;
}
/* Map queues with MSIX interrupt */
i40e_vsi_queues_bind_intr(vsi);
i40e_vsi_enable_queues_intr(vsi);
i40e_vsi_queues_bind_intr(main_vsi);
i40e_vsi_enable_queues_intr(main_vsi);
/* Map VMDQ VSI queues with MSIX interrupt */
for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
}
/* Enable all queues which have been configured */
ret = i40e_vsi_switch_queues(vsi, TRUE);
ret = i40e_dev_switch_queues(pf, TRUE);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to enable VSI");
goto err_up;
}
/* Enable receiving broadcast packets */
if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
true, NULL);
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
}
@@ -829,7 +840,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
return I40E_SUCCESS;
err_up:
i40e_vsi_switch_queues(vsi, FALSE);
i40e_dev_switch_queues(pf, FALSE);
i40e_dev_clear_queues(dev);
return ret;
}
@@ -838,17 +850,26 @@ static void
i40e_dev_stop(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
struct i40e_vsi *main_vsi = pf->main_vsi;
int i;
/* Disable all queues */
i40e_vsi_switch_queues(vsi, FALSE);
i40e_dev_switch_queues(pf, FALSE);
/* un-map queues with interrupt registers */
i40e_vsi_disable_queues_intr(main_vsi);
i40e_vsi_queues_unbind_intr(main_vsi);
for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
}
/* Clear all queues and release memory */
i40e_dev_clear_queues(dev);
/* Set link down */
i40e_dev_set_link_down(dev);
/* un-map queues with interrupt registers */
i40e_vsi_disable_queues_intr(vsi);
i40e_vsi_queues_unbind_intr(vsi);
}
static void
@@ -3251,11 +3272,11 @@ i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
/* Switch on or off the tx queues */
static int
i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
{
struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
struct rte_eth_dev_data *dev_data = pf->dev_data;
struct i40e_tx_queue *txq;
struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
struct rte_eth_dev *dev = pf->adapter->eth_dev;
uint16_t i;
int ret;
@@ -3263,7 +3284,7 @@ i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
txq = dev_data->tx_queues[i];
/* Don't operate the queue if it is not configured
* or if it is deferred to a per-queue start */
if (!txq->q_set || (on && txq->tx_deferred_start))
if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
continue;
if (on)
ret = i40e_dev_tx_queue_start(dev, i);
@@ -3329,11 +3350,11 @@ i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
}
/* Switch on or off the rx queues */
static int
i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
{
struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
struct rte_eth_dev_data *dev_data = pf->dev_data;
struct i40e_rx_queue *rxq;
struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
struct rte_eth_dev *dev = pf->adapter->eth_dev;
uint16_t i;
int ret;
@@ -3341,7 +3362,7 @@ i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
rxq = dev_data->rx_queues[i];
/* Don't operate the queue if it is not configured
* or if it is deferred to a per-queue start */
if (!rxq->q_set || (on && rxq->rx_deferred_start))
if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
continue;
if (on)
ret = i40e_dev_rx_queue_start(dev, i);
@@ -3356,26 +3377,26 @@ i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
/* Switch on or off all the rx/tx queues */
int
i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
{
int ret;
if (on) {
/* enable rx queues before enabling tx queues */
ret = i40e_vsi_switch_rx_queues(vsi, on);
ret = i40e_dev_switch_rx_queues(pf, on);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to switch rx queues");
return ret;
}
ret = i40e_vsi_switch_tx_queues(vsi, on);
ret = i40e_dev_switch_tx_queues(pf, on);
} else {
/* Stop tx queues before stopping rx queues */
ret = i40e_vsi_switch_tx_queues(vsi, on);
ret = i40e_dev_switch_tx_queues(pf, on);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to switch tx queues");
return ret;
}
ret = i40e_vsi_switch_rx_queues(vsi, on);
ret = i40e_dev_switch_rx_queues(pf, on);
}
return ret;
@@ -3383,15 +3404,18 @@ i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
/* Initialize VSI for TX */
static int
i40e_vsi_tx_init(struct i40e_vsi *vsi)
i40e_dev_tx_init(struct i40e_pf *pf)
{
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
struct rte_eth_dev_data *data = pf->dev_data;
uint16_t i;
uint32_t ret = I40E_SUCCESS;
struct i40e_tx_queue *txq;
for (i = 0; i < data->nb_tx_queues; i++) {
ret = i40e_tx_queue_init(data->tx_queues[i]);
txq = data->tx_queues[i];
if (!txq || !txq->q_set)
continue;
ret = i40e_tx_queue_init(txq);
if (ret != I40E_SUCCESS)
break;
}
@@ -3401,16 +3425,20 @@ i40e_vsi_tx_init(struct i40e_vsi *vsi)
/* Initialize VSI for RX */
static int
i40e_vsi_rx_init(struct i40e_vsi *vsi)
i40e_dev_rx_init(struct i40e_pf *pf)
{
struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
struct rte_eth_dev_data *data = pf->dev_data;
int ret = I40E_SUCCESS;
uint16_t i;
struct i40e_rx_queue *rxq;
i40e_pf_config_mq_rx(pf);
for (i = 0; i < data->nb_rx_queues; i++) {
ret = i40e_rx_queue_init(data->rx_queues[i]);
rxq = data->rx_queues[i];
if (!rxq || !rxq->q_set)
continue;
ret = i40e_rx_queue_init(rxq);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to do RX queue "
"initialization");
@@ -3421,20 +3449,19 @@ i40e_vsi_rx_init(struct i40e_vsi *vsi)
return ret;
}
/* Initialize VSI */
static int
i40e_vsi_init(struct i40e_vsi *vsi)
i40e_dev_rxtx_init(struct i40e_pf *pf)
{
int err;
err = i40e_vsi_tx_init(vsi);
err = i40e_dev_tx_init(pf);
if (err) {
PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization");
PMD_DRV_LOG(ERR, "Failed to do TX initialization");
return err;
}
err = i40e_vsi_rx_init(vsi);
err = i40e_dev_rx_init(pf);
if (err) {
PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization");
PMD_DRV_LOG(ERR, "Failed to do RX initialization");
return err;
}
@@ -4803,6 +4830,26 @@ i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
return ret;
}
/* Calculate the maximum number of contiguous PF queues that are configured */
static int
i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
{
struct rte_eth_dev_data *data = pf->dev_data;
int i, num;
struct i40e_rx_queue *rxq;
num = 0;
for (i = 0; i < pf->lan_nb_qps; i++) {
rxq = data->rx_queues[i];
if (rxq && rxq->q_set)
num++;
else
break;
}
return num;
}
/* Configure RSS */
static int
i40e_pf_config_rss(struct i40e_pf *pf)
@@ -4810,7 +4857,25 @@ i40e_pf_config_rss(struct i40e_pf *pf)
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct rte_eth_rss_conf rss_conf;
uint32_t i, lut = 0;
uint16_t j, num = i40e_align_floor(pf->dev_data->nb_rx_queues);
uint16_t j, num;
/*
* If both VMDQ and RSS are enabled, not all PF queues are configured.
* It's necessary to calculate the actual number of PF queues that are
* configured.
*/
if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
num = i40e_pf_calc_configured_queues_num(pf);
num = i40e_align_floor(num);
} else
num = i40e_align_floor(pf->dev_data->nb_rx_queues);
PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
num);
if (num == 0) {
PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
return -ENOTSUP;
}
for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
if (j == num)
@@ -4908,18 +4973,21 @@ i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
static int
i40e_pf_config_mq_rx(struct i40e_pf *pf)
{
if (!pf->dev_data->sriov.active) {
switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_RSS:
i40e_pf_config_rss(pf);
break;
default:
i40e_pf_disable_rss(pf);
break;
}
int ret = 0;
enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
return -ENOTSUP;
}
return 0;
/* RSS setup */
if (mq_mode & ETH_MQ_RX_RSS_FLAG)
ret = i40e_pf_config_rss(pf);
else
i40e_pf_disable_rss(pf);
return ret;
}
static int


@@ -355,7 +355,7 @@ struct i40e_adapter {
};
};
int i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on);
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
enum i40e_vsi_type type,
@@ -409,7 +409,7 @@ i40e_get_vsi_from_adapter(struct i40e_adapter *adapter)
return pf->main_vsi;
}
}
#define I40E_DEV_PRIVATE_TO_VSI(adapter) \
#define I40E_DEV_PRIVATE_TO_MAIN_VSI(adapter) \
i40e_get_vsi_from_adapter((struct i40e_adapter *)adapter)
/* I40E_VSI_TO */


@@ -1486,14 +1486,58 @@ i40e_xmit_pkts_simple(void *tx_queue,
return nb_tx;
}
/*
* Find the VSI the queue belongs to. 'queue_idx' is the queue index the
* application uses, assuming the indexes are sequential. From the
* driver's perspective they are not: for example, q0 belongs to the
* FDIR VSI, q1-q64 to the MAIN VSI, q65-q96 to SRIOV VSIs and q97-q128
* to VMDQ VSIs. An application running on the host can then use q1-q64
* and q97-q128, 96 queues in total, and accesses them with queue_idx 0
* to 95 while the real queue indexes differ. This function maps a
* queue_idx to the VSI the queue belongs to.
*/
static struct i40e_vsi*
i40e_pf_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
{
/* the queue in MAIN VSI range */
if (queue_idx < pf->main_vsi->nb_qps)
return pf->main_vsi;
queue_idx -= pf->main_vsi->nb_qps;
/* queue_idx is beyond the range of the VMDQ VSIs */
if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) {
PMD_INIT_LOG(ERR, "queue_idx out of range. VMDQ configured?");
return NULL;
}
return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi;
}
static uint16_t
i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
{
/* the queue in MAIN VSI range */
if (queue_idx < pf->main_vsi->nb_qps)
return queue_idx;
/* It's VMDQ queues */
queue_idx -= pf->main_vsi->nb_qps;
if (pf->nb_cfg_vmdq_vsi)
return queue_idx % pf->vmdq_nb_qps;
else {
PMD_INIT_LOG(ERR, "Failed to get queue offset");
return (uint16_t)(-1);
}
}
int
i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
struct i40e_rx_queue *rxq;
int err = -1;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t q_base = vsi->base_queue;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -1511,7 +1555,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
/* Init the RX tail register. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, TRUE);
err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
@@ -1528,16 +1572,18 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
struct i40e_rx_queue *rxq;
int err;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t q_base = vsi->base_queue;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (rx_queue_id < dev->data->nb_rx_queues) {
rxq = dev->data->rx_queues[rx_queue_id];
err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, FALSE);
/*
* rx_queue_id is the queue id the application refers to, while
* rxq->reg_idx is the real queue index.
*/
err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -1554,15 +1600,20 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
int err = -1;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t q_base = vsi->base_queue;
struct i40e_tx_queue *txq;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
if (tx_queue_id < dev->data->nb_tx_queues) {
err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, TRUE);
txq = dev->data->tx_queues[tx_queue_id];
/*
* tx_queue_id is the queue id the application refers to, while
* txq->reg_idx is the real queue index.
*/
err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
if (err)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
@@ -1574,16 +1625,18 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
struct i40e_tx_queue *txq;
int err;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t q_base = vsi->base_queue;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (tx_queue_id < dev->data->nb_tx_queues) {
txq = dev->data->tx_queues[tx_queue_id];
err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, FALSE);
/*
* tx_queue_id is the queue id the application refers to, while
* txq->reg_idx is the real queue index.
*/
err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -1606,14 +1659,23 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
struct i40e_vsi *vsi;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz;
uint32_t ring_size;
uint16_t len;
int use_def_burst_func = 1;
if (!vsi || queue_idx >= vsi->nb_qps) {
if (hw->mac.type == I40E_MAC_VF) {
struct i40e_vf *vf =
I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
} else
vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
if (vsi == NULL) {
PMD_DRV_LOG(ERR, "VSI not available or queue "
"index exceeds the maximum");
return I40E_ERR_PARAM;
@@ -1646,7 +1708,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
rxq->reg_idx = vsi->base_queue + queue_idx;
if (hw->mac.type == I40E_MAC_VF)
rxq->reg_idx = queue_idx;
else /* PF device */
rxq->reg_idx = vsi->base_queue +
i40e_get_queue_offset_by_qindex(pf, queue_idx);
rxq->port_id = dev->data->port_id;
rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
0 : ETHER_CRC_LEN);
@@ -1804,13 +1871,22 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
struct i40e_vsi *vsi;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_tx_queue *txq;
const struct rte_memzone *tz;
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
if (!vsi || queue_idx >= vsi->nb_qps) {
if (hw->mac.type == I40E_MAC_VF) {
struct i40e_vf *vf =
I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
} else
vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
if (vsi == NULL) {
PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
"exceeds the maximum", queue_idx);
return I40E_ERR_PARAM;
@@ -1934,7 +2010,12 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->hthresh = tx_conf->tx_thresh.hthresh;
txq->wthresh = tx_conf->tx_thresh.wthresh;
txq->queue_id = queue_idx;
txq->reg_idx = vsi->base_queue + queue_idx;
if (hw->mac.type == I40E_MAC_VF)
txq->reg_idx = queue_idx;
else /* PF device */
txq->reg_idx = vsi->base_queue +
i40e_get_queue_offset_by_qindex(pf, queue_idx);
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
txq->vsi = vsi;