i40e: enlarge the number of supported queues
Enlarge the number of supported queues to the maximum the hardware allows. Previously the driver imposed an artificial software limit of 64 queues per physical port.

Signed-off-by: Helin Zhang <helin.zhang@intel.com>
parent 50641113d0
commit 705b57f820
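With the enlarged limits below, an application can ask an i40e port for more than 64 queue pairs through the normal ethdev calls. A minimal sketch, assuming port 0 is an i40e device bound to DPDK, the build uses the new config defaults, and mb_pool is an existing mempool (the function name and sizes are illustrative, not part of the patch):

#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>

/* Request 128 RX and 128 TX queues on port 0; before this patch the i40e
 * PMD capped a PF at 64 queue pairs, so this configure step would fail. */
static int
setup_many_queues(struct rte_mempool *mb_pool)
{
	const uint16_t nb_rxq = 128, nb_txq = 128;
	struct rte_eth_conf port_conf;
	uint16_t q;
	int ret;

	memset(&port_conf, 0, sizeof(port_conf));  /* defaults are enough here */

	ret = rte_eth_dev_configure(0, nb_rxq, nb_txq, &port_conf);
	if (ret < 0)
		return ret;

	for (q = 0; q < nb_rxq; q++) {
		ret = rte_eth_rx_queue_setup(0, q, 512, rte_socket_id(),
					     NULL, mb_pool);
		if (ret < 0)
			return ret;
	}
	for (q = 0; q < nb_txq; q++) {
		ret = rte_eth_tx_queue_setup(0, q, 512, rte_socket_id(), NULL);
		if (ret < 0)
			return ret;
	}
	return rte_eth_dev_start(0);
}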
@@ -141,7 +141,7 @@ CONFIG_RTE_LIBRTE_KVARGS=y
 CONFIG_RTE_LIBRTE_ETHER=y
 CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
 CONFIG_RTE_MAX_ETHPORTS=32
-CONFIG_RTE_MAX_QUEUES_PER_PORT=256
+CONFIG_RTE_MAX_QUEUES_PER_PORT=1024
 CONFIG_RTE_LIBRTE_IEEE1588=n
 CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
 CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
@@ -189,6 +189,7 @@ CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
 CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
 CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE=y
 CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
+CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64
 CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
 CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
 # interval up to 8160 us, aligned to 2 (or default value)

@@ -139,7 +139,7 @@ CONFIG_RTE_LIBRTE_KVARGS=y
 CONFIG_RTE_LIBRTE_ETHER=y
 CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
 CONFIG_RTE_MAX_ETHPORTS=32
-CONFIG_RTE_MAX_QUEUES_PER_PORT=256
+CONFIG_RTE_MAX_QUEUES_PER_PORT=1024
 CONFIG_RTE_LIBRTE_IEEE1588=n
 CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
 CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
@@ -187,6 +187,7 @@ CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
 CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
 CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE=y
 CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
+CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64
 CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
 CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
 # interval up to 8160 us, aligned to 2 (or default value)
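The three RTE_LIBRTE_I40E_QUEUE_NUM_* knobs above become the per-VSI upper bounds the PMD uses when partitioning a physical function's queue pairs (see i40e_pf_parameter_init below). A minimal standalone sketch of the worst-case budget they imply, using hypothetical VF/VMDq counts and a hypothetical hardware limit purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Default values from the config fragments above. */
#define QUEUE_NUM_PER_PF  64   /* CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF */
#define QUEUE_NUM_PER_VF   4   /* CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF */
#define QUEUE_NUM_PER_VM   4   /* CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM */
#define FDIR_QP            1   /* one queue pair reserved for flow director */

int main(void)
{
	/* Hypothetical deployment: 32 VFs and 16 VMDq pools. */
	uint16_t nb_vfs = 32, nb_vmdq_pools = 16;
	/* Hypothetical hardware capability (the real value comes from
	 * hw->func_caps.num_tx_qp at runtime). */
	uint16_t hw_num_qp = 1536;

	uint16_t qp_count = FDIR_QP + QUEUE_NUM_PER_PF +
			    nb_vfs * QUEUE_NUM_PER_VF +
			    nb_vmdq_pools * QUEUE_NUM_PER_VM;

	printf("queue pairs requested: %u, hardware maximum: %u -> %s\n",
	       qp_count, hw_num_qp,
	       qp_count <= hw_num_qp ? "OK" : "exceeds limit");
	return 0;
}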
@@ -8,11 +8,6 @@ API and ABI deprecation notices are to be posted here.
 Deprecation Notices
 -------------------
 
-* Significant ABI changes are planned for struct rte_eth_dev to support up to
-  1024 queues per port. This change will be in release 2.2.
-  There is no backward compatibility planned from release 2.2.
-  All binaries will need to be rebuilt from release 2.2.
-
 * The following fields have been deprecated in rte_eth_stats:
   ibadcrc, ibadlen, imcasts, fdirmatch, fdirmiss,
   tx_pause_xon, rx_pause_xon, tx_pause_xoff, rx_pause_xoff
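The notice can be dropped because the larger rte_eth_dev queue arrays ship in this release. An application that depends on the bigger limit could add a compile-time guard; a minimal, hypothetical sketch (RTE_MAX_QUEUES_PER_PORT is the generated define for the config option shown above):

#include <rte_config.h>

/* Fail the build early if the ethdev queue arrays are still sized for
 * fewer than 128 queues per port. */
#if RTE_MAX_QUEUES_PER_PORT < 128
#error "This application needs RTE_MAX_QUEUES_PER_PORT >= 128 (DPDK >= 2.2 defaults to 1024)"
#endif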
@@ -118,6 +118,13 @@ Drivers
   Fixed i40e issue that occurred when a DPDK application didn't initialize
   ports if memory wasn't available on socket 0.
 
+* **i40e: Fixed maximum of 64 queues per port.**
+
+  Fixed an issue where i40e could not support more than 64 queues per port,
+  even though the hardware actually supports more. The real number of queues
+  may vary, as long as the total number of queues used by the PF, VFs, VMDq
+  and the flow director does not exceed the hardware maximum.
+
 * **vhost: Fixed Qemu shutdown.**
 
   Fixed issue with libvirt ``virsh destroy`` not killing the VM.
@@ -191,6 +198,9 @@ ABI Changes
 * The new fields rx_desc_lim and tx_desc_lim are added into rte_eth_dev_info
   structure.
 
+* The maximum number of queues per port, CONFIG_RTE_MAX_QUEUES_PER_PORT, is
+  increased to 1024.
+
 * The mbuf structure was changed to support unified packet type.
   It was already done in 2.1 for CONFIG_RTE_NEXT_ABI.
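Together with the larger queue limit, the new descriptor-limit fields let an application query both how many queues a port exposes and the per-queue descriptor constraints before configuring it. A minimal sketch; the nb_max/nb_min/nb_align member names follow the rte_eth_desc_lim structure added in this release and are shown here for illustration:

#include <stdio.h>
#include <rte_ethdev.h>

/* Print the queue and descriptor limits reported by a port. */
static void
print_port_limits(uint8_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);

	printf("port %u: max_rx_queues=%u max_tx_queues=%u\n",
	       port_id, info.max_rx_queues, info.max_tx_queues);
	printf("  rx descriptors: max=%u min=%u align=%u\n",
	       info.rx_desc_lim.nb_max, info.rx_desc_lim.nb_min,
	       info.rx_desc_lim.nb_align);
	printf("  tx descriptors: max=%u min=%u align=%u\n",
	       info.tx_desc_lim.nb_max, info.tx_desc_lim.nb_min,
	       info.tx_desc_lim.nb_align);
}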
@@ -2747,9 +2747,8 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
-	uint16_t sum_queues = 0, sum_vsis, left_queues;
+	uint16_t qp_count = 0, vsi_count = 0;
 
 	/* First check if FW support SRIOV */
 	if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
 		PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
 		return -EINVAL;
@@ -2760,109 +2759,85 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
 	pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_LOW_WATER;
 
 	pf->flags = I40E_FLAG_HEADER_SPLIT_DISABLED;
-	pf->max_num_vsi = RTE_MIN(hw->func_caps.num_vsis, I40E_MAX_NUM_VSIS);
-	PMD_INIT_LOG(INFO, "Max supported VSIs:%u", pf->max_num_vsi);
-	/* Allocate queues for pf */
-	if (hw->func_caps.rss) {
+	pf->max_num_vsi = hw->func_caps.num_vsis;
+	pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF;
+	pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+	pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
+
+	/* FDir queue/VSI allocation */
+	pf->fdir_qp_offset = 0;
+	if (hw->func_caps.fd) {
+		pf->flags |= I40E_FLAG_FDIR;
+		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
+	} else {
+		pf->fdir_nb_qps = 0;
+	}
+	qp_count += pf->fdir_nb_qps;
+	vsi_count += 1;
+
+	/* LAN queue/VSI allocation */
+	pf->lan_qp_offset = pf->fdir_qp_offset + pf->fdir_nb_qps;
+	if (!hw->func_caps.rss) {
+		pf->lan_nb_qps = 1;
+	} else {
 		pf->flags |= I40E_FLAG_RSS;
 		if (hw->mac.type == I40E_MAC_X722)
 			pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE;
-		pf->lan_nb_qps = RTE_MIN(hw->func_caps.num_tx_qp,
-			(uint32_t)(1 << hw->func_caps.rss_table_entry_width));
-		pf->lan_nb_qps = i40e_align_floor(pf->lan_nb_qps);
-	} else
-		pf->lan_nb_qps = 1;
-	sum_queues = pf->lan_nb_qps;
-	/* Default VSI is not counted in */
-	sum_vsis = 0;
-	PMD_INIT_LOG(INFO, "PF queue pairs:%u", pf->lan_nb_qps);
+		pf->lan_nb_qps = pf->lan_nb_qp_max;
+	}
+	qp_count += pf->lan_nb_qps;
+	vsi_count += 1;
 
+	/* VF queue/VSI allocation */
+	pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
 	if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
 		pf->flags |= I40E_FLAG_SRIOV;
 		pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
-		if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
-			PMD_INIT_LOG(ERR, "Config VF number %u, "
-				     "max supported %u.",
-				     dev->pci_dev->max_vfs,
-				     hw->func_caps.num_vfs);
-			return -EINVAL;
-		}
-		if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
-			PMD_INIT_LOG(ERR, "FVL VF queue %u, "
-				     "max support %u queues.",
-				     pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
-			return -EINVAL;
-		}
 		pf->vf_num = dev->pci_dev->max_vfs;
-		sum_queues += pf->vf_nb_qps * pf->vf_num;
-		sum_vsis += pf->vf_num;
-		PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u",
-			     pf->vf_num, pf->vf_nb_qps);
-	} else
+		PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
+			    "in total %u queues", pf->vf_num, pf->vf_nb_qps,
+			    pf->vf_nb_qps * pf->vf_num);
+	} else {
+		pf->vf_nb_qps = 0;
 		pf->vf_num = 0;
+	}
+	qp_count += pf->vf_nb_qps * pf->vf_num;
+	vsi_count += pf->vf_num;
 
+	/* VMDq queue/VSI allocation */
+	pf->vmdq_qp_offset = pf->vf_qp_offset + pf->vf_nb_qps * pf->vf_num;
 	if (hw->func_caps.vmdq) {
 		pf->flags |= I40E_FLAG_VMDQ;
-		pf->vmdq_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
+		pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
 		pf->max_nb_vmdq_vsi = 1;
-		/*
-		 * If VMDQ available, assume a single VSI can be created. Will adjust
-		 * later.
-		 */
-		sum_queues += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
-		sum_vsis += pf->max_nb_vmdq_vsi;
+		PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues per VMDQ VSI, "
+			    "in total %u queues", pf->max_nb_vmdq_vsi,
+			    pf->vmdq_nb_qps,
+			    pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
 	} else {
 		pf->vmdq_nb_qps = 0;
 		pf->max_nb_vmdq_vsi = 0;
 	}
 	pf->nb_cfg_vmdq_vsi = 0;
-
-	if (hw->func_caps.fd) {
-		pf->flags |= I40E_FLAG_FDIR;
-		pf->fdir_nb_qps = I40E_DEFAULT_QP_NUM_FDIR;
-		/**
-		 * Each flow director consumes one VSI and one queue,
-		 * but can't calculate out predictably here.
-		 */
-	}
+	qp_count += pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi;
+	vsi_count += pf->max_nb_vmdq_vsi;
 
 	if (hw->func_caps.dcb)
 		pf->flags |= I40E_FLAG_DCB;
 
-	if (sum_vsis > pf->max_num_vsi ||
-		sum_queues > hw->func_caps.num_rx_qp) {
-		PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied");
-		PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u",
-			     pf->max_num_vsi, sum_vsis);
-		PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u",
-			     hw->func_caps.num_rx_qp, sum_queues);
+	if (qp_count > hw->func_caps.num_tx_qp) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
+			    "the hardware maximum %u", qp_count,
+			    hw->func_caps.num_tx_qp);
+		return -EINVAL;
+	}
+	if (vsi_count > hw->func_caps.num_vsis) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
+			    "the hardware maximum %u", vsi_count,
+			    hw->func_caps.num_vsis);
 		return -EINVAL;
 	}
-
-	/* Adjust VMDQ setting to support as many VMs as possible */
-	if (pf->flags & I40E_FLAG_VMDQ) {
-		left_queues = hw->func_caps.num_rx_qp - sum_queues;
-
-		pf->max_nb_vmdq_vsi += RTE_MIN(left_queues / pf->vmdq_nb_qps,
-					pf->max_num_vsi - sum_vsis);
-
-		/* Limit the max VMDQ number that rte_ether that can support */
-		pf->max_nb_vmdq_vsi = RTE_MIN(pf->max_nb_vmdq_vsi,
-					ETH_64_POOLS - 1);
-
-		PMD_INIT_LOG(INFO, "Max VMDQ VSI num:%u",
-				pf->max_nb_vmdq_vsi);
-		PMD_INIT_LOG(INFO, "VMDQ queue pairs:%u", pf->vmdq_nb_qps);
-	}
-
-	/* Each VSI occupy 1 MSIX interrupt at least, plus IRQ0 for misc intr
-	 * cause */
-	if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
-		PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough",
-			     sum_vsis, hw->func_caps.num_msix_vectors);
-		return -EINVAL;
-	}
-	return I40E_SUCCESS;
+	return 0;
 }
 
 static int
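The rewritten i40e_pf_parameter_init lays out the PF's queue pairs as contiguous ranges: flow director first, then LAN, then VFs, then VMDq pools, with each section's offset derived from the previous one. A minimal standalone sketch of that layout arithmetic; the struct and the counts below are illustrative stand-ins, not the driver's types:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the relevant i40e_pf fields. */
struct qp_layout {
	uint16_t fdir_qp_offset, fdir_nb_qps;
	uint16_t lan_qp_offset,  lan_nb_qps;
	uint16_t vf_qp_offset,   vf_nb_qps, vf_num;
	uint16_t vmdq_qp_offset, vmdq_nb_qps, nb_vmdq_vsi;
};

int main(void)
{
	/* Example: 1 FDir QP, 64 LAN QPs, 16 VFs x 4 QPs, 8 VMDq VSIs x 4 QPs. */
	struct qp_layout l = {
		.fdir_nb_qps = 1, .lan_nb_qps = 64,
		.vf_nb_qps = 4, .vf_num = 16,
		.vmdq_nb_qps = 4, .nb_vmdq_vsi = 8,
	};

	/* Same offset chaining as the driver code above. */
	l.fdir_qp_offset = 0;
	l.lan_qp_offset  = l.fdir_qp_offset + l.fdir_nb_qps;
	l.vf_qp_offset   = l.lan_qp_offset + l.lan_nb_qps;
	l.vmdq_qp_offset = l.vf_qp_offset + l.vf_nb_qps * l.vf_num;

	uint16_t qp_count = l.fdir_nb_qps + l.lan_nb_qps +
			    l.vf_nb_qps * l.vf_num +
			    l.vmdq_nb_qps * l.nb_vmdq_vsi;

	printf("fdir @%u, lan @%u, vf @%u, vmdq @%u, total %u queue pairs\n",
	       l.fdir_qp_offset, l.lan_qp_offset, l.vf_qp_offset,
	       l.vmdq_qp_offset, qp_count);
	return 0;
}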
@@ -3252,7 +3227,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
 	bsf = rte_bsf32(qpnum_per_tc);
 
 	/* Adjust the queue number to actual queues that can be applied */
-	vsi->nb_qps = qpnum_per_tc * total_tc;
+	if (!(vsi->type == I40E_VSI_MAIN && total_tc == 1))
+		vsi->nb_qps = qpnum_per_tc * total_tc;
 
 	/**
 	 * Configure TC and queue mapping parameters, for enabled TC,
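The TC queue mapping relies on qpnum_per_tc being a power of two, so rte_bsf32() of it is simply log2 of the per-TC queue count programmed into the VSI. A minimal sketch of that relationship; align_floor_pow2 is a hypothetical helper, not the driver's own i40e_align_floor:

#include <stdint.h>
#include <stdio.h>
#include <rte_common.h>   /* rte_bsf32() */

/* Hypothetical helper: round a queue count down to a power of two,
 * mirroring the requirement that queues per TC be a power of 2. */
static uint16_t
align_floor_pow2(uint16_t n)
{
	uint16_t p = 1;

	while ((uint16_t)(p << 1) && (uint16_t)(p << 1) <= n)
		p <<= 1;
	return n ? p : 0;
}

int main(void)
{
	uint16_t requested = 48;                              /* not a power of two */
	uint16_t qpnum_per_tc = align_floor_pow2(requested);  /* -> 32 */
	uint32_t bsf = rte_bsf32(qpnum_per_tc);               /* log2(32) = 5 */

	printf("requested %u -> %u queues per TC, bsf (log2) = %u\n",
	       requested, qpnum_per_tc, bsf);
	return 0;
}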
@@ -402,10 +402,18 @@ struct i40e_pf {
 	uint16_t vf_num;
 	/* Each of below queue pairs should be power of 2 since it's the
	   precondition after TC configuration applied */
+	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+	uint16_t lan_qp_offset;
+	uint16_t vmdq_nb_qp_max;
 	uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */
+	uint16_t vmdq_qp_offset;
+	uint16_t vf_nb_qp_max;
 	uint16_t vf_nb_qps; /* The number of queue pairs of VF */
+	uint16_t vf_qp_offset;
 	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+	uint16_t fdir_qp_offset;
+
 	uint16_t hash_lut_size; /* The size of hash lookup table */
 	/* store VXLAN UDP ports */
 	uint16_t vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];