net/i40e: enable DCB on SRIOV VFs
Enable DCB on SRIOV VFs, including:
- UP and TC mapping according to dcb_tc in struct rte_eth_dcb_rx_conf.
- TC and queue mapping: queues are divided equally among the enabled TCs.
- UP insertion when transmitting a packet, according to the TC the Tx queue belongs to.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
parent bcd0e43266
commit 6f0a707e5b
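For context, a minimal sketch of the generic DPDK configuration that populates dcb_tc in struct rte_eth_dcb_rx_conf, i.e. the mapping the commit message refers to. The port, the 4-TC layout, the UP-to-TC split, the queue counts, and the helper name are assumptions for illustration only, not taken from this patch:

/* Hypothetical helper: request DCB with 4 TCs and a simple UP-to-TC map. */
#include <string.h>
#include <stdint.h>
#include <rte_ethdev.h>

static int
configure_dcb_port(uint16_t port_id)
{
	struct rte_eth_conf conf;
	int i;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.mq_mode = ETH_MQ_RX_DCB;
	conf.txmode.mq_mode = ETH_MQ_TX_DCB;
	conf.rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS;
	conf.tx_adv_conf.dcb_tx_conf.nb_tcs = ETH_4_TCS;

	/* UP-to-TC mapping: two user priorities per TC (commit bullet 1). */
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
		conf.rx_adv_conf.dcb_rx_conf.dcb_tc[i] = (uint8_t)(i / 2);
		conf.tx_adv_conf.dcb_tx_conf.dcb_tc[i] = (uint8_t)(i / 2);
	}

	/* 8 Rx/Tx queues over 4 TCs: two queues per TC (commit bullet 2). */
	return rte_eth_dev_configure(port_id, 8, 8, &conf);
}

With eight queues and four TCs, the equal split described in the commit message gives each TC two queues.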
@@ -1243,6 +1243,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
 			&dev->data->mac_addrs[0]);
 
+	/* Init dcb to sw mode by default */
+	ret = i40e_dcb_init_configure(dev, TRUE);
+	if (ret != I40E_SUCCESS) {
+		PMD_INIT_LOG(INFO, "Failed to init dcb.");
+		pf->flags &= ~I40E_FLAG_DCB;
+	}
+	/* Update HW struct after DCB configuration */
+	i40e_get_cap(hw);
+
 	/* initialize pf host driver to setup SRIOV resource if applicable */
 	i40e_pf_host_init(dev);
 
@@ -1271,13 +1280,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	/* initialize mirror rule list */
 	TAILQ_INIT(&pf->mirror_list);
 
-	/* Init dcb to sw mode by default */
-	ret = i40e_dcb_init_configure(dev, TRUE);
-	if (ret != I40E_SUCCESS) {
-		PMD_INIT_LOG(INFO, "Failed to init dcb.");
-		pf->flags &= ~I40E_FLAG_DCB;
-	}
-
 	ret = i40e_init_ethtype_filter_list(dev);
 	if (ret < 0)
 		goto err_init_ethtype_filter_list;
@@ -4844,13 +4846,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
 		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
 		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
-						I40E_DEFAULT_TCMAP);
+						hw->func_caps.enabled_tcmap);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR,
				"Failed to configure TC queue mapping");
 			goto fail_msix_alloc;
 		}
-		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+
+		ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
 		ctxt.info.valid_sections |=
 			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
 		/**
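The switch from I40E_DEFAULT_TCMAP to hw->func_caps.enabled_tcmap lets the VF VSI carry every TC the hardware reports as enabled. A simplified sketch of the "queues divided equally per TC" idea follows; this is not the driver's i40e_vsi_config_tc_queue_mapping(), and the helper name and rounding rules are assumptions for illustration:

#include <stdint.h>

#define MAX_TC 8

/* Divide nb_qps equally among the TCs set in enabled_tcmap; each enabled TC
 * gets a power-of-two queue count (the "bsf" exponent used by the hardware). */
static void
sketch_tc_queue_split(uint16_t nb_qps, uint8_t enabled_tcmap,
		      uint16_t qp_base[MAX_TC], uint16_t qp_count[MAX_TC])
{
	uint16_t qps_per_tc, offset = 0;
	uint8_t total_tc = 0, bsf = 0, i;

	for (i = 0; i < MAX_TC; i++)
		if (enabled_tcmap & (1u << i))
			total_tc++;
	if (total_tc == 0)
		total_tc = 1;

	qps_per_tc = nb_qps / total_tc;
	if (qps_per_tc == 0)
		qps_per_tc = 1;
	/* Largest power of two that fits in the equal share. */
	while ((uint16_t)(1u << (bsf + 1)) <= qps_per_tc)
		bsf++;

	for (i = 0; i < MAX_TC; i++) {
		if (enabled_tcmap & (1u << i)) {
			qp_base[i] = offset;      /* first queue of this TC */
			qp_count[i] = 1u << bsf;  /* 2^bsf queues for this TC */
			offset += 1u << bsf;
		} else {
			qp_base[i] = 0;
			qp_count[i] = 0;
		}
	}
}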
@@ -9872,7 +9875,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret = 0;
+	int i, ret = 0;
 
 	if ((pf->flags & I40E_FLAG_DCB) == 0) {
 		PMD_INIT_LOG(ERR, "HW doesn't support DCB");
@@ -9899,11 +9902,16 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
 		hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
 		hw->local_dcbx_config.etscfg.tsatable[0] =
						I40E_IEEE_TSA_ETS;
+		/* all UPs mapping to TC0 */
+		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+			hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
 		hw->local_dcbx_config.etsrec =
 			hw->local_dcbx_config.etscfg;
 		hw->local_dcbx_config.pfc.willing = 0;
 		hw->local_dcbx_config.pfc.pfccap =
					I40E_MAX_TRAFFIC_CLASS;
+		hw->local_dcbx_config.pfc.pfcenable =
+					I40E_DEFAULT_TCMAP;
 		/* FW needs one App to configure HW */
 		hw->local_dcbx_config.numapps = 1;
 		hw->local_dcbx_config.app[0].selector =
@@ -405,6 +405,29 @@ i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
 	return err;
 }
 
+static inline uint8_t
+i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
+			 uint16_t queue_id)
+{
+	struct i40e_aqc_vsi_properties_data *info = &vsi->info;
+	uint16_t bsf, qp_idx;
+	uint8_t i;
+
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		if (vsi->enabled_tc & (1 << i)) {
+			qp_idx = rte_le_to_cpu_16((info->tc_mapping[i] &
+				I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+				I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT);
+			bsf = rte_le_to_cpu_16((info->tc_mapping[i] &
+				I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+				I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+			if (queue_id >= qp_idx && queue_id < qp_idx + (1 << bsf))
+				return i;
+		}
+	}
+	return 0;
+}
+
 static int
 i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
			     struct i40e_pf_vf *vf,
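For illustration, a standalone decode of the tc_mapping[] encoding that i40e_vsi_get_tc_of_queue() parses: the queue offset sits in the low bits and the queue count is stored as a power-of-two exponent above it. The local defines loosely mirror the I40E_AQ_VSI_TC_QUE_* values, and the 8-queue / 2-TC layout is a made-up example:

#include <stdint.h>
#include <stdio.h>

#define TC_QUE_OFFSET_SHIFT	0
#define TC_QUE_OFFSET_MASK	(0x1FF << TC_QUE_OFFSET_SHIFT)
#define TC_QUE_NUMBER_SHIFT	9
#define TC_QUE_NUMBER_MASK	(0x7 << TC_QUE_NUMBER_SHIFT)

int
main(void)
{
	/* Hypothetical VF VSI: 8 queue pairs split equally over TC0 and TC1. */
	uint16_t tc_mapping[2] = {
		(0 << TC_QUE_OFFSET_SHIFT) | (2 << TC_QUE_NUMBER_SHIFT), /* TC0: queues 0-3 */
		(4 << TC_QUE_OFFSET_SHIFT) | (2 << TC_QUE_NUMBER_SHIFT), /* TC1: queues 4-7 */
	};
	uint16_t queue_id = 5;
	int tc;

	for (tc = 0; tc < 2; tc++) {
		uint16_t qp_idx = (tc_mapping[tc] & TC_QUE_OFFSET_MASK) >>
				  TC_QUE_OFFSET_SHIFT;
		uint16_t bsf = (tc_mapping[tc] & TC_QUE_NUMBER_MASK) >>
			       TC_QUE_NUMBER_SHIFT;

		/* Same containment test the driver helper uses. */
		if (queue_id >= qp_idx && queue_id < qp_idx + (1 << bsf)) {
			printf("queue %u belongs to TC%d\n", (unsigned)queue_id, tc);
			break;
		}
	}
	return 0;
}

Here queue 5 falls in TC1's range [4, 8), so the decode returns TC1.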
@@ -412,15 +435,17 @@ i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
 {
 	int err = I40E_SUCCESS;
 	struct i40e_hmc_obj_txq tx_ctx;
+	struct i40e_vsi *vsi = vf->vsi;
 	uint32_t qtx_ctl;
-	uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;
-
+	uint16_t abs_queue_id = vsi->base_queue + txq->queue_id;
+	uint8_t dcb_tc;
 
 	/* clear the context structure first */
 	memset(&tx_ctx, 0, sizeof(tx_ctx));
 	tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
 	tx_ctx.qlen = txq->ring_len;
-	tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
+	dcb_tc = i40e_vsi_get_tc_of_queue(vsi, txq->queue_id);
+	tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[dcb_tc]);
 	tx_ctx.head_wb_ena = txq->headwb_enabled;
 	tx_ctx.head_wb_addr = txq->dma_headwb_addr;
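With each VF Tx queue now attached to the QSet handle of its own TC rather than always qs_handle[0], traffic is scheduled per TC. Because the queues are split contiguously and equally across TCs, a hypothetical application-side helper like the following (name and equal-split assumption are mine, not part of the patch) could pick a Tx queue that lands in a desired TC, so the UP inserted on transmit matches that TC:

#include <stdint.h>

/* Hypothetical helper: with nb_txq queues divided equally among nb_tcs
 * traffic classes, return the first Tx queue belonging to the given TC. */
static uint16_t
pick_txq_for_tc(uint16_t nb_txq, uint8_t nb_tcs, uint8_t tc)
{
	uint16_t queues_per_tc = nb_txq / nb_tcs;

	return (uint16_t)(tc * queues_per_tc);
}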
@@ -1351,6 +1376,7 @@ i40e_pf_host_init(struct rte_eth_dev *dev)
 		goto fail;
 	}
 
+	RTE_ETH_DEV_SRIOV(dev).active = pf->vf_num;
 	/* restore irq0 */
 	i40e_pf_enable_irq0(hw);
 