ixgbe: DCB / flow control

Signed-off-by: Intel

commit 0807f80d35
parent dbb0b8737f

@@ -129,6 +129,8 @@ static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
 static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
         struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+        struct rte_eth_pfc_conf *pfc_conf);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
@@ -139,6 +141,7 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param);
 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
         uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
 
 /* For Virtual Function support */
 static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
@@ -245,6 +248,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
     .dev_led_on = ixgbe_dev_led_on,
     .dev_led_off = ixgbe_dev_led_off,
     .flow_ctrl_set = ixgbe_flow_ctrl_set,
+    .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
     .mac_addr_add = ixgbe_add_rar,
     .mac_addr_remove = ixgbe_remove_rar,
     .fdir_add_signature_filter = ixgbe_fdir_add_signature_filter,
@@ -471,6 +475,46 @@ ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
     }
 }
 
+static void
+ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+{
+    uint8_t i;
+    struct ixgbe_dcb_tc_config *tc;
+    int dcb_max_tc = IXGBE_DCB_MAX_TRAFFIC_CLASS;
+
+    dcb_config->num_tcs.pg_tcs = dcb_max_tc;
+    dcb_config->num_tcs.pfc_tcs = dcb_max_tc;
+    for (i = 0; i < dcb_max_tc; i++) {
+        tc = &dcb_config->tc_config[i];
+        tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = i;
+        tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100/dcb_max_tc + (i & 1);
+        tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
+        tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100/dcb_max_tc + (i & 1);
+        tc->pfc = ixgbe_dcb_pfc_disabled;
+    }
+
+    /* Initialize default user to priority mapping, UPx->TC0 */
+    tc = &dcb_config->tc_config[0];
+    tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+    tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+    for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
+        dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
+        dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
+    }
+    dcb_config->rx_pba_cfg = ixgbe_dcb_pba_equal;
+    dcb_config->pfc_mode_enable = false;
+    dcb_config->vt_mode = true;
+    dcb_config->round_robin_enable = false;
+    /* support all DCB capabilities in 82599 */
+    dcb_config->support.capabilities = 0xFF;
+
+    /* we only support 4 TCs for X540 */
+    if (hw->mac.type == ixgbe_mac_X540) {
+        dcb_config->num_tcs.pg_tcs = 4;
+        dcb_config->num_tcs.pfc_tcs = 4;
+    }
+}
+
 /*
  * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
  * It returns 0 on success.
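For reference only (not part of the commit): with the 82599 maximum of IXGBE_DCB_MAX_TRAFFIC_CLASS = 8 traffic classes, the default per-TC bandwidth 100/dcb_max_tc + (i & 1) used above alternates 12 and 13 percent so the eight classes sum to exactly 100. A minimal standalone check of that arithmetic, with a hypothetical DCB_MAX_TC macro standing in for the driver constant:

#include <stdio.h>

/* Assumes the 82599 maximum of 8 traffic classes, as in the diff above. */
#define DCB_MAX_TC 8

int main(void)
{
    int i, sum = 0;

    for (i = 0; i < DCB_MAX_TC; i++)
        sum += 100 / DCB_MAX_TC + (i & 1);  /* 12, 13, 12, 13, ... */

    printf("total bandwidth = %d%%\n", sum);  /* prints "total bandwidth = 100%" */
    return 0;
}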
@@ -520,13 +564,17 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
         return -EIO;
     }
 
+    /* Initialize DCB configuration*/
+    memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
+    ixgbe_dcb_init(hw,dcb_config);
     /* Get Hardware Flow Control setting */
     hw->fc.requested_mode = ixgbe_fc_full;
     hw->fc.current_mode = ixgbe_fc_full;
     hw->fc.pause_time = IXGBE_FC_PAUSE;
-    hw->fc.low_water = IXGBE_FC_LO;
-    for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+    for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+        hw->fc.low_water[i] = IXGBE_FC_LO;
         hw->fc.high_water[i] = IXGBE_FC_HI;
+    }
     hw->fc.send_xon = 1;
 
     ixgbe_disable_intr(hw);
@@ -1133,6 +1181,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
     mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
         ETH_VLAN_EXTEND_MASK;
     ixgbe_vlan_offload_set(dev, mask);
 
+    /* Configure DCB hw */
+    ixgbe_configure_dcb(dev);
+
     if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
         err = ixgbe_fdir_configure(dev);
@@ -1823,10 +1874,10 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
     hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
     hw->fc.pause_time = fc_conf->pause_time;
     hw->fc.high_water[0] = fc_conf->high_water;
-    hw->fc.low_water = fc_conf->low_water;
+    hw->fc.low_water[0] = fc_conf->low_water;
     hw->fc.send_xon = fc_conf->send_xon;
 
-    err = ixgbe_fc_enable(hw, 0);
+    err = ixgbe_fc_enable(hw);
     /* Not negotiated is not an error case */
     if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
         return 0;
@@ -1836,6 +1887,211 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
     return -EIO;
 }
 
+/**
+ *  ixgbe_pfc_enable_generic - Enable flow control
+ *  @hw: pointer to hardware structure
+ *  @tc_num: traffic class number
+ *  Enable flow control according to the current settings.
+ */
+static int
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
+{
+    int ret_val = 0;
+    uint32_t mflcn_reg, fccfg_reg;
+    uint32_t reg;
+    uint32_t fcrtl, fcrth;
+    uint8_t i;
+    uint8_t nb_rx_en;
+
+    /* Validate the water mark configuration */
+    if (!hw->fc.pause_time) {
+        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+        goto out;
+    }
+
+    /* Low water mark of zero causes XOFF floods */
+    if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+        /* High/Low water can not be 0 */
+        if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
+            PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+            ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+            goto out;
+        }
+
+        if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
+            PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+            ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+            goto out;
+        }
+    }
+    /* Negotiate the fc mode to use */
+    ixgbe_fc_autoneg(hw);
+
+    /* Disable any previous flow control settings */
+    mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+    mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_SHIFT | IXGBE_MFLCN_RFCE|IXGBE_MFLCN_RPFCE);
+
+    fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+    fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
+
+    switch (hw->fc.current_mode) {
+    case ixgbe_fc_none:
+        /*
+         * If the count of enabled RX Priority Flow control >1,
+         * and the TX pause can not be disabled
+         */
+        nb_rx_en = 0;
+        for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+            reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+            if (reg & IXGBE_FCRTH_FCEN)
+                nb_rx_en++;
+        }
+        if (nb_rx_en > 1)
+            fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+        break;
+    case ixgbe_fc_rx_pause:
+        /*
+         * Rx Flow control is enabled and Tx Flow control is
+         * disabled by software override. Since there really
+         * isn't a way to advertise that we are capable of RX
+         * Pause ONLY, we will advertise that we support both
+         * symmetric and asymmetric Rx PAUSE.  Later, we will
+         * disable the adapter's ability to send PAUSE frames.
+         */
+        mflcn_reg |= IXGBE_MFLCN_RPFCE;
+        /*
+         * If the count of enabled RX Priority Flow control >1,
+         * and the TX pause can not be disabled
+         */
+        nb_rx_en = 0;
+        for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+            reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+            if (reg & IXGBE_FCRTH_FCEN)
+                nb_rx_en++;
+        }
+        if (nb_rx_en > 1)
+            fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+        break;
+    case ixgbe_fc_tx_pause:
+        /*
+         * Tx Flow control is enabled, and Rx Flow control is
+         * disabled by software override.
+         */
+        fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+        break;
+    case ixgbe_fc_full:
+        /* Flow control (both Rx and Tx) is enabled by SW override. */
+        mflcn_reg |= IXGBE_MFLCN_RPFCE;
+        fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        ret_val = IXGBE_ERR_CONFIG;
+        goto out;
+        break;
+    }
+
+    /* Set 802.3x based flow control settings. */
+    mflcn_reg |= IXGBE_MFLCN_DPF;
+    IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
+    IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
+
+    /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+    if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+        hw->fc.high_water[tc_num]) {
+        fcrtl = (hw->fc.low_water[tc_num] << 10) | IXGBE_FCRTL_XONE;
+        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), fcrtl);
+        fcrth = (hw->fc.high_water[tc_num] << 10) | IXGBE_FCRTH_FCEN;
+    } else {
+        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(tc_num), 0);
+        /*
+         * In order to prevent Tx hangs when the internal Tx
+         * switch is enabled we must set the high water mark
+         * to the maximum FCRTH value.  This allows the Tx
+         * switch to function even under heavy Rx workloads.
+         */
+        fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num)) - 32;
+    }
+    IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(tc_num), fcrth);
+
+    /* Configure pause time (2 TCs per register) */
+    reg = hw->fc.pause_time * 0x00010001;
+    for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+        IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+    /* Configure flow control refresh threshold value */
+    IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+out:
+    return ret_val;
+}
+
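A side note on units (not part of the commit): the comment in the patch keeps high_water/low_water in kilobytes, and the code above shifts them left by 10 (x1024) only when programming FCRTL/FCRTH, while the Rx packet buffer size read from RXPBSIZE is shifted right by the same amount to get back to KB. A minimal sketch of that conversion, with hypothetical helper names and a RXPBSIZE_SHIFT assumed equal to the driver's IXGBE_RXPBSIZE_SHIFT:

#include <stdint.h>

#define RXPBSIZE_SHIFT 10   /* assumption: same shift the diff uses */

/* Watermarks travel in KB; bytes are only produced at register-write time. */
static inline uint32_t watermark_kb_to_bytes(uint32_t kb)
{
    return kb << RXPBSIZE_SHIFT;   /* e.g. 48 KB -> 49152 bytes */
}

static inline uint32_t rxpbsize_reg_to_kb(uint32_t reg)
{
    return reg >> RXPBSIZE_SHIFT;  /* packet buffer size back to KB */
}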
+static int
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
+{
+    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
+
+    if(hw->mac.type != ixgbe_mac_82598EB) {
+        ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
+    }
+    return ret_val;
+}
+
+static int
+ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
+{
+    int err;
+    uint32_t rx_buf_size;
+    uint32_t max_high_water;
+    uint8_t tc_num;
+    uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct ixgbe_dcb_config *dcb_config =
+        IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+
+    enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
+        ixgbe_fc_none,
+        ixgbe_fc_rx_pause,
+        ixgbe_fc_tx_pause,
+        ixgbe_fc_full
+    };
+
+    PMD_INIT_FUNC_TRACE();
+
+    ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
+    tc_num = map[pfc_conf->priority];
+    rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
+    PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+    /*
+     * At least reserve one Ethernet frame for watermark
+     * high_water/low_water in kilo bytes for ixgbe
+     */
+    max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+    if ((pfc_conf->fc.high_water > max_high_water) ||
+        (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+        PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
+        PMD_INIT_LOG(ERR, "High_water must <= 0x%x\n", max_high_water);
+        return (-EINVAL);
+    }
+
+    hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
+    hw->fc.pause_time = pfc_conf->fc.pause_time;
+    hw->fc.send_xon = pfc_conf->fc.send_xon;
+    hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
+    hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
+
+    err = ixgbe_dcb_pfc_enable(dev,tc_num);
+
+    /* Not negotiated is not an error case */
+    if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
+        return 0;
+
+    PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x \n", err);
+    return -EIO;
+}
+
 static void
 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
         uint32_t index, uint32_t pool)
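For illustration only (not part of the commit): an application reaches the new .priority_flow_ctrl_set op through the ethdev flow-control API, roughly as below. The rte_eth_dev_priority_flow_ctrl_set() wrapper and the RTE_FC_FULL mode value are assumed from the ethdev library of the same era; only fields that the diff itself reads are filled in, and the watermark values are illustrative.

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: enable PFC for user priority 3 on one port, watermarks in KB. */
static int enable_pfc_on_priority3(uint8_t port_id)
{
    struct rte_eth_pfc_conf pfc_conf;

    memset(&pfc_conf, 0, sizeof(pfc_conf));
    pfc_conf.priority      = 3;           /* user priority, mapped to a TC by the PMD */
    pfc_conf.fc.mode       = RTE_FC_FULL; /* assumed ethdev mode constant */
    pfc_conf.fc.high_water = 48;          /* KB, must fit the per-TC Rx buffer */
    pfc_conf.fc.low_water  = 16;          /* KB, must stay below high_water */
    pfc_conf.fc.pause_time = 0x680;
    pfc_conf.fc.send_xon   = 1;

    return rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
}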
@@ -34,6 +34,9 @@
 
 #ifndef _IXGBE_ETHDEV_H_
 #define _IXGBE_ETHDEV_H_
+#include "ixgbe/ixgbe_dcb.h"
+#include "ixgbe/ixgbe_dcb_82599.h"
+#include "ixgbe/ixgbe_dcb_82598.h"
 
 /* need update link, bit flag */
 #define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -97,6 +100,7 @@ struct ixgbe_adapter {
     struct ixgbe_stat_mapping_registers stat_mappings;
     struct ixgbe_vfta shadow_vfta;
     struct ixgbe_hwstrip hwstrip;
+    struct ixgbe_dcb_config dcb_config;
 };
 
 #define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
@@ -120,6 +124,9 @@ struct ixgbe_adapter {
 #define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \
     (&((struct ixgbe_adapter *)adapter)->hwstrip)
 
+#define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \
+    (&((struct ixgbe_adapter *)adapter)->dcb_config)
+
 /*
  * RX/TX function prototypes
  */
@@ -198,4 +205,6 @@ int ixgbe_fdir_remove_perfect_filter(struct rte_eth_dev *dev,
 int ixgbe_fdir_set_masks(struct rte_eth_dev *dev,
         struct rte_fdir_masks *fdir_masks);
 
+void ixgbe_configure_dcb(struct rte_eth_dev *dev);
+
 #endif /* _IXGBE_ETHDEV_H_ */
@@ -77,6 +77,7 @@
 #include "ixgbe/ixgbe_api.h"
 #include "ixgbe/ixgbe_vf.h"
 #include "ixgbe_ethdev.h"
+#include "ixgbe/ixgbe_dcb.h"
 
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 #define RTE_PMD_IXGBE_RX_MAX_BURST 32
@@ -2189,6 +2190,478 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
     }
 }
 
+/**
+ * ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
+        struct ixgbe_dcb_config *dcb_config)
+{
+    uint32_t reg;
+    uint32_t q;
+
+    PMD_INIT_FUNC_TRACE();
+    if (hw->mac.type != ixgbe_mac_82598EB) {
+        /* Disable the Tx desc arbiter so that MTQC can be changed */
+        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+        reg |= IXGBE_RTTDCS_ARBDIS;
+        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+        /* Enable DCB for Tx with 8 TCs */
+        if (dcb_config->num_tcs.pg_tcs == 8) {
+            reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+        }
+        else {
+            reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+        }
+        if (dcb_config->vt_mode)
+            reg |= IXGBE_MTQC_VT_ENA;
+        IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+        /* Disable drop for all queues */
+        for (q = 0; q < 128; q++)
+            IXGBE_WRITE_REG(hw, IXGBE_QDE,
+                (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+        /* Enable the Tx desc arbiter */
+        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+        reg &= ~IXGBE_RTTDCS_ARBDIS;
+        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+        /* Enable Security TX Buffer IFG for DCB */
+        reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+        reg |= IXGBE_SECTX_DCB;
+        IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+    }
+    return;
+}
+
+/**
+ * ixgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
+        struct ixgbe_dcb_config *dcb_config)
+{
+    struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+        &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+    PMD_INIT_FUNC_TRACE();
+    if (hw->mac.type != ixgbe_mac_82598EB)
+        /*PF VF Transmit Enable*/
+        IXGBE_WRITE_REG(hw, IXGBE_VFTE(0),
+            vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
+
+    /*Configure general DCB TX parameters*/
+    ixgbe_dcb_tx_hw_config(hw,dcb_config);
+    return;
+}
+
+static void
+ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
+        struct ixgbe_dcb_config *dcb_config)
+{
+    struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+        &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+    struct ixgbe_dcb_tc_config *tc;
+    uint8_t i,j;
+
+    /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
+    if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
+        dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+        dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+    }
+    else {
+        dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+        dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+    }
+    /* User Priority to Traffic Class mapping */
+    for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+        j = vmdq_rx_conf->dcb_queue[i];
+        tc = &dcb_config->tc_config[j];
+        tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (1 << j);
+    }
+}
+
+static void
+ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
+        struct ixgbe_dcb_config *dcb_config)
+{
+    struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+        &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+    struct ixgbe_dcb_tc_config *tc;
+    uint8_t i,j;
+
+    /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
+    if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
+        dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
+        dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
+    }
+    else {
+        dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
+        dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
+    }
+
+    /* User Priority to Traffic Class mapping */
+    for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+        j = vmdq_tx_conf->dcb_queue[i];
+        tc = &dcb_config->tc_config[j];
+        tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (1 << j);
+    }
+    return;
+}
+
+static void
+ixgbe_dcb_rx_config(struct rte_eth_dev *dev,struct ixgbe_dcb_config *dcb_config)
+{
+    struct rte_eth_dcb_rx_conf *rx_conf =
+        &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+    struct ixgbe_dcb_tc_config *tc;
+    uint8_t i,j;
+
+    dcb_config->num_tcs.pg_tcs = rx_conf->nb_tcs;
+    dcb_config->num_tcs.pfc_tcs = rx_conf->nb_tcs;
+
+    /* User Priority to Traffic Class mapping */
+    for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+        j = rx_conf->dcb_queue[i];
+        tc = &dcb_config->tc_config[j];
+        tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = (1 << j);
+    }
+}
+
+static void
+ixgbe_dcb_tx_config(struct rte_eth_dev *dev,struct ixgbe_dcb_config *dcb_config)
+{
+    struct rte_eth_dcb_tx_conf *tx_conf =
+        &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+    struct ixgbe_dcb_tc_config *tc;
+    uint8_t i,j;
+
+    dcb_config->num_tcs.pg_tcs = tx_conf->nb_tcs;
+    dcb_config->num_tcs.pfc_tcs = tx_conf->nb_tcs;
+
+    /* User Priority to Traffic Class mapping */
+    for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+        j = tx_conf->dcb_queue[i];
+        tc = &dcb_config->tc_config[j];
+        tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (1 << j);
+    }
+}
+
+/**
+ * ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static void
+ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
+        struct ixgbe_dcb_config *dcb_config)
+{
+    uint32_t reg;
+    uint32_t vlanctrl;
+    uint8_t i;
+
+    PMD_INIT_FUNC_TRACE();
+    /*
+     * Disable the arbiter before changing parameters
+     * (always enable recycle mode; WSP)
+     */
+    reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
+    IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+    if (hw->mac.type != ixgbe_mac_82598EB) {
+        reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+        if (dcb_config->num_tcs.pg_tcs == 4) {
+            if (dcb_config->vt_mode)
+                reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+                    IXGBE_MRQC_VMDQRT4TCEN;
+            else {
+                IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
+                reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+                    IXGBE_MRQC_RT4TCEN;
+            }
+        }
+        if (dcb_config->num_tcs.pg_tcs == 8) {
+            if (dcb_config->vt_mode)
+                reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+                    IXGBE_MRQC_VMDQRT8TCEN;
+            else {
+                IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
+                reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+                    IXGBE_MRQC_RT8TCEN;
+            }
+        }
+
+        IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+    }
+
+    /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+    vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+    vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+    IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+
+    /* VFTA - enable all vlan filters */
+    for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
+        IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+    }
+
+    /*
+     * Configure Rx packet plane (recycle mode; WSP) and
+     * enable arbiter
+     */
+    reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
+    IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+    return;
+}
+
+static void
+ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
+        uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+    switch (hw->mac.type) {
+    case ixgbe_mac_82598EB:
+        ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+        break;
+    case ixgbe_mac_82599EB:
+    case ixgbe_mac_X540:
+        ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+            tsa, map);
+        break;
+    default:
+        break;
+    }
+}
+
+static void
+ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *max,
+        uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+{
+    switch (hw->mac.type) {
+    case ixgbe_mac_82598EB:
+        ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
+        ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
+        break;
+    case ixgbe_mac_82599EB:
+    case ixgbe_mac_X540:
+        ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
+        ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
+        break;
+    default:
+        break;
+    }
+}
+
+#define DCB_RX_CONFIG  1
+#define DCB_TX_CONFIG  1
+#define DCB_TX_PB      1024
+/**
+ * ixgbe_dcb_hw_configure - Enable DCB and configure
+ * general DCB in VT mode and non-VT mode parameters
+ * @dev: pointer to rte_eth_dev structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ */
+static int
+ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
+        struct ixgbe_dcb_config *dcb_config)
+{
+    int ret = 0;
+    uint8_t i,pfc_en,nb_tcs;
+    uint16_t pbsize;
+    uint8_t config_dcb_rx = 0;
+    uint8_t config_dcb_tx = 0;
+    uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+    uint8_t bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+    uint16_t refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+    uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+    uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
+    struct ixgbe_dcb_tc_config *tc;
+    uint32_t max_frame = dev->data->max_frame_size;
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+    switch(dev->data->dev_conf.rxmode.mq_mode){
+    case ETH_VMDQ_DCB:
+        dcb_config->vt_mode = true;
+        if (hw->mac.type != ixgbe_mac_82598EB) {
+            config_dcb_rx = DCB_RX_CONFIG;
+            /*
+             *get dcb and VT rx configuration parameters
+             *from rte_eth_conf
+             */
+            ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
+            /*Configure general VMDQ and DCB RX parameters*/
+            ixgbe_vmdq_dcb_configure(dev);
+        }
+        break;
+    case ETH_DCB_RX:
+        dcb_config->vt_mode = false;
+        config_dcb_rx = DCB_RX_CONFIG;
+        /* Get dcb TX configuration parameters from rte_eth_conf */
+        ixgbe_dcb_rx_config(dev,dcb_config);
+        /*Configure general DCB RX parameters*/
+        ixgbe_dcb_rx_hw_config(hw, dcb_config);
+        break;
+    default:
+        PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration\n");
+        break;
+    }
+    switch (dev->data->dev_conf.txmode.mq_mode) {
+    case ETH_VMDQ_DCB_TX:
+        dcb_config->vt_mode = true;
+        config_dcb_tx = DCB_TX_CONFIG;
+        /* get DCB and VT TX configuration parameters from rte_eth_conf */
+        ixgbe_dcb_vt_tx_config(dev,dcb_config);
+        /*Configure general VMDQ and DCB TX parameters*/
+        ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
+        break;
+
+    case ETH_DCB_TX:
+        dcb_config->vt_mode = false;
+        config_dcb_tx = DCB_RX_CONFIG;
+        /*get DCB TX configuration parameters from rte_eth_conf*/
+        ixgbe_dcb_tx_config(dev,dcb_config);
+        /*Configure general DCB TX parameters*/
+        ixgbe_dcb_tx_hw_config(hw, dcb_config);
+        break;
+    default:
+        PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration\n");
+        break;
+    }
+
+    nb_tcs = dcb_config->num_tcs.pfc_tcs;
+    /* Unpack map */
+    ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
+    if(nb_tcs == ETH_4_TCS) {
+        /* Avoid un-configured priority mapping to TC0 */
+        uint8_t j = 4;
+        uint8_t mask = 0xFF;
+        for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
+            mask &= ~ (1 << map[i]);
+        for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
+            if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
+                map[j++] = i;
+            mask >>= 1;
+        }
+        /* Re-configure 4 TCs BW */
+        for (i = 0; i < nb_tcs; i++) {
+            tc = &dcb_config->tc_config[i];
+            tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 100 / nb_tcs;
+            tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 100 / nb_tcs;
+        }
+        for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+            tc = &dcb_config->tc_config[i];
+            tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+            tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
+        }
+    }
+
+    if(config_dcb_rx) {
+        /* Set RX buffer size */
+        pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+        uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
+        for (i = 0 ; i < nb_tcs; i++) {
+            IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+        }
+        /* zero alloc all unused TCs */
+        for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+            IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+        }
+    }
+    if(config_dcb_tx) {
+        /* Only support an equally distributed Tx packet buffer strategy. */
+        uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
+        uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
+        for (i = 0; i < nb_tcs; i++) {
+            IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+            IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+        }
+        /* Clear unused TCs, if any, to zero buffer size*/
+        for (; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+            IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+            IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+        }
+    }
+
+    /*Calculates traffic class credits*/
+    ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+        IXGBE_DCB_TX_CONFIG);
+    ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+        IXGBE_DCB_RX_CONFIG);
+
+    if(config_dcb_rx) {
+        /* Unpack CEE standard containers */
+        ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
+        ixgbe_dcb_unpack_max_cee(dcb_config, max);
+        ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
+        ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
+        /* Configure PG(ETS) RX */
+        ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
+    }
+
+    if(config_dcb_tx) {
+        /* Unpack CEE standard containers */
+        ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+        ixgbe_dcb_unpack_max_cee(dcb_config, max);
+        ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+        ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+        /* Configure PG(ETS) TX */
+        ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
+    }
+
+    /*Configure queue statistics registers*/
+    ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
+
+    /* Check if the PFC is supported */
+    if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+        pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
+        for (i = 0; i < nb_tcs; i++) {
+            /*
+             * If the TC count is 8,and the default high_water is 48,
+             * the low_water is 16 as default.
+             */
+            hw->fc.high_water[i] = (pbsize * 3 ) / 4;
+            hw->fc.low_water[i] = pbsize / 4;
+            /* Enable pfc for this TC */
+            tc = &dcb_config->tc_config[i];
+            tc->pfc = ixgbe_dcb_pfc_enabled;
+        }
+        ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+        if(dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+            pfc_en &= 0x0F;
+        ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
+    }
+
+    return ret;
+}
+
+/**
+ * ixgbe_configure_dcb - Configure DCB Hardware
+ * @dev: pointer to rte_eth_dev
+ */
+void ixgbe_configure_dcb(struct rte_eth_dev *dev)
+{
+    struct ixgbe_dcb_config *dcb_cfg =
+        IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+
+    PMD_INIT_FUNC_TRACE();
+    /** Configure DCB hardware **/
+    if(((dev->data->dev_conf.rxmode.mq_mode != ETH_RSS) &&
+        (dev->data->nb_rx_queues == ETH_DCB_NUM_QUEUES))||
+        ((dev->data->dev_conf.txmode.mq_mode != ETH_DCB_NONE) &&
+        (dev->data->nb_tx_queues == ETH_DCB_NUM_QUEUES))) {
+        ixgbe_dcb_hw_configure(dev,dcb_cfg);
+    }
+    return;
+}
+
 static int
 ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 {
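For reference, a minimal sketch (not part of the commit) of the port configuration that steers rte_eth_dev_configure() into the DCB paths above. It only uses the rte_eth_conf fields and ETH_* constants that the new code reads, assumes the ethdev definitions of the same era, and maps each of the eight user priorities onto the traffic class of the same index.

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: non-VT DCB on both RX and TX with 8 TCs and PFC enabled. */
static void dcb_port_conf(struct rte_eth_conf *conf)
{
    uint8_t up;  /* user priority 0..7 */

    memset(conf, 0, sizeof(*conf));
    conf->rxmode.mq_mode = ETH_DCB_RX;
    conf->txmode.mq_mode = ETH_DCB_TX;
    conf->dcb_capability_en = ETH_DCB_PFC_SUPPORT;

    conf->rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_8_TCS;
    conf->tx_adv_conf.dcb_tx_conf.nb_tcs = ETH_8_TCS;
    for (up = 0; up < ETH_DCB_NUM_USER_PRIORITIES; up++) {
        /* user priority 'up' -> traffic class 'up' */
        conf->rx_adv_conf.dcb_rx_conf.dcb_queue[up] = up;
        conf->tx_adv_conf.dcb_tx_conf.dcb_queue[up] = up;
    }
}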