ethdev: rename DCB field in config structs
Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Jijiang Liu <jijiang.liu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
This commit is contained in:
parent
198a3c339a
commit
cb60ede6e3
@ -1878,8 +1878,8 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
|
||||
vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
|
||||
}
|
||||
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
|
||||
vmdq_rx_conf.dcb_queue[i] = i;
|
||||
vmdq_tx_conf.dcb_queue[i] = i;
|
||||
vmdq_rx_conf.dcb_tc[i] = i;
|
||||
vmdq_tx_conf.dcb_tc[i] = i;
|
||||
}
|
||||
|
||||
/*set DCB mode of RX and TX of multiple queues*/
|
||||
@ -1909,8 +1909,8 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
|
||||
tx_conf.nb_tcs = dcb_conf->num_tcs;
|
||||
|
||||
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
|
||||
rx_conf.dcb_queue[i] = i;
|
||||
tx_conf.dcb_queue[i] = i;
|
||||
rx_conf.dcb_tc[i] = i;
|
||||
tx_conf.dcb_tc[i] = i;
|
||||
}
|
||||
eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
|
||||
eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
|
||||
|
@ -122,6 +122,10 @@ API Changes
|
||||
* The deprecated flow director API is removed.
|
||||
It was replaced by rte_eth_dev_filter_ctrl().
|
||||
|
||||
* The dcb_queue field is renamed to dcb_tc in the following DCB configuration
|
||||
structures: rte_eth_dcb_rx_conf, rte_eth_dcb_tx_conf,
|
||||
rte_eth_vmdq_dcb_conf, rte_eth_vmdq_dcb_tx_conf.
|
||||
|
||||
* The function rte_eal_pci_close_one() is removed.
|
||||
It was replaced by rte_eal_pci_detach().
|
||||
|
||||
|
@ -2928,7 +2928,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
|
||||
* mapping is done with 3 bits per priority,
|
||||
* so shift by i*3 each time
|
||||
*/
|
||||
queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
|
||||
queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
|
||||
|
||||
IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
|
||||
|
||||
@ -3063,7 +3063,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
|
||||
}
|
||||
/* User Priority to Traffic Class mapping */
|
||||
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
|
||||
j = vmdq_rx_conf->dcb_queue[i];
|
||||
j = vmdq_rx_conf->dcb_tc[i];
|
||||
tc = &dcb_config->tc_config[j];
|
||||
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
|
||||
(uint8_t)(1 << j);
|
||||
@ -3091,7 +3091,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
|
||||
|
||||
/* User Priority to Traffic Class mapping */
|
||||
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
|
||||
j = vmdq_tx_conf->dcb_queue[i];
|
||||
j = vmdq_tx_conf->dcb_tc[i];
|
||||
tc = &dcb_config->tc_config[j];
|
||||
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
|
||||
(uint8_t)(1 << j);
|
||||
@ -3113,7 +3113,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
|
||||
|
||||
/* User Priority to Traffic Class mapping */
|
||||
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
|
||||
j = rx_conf->dcb_queue[i];
|
||||
j = rx_conf->dcb_tc[i];
|
||||
tc = &dcb_config->tc_config[j];
|
||||
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
|
||||
(uint8_t)(1 << j);
|
||||
@ -3134,7 +3134,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
|
||||
|
||||
/* User Priority to Traffic Class mapping */
|
||||
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
|
||||
j = tx_conf->dcb_queue[i];
|
||||
j = tx_conf->dcb_tc[i];
|
||||
tc = &dcb_config->tc_config[j];
|
||||
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
|
||||
(uint8_t)(1 << j);
|
||||
|
@ -107,7 +107,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
|
||||
.default_pool = 0,
|
||||
.nb_pool_maps = 0,
|
||||
.pool_map = {{0, 0},},
|
||||
.dcb_queue = {0},
|
||||
.dcb_tc = {0},
|
||||
},
|
||||
},
|
||||
};
|
||||
@ -144,7 +144,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf, enum rte_eth_nb_pools num_pools)
|
||||
conf.pool_map[i].pools = 1 << (i % num_pools);
|
||||
}
|
||||
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
|
||||
conf.dcb_queue[i] = (uint8_t)(i % (NUM_QUEUES/num_pools));
|
||||
conf.dcb_tc[i] = (uint8_t)(i % (NUM_QUEUES/num_pools));
|
||||
}
|
||||
(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
|
||||
(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
|
||||
|
@ -543,20 +543,20 @@ enum rte_eth_nb_pools {
|
||||
/* This structure may be extended in future. */
|
||||
struct rte_eth_dcb_rx_conf {
|
||||
enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
|
||||
uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
|
||||
/**< Possible DCB queue,4 or 8. */
|
||||
/** Traffic class each UP mapped to. */
|
||||
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
|
||||
};
|
||||
|
||||
struct rte_eth_vmdq_dcb_tx_conf {
|
||||
enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
|
||||
uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
|
||||
/**< Possible DCB queue,4 or 8. */
|
||||
/** Traffic class each UP mapped to. */
|
||||
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
|
||||
};
|
||||
|
||||
struct rte_eth_dcb_tx_conf {
|
||||
enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
|
||||
uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
|
||||
/**< Possible DCB queue,4 or 8. */
|
||||
/** Traffic class each UP mapped to. */
|
||||
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
|
||||
};
|
||||
|
||||
struct rte_eth_vmdq_tx_conf {
|
||||
@ -583,7 +583,7 @@ struct rte_eth_vmdq_dcb_conf {
|
||||
uint16_t vlan_id; /**< The vlan id of the received frame */
|
||||
uint64_t pools; /**< Bitmask of pools for packet rx */
|
||||
} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
|
||||
uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
|
||||
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
|
||||
/**< Selects a queue in a pool */
|
||||
};
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user