ethdev: move the multi-queue mode check to specific drivers

Different NICs have their own constraints on the multi-queue
configuration, so move the checking from the ethdev lib to the
specific drivers.

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Jijiang Liu <jijiang.liu@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
Commit 27b609cbd1 (parent cb60ede6e3)
Author: Jingjing Wu, 2015-10-31 23:57:24 +08:00
Committed by: Thomas Monjalon
4 changed files with 257 additions and 200 deletions
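For context, a minimal application-side sketch (hypothetical code, not part of the patch) of where the relocated check now takes effect: an invalid multi-queue setup is rejected from inside the PMD's dev_configure callback rather than by the ethdev library.

#include <rte_ethdev.h>

/* Hypothetical usage sketch: configure a port for VMDQ+DCB.
 * On ixgbe, ixgbe_check_mq_mode() now returns -EINVAL from within
 * rte_eth_dev_configure() unless the queue counts match
 * IXGBE_VMDQ_DCB_NB_QUEUES (128). */
static int
configure_vmdq_dcb(uint8_t port_id)
{
        struct rte_eth_conf conf = {
                .rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB,
                .rx_adv_conf.vmdq_dcb_conf.nb_queue_pools = ETH_16_POOLS,
        };

        /* The driver-specific check runs before any queue setup. */
        return rte_eth_dev_configure(port_id, 128, 128, &conf);
}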

drivers/net/e1000/igb_ethdev.c

@@ -864,17 +864,99 @@ rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
return (0);
}
static int
igb_check_mq_mode(struct rte_eth_dev *dev)
{
enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
uint16_t nb_rx_q = dev->data->nb_rx_queues;
uint16_t nb_tx_q = dev->data->nb_tx_queues;
if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
tx_mq_mode == ETH_MQ_TX_DCB ||
tx_mq_mode == ETH_MQ_TX_VMDQ_DCB) {
PMD_INIT_LOG(ERR, "DCB mode is not supported.");
return -EINVAL;
}
if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
/* Check multi-queue mode.
 * To avoid breaking software we accept ETH_MQ_RX_NONE as this
 * might be used to turn off VLAN filtering.
 */
if (rx_mq_mode == ETH_MQ_RX_NONE ||
rx_mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
} else {
/* Only support one queue on VFs.
* RSS together with SRIOV is not supported.
*/
PMD_INIT_LOG(ERR, "SRIOV is active,"
" wrong mq_mode rx %d.",
rx_mq_mode);
return -EINVAL;
}
/* The TX mode is not used by this driver, so an unsupported mode only triggers a warning. */
if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
/* SRIOV only works in VMDq enable mode */
PMD_INIT_LOG(WARNING, "SRIOV is active,"
" TX mode %d is not supported. "
" Driver will behave as %d mode.",
tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
}
/* check valid queue number */
if ((nb_rx_q > 1) || (nb_tx_q > 1)) {
PMD_INIT_LOG(ERR, "SRIOV is active,"
" only support one queue on VFs.");
return -EINVAL;
}
} else {
/* To avoid breaking software that sets an invalid mode, only
 * display a warning if an invalid mode is used.
 */
if (rx_mq_mode != ETH_MQ_RX_NONE &&
rx_mq_mode != ETH_MQ_RX_VMDQ_ONLY &&
rx_mq_mode != ETH_MQ_RX_RSS) {
/* RSS together with VMDq not supported*/
PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
rx_mq_mode);
return -EINVAL;
}
if (tx_mq_mode != ETH_MQ_TX_NONE &&
tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
PMD_INIT_LOG(WARNING, "TX mode %d is not supported."
" Due to txmode is meaningless in this"
" driver, just ignore.",
tx_mq_mode);
}
}
return 0;
}
static int
eth_igb_configure(struct rte_eth_dev *dev)
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int ret;
PMD_INIT_FUNC_TRACE();
/* multiple queue mode checking */
ret = igb_check_mq_mode(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
ret);
return ret;
}
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
PMD_INIT_FUNC_TRACE();
return 0;
}
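As a side note, eth_igb_configure() is reached through the driver's ops table; a sketch of the assumed wiring (following the eth_dev_ops pattern the igb PMD already uses):

/* Sketch, assuming the driver's usual ops wiring: igb_check_mq_mode()
 * thus runs from rte_eth_dev_configure() before any queue setup, so a
 * bad mq_mode fails early. */
static const struct eth_dev_ops eth_igb_ops = {
        .dev_configure = eth_igb_configure,
        /* ... remaining callbacks unchanged ... */
};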
static int

drivers/net/ixgbe/ixgbe_ethdev.c

@@ -1639,6 +1639,169 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
static int
ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
switch (nb_rx_q) {
case 1:
case 2:
RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
break;
case 4:
RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
break;
default:
return -EINVAL;
}
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
return 0;
}
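/* Illustrative example (numbers are not from the patch): with
 * max_vfs = 31 and nb_rx_q = 4, the device runs as ETH_32_POOLS with
 * 4 queues per pool, and the PF's default pool starts at RX queue
 * index 31 * 4 = 124. */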
static int
ixgbe_check_mq_mode(struct rte_eth_dev *dev)
{
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
uint16_t nb_rx_q = dev->data->nb_rx_queues;
uint16_t nb_tx_q = dev->data->nb_tx_queues;
if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
/* check multi-queue mode */
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
case ETH_MQ_RX_VMDQ_DCB_RSS:
/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
PMD_INIT_LOG(ERR, "SRIOV active,"
" unsupported mq_mode rx %d.",
dev_conf->rxmode.mq_mode);
return -EINVAL;
case ETH_MQ_RX_RSS:
case ETH_MQ_RX_VMDQ_RSS:
dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
PMD_INIT_LOG(ERR, "SRIOV is active,"
" invalid queue number"
" for VMDQ RSS, allowed"
" value are 1, 2 or 4.");
return -EINVAL;
}
break;
case ETH_MQ_RX_VMDQ_ONLY:
case ETH_MQ_RX_NONE:
/* if no mq mode is configured, use the default scheme */
dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
break;
default: /* ETH_MQ_RX_DCB or ETH_MQ_RX_DCB_RSS */
/* SRIOV only works in VMDq enable mode */
PMD_INIT_LOG(ERR, "SRIOV is active,"
" wrong mq_mode rx %d.",
dev_conf->rxmode.mq_mode);
return -EINVAL;
}
switch (dev_conf->txmode.mq_mode) {
case ETH_MQ_TX_VMDQ_DCB:
/* DCB VMDQ in SRIOV mode, not implemented yet */
PMD_INIT_LOG(ERR, "SRIOV is active,"
" unsupported VMDQ mq_mode tx %d.",
dev_conf->txmode.mq_mode);
return -EINVAL;
default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
break;
}
/* check valid queue number */
if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
(nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
PMD_INIT_LOG(ERR, "SRIOV is active,"
" queue number must less equal to %d.",
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
return -EINVAL;
}
} else {
/* check configuration for VMDq+DCB mode */
if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_conf *conf;
if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
IXGBE_VMDQ_DCB_NB_QUEUES);
return -EINVAL;
}
conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
if (!(conf->nb_queue_pools == ETH_16_POOLS ||
conf->nb_queue_pools == ETH_32_POOLS)) {
PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
" nb_queue_pools must be %d or %d.",
ETH_16_POOLS, ETH_32_POOLS);
return -EINVAL;
}
}
if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_tx_conf *conf;
if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
IXGBE_VMDQ_DCB_NB_QUEUES);
return -EINVAL;
}
conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
if (!(conf->nb_queue_pools == ETH_16_POOLS ||
conf->nb_queue_pools == ETH_32_POOLS)) {
PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
" nb_queue_pools != %d and"
" nb_queue_pools != %d.",
ETH_16_POOLS, ETH_32_POOLS);
return -EINVAL;
}
}
/* For DCB mode check our configuration before we go further */
if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
const struct rte_eth_dcb_rx_conf *conf;
if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
IXGBE_DCB_NB_QUEUES);
return -EINVAL;
}
conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
" and nb_tcs != %d.",
ETH_4_TCS, ETH_8_TCS);
return -EINVAL;
}
}
if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
const struct rte_eth_dcb_tx_conf *conf;
if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
IXGBE_DCB_NB_QUEUES);
return -EINVAL;
}
conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
" and nb_tcs != %d.",
ETH_4_TCS, ETH_8_TCS);
return -EINVAL;
}
}
}
return 0;
}
static int
ixgbe_dev_configure(struct rte_eth_dev *dev)
{
@@ -1646,8 +1809,16 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
int ret;
PMD_INIT_FUNC_TRACE();
/* multiple queue mode checking */
ret = ixgbe_check_mq_mode(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
ret);
return ret;
}
/* set flag to update link status after init */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;

drivers/net/ixgbe/ixgbe_ethdev.h

@@ -57,6 +57,9 @@
#define IXGBE_VFTA_SIZE 128
#define IXGBE_VLAN_TAG_SIZE 4
#define IXGBE_MAX_RX_QUEUE_NUM 128
#define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM
#define IXGBE_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM
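/* Note: both layouts span the full RX queue space on this hardware
 * (e.g. 16 pools x 8 TCs or 32 pools x 4 TCs = 128 queues), hence
 * both macros alias IXGBE_MAX_RX_QUEUE_NUM. */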
#ifndef NBBY
#define NBBY 8 /* number of bits in a byte */
#endif

lib/librte_ether/rte_ethdev.c

@@ -880,197 +880,6 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
return 0;
}
static int
rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
switch (nb_rx_q) {
case 1:
case 2:
RTE_ETH_DEV_SRIOV(dev).active =
ETH_64_POOLS;
break;
case 4:
RTE_ETH_DEV_SRIOV(dev).active =
ETH_32_POOLS;
break;
default:
return -EINVAL;
}
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
dev->pci_dev->max_vfs * nb_rx_q;
return 0;
}
static int
rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
/* check multi-queue mode */
if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
(dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
(dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
/* SRIOV only works in VMDq enable mode */
PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
" SRIOV active, "
"wrong VMDQ mq_mode rx %u tx %u\n",
port_id,
dev_conf->rxmode.mq_mode,
dev_conf->txmode.mq_mode);
return -EINVAL;
}
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
case ETH_MQ_RX_VMDQ_DCB_RSS:
/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
" SRIOV active, "
"unsupported VMDQ mq_mode rx %u\n",
port_id, dev_conf->rxmode.mq_mode);
return -EINVAL;
case ETH_MQ_RX_RSS:
PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
" SRIOV active, "
"Rx mq mode is changed from:"
"mq_mode %u into VMDQ mq_mode %u\n",
port_id,
dev_conf->rxmode.mq_mode,
dev->data->dev_conf.rxmode.mq_mode);
case ETH_MQ_RX_VMDQ_RSS:
dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
PMD_DEBUG_TRACE("ethdev port_id=%d"
" SRIOV active, invalid queue"
" number for VMDQ RSS, allowed"
" value are 1, 2 or 4\n",
port_id);
return -EINVAL;
}
break;
default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
/* if no mq mode is configured, use the default scheme */
dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
break;
}
switch (dev_conf->txmode.mq_mode) {
case ETH_MQ_TX_VMDQ_DCB:
/* DCB VMDQ in SRIOV mode, not implemented yet */
PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
" SRIOV active, "
"unsupported VMDQ mq_mode tx %u\n",
port_id, dev_conf->txmode.mq_mode);
return -EINVAL;
default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
/* if no mq mode is configured, use the default scheme */
dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
break;
}
/* check valid queue number */
if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
(nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
"queue number must less equal to %d\n",
port_id,
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
return -EINVAL;
}
} else {
/* For VMDq+DCB mode check our configuration before we go further */
if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_conf *conf;
if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
"!= %d\n",
port_id, ETH_VMDQ_DCB_NUM_QUEUES);
return -EINVAL;
}
conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
if (!(conf->nb_queue_pools == ETH_16_POOLS ||
conf->nb_queue_pools == ETH_32_POOLS)) {
PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
"nb_queue_pools must be %d or %d\n",
port_id, ETH_16_POOLS, ETH_32_POOLS);
return -EINVAL;
}
}
if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
const struct rte_eth_vmdq_dcb_tx_conf *conf;
if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
"!= %d\n",
port_id, ETH_VMDQ_DCB_NUM_QUEUES);
return -EINVAL;
}
conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
if (!(conf->nb_queue_pools == ETH_16_POOLS ||
conf->nb_queue_pools == ETH_32_POOLS)) {
PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
"nb_queue_pools != %d or nb_queue_pools "
"!= %d\n",
port_id, ETH_16_POOLS, ETH_32_POOLS);
return -EINVAL;
}
}
/* For DCB mode check our configuration before we go further */
if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
const struct rte_eth_dcb_rx_conf *conf;
if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
"!= %d\n",
port_id, ETH_DCB_NUM_QUEUES);
return -EINVAL;
}
conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
"nb_tcs != %d or nb_tcs "
"!= %d\n",
port_id, ETH_4_TCS, ETH_8_TCS);
return -EINVAL;
}
}
if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
const struct rte_eth_dcb_tx_conf *conf;
if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
"!= %d\n",
port_id, ETH_DCB_NUM_QUEUES);
return -EINVAL;
}
conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
"nb_tcs != %d or nb_tcs "
"!= %d\n",
port_id, ETH_4_TCS, ETH_8_TCS);
return -EINVAL;
}
}
}
return 0;
}
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
@@ -1182,14 +991,6 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
ETHER_MAX_LEN;
}
/* multiple queue mode checking */
diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
if (diag != 0) {
PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
port_id, diag);
return diag;
}
/*
* Setup new number of RX/TX queues and reconfigure device.
*/
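For completeness, a hedged sketch of the library path that replaces the removed check (based on the existing ethdev flow further down in rte_eth_dev_configure(), not shown in this hunk): validation is now delegated to the PMD through the dev_configure hook.

/* Sketch (assumed, from the surrounding ethdev code): the relocated
 * driver checks execute when the library dispatches to the PMD. */
diag = (*dev->dev_ops->dev_configure)(dev);
if (diag != 0) {
        PMD_DEBUG_TRACE("port%d dev_configure = %d\n", port_id, diag);
        return diag;
}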