net/idpf: support device start and stop

Add dev ops dev_start, dev_stop and link_update.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
This commit is contained in:
Junfeng Guo 2022-10-31 08:33:33 +00:00 committed by Thomas Monjalon
parent 9c47c29739
commit 14aa6ed8f2
2 changed files with 75 additions and 0 deletions

View File

@ -29,6 +29,22 @@ static const char * const idpf_valid_args[] = {
NULL
};
static int
idpf_dev_link_update(struct rte_eth_dev *dev,
		     __rte_unused int wait_to_complete)
{
	struct rte_eth_link link;

	/*
	 * Report a synthetic link: no negotiated speed, full duplex.
	 * link_status is left at 0 (down) by the memset; the device does
	 * not yet expose real link state at this point.
	 */
	memset(&link, 0, sizeof(link));
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	/* Autoneg is advertised unless the application pinned a fixed speed. */
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      RTE_ETH_LINK_SPEED_FIXED);

	return rte_eth_linkstatus_set(dev, &link);
}
static int
idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@ -267,6 +283,42 @@ idpf_dev_configure(struct rte_eth_dev *dev)
return 0;
}
static int
idpf_dev_start(struct rte_eth_dev *dev)
{
	struct idpf_vport *vport = dev->data->dev_private;
	int ret;

	/*
	 * The check below rejects only mtu > max_mtu, i.e. mtu == max_mtu
	 * is accepted, so the message must say "less than or equal to"
	 * (the original wording "less than" was misleading).
	 */
	if (dev->data->mtu > vport->max_mtu) {
		PMD_DRV_LOG(ERR, "MTU should be less than or equal to %d",
			    vport->max_mtu);
		return -EINVAL;
	}

	/* Max frame length the HW must pass = MTU plus L2 overhead. */
	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;

	/* TODO: start queues */

	ret = idpf_vc_ena_dis_vport(vport, true);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to enable vport");
		return ret;
	}

	return 0;
}
static int
idpf_dev_stop(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
idpf_vc_ena_dis_vport(vport, false);
/* TODO: stop queues */
return 0;
}
static int
idpf_dev_close(struct rte_eth_dev *dev)
{
@ -656,6 +708,9 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
.rx_queue_setup = idpf_rx_queue_setup,
.tx_queue_setup = idpf_tx_queue_setup,
.dev_infos_get = idpf_dev_info_get,
.dev_start = idpf_dev_start,
.dev_stop = idpf_dev_stop,
.link_update = idpf_dev_link_update,
};
static uint16_t

View File

@ -334,6 +334,11 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
return -EINVAL;
if (rx_conf->rx_deferred_start) {
PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
return -EINVAL;
}
/* Setup Rx description queue */
rxq = rte_zmalloc_socket("idpf rxq",
sizeof(struct idpf_rx_queue),
@ -465,6 +470,11 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
return -EINVAL;
if (rx_conf->rx_deferred_start) {
PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
return -EINVAL;
}
/* Setup Rx description queue */
rxq = rte_zmalloc_socket("idpf rxq",
sizeof(struct idpf_rx_queue),
@ -569,6 +579,11 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
return -EINVAL;
if (tx_conf->tx_deferred_start) {
PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
return -EINVAL;
}
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("idpf split txq",
sizeof(struct idpf_tx_queue),
@ -691,6 +706,11 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
return -EINVAL;
if (tx_conf->tx_deferred_start) {
PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
return -EINVAL;
}
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("idpf txq",
sizeof(struct idpf_tx_queue),