net/idpf: support queue stop

Add support for these device ops:
 - rx_queue_stop
 - tx_queue_stop

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
commit c03af6fa0e (parent c2494d783d)
Author: Junfeng Guo <junfeng.guo@intel.com>
Date: 2022-10-31 08:33:35 +00:00
Committed by: Thomas Monjalon
4 changed files with 242 additions and 5 deletions
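For context: once the PMD wires up these ops, applications reach them through the generic ethdev queue-control API. A minimal usage sketch (port_id and queue_id values are illustrative; pause_queue_pair is a hypothetical helper, not part of this patch):

#include <rte_ethdev.h>

/* Stop one Rx/Tx queue pair on a running port. The ethdev layer
 * forwards these calls to the PMD's rx_queue_stop/tx_queue_stop
 * dev ops added by this commit. */
static int
pause_queue_pair(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;

	return rte_eth_dev_tx_queue_stop(port_id, queue_id);
}

On success each queue ends up marked RTE_ETH_QUEUE_STATE_STOPPED, which is the state the driver records in the code below.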

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c

@@ -324,7 +324,8 @@ idpf_dev_start(struct rte_eth_dev *dev)
 	if (dev->data->mtu > vport->max_mtu) {
 		PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_mtu;
 	}
 
 	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
@@ -332,17 +333,21 @@ idpf_dev_start(struct rte_eth_dev *dev)
 	ret = idpf_start_queues(dev);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to start queues");
-		return ret;
+		goto err_mtu;
 	}
 
 	ret = idpf_vc_ena_dis_vport(vport, true);
 	if (ret != 0) {
 		PMD_DRV_LOG(ERR, "Failed to enable vport");
-		/* TODO: stop queues */
-		return ret;
+		goto err_vport;
 	}
 
 	return 0;
+
+err_vport:
+	idpf_stop_queues(dev);
+err_mtu:
+	return ret;
 }
 
 static int
@@ -352,7 +357,7 @@ idpf_dev_stop(struct rte_eth_dev *dev)
 	idpf_vc_ena_dis_vport(vport, false);
 
-	/* TODO: stop queues */
+	idpf_stop_queues(dev);
 
 	return 0;
 }
@@ -751,6 +756,8 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
 	.link_update = idpf_dev_link_update,
 	.rx_queue_start = idpf_rx_queue_start,
 	.tx_queue_start = idpf_tx_queue_start,
+	.rx_queue_stop = idpf_rx_queue_stop,
+	.tx_queue_stop = idpf_tx_queue_stop,
 };
 
 static uint16_t

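Taken together, the two idpf_dev_start() hunks above produce the following unwind structure (a condensed reconstruction from the diff; the setup code between the hunks is omitted):

static int
idpf_dev_start(struct rte_eth_dev *dev)
{
	struct idpf_vport *vport = dev->data->dev_private;
	int ret;

	if (dev->data->mtu > vport->max_mtu) {
		PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
		ret = -EINVAL;
		goto err_mtu;
	}

	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;

	ret = idpf_start_queues(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to start queues");
		goto err_mtu;		/* nothing is running yet, just report */
	}

	ret = idpf_vc_ena_dis_vport(vport, true);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to enable vport");
		goto err_vport;		/* queues are running and must be undone */
	}

	return 0;

err_vport:
	idpf_stop_queues(dev);
err_mtu:
	return ret;
}

This replaces the old early returns (and the "TODO: stop queues" comment) with a single exit path that stops exactly as much as was started.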
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c

@@ -71,6 +71,55 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
 	return 0;
 }
 
+static void
+release_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+	uint16_t i;
+
+	if (rxq->sw_ring == NULL)
+		return;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		if (rxq->sw_ring[i] != NULL) {
+			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+			rxq->sw_ring[i] = NULL;
+		}
+	}
+}
+
+static void
+release_txq_mbufs(struct idpf_tx_queue *txq)
+{
+	uint16_t nb_desc, i;
+
+	if (txq == NULL || txq->sw_ring == NULL) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+		return;
+	}
+
+	if (txq->sw_nb_desc != 0) {
+		/* For split queue model, descriptor ring */
+		nb_desc = txq->sw_nb_desc;
+	} else {
+		/* For single queue model */
+		nb_desc = txq->nb_tx_desc;
+	}
+	for (i = 0; i < nb_desc; i++) {
+		if (txq->sw_ring[i].mbuf != NULL) {
+			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+			txq->sw_ring[i].mbuf = NULL;
+		}
+	}
+}
+
+static const struct idpf_rxq_ops def_rxq_ops = {
+	.release_mbufs = release_rxq_mbufs,
+};
+
+static const struct idpf_txq_ops def_txq_ops = {
+	.release_mbufs = release_txq_mbufs,
+};
+
 static void
 reset_split_rx_descq(struct idpf_rx_queue *rxq)
 {
@@ -122,6 +171,14 @@ reset_split_rx_bufq(struct idpf_rx_queue *rxq)
 	rxq->bufq2 = NULL;
 }
 
+static inline void
+reset_split_rx_queue(struct idpf_rx_queue *rxq)
+{
+	reset_split_rx_descq(rxq);
+	reset_split_rx_bufq(rxq->bufq1);
+	reset_split_rx_bufq(rxq->bufq2);
+}
+
 static void
 reset_single_rx_queue(struct idpf_rx_queue *rxq)
 {
@@ -301,6 +358,7 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
 	bufq->q_set = true;
 	bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
 			 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+	bufq->ops = &def_rxq_ops;
 
 	/* TODO: allow bulk or vec */
@@ -527,6 +585,7 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	dev->data->rx_queues[queue_idx] = rxq;
 	rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
 			queue_idx * vport->chunks_info.rx_qtail_spacing);
+	rxq->ops = &def_rxq_ops;
 
 	return 0;
 }
@@ -621,6 +680,7 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	reset_split_tx_descq(txq);
 	txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
 			queue_idx * vport->chunks_info.tx_qtail_spacing);
+	txq->ops = &def_txq_ops;
 
 	/* Allocate the TX completion queue data structure. */
 	txq->complq = rte_zmalloc_socket("idpf splitq cq",
@@ -748,6 +808,7 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	dev->data->tx_queues[queue_idx] = txq;
 	txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
 			queue_idx * vport->chunks_info.tx_qtail_spacing);
+	txq->ops = &def_txq_ops;
 
 	return 0;
 }
@@ -979,3 +1040,90 @@ idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	return err;
 }
+
+int
+idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_rx_queue *rxq;
+	int err;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	err = idpf_switch_queue(vport, rx_queue_id, true, false);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+			    rx_queue_id);
+		return err;
+	}
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		rxq->ops->release_mbufs(rxq);
+		reset_single_rx_queue(rxq);
+	} else {
+		rxq->bufq1->ops->release_mbufs(rxq->bufq1);
+		rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+		reset_split_rx_queue(rxq);
+	}
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+int
+idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_tx_queue *txq;
+	int err;
+
+	if (tx_queue_id >= dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	err = idpf_switch_queue(vport, tx_queue_id, false, false);
+	if (err != 0) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+			    tx_queue_id);
+		return err;
+	}
+
+	txq = dev->data->tx_queues[tx_queue_id];
+	txq->ops->release_mbufs(txq);
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+		reset_single_tx_queue(txq);
+	} else {
+		reset_split_tx_descq(txq);
+		reset_split_tx_complq(txq->complq);
+	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+	return 0;
+}
+
+void
+idpf_stop_queues(struct rte_eth_dev *dev)
+{
+	struct idpf_rx_queue *rxq;
+	struct idpf_tx_queue *txq;
+	int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		if (rxq == NULL)
+			continue;
+
+		if (idpf_rx_queue_stop(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Fail to stop Rx queue %d", i);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (txq == NULL)
+			continue;
+
+		if (idpf_tx_queue_stop(dev, i) != 0)
+			PMD_DRV_LOG(WARNING, "Fail to stop Tx queue %d", i);
+	}
+}
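A note on why release_rxq_mbufs() and release_txq_mbufs() walk their rings differently: the Rx software ring holds bare mbuf pointers, while the Tx software ring holds a per-slot entry with the mbuf embedded in it. A condensed sketch of the two layouts (the Tx entry's extra fields are illustrative, not the driver's exact definition):

#include <rte_mbuf.h>

struct rx_sw_ring_sketch {
	struct rte_mbuf **sw_ring;	/* rxq->sw_ring[i] is the mbuf itself */
	uint16_t nb_rx_desc;		/* so the Rx walk is bounded by nb_rx_desc */
};

struct tx_entry_sketch {
	struct rte_mbuf *mbuf;		/* freed via txq->sw_ring[i].mbuf */
	uint16_t next_id;		/* illustrative bookkeeping fields */
	uint16_t last_id;
};

This is also why the Tx helper picks nb_desc from sw_nb_desc when it is nonzero: in the split queue model the software ring is sized independently of the hardware descriptor ring.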

diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h

@@ -51,6 +51,7 @@ struct idpf_rx_queue {
 	bool q_set;             /* if rx queue has been configured */
 	bool q_started;         /* if rx queue has been started */
 	bool rx_deferred_start; /* don't start this queue in dev start */
+	const struct idpf_rxq_ops *ops;
 
 	/* only valid for split queue mode */
 	uint8_t expected_gen_id;
@@ -97,6 +98,7 @@ struct idpf_tx_queue {
 	bool q_set;             /* if tx queue has been configured */
 	bool q_started;         /* if tx queue has been started */
 	bool tx_deferred_start; /* don't start this queue in dev start */
+	const struct idpf_txq_ops *ops;
 
 	/* only valid for split queue mode */
 	uint16_t sw_nb_desc;
@@ -107,16 +109,27 @@ struct idpf_tx_queue {
 	struct idpf_tx_queue *complq;
 };
 
+struct idpf_rxq_ops {
+	void (*release_mbufs)(struct idpf_rx_queue *rxq);
+};
+
+struct idpf_txq_ops {
+	void (*release_mbufs)(struct idpf_tx_queue *txq);
+};
+
 int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_rxconf *rx_conf,
 			struct rte_mempool *mp);
 int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			uint16_t nb_desc, unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf);
 int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void idpf_stop_queues(struct rte_eth_dev *dev);
 
 #endif /* _IDPF_RXTX_H_ */

diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c

@@ -837,6 +837,75 @@ idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
 	return err;
 }
 
+#define IDPF_RXTX_QUEUE_CHUNKS_NUM	2
+
+int
+idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_del_ena_dis_queues *queue_select;
+	struct virtchnl2_queue_chunk *queue_chunk;
+	uint32_t type;
+	struct idpf_cmd_info args;
+	uint16_t num_chunks;
+	int err, len;
+
+	num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+		num_chunks++;
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+		num_chunks++;
+
+	len = sizeof(struct virtchnl2_del_ena_dis_queues) +
+		sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);
+	queue_select = rte_zmalloc("queue_select", len, 0);
+	if (queue_select == NULL)
+		return -ENOMEM;
+
+	queue_chunk = queue_select->chunks.chunks;
+	queue_select->chunks.num_chunks = num_chunks;
+	queue_select->vport_id = vport->vport_id;
+
+	type = VIRTCHNL2_QUEUE_TYPE_RX;
+	queue_chunk[type].type = type;
+	queue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid;
+	queue_chunk[type].num_queues = vport->num_rx_q;
+
+	type = VIRTCHNL2_QUEUE_TYPE_TX;
+	queue_chunk[type].type = type;
+	queue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid;
+	queue_chunk[type].num_queues = vport->num_tx_q;
+
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+		queue_chunk[type].type = type;
+		queue_chunk[type].start_queue_id =
+			vport->chunks_info.rx_buf_start_qid;
+		queue_chunk[type].num_queues = vport->num_rx_bufq;
+	}
+
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+		queue_chunk[type].type = type;
+		queue_chunk[type].start_queue_id =
+			vport->chunks_info.tx_compl_start_qid;
+		queue_chunk[type].num_queues = vport->num_tx_complq;
+	}
+
+	args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :
+		   VIRTCHNL2_OP_DISABLE_QUEUES;
+	args.in_args = (u8 *)queue_select;
+	args.in_args_size = len;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err != 0)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
+			    enable ? "ENABLE" : "DISABLE");
+
+	rte_free(queue_select);
+	return err;
+}
+
 int
 idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
 {
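One subtlety in idpf_vc_ena_dis_queues() above: queue_chunk[type] uses the virtchnl2 queue-type value itself as the array index. This works under the assumption that virtchnl2.h defines the types as below (worth verifying against your tree):

/* Assumed virtchnl2.h queue-type values; treat these as an
 * assumption, not a quote from the header. */
#define VIRTCHNL2_QUEUE_TYPE_TX			0
#define VIRTCHNL2_QUEUE_TYPE_RX			1
#define VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION	2
#define VIRTCHNL2_QUEUE_TYPE_RX_BUFFER		3

With both queue models single, num_chunks is 2 and only slots 0-1 are filled; with both models split, num_chunks is 4 and slots 0-3 are filled. The scheme therefore relies on Rx and Tx sharing the same queue model: a split Rx paired with a single Tx would allocate only 3 chunks yet index slot 3.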