net/idpf: support queue start

Add support for these device ops:
 - rx_queue_start
 - tx_queue_start
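
For context, a minimal sketch of how an application exercises these ops through
the ethdev layer once the driver exposes them; port_id, nb_rxd and mb_pool are
hypothetical application-side values, and default thresholds are assumed:

#include <rte_ethdev.h>
#include <rte_lcore.h>

/* Mark queue 0 as deferred-start so idpf_start_queues() skips it in
 * dev_start; the application then starts it explicitly, which ethdev
 * dispatches to the driver's rx_queue_start op (idpf_rx_queue_start).
 */
struct rte_eth_rxconf rxconf = { .rx_deferred_start = 1 };

rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(),
		       &rxconf, mb_pool);
rte_eth_dev_start(port_id);             /* queue 0 remains stopped */
rte_eth_dev_rx_queue_start(port_id, 0); /* starts it via the new op */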

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>


@@ -283,6 +283,39 @@ idpf_dev_configure(struct rte_eth_dev *dev)
return 0;
}
static int
idpf_start_queues(struct rte_eth_dev *dev)
{
struct idpf_rx_queue *rxq;
struct idpf_tx_queue *txq;
int err = 0;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
if (txq == NULL || txq->tx_deferred_start)
continue;
err = idpf_tx_queue_start(dev, i);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
return err;
}
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
if (rxq == NULL || rxq->rx_deferred_start)
continue;
err = idpf_rx_queue_start(dev, i);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
return err;
}
}
return err;
}
static int
idpf_dev_start(struct rte_eth_dev *dev)
{
@@ -296,11 +329,16 @@ idpf_dev_start(struct rte_eth_dev *dev)
vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
ret = idpf_start_queues(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to start queues");
return ret;
}
ret = idpf_vc_ena_dis_vport(vport, true);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to enable vport");
/* TODO: stop queues */
return ret;
}
@@ -711,6 +749,8 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
.dev_start = idpf_dev_start,
.dev_stop = idpf_dev_stop,
.link_update = idpf_dev_link_update,
.rx_queue_start = idpf_rx_queue_start,
.tx_queue_start = idpf_tx_queue_start,
};
static uint16_t


@@ -24,7 +24,9 @@
#define IDPF_DEFAULT_TXQ_NUM 16
#define IDPF_INVALID_VPORT_IDX 0xffff
#define IDPF_TXQ_PER_GRP 1
#define IDPF_TX_COMPLQ_PER_GRP 1
#define IDPF_RXQ_PER_GRP 1
#define IDPF_RX_BUFQ_PER_GRP 2
#define IDPF_CTLQ_ID -1
@@ -182,6 +184,13 @@ int idpf_vc_check_api_version(struct idpf_adapter *adapter);
int idpf_vc_get_caps(struct idpf_adapter *adapter);
int idpf_vc_create_vport(struct idpf_adapter *adapter);
int idpf_vc_destroy_vport(struct idpf_vport *vport);
int idpf_vc_config_rxqs(struct idpf_vport *vport);
int idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id);
int idpf_vc_config_txqs(struct idpf_vport *vport);
int idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id);
int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
bool rx, bool on);
int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
uint16_t buf_len, uint8_t *buf);


@@ -334,11 +334,6 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
return -EINVAL;
if (rx_conf->rx_deferred_start) {
PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
return -EINVAL;
}
/* Setup Rx description queue */
rxq = rte_zmalloc_socket("idpf rxq",
sizeof(struct idpf_rx_queue),
@@ -354,6 +349,7 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->rx_free_thresh = rx_free_thresh;
rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
rxq->port_id = dev->data->port_id;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->rx_hdr_len = 0;
rxq->adapter = adapter;
rxq->offloads = offloads;
@@ -470,11 +466,6 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
return -EINVAL;
if (rx_conf->rx_deferred_start) {
PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
return -EINVAL;
}
/* Setup Rx description queue */
rxq = rte_zmalloc_socket("idpf rxq",
sizeof(struct idpf_rx_queue),
@@ -490,6 +481,7 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->rx_free_thresh = rx_free_thresh;
rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
rxq->port_id = dev->data->port_id;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->rx_hdr_len = 0;
rxq->adapter = adapter;
rxq->offloads = offloads;
@@ -579,11 +571,6 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
return -EINVAL;
if (tx_conf->tx_deferred_start) {
PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
return -EINVAL;
}
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("idpf split txq",
sizeof(struct idpf_tx_queue),
@@ -600,6 +587,7 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
txq->port_id = dev->data->port_id;
txq->offloads = offloads;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
/* Allocate software ring */
txq->sw_nb_desc = 2 * nb_desc;
@@ -706,11 +694,6 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
return -EINVAL;
if (tx_conf->tx_deferred_start) {
PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
return -EINVAL;
}
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("idpf txq",
sizeof(struct idpf_tx_queue),
@@ -729,6 +712,7 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
txq->port_id = dev->data->port_id;
txq->offloads = offloads;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
/* Allocate software ring */
txq->sw_ring =
@@ -782,3 +766,216 @@ idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return idpf_tx_split_queue_setup(dev, queue_idx, nb_desc,
socket_id, tx_conf);
}
static int
idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)
{
volatile struct virtchnl2_singleq_rx_buf_desc *rxd;
struct rte_mbuf *mbuf = NULL;
uint64_t dma_addr;
uint16_t i;
for (i = 0; i < rxq->nb_rx_desc; i++) {
mbuf = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(mbuf == NULL)) {
PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
return -ENOMEM;
}
rte_mbuf_refcnt_set(mbuf, 1);
mbuf->next = NULL;
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->nb_segs = 1;
mbuf->port = rxq->port_id;
dma_addr =
rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &((volatile struct virtchnl2_singleq_rx_buf_desc *)(rxq->rx_ring))[i];
rxd->pkt_addr = dma_addr;
rxd->hdr_addr = 0;
rxd->rsvd1 = 0;
rxd->rsvd2 = 0;
rxq->sw_ring[i] = mbuf;
}
return 0;
}
static int
idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)
{
volatile struct virtchnl2_splitq_rx_buf_desc *rxd;
struct rte_mbuf *mbuf = NULL;
uint64_t dma_addr;
uint16_t i;
for (i = 0; i < rxq->nb_rx_desc; i++) {
mbuf = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(mbuf == NULL)) {
PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
return -ENOMEM;
}
rte_mbuf_refcnt_set(mbuf, 1);
mbuf->next = NULL;
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->nb_segs = 1;
mbuf->port = rxq->port_id;
dma_addr =
rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &((volatile struct virtchnl2_splitq_rx_buf_desc *)(rxq->rx_ring))[i];
rxd->qword0.buf_id = i;
rxd->qword0.rsvd0 = 0;
rxd->qword0.rsvd1 = 0;
rxd->pkt_addr = dma_addr;
rxd->hdr_addr = 0;
rxd->rsvd2 = 0;
rxq->sw_ring[i] = mbuf;
}
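/* Leave the tail at the last initialized descriptor; idpf_rx_queue_init()
 * writes this value to the buffer queue's tail register.
 */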
rxq->nb_rx_hold = 0;
rxq->rx_tail = rxq->nb_rx_desc - 1;
return 0;
}
int
idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct idpf_rx_queue *rxq;
int err;
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
rxq = dev->data->rx_queues[rx_queue_id];
if (rxq == NULL || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
rx_queue_id);
return -EINVAL;
}
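/* bufq1 is only allocated in split queue mode, so use it to
 * distinguish the two queue models.
 */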
if (rxq->bufq1 == NULL) {
/* Single queue */
err = idpf_alloc_single_rxq_mbufs(rxq);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
return err;
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
err = idpf_alloc_split_rxq_mbufs(rxq->bufq1);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
return err;
}
err = idpf_alloc_split_rxq_mbufs(rxq->bufq2);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
return err;
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
}
int
idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct idpf_vport *vport = dev->data->dev_private;
struct idpf_rx_queue *rxq =
dev->data->rx_queues[rx_queue_id];
int err = 0;
err = idpf_vc_config_rxq(vport, rx_queue_id);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Rx queue %u", rx_queue_id);
return err;
}
err = idpf_rx_queue_init(dev, rx_queue_id);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to init RX queue %u",
rx_queue_id);
return err;
}
/* Ready to switch the queue on */
err = idpf_switch_queue(vport, rx_queue_id, true, true);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
rx_queue_id);
} else {
rxq->q_started = true;
dev->data->rx_queue_state[rx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
}
int
idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct idpf_tx_queue *txq;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
txq = dev->data->tx_queues[tx_queue_id];
/* Init the Tx tail register. */
IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
return 0;
}
int
idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct idpf_vport *vport = dev->data->dev_private;
struct idpf_tx_queue *txq =
dev->data->tx_queues[tx_queue_id];
int err = 0;
err = idpf_vc_config_txq(vport, tx_queue_id);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
}
err = idpf_tx_queue_init(dev, tx_queue_id);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to init TX queue %u",
tx_queue_id);
return err;
}
/* Ready to switch the queue on */
err = idpf_switch_queue(vport, tx_queue_id, false, true);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
} else {
txq->q_started = true;
dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
}


@@ -50,6 +50,7 @@ struct idpf_rx_queue {
bool q_set; /* if rx queue has been configured */
bool q_started; /* if rx queue has been started */
bool rx_deferred_start; /* don't start this queue in dev start */
/* only valid for split queue mode */
uint8_t expected_gen_id;
@@ -95,6 +96,7 @@ struct idpf_tx_queue {
bool q_set; /* if tx queue has been configured */
bool q_started; /* if tx queue has been started */
bool tx_deferred_start; /* don't start this queue in dev start */
/* only valid for split queue mode */
uint16_t sw_nb_desc;
@@ -109,8 +111,12 @@ int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
#endif /* _IDPF_RXTX_H_ */


@@ -21,6 +21,7 @@
#include <rte_dev.h>
#include "idpf_ethdev.h"
#include "idpf_rxtx.h"
static int
idpf_vc_clean(struct idpf_adapter *adapter)
@@ -223,6 +224,10 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
case VIRTCHNL2_OP_GET_CAPS:
case VIRTCHNL2_OP_CREATE_VPORT:
case VIRTCHNL2_OP_DESTROY_VPORT:
case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
case VIRTCHNL2_OP_ENABLE_QUEUES:
case VIRTCHNL2_OP_DISABLE_QUEUES:
case VIRTCHNL2_OP_ENABLE_VPORT:
case VIRTCHNL2_OP_DISABLE_VPORT:
/* for init virtchnl ops, need to poll the response */
@@ -390,6 +395,448 @@ idpf_vc_destroy_vport(struct idpf_vport *vport)
return err;
}
#define IDPF_RX_BUF_STRIDE 64
int
idpf_vc_config_rxqs(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_rx_queue **rxq =
(struct idpf_rx_queue **)vport->dev_data->rx_queues;
struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
struct virtchnl2_rxq_info *rxq_info;
struct idpf_cmd_info args;
uint16_t total_qs, num_qs;
int size, i, j;
int err = 0;
int k = 0;
total_qs = vport->num_rx_q + vport->num_rx_bufq;
while (total_qs) {
if (total_qs > adapter->max_rxq_per_msg) {
num_qs = adapter->max_rxq_per_msg;
total_qs -= adapter->max_rxq_per_msg;
} else {
num_qs = total_qs;
total_qs = 0;
}
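/* virtchnl2_config_rx_queues ends in a one-element flexible qinfo
 * array, so size the message for num_qs entries.
 */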
size = sizeof(*vc_rxqs) + (num_qs - 1) *
sizeof(struct virtchnl2_rxq_info);
vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
if (vc_rxqs == NULL) {
PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
err = -ENOMEM;
break;
}
vc_rxqs->vport_id = vport->vport_id;
vc_rxqs->num_qinfo = num_qs;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
for (i = 0; i < num_qs; i++, k++) {
rxq_info = &vc_rxqs->qinfo[i];
rxq_info->dma_ring_addr = rxq[k]->rx_ring_phys_addr;
rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
rxq_info->queue_id = rxq[k]->queue_id;
rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
rxq_info->max_pkt_size = vport->max_pkt_len;
rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
rxq_info->ring_len = rxq[k]->nb_rx_desc;
}
} else {
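/* Split queue model: each group is one Rx queue followed by its
 * two buffer queues, so qinfo entries come in triplets.
 */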
for (i = 0; i < num_qs / 3; i++, k++) {
/* Rx queue */
rxq_info = &vc_rxqs->qinfo[i * 3];
rxq_info->dma_ring_addr =
rxq[k]->rx_ring_phys_addr;
rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
rxq_info->queue_id = rxq[k]->queue_id;
rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
rxq_info->max_pkt_size = vport->max_pkt_len;
rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
rxq_info->ring_len = rxq[k]->nb_rx_desc;
rxq_info->rx_bufq1_id = rxq[k]->bufq1->queue_id;
rxq_info->rx_bufq2_id = rxq[k]->bufq2->queue_id;
rxq_info->rx_buffer_low_watermark = 64;
/* Buffer queue */
for (j = 1; j <= IDPF_RX_BUFQ_PER_GRP; j++) {
struct idpf_rx_queue *bufq = j == 1 ?
rxq[k]->bufq1 : rxq[k]->bufq2;
rxq_info = &vc_rxqs->qinfo[i * 3 + j];
rxq_info->dma_ring_addr =
bufq->rx_ring_phys_addr;
rxq_info->type =
VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
rxq_info->queue_id = bufq->queue_id;
rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
rxq_info->data_buffer_size = bufq->rx_buf_len;
rxq_info->desc_ids =
VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
rxq_info->ring_len = bufq->nb_rx_desc;
rxq_info->buffer_notif_stride =
IDPF_RX_BUF_STRIDE;
rxq_info->rx_buffer_low_watermark = 64;
}
}
}
memset(&args, 0, sizeof(args));
args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
args.in_args = (uint8_t *)vc_rxqs;
args.in_args_size = size;
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
err = idpf_execute_vc_cmd(adapter, &args);
rte_free(vc_rxqs);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
break;
}
}
return err;
}
int
idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_rx_queue **rxq =
(struct idpf_rx_queue **)vport->dev_data->rx_queues;
struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
struct virtchnl2_rxq_info *rxq_info;
struct idpf_cmd_info args;
uint16_t num_qs;
int size, err, i;
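/* A single message carries the Rx queue and, in split queue mode,
 * its two buffer queues.
 */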
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
num_qs = IDPF_RXQ_PER_GRP;
else
num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
size = sizeof(*vc_rxqs) + (num_qs - 1) *
sizeof(struct virtchnl2_rxq_info);
vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
if (vc_rxqs == NULL) {
PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
err = -ENOMEM;
return err;
}
vc_rxqs->vport_id = vport->vport_id;
vc_rxqs->num_qinfo = num_qs;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
rxq_info = &vc_rxqs->qinfo[0];
rxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;
rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
rxq_info->queue_id = rxq[rxq_id]->queue_id;
rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
rxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;
rxq_info->max_pkt_size = vport->max_pkt_len;
rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
rxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;
} else {
/* Rx queue */
rxq_info = &vc_rxqs->qinfo[0];
rxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;
rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
rxq_info->queue_id = rxq[rxq_id]->queue_id;
rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
rxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;
rxq_info->max_pkt_size = vport->max_pkt_len;
rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
rxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;
rxq_info->rx_bufq1_id = rxq[rxq_id]->bufq1->queue_id;
rxq_info->rx_bufq2_id = rxq[rxq_id]->bufq2->queue_id;
rxq_info->rx_buffer_low_watermark = 64;
/* Buffer queue */
for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {
struct idpf_rx_queue *bufq =
i == 1 ? rxq[rxq_id]->bufq1 : rxq[rxq_id]->bufq2;
rxq_info = &vc_rxqs->qinfo[i];
rxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;
rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
rxq_info->queue_id = bufq->queue_id;
rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
rxq_info->data_buffer_size = bufq->rx_buf_len;
rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
rxq_info->ring_len = bufq->nb_rx_desc;
rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
rxq_info->rx_buffer_low_watermark = 64;
}
}
memset(&args, 0, sizeof(args));
args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
args.in_args = (uint8_t *)vc_rxqs;
args.in_args_size = size;
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
err = idpf_execute_vc_cmd(adapter, &args);
rte_free(vc_rxqs);
if (err != 0)
PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
return err;
}
int
idpf_vc_config_txqs(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_tx_queue **txq =
(struct idpf_tx_queue **)vport->dev_data->tx_queues;
struct virtchnl2_config_tx_queues *vc_txqs = NULL;
struct virtchnl2_txq_info *txq_info;
struct idpf_cmd_info args;
uint16_t total_qs, num_qs;
int size, i;
int err = 0;
int k = 0;
total_qs = vport->num_tx_q + vport->num_tx_complq;
while (total_qs) {
if (total_qs > adapter->max_txq_per_msg) {
num_qs = adapter->max_txq_per_msg;
total_qs -= adapter->max_txq_per_msg;
} else {
num_qs = total_qs;
total_qs = 0;
}
size = sizeof(*vc_txqs) + (num_qs - 1) *
sizeof(struct virtchnl2_txq_info);
vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
if (vc_txqs == NULL) {
PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
err = -ENOMEM;
break;
}
vc_txqs->vport_id = vport->vport_id;
vc_txqs->num_qinfo = num_qs;
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
for (i = 0; i < num_qs; i++, k++) {
txq_info = &vc_txqs->qinfo[i];
txq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;
txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
txq_info->queue_id = txq[k]->queue_id;
txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
txq_info->ring_len = txq[k]->nb_tx_desc;
}
} else {
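/* Split queue model: each Tx queue is paired with a completion
 * queue, so qinfo entries come in pairs.
 */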
for (i = 0; i < num_qs / 2; i++, k++) {
/* txq info */
txq_info = &vc_txqs->qinfo[2 * i];
txq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;
txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
txq_info->queue_id = txq[k]->queue_id;
txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
txq_info->ring_len = txq[k]->nb_tx_desc;
txq_info->tx_compl_queue_id =
txq[k]->complq->queue_id;
txq_info->relative_queue_id = txq_info->queue_id;
/* tx completion queue info */
txq_info = &vc_txqs->qinfo[2 * i + 1];
txq_info->dma_ring_addr =
txq[k]->complq->tx_ring_phys_addr;
txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
txq_info->queue_id = txq[k]->complq->queue_id;
txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
txq_info->ring_len = txq[k]->complq->nb_tx_desc;
}
}
memset(&args, 0, sizeof(args));
args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
args.in_args = (uint8_t *)vc_txqs;
args.in_args_size = size;
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
err = idpf_execute_vc_cmd(adapter, &args);
rte_free(vc_txqs);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
break;
}
}
return err;
}
int
idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id)
{
struct idpf_adapter *adapter = vport->adapter;
struct idpf_tx_queue **txq =
(struct idpf_tx_queue **)vport->dev_data->tx_queues;
struct virtchnl2_config_tx_queues *vc_txqs = NULL;
struct virtchnl2_txq_info *txq_info;
struct idpf_cmd_info args;
uint16_t num_qs;
int size, err;
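/* A single message carries the Tx queue and, in split queue mode,
 * its completion queue.
 */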
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
num_qs = IDPF_TXQ_PER_GRP;
else
num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
size = sizeof(*vc_txqs) + (num_qs - 1) *
sizeof(struct virtchnl2_txq_info);
vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
if (vc_txqs == NULL) {
PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
err = -ENOMEM;
return err;
}
vc_txqs->vport_id = vport->vport_id;
vc_txqs->num_qinfo = num_qs;
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
txq_info = &vc_txqs->qinfo[0];
txq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;
txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
txq_info->queue_id = txq[txq_id]->queue_id;
txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
txq_info->ring_len = txq[txq_id]->nb_tx_desc;
} else {
/* txq info */
txq_info = &vc_txqs->qinfo[0];
txq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;
txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
txq_info->queue_id = txq[txq_id]->queue_id;
txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
txq_info->ring_len = txq[txq_id]->nb_tx_desc;
txq_info->tx_compl_queue_id = txq[txq_id]->complq->queue_id;
txq_info->relative_queue_id = txq_info->queue_id;
/* tx completion queue info */
txq_info = &vc_txqs->qinfo[1];
txq_info->dma_ring_addr = txq[txq_id]->complq->tx_ring_phys_addr;
txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
txq_info->queue_id = txq[txq_id]->complq->queue_id;
txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
txq_info->ring_len = txq[txq_id]->complq->nb_tx_desc;
}
memset(&args, 0, sizeof(args));
args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
args.in_args = (uint8_t *)vc_txqs;
args.in_args_size = size;
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
err = idpf_execute_vc_cmd(adapter, &args);
rte_free(vc_txqs);
if (err != 0)
PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
return err;
}
static int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
uint32_t type, bool on)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_del_ena_dis_queues *queue_select;
struct virtchnl2_queue_chunk *queue_chunk;
struct idpf_cmd_info args;
int err, len;
len = sizeof(struct virtchnl2_del_ena_dis_queues);
queue_select = rte_zmalloc("queue_select", len, 0);
if (queue_select == NULL)
return -ENOMEM;
queue_chunk = queue_select->chunks.chunks;
queue_select->chunks.num_chunks = 1;
queue_select->vport_id = vport->vport_id;
queue_chunk->type = type;
queue_chunk->start_queue_id = qid;
queue_chunk->num_queues = 1;
args.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES :
VIRTCHNL2_OP_DISABLE_QUEUES;
args.in_args = (u8 *)queue_select;
args.in_args_size = len;
args.out_buffer = adapter->mbx_resp;
args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
err = idpf_execute_vc_cmd(adapter, &args);
if (err != 0)
PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
on ? "ENABLE" : "DISABLE");
rte_free(queue_select);
return err;
}
int
idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
bool rx, bool on)
{
uint32_t type;
int err, queue_id;
/* switch txq/rxq */
type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
if (type == VIRTCHNL2_QUEUE_TYPE_RX)
queue_id = vport->chunks_info.rx_start_qid + qid;
else
queue_id = vport->chunks_info.tx_start_qid + qid;
err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
if (err != 0)
return err;
/* switch tx completion queue */
if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
queue_id = vport->chunks_info.tx_compl_start_qid + qid;
err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
if (err != 0)
return err;
}
/* switch rx buffer queue */
if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
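/* Each Rx queue owns two consecutive buffer queues, hence the
 * 2 * qid offset.
 */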
queue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid;
err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
if (err != 0)
return err;
queue_id++;
err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
if (err != 0)
return err;
}
return err;
}
int
idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
{