net/idpf: support write back based on ITR expire
Enable write-back on ITR expiry, so that completed descriptors are written
back and packets can be received one by one, without enabling Rx interrupts.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
parent 99af8fe570
commit 37291a68fd
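The core of the change: the driver sets the WB_ON_ITR bit in each vector's
DYN_CTL register, so the NIC writes completed descriptors back to host memory
whenever the ITR timer expires, while the interrupt itself (INTENA) stays
disabled. A minimal standalone sketch of how that register value is composed
follows; the shift and mask values are assumptions made for the sake of a
compilable example, the real definitions come from the IDPF base-code
register headers.

/* Standalone sketch of the DYN_CTL composition done in this patch.
 * The shift/mask values are illustrative assumptions, not the real
 * hardware definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define VIRTCHNL2_ITR_IDX_0          0          /* ITR index 0 */
#define PF_GLINT_DYN_CTL_ITR_INDX_S  3          /* assumed shift */
#define PF_GLINT_DYN_CTL_INTERVAL_S  5          /* assumed shift */
#define PF_GLINT_DYN_CTL_WB_ON_ITR_M (1u << 30) /* assumed mask */
#define IDPF_DFLT_INTERVAL           16

/* Select ITR index 0, request write-back on ITR expiry (WB_ON_ITR and
 * INTENA are mutually exclusive), and program the interval.
 */
static uint32_t
dyn_ctl_wb_on_itr(uint32_t interval)
{
        return (VIRTCHNL2_ITR_IDX_0 << PF_GLINT_DYN_CTL_ITR_INDX_S) |
               PF_GLINT_DYN_CTL_WB_ON_ITR_M |
               (interval << PF_GLINT_DYN_CTL_INTERVAL_S);
}

int
main(void)
{
        printf("DYN_CTL = 0x%08x\n", dyn_ctl_wb_on_itr(IDPF_DFLT_INTERVAL));
        return 0;
}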
@@ -297,6 +297,90 @@ idpf_dev_configure(struct rte_eth_dev *dev)
        return 0;
}

static int
idpf_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
        struct idpf_vport *vport = dev->data->dev_private;
        struct idpf_adapter *adapter = vport->adapter;
        struct virtchnl2_queue_vector *qv_map;
        struct idpf_hw *hw = &adapter->hw;
        uint32_t dynctl_reg_start;
        uint32_t itrn_reg_start;
        uint32_t dynctl_val, itrn_val;
        uint16_t i;

        qv_map = rte_zmalloc("qv_map",
                             dev->data->nb_rx_queues *
                             sizeof(struct virtchnl2_queue_vector), 0);
        if (qv_map == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
                            dev->data->nb_rx_queues);
                goto qv_map_alloc_err;
        }

        /* Rx interrupt disabled, Map interrupt only for writeback */

        /* The capability flags adapter->caps->other_caps should be
         * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
         * condition should be updated when the FW can return the
         * correct flag bits.
         */
        dynctl_reg_start =
                vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
        itrn_reg_start =
                vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
        dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
        PMD_DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x",
                    dynctl_val);
        itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
        PMD_DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
        /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
         * register. WB_ON_ITR and INTENA are mutually exclusive
         * bits. Setting WB_ON_ITR bits means TX and RX Descs
         * are written back based on ITR expiration irrespective
         * of INTENA setting.
         */
        /* TBD: need to tune INTERVAL value for better performance. */
        if (itrn_val != 0)
                IDPF_WRITE_REG(hw,
                               dynctl_reg_start,
                               VIRTCHNL2_ITR_IDX_0 <<
                               PF_GLINT_DYN_CTL_ITR_INDX_S |
                               PF_GLINT_DYN_CTL_WB_ON_ITR_M |
                               itrn_val <<
                               PF_GLINT_DYN_CTL_INTERVAL_S);
        else
                IDPF_WRITE_REG(hw,
                               dynctl_reg_start,
                               VIRTCHNL2_ITR_IDX_0 <<
                               PF_GLINT_DYN_CTL_ITR_INDX_S |
                               PF_GLINT_DYN_CTL_WB_ON_ITR_M |
                               IDPF_DFLT_INTERVAL <<
                               PF_GLINT_DYN_CTL_INTERVAL_S);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                /* map all queues to the same vector */
                qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
                qv_map[i].vector_id =
                        vport->recv_vectors->vchunks.vchunks->start_vector_id;
        }
        vport->qv_map = qv_map;

        if (idpf_vc_config_irq_map_unmap(vport, true) != 0) {
                PMD_DRV_LOG(ERR, "config interrupt mapping failed");
                goto config_irq_map_err;
        }

        return 0;

config_irq_map_err:
        rte_free(vport->qv_map);
        vport->qv_map = NULL;

qv_map_alloc_err:
        return -1;
}

static int
idpf_start_queues(struct rte_eth_dev *dev)
{
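Note that idpf_config_rx_queues_irqs() leaves Rx interrupts disabled and maps
every Rx queue to the same vector: WB_ON_ITR only guarantees that descriptors
for packets arriving one by one are written back once the ITR interval
elapses, rather than waiting for a write-back batch to fill. The application
receive path therefore remains a plain polling loop, for example (standard
ethdev API; port and queue ids are placeholders):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* Plain Rx polling loop. With WB_ON_ITR the NIC writes completed Rx
 * descriptors back on ITR expiry, so each poll can pick up packets
 * even when they trickle in one at a time.
 */
static void
rx_poll_loop(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t nb, i;

        for (;;) {
                nb = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
                for (i = 0; i < nb; i++)
                        rte_pktmbuf_free(pkts[i]); /* consume the packet */
        }
}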
@@ -334,6 +418,10 @@ static int
idpf_dev_start(struct rte_eth_dev *dev)
{
        struct idpf_vport *vport = dev->data->dev_private;
        struct idpf_adapter *adapter = vport->adapter;
        uint16_t num_allocated_vectors =
                adapter->caps->num_allocated_vectors;
        uint16_t req_vecs_num;
        int ret;

        if (dev->data->mtu > vport->max_mtu) {
@@ -344,6 +432,27 @@ idpf_dev_start(struct rte_eth_dev *dev)

        vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;

        req_vecs_num = IDPF_DFLT_Q_VEC_NUM;
        if (req_vecs_num + adapter->used_vecs_num > num_allocated_vectors) {
                PMD_DRV_LOG(ERR, "The accumulated request vectors' number should be less than %d",
                            num_allocated_vectors);
                ret = -EINVAL;
                goto err_mtu;
        }

        ret = idpf_vc_alloc_vectors(vport, req_vecs_num);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Failed to allocate interrupt vectors");
                goto err_mtu;
        }
        adapter->used_vecs_num += req_vecs_num;

        ret = idpf_config_rx_queues_irqs(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Failed to configure irqs");
                goto err_mtu;
        }

        ret = idpf_start_queues(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Failed to start queues");
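The vector budgeting above is adapter-wide: each dev_start() claims
IDPF_DFLT_Q_VEC_NUM (one) vector, and a start is rejected once the
accumulated total would exceed num_allocated_vectors. A toy recreation of
that bookkeeping (the pool size of 4 is an arbitrary example, not a driver
value):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint16_t num_allocated_vectors = 4; /* example pool size */
        uint16_t used_vecs_num = 0;
        uint16_t req_vecs_num = 1;          /* IDPF_DFLT_Q_VEC_NUM */
        int vport;

        for (vport = 0; vport < 5; vport++) {
                if (req_vecs_num + used_vecs_num > num_allocated_vectors) {
                        printf("vport %d: start rejected (-EINVAL)\n", vport);
                        continue;
                }
                used_vecs_num += req_vecs_num;
                printf("vport %d: started, used=%u\n", vport, used_vecs_num);
        }
        return 0; /* the fifth vport fails the check */
}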
@@ -376,6 +485,10 @@ idpf_dev_stop(struct rte_eth_dev *dev)

        idpf_stop_queues(dev);

        idpf_vc_config_irq_map_unmap(vport, false);

        idpf_vc_dealloc_vectors(vport);

        return 0;
}
@@ -387,6 +500,11 @@ idpf_dev_close(struct rte_eth_dev *dev)

        idpf_vc_destroy_vport(vport);

        rte_free(vport->recv_vectors);
        vport->recv_vectors = NULL;

        rte_free(vport->qv_map);
        vport->qv_map = NULL;

        adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
@@ -748,6 +866,8 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
        adapter->cur_vports = 0;
        adapter->cur_vport_nb = 0;

        adapter->used_vecs_num = 0;

        return ret;

err_vports:
@@ -33,6 +33,9 @@
#define IDPF_CTLQ_LEN           64
#define IDPF_DFLT_MBX_BUF_SIZE  4096

#define IDPF_DFLT_Q_VEC_NUM     1
#define IDPF_DFLT_INTERVAL      16

#define IDPF_MIN_BUF_SIZE       1024
#define IDPF_MAX_FRAME_SIZE     9728
#define IDPF_MIN_FRAME_SIZE     14
@@ -92,6 +95,11 @@ struct idpf_vport {
        struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
        uint16_t max_pkt_len;              /* Maximum packet length */

        /* MSIX info */
        struct virtchnl2_queue_vector *qv_map; /* queue vector mapping */
        uint16_t max_vectors;
        struct virtchnl2_alloc_vectors *recv_vectors;

        /* Chunk info */
        struct idpf_chunks_info chunks_info;
@@ -124,6 +132,8 @@ struct idpf_adapter {
        uint16_t cur_vport_nb;
        uint16_t cur_vport_idx;

        uint16_t used_vecs_num;

        /* Max config queue number per VC message */
        uint32_t max_rxq_per_msg;
        uint32_t max_txq_per_msg;
@@ -198,6 +208,9 @@ int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
                      bool rx, bool on);
int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
int idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, bool map);
int idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors);
int idpf_vc_dealloc_vectors(struct idpf_vport *vport);
int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
                      uint16_t buf_len, uint8_t *buf);
@@ -230,6 +230,10 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
        case VIRTCHNL2_OP_DISABLE_QUEUES:
        case VIRTCHNL2_OP_ENABLE_VPORT:
        case VIRTCHNL2_OP_DISABLE_VPORT:
        case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
        case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
        case VIRTCHNL2_OP_ALLOC_VECTORS:
        case VIRTCHNL2_OP_DEALLOC_VECTORS:
                /* for init virtchnl ops, need to poll the response */
                err = idpf_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);
                clear_cmd(adapter);
@@ -521,6 +525,8 @@ idpf_vc_get_caps(struct idpf_adapter *adapter)

        memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities));

        caps_msg.other_caps = VIRTCHNL2_CAP_WB_ON_ITR;

        args.ops = VIRTCHNL2_OP_GET_CAPS;
        args.in_args = (uint8_t *)&caps_msg;
        args.in_args_size = sizeof(caps_msg);
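GET_CAPS now requests VIRTCHNL2_CAP_WB_ON_ITR, and the comment in
idpf_config_rx_queues_irqs() notes that the DYN_CTL programming should
eventually be gated on the flag the firmware echoes back. A sketch of that
deferred check, assuming the usual flag-testing pattern (not part of this
patch):

#include <stdbool.h>

/* Sketch of the capability test the patch defers until the FW returns
 * correct flag bits; adapter->caps->other_caps is the field named in
 * the in-code comment above.
 */
static bool
idpf_wb_on_itr_supported(const struct idpf_adapter *adapter)
{
        return (adapter->caps->other_caps & VIRTCHNL2_CAP_WB_ON_ITR) != 0;
}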
@@ -970,6 +976,113 @@ idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id)
        return err;
}

int
idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, bool map)
{
        struct idpf_adapter *adapter = vport->adapter;
        struct virtchnl2_queue_vector_maps *map_info;
        struct virtchnl2_queue_vector *vecmap;
        uint16_t nb_rxq = vport->dev_data->nb_rx_queues;
        struct idpf_cmd_info args;
        int len, i, err = 0;

        len = sizeof(struct virtchnl2_queue_vector_maps) +
                (nb_rxq - 1) * sizeof(struct virtchnl2_queue_vector);

        map_info = rte_zmalloc("map_info", len, 0);
        if (map_info == NULL)
                return -ENOMEM;

        map_info->vport_id = vport->vport_id;
        map_info->num_qv_maps = nb_rxq;
        for (i = 0; i < nb_rxq; i++) {
                vecmap = &map_info->qv_maps[i];
                vecmap->queue_id = vport->qv_map[i].queue_id;
                vecmap->vector_id = vport->qv_map[i].vector_id;
                vecmap->itr_idx = VIRTCHNL2_ITR_IDX_0;
                vecmap->queue_type = VIRTCHNL2_QUEUE_TYPE_RX;
        }

        args.ops = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
                VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
        args.in_args = (u8 *)map_info;
        args.in_args_size = len;
        args.out_buffer = adapter->mbx_resp;
        args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
        err = idpf_execute_vc_cmd(adapter, &args);
        if (err != 0)
                PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR",
                            map ? "MAP" : "UNMAP");

        rte_free(map_info);
        return err;
}

int
idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors)
{
        struct idpf_adapter *adapter = vport->adapter;
        struct virtchnl2_alloc_vectors *alloc_vec;
        struct idpf_cmd_info args;
        int err, len;

        len = sizeof(struct virtchnl2_alloc_vectors) +
                (num_vectors - 1) * sizeof(struct virtchnl2_vector_chunk);
        alloc_vec = rte_zmalloc("alloc_vec", len, 0);
        if (alloc_vec == NULL)
                return -ENOMEM;

        alloc_vec->num_vectors = num_vectors;

        args.ops = VIRTCHNL2_OP_ALLOC_VECTORS;
        args.in_args = (u8 *)alloc_vec;
        args.in_args_size = sizeof(struct virtchnl2_alloc_vectors);
        args.out_buffer = adapter->mbx_resp;
        args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
        err = idpf_execute_vc_cmd(adapter, &args);
        if (err != 0)
                PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS");

        if (vport->recv_vectors == NULL) {
                vport->recv_vectors = rte_zmalloc("recv_vectors", len, 0);
                if (vport->recv_vectors == NULL) {
                        rte_free(alloc_vec);
                        return -ENOMEM;
                }
        }

        rte_memcpy(vport->recv_vectors, args.out_buffer, len);
        rte_free(alloc_vec);
        return err;
}

int
idpf_vc_dealloc_vectors(struct idpf_vport *vport)
{
        struct idpf_adapter *adapter = vport->adapter;
        struct virtchnl2_alloc_vectors *alloc_vec;
        struct virtchnl2_vector_chunks *vcs;
        struct idpf_cmd_info args;
        int err, len;

        alloc_vec = vport->recv_vectors;
        vcs = &alloc_vec->vchunks;

        len = sizeof(struct virtchnl2_vector_chunks) +
                (vcs->num_vchunks - 1) * sizeof(struct virtchnl2_vector_chunk);

        args.ops = VIRTCHNL2_OP_DEALLOC_VECTORS;
        args.in_args = (u8 *)vcs;
        args.in_args_size = len;
        args.out_buffer = adapter->mbx_resp;
        args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
        err = idpf_execute_vc_cmd(adapter, &args);
        if (err != 0)
                PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS");

        return err;
}

static int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
                          uint32_t type, bool on)
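All three virtchnl helpers above size their messages as
sizeof(header) + (n - 1) * sizeof(element), because the virtchnl2 structs
end in a one-element array whose first slot is already counted by sizeof().
A self-contained illustration of that sizing pattern (the struct names are
hypothetical stand-ins, not virtchnl2 definitions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_elem {
        uint32_t queue_id;
        uint32_t vector_id;
};

struct demo_maps {
        uint32_t vport_id;
        uint16_t num_maps;
        struct demo_elem maps[1]; /* first element counted by sizeof() */
};

int
main(void)
{
        uint16_t i, n = 4;
        /* header already carries maps[0], so add n - 1 extra elements */
        size_t len = sizeof(struct demo_maps) +
                     (n - 1) * sizeof(struct demo_elem);
        struct demo_maps *m = calloc(1, len);

        if (m == NULL)
                return 1;
        m->num_maps = n;
        for (i = 0; i < n; i++)
                m->maps[i].queue_id = i; /* all n entries fit in len bytes */
        printf("message length = %zu bytes\n", len);
        free(m);
        return 0;
}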