net/ice: add queue config in DCF

Add queue and Rx queue interrupt configuration during device start in
DCF. The setup is sent to the PF via virtchnl.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Ting Xu <ting.xu@intel.com>
Reviewed-by: Qiming Yang <qiming.yang@intel.com>
parent 3220d86538
commit 4b0d391f0e
@@ -24,6 +24,7 @@
#include <rte_dev.h>

#include "ice_dcf.h"
#include "ice_rxtx.h"

#define ICE_DCF_AQ_LEN 32
#define ICE_DCF_AQ_BUF_SZ 4096
@@ -825,3 +826,113 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)

	return 0;
}

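/* Rx descriptor format IDs (RXDID): IAVF_RXDID_LEGACY_1 selects the
 * legacy Rx descriptor; IAVF_RXDID_COMMS_GENERIC selects the flexible
 * descriptor with the generic communications metadata profile.
 */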
#define IAVF_RXDID_LEGACY_1 1
#define IAVF_RXDID_COMMS_GENERIC 16

int
ice_dcf_configure_queues(struct ice_dcf_hw *hw)
{
	struct ice_rx_queue **rxq =
		(struct ice_rx_queue **)hw->eth_dev->data->rx_queues;
	struct ice_tx_queue **txq =
		(struct ice_tx_queue **)hw->eth_dev->data->tx_queues;
	struct virtchnl_vsi_queue_config_info *vc_config;
	struct virtchnl_queue_pair_info *vc_qp;
	struct dcf_virtchnl_cmd args;
	uint16_t i, size;
	int err;

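	/* VIRTCHNL_OP_CONFIG_VSI_QUEUES takes a variable-length message:
	 * a fixed header plus one virtchnl_queue_pair_info per queue pair.
	 */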
	size = sizeof(*vc_config) +
	       sizeof(vc_config->qpair[0]) * hw->num_queue_pairs;
	vc_config = rte_zmalloc("cfg_queue", size, 0);
	if (!vc_config)
		return -ENOMEM;

	vc_config->vsi_id = hw->vsi_res->vsi_id;
	vc_config->num_queue_pairs = hw->num_queue_pairs;

	for (i = 0, vc_qp = vc_config->qpair;
	     i < hw->num_queue_pairs;
	     i++, vc_qp++) {
		vc_qp->txq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->txq.queue_id = i;
		if (i < hw->eth_dev->data->nb_tx_queues) {
			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
		}
		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->rxq.queue_id = i;

		/* only dereference rxq[i] for queues that actually exist */
		if (i >= hw->eth_dev->data->nb_rx_queues)
			continue;

		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;

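		/* The flexible Rx descriptor (generic COMMS profile) is
		 * required: the PF must advertise both the capability flag
		 * and the matching bit in supported_rxdid.
		 */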
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
		    hw->supported_rxdid &
		    BIT(IAVF_RXDID_COMMS_GENERIC)) {
			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_GENERIC;
			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
				    "Queue[%d]", vc_qp->rxq.rxdid, i);
		} else {
			PMD_DRV_LOG(ERR, "RXDID 16 is not supported");
			/* free the config message before bailing out */
			rte_free(vc_config);
			return -EINVAL;
		}
	}

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	args.req_msg = (uint8_t *)vc_config;
	args.req_msglen = size;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to execute command of"
			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");

	rte_free(vc_config);
	return err;
}

int
ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
{
	struct virtchnl_irq_map_info *map_info;
	struct virtchnl_vector_map *vecmap;
	struct dcf_virtchnl_cmd args;
	int len, i, err;

	len = sizeof(struct virtchnl_irq_map_info) +
	      sizeof(struct virtchnl_vector_map) * hw->nb_msix;

	map_info = rte_zmalloc("map_info", len, 0);
	if (!map_info)
		return -ENOMEM;

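	/* One vector_map entry per MSI-X vector; each carries the Rx queue
	 * bitmap built earlier in ice_dcf_config_rx_queues_irqs().
	 */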
	map_info->num_vectors = hw->nb_msix;
	for (i = 0; i < hw->nb_msix; i++) {
		vecmap = &map_info->vecmap[i];
		vecmap->vsi_id = hw->vsi_res->vsi_id;
		vecmap->rxitr_idx = 0;
		vecmap->vector_id = hw->msix_base + i;
		vecmap->txq_map = 0;
		vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
	}

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
	args.req_msg = (u8 *)map_info;
	args.req_msglen = len;

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");

	rte_free(map_info);
	return err;
}

@@ -54,6 +54,10 @@ struct ice_dcf_hw {
	uint8_t *rss_key;
	uint64_t supported_rxdid;
	uint16_t num_queue_pairs;

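	/* MSI-X state for VIRTCHNL_OP_CONFIG_IRQ_MAP: base vector index,
	 * vector count, and a per-vector bitmap of the Rx queues it serves.
	 */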
	uint16_t msix_base;
	uint16_t nb_msix;
	uint16_t rxq_map[16];
};

int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
@@ -64,5 +68,7 @@ int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw);
int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
int ice_dcf_init_rss(struct ice_dcf_hw *hw);
int ice_dcf_configure_queues(struct ice_dcf_hw *hw);
int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);

#endif /* _ICE_DCF_H_ */

@@ -114,10 +114,124 @@ ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
	return 0;
}

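/* Vector 0 (RTE_INTR_VEC_ZERO_OFFSET) is the miscellaneous/admin vector;
 * Rx queue vectors start at RTE_INTR_VEC_RXTX_OFFSET (vector 1).
 * The ITR interval values below are in microseconds.
 */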
#define IAVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define IAVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET

#define IAVF_ITR_INDEX_DEFAULT 0
#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define IAVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */

static inline uint16_t
iavf_calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;

	/* Convert to hardware count; each count written represents 2 us */
	return interval / 2;
}

static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
			      struct rte_intr_handle *intr_handle)
{
	struct ice_dcf_adapter *adapter = dev->data->dev_private;
	struct ice_dcf_hw *hw = &adapter->real_hw;
	uint16_t interval, i;
	int vec;

	if (rte_intr_cap_multiple(intr_handle) &&
	    dev->data->dev_conf.intr_conf.rxq) {
		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
				    dev->data->nb_rx_queues);
			return -1;
		}
	}

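	/* Three mapping strategies follow: (1) Rx interrupts disabled - one
	 * vector used only for descriptor write-back; (2) Rx interrupts
	 * enabled but only one vector usable - map all queues to it;
	 * (3) multiple vectors - distribute queues round-robin from vector 1.
	 */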
	if (!dev->data->dev_conf.intr_conf.rxq ||
	    !rte_intr_dp_is_en(intr_handle)) {
		/* Rx interrupt disabled, map interrupt only for writeback */
		hw->nb_msix = 1;
		if (hw->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
			/* If WB_ON_ITR is supported, enable it */
			hw->msix_base = IAVF_RX_VEC_START;
			IAVF_WRITE_REG(&hw->avf,
				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
		} else {
			/* Without the WB_ON_ITR offload flag, an interrupt
			 * is needed for descriptor write-back.
			 */
			hw->msix_base = IAVF_MISC_VEC_ID;

			/* set ITR to max */
			interval =
			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       (IAVF_ITR_INDEX_DEFAULT <<
					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
				       (interval <<
					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
		}
		IAVF_WRITE_FLUSH(&hw->avf);
		/* map all queues to the same interrupt */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			hw->rxq_map[hw->msix_base] |= 1 << i;
	} else {
		if (!rte_intr_allow_others(intr_handle)) {
			hw->nb_msix = 1;
			hw->msix_base = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[hw->msix_base] |= 1 << i;
				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
			}
			PMD_DRV_LOG(DEBUG,
				    "vector %u is mapped to all Rx queues",
				    hw->msix_base);
		} else {
			/* If Rx interrupts are required and multiple
			 * vectors are available, queue vectors start from 1.
			 */
			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
					      intr_handle->nb_efd);
			hw->msix_base = IAVF_MISC_VEC_ID;
			vec = IAVF_MISC_VEC_ID;
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				hw->rxq_map[vec] |= 1 << i;
				intr_handle->intr_vec[i] = vec++;
				if (vec >= hw->nb_msix)
					vec = IAVF_RX_VEC_START;
			}
			PMD_DRV_LOG(DEBUG,
				    "%u vectors are mapped to %u Rx queues",
				    hw->nb_msix, dev->data->nb_rx_queues);
		}
	}

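	/* Push the vector-to-queue mapping to the PF over virtchnl */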
	if (ice_dcf_config_irq_map(hw)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		return -1;
	}
	return 0;
}

static int
ice_dcf_dev_start(struct rte_eth_dev *dev)
{
	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
	struct rte_intr_handle *intr_handle = dev->intr_handle;
	struct ice_adapter *ad = &dcf_ad->parent;
	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
	int ret;

@@ -141,6 +255,18 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
		}
	}

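	/* Send the queue layout to the PF first, then the Rx IRQ mapping;
	 * both must succeed before the link is reported up.
	 */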
	ret = ice_dcf_configure_queues(hw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to config queues");
		return ret;
	}

	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
	if (ret) {
		PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
		return ret;
	}

	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;