net/ice: support queue information getting

Add the following ops:
rxq_info_get
txq_info_get
rx_queue_count

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: Qiming Yang <qiming.yang@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Reviewed-by: Qi Zhang <qi.z.zhang@intel.com>
Authored by Wenzhuo Lu on 2018-12-18 16:46:28 +08:00, committed by Ferruh Yigit
parent cf911d90e3
commit e6683b1d9e
4 changed files with 76 additions and 71 deletions
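
These dev ops back the generic ethdev queue-introspection calls. Below is a minimal usage sketch (not part of this commit) showing how an application might exercise them through rte_eth_rx_queue_info_get() and rte_eth_tx_queue_info_get() once a port is configured and started; the port/queue IDs and the printed fields are illustrative assumptions.

/* Illustrative sketch only, not part of this commit. Assumes port 0 and
 * queue 0 are already set up; the ethdev layer dispatches these calls to
 * the rxq_info_get/txq_info_get ops added below.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void
show_queue_info(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rx_qinfo;
	struct rte_eth_txq_info tx_qinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo) == 0)
		printf("rxq %u: %u descriptors, rx_free_thresh %u\n",
		       queue_id, rx_qinfo.nb_desc,
		       rx_qinfo.conf.rx_free_thresh);

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_qinfo) == 0)
		printf("txq %u: %u descriptors, tx_rs_thresh %u\n",
		       queue_id, tx_qinfo.nb_desc,
		       tx_qinfo.conf.tx_rs_thresh);
}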

drivers/net/ice/ice_ethdev.c

@@ -47,6 +47,9 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
	.tx_queue_release = ice_tx_queue_release,
	.dev_infos_get = ice_dev_info_get,
	.link_update = ice_link_update,
	.rxq_info_get = ice_rxq_info_get,
	.txq_info_get = ice_txq_info_get,
	.rx_queue_count = ice_rx_queue_count,
};
static void
@@ -1024,69 +1027,13 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_QINQ_STRIP |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->rx_offload_capa = 0;
dev_info->tx_offload_capa = 0;
dev_info->rx_queue_offload_capa = 0;
dev_info->tx_queue_offload_capa = 0;
dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = ICE_DEFAULT_RX_PTHRESH,
.hthresh = ICE_DEFAULT_RX_HTHRESH,
.wthresh = ICE_DEFAULT_RX_WTHRESH,
},
.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
.offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_thresh = {
.pthresh = ICE_DEFAULT_TX_PTHRESH,
.hthresh = ICE_DEFAULT_TX_HTHRESH,
.wthresh = ICE_DEFAULT_TX_WTHRESH,
},
.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
.offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = ICE_MAX_RING_DESC,
.nb_min = ICE_MIN_RING_DESC,
.nb_align = ICE_ALIGN_RING_DESC,
};
dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = ICE_MAX_RING_DESC,
.nb_min = ICE_MIN_RING_DESC,
.nb_align = ICE_ALIGN_RING_DESC,
};
dev_info->speed_capa = ETH_LINK_SPEED_10M |
ETH_LINK_SPEED_100M |

drivers/net/ice/ice_ethdev.h

@@ -102,19 +102,6 @@
ICE_FLAG_RSS_AQ_CAPABLE | \
ICE_FLAG_VF_MAC_BY_PF)
#define ICE_RSS_OFFLOAD_ALL ( \
ETH_RSS_FRAG_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
ETH_RSS_NONFRAG_IPV4_UDP | \
ETH_RSS_NONFRAG_IPV4_SCTP | \
ETH_RSS_NONFRAG_IPV4_OTHER | \
ETH_RSS_FRAG_IPV6 | \
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP | \
ETH_RSS_NONFRAG_IPV6_SCTP | \
ETH_RSS_NONFRAG_IPV6_OTHER | \
ETH_RSS_L2_PAYLOAD)
struct ice_adapter;
/**

drivers/net/ice/ice_rxtx.c

@@ -880,6 +880,72 @@ ice_tx_queue_release(void *txq)
rte_free(q);
}
void
ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		 struct rte_eth_rxq_info *qinfo)
{
	struct ice_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mp;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = rxq->drop_en;
	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
}

void
ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		 struct rte_eth_txq_info *qinfo)
{
	struct ice_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
	qinfo->conf.offloads = txq->offloads;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

uint32_t
ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define ICE_RXQ_SCAN_INTERVAL 4
	volatile union ice_rx_desc *rxdp;
	struct ice_rx_queue *rxq;
	uint16_t desc = 0;

	rxq = dev->data->rx_queues[rx_queue_id];
	rxdp = &rxq->rx_ring[rxq->rx_tail];
	while ((desc < rxq->nb_rx_desc) &&
	       ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
		 ICE_RXD_QW1_STATUS_M) >> ICE_RXD_QW1_STATUS_S) &
	       (1 << ICE_RX_DESC_STATUS_DD_S)) {
		/**
		 * Check the DD bit of a rx descriptor of each 4 in a group,
		 * to avoid checking too frequently and downgrading performance
		 * too much.
		 */
		desc += ICE_RXQ_SCAN_INTERVAL;
		rxdp += ICE_RXQ_SCAN_INTERVAL;
		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
			rxdp = &(rxq->rx_ring[rxq->rx_tail +
				 desc - rxq->nb_rx_desc]);
	}

	return desc;
}

void
ice_clear_queues(struct rte_eth_dev *dev)
{
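
A note on ice_rx_queue_count() above: it probes the DD bit of every ICE_RXQ_SCAN_INTERVAL-th (4th) descriptor starting at rx_tail, so the value it reports is an estimate with 4-descriptor granularity rather than an exact fill level. Below is a minimal sketch (not part of this commit) of how an application might consume it through the generic rte_eth_rx_queue_count() call; the port/queue IDs and threshold are illustrative assumptions.

/* Illustrative sketch only, not part of this commit. The port id, queue id
 * and threshold are arbitrary example values; rte_eth_rx_queue_count()
 * dispatches to the ice_rx_queue_count() op added above and returns a
 * negative errno when the query is not possible.
 */
#include <rte_ethdev.h>

static int
rx_queue_backlogged(uint16_t port_id, uint16_t queue_id, int threshold)
{
	int used = rte_eth_rx_queue_count(port_id, queue_id);

	if (used < 0)
		return 0; /* -ENOTSUP/-EINVAL: treat as no measurable backlog */
	return used >= threshold;
}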

drivers/net/ice/ice_rxtx.h

@ -134,4 +134,9 @@ void ice_rx_queue_release(void *rxq);
void ice_tx_queue_release(void *txq);
void ice_clear_queues(struct rte_eth_dev *dev);
void ice_free_queues(struct rte_eth_dev *dev);
uint32_t ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		      struct rte_eth_rxq_info *qinfo);
void ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		      struct rte_eth_txq_info *qinfo);
#endif /* _ICE_RXTX_H_ */