/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#ifndef _AXGBE_RXTX_H_
#define _AXGBE_RXTX_H_

/* To suppress gcc warnings related to descriptor casting */
#ifdef RTE_TOOLCHAIN_GCC
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

#ifdef RTE_TOOLCHAIN_CLANG
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

/* Descriptor related defines */
#define AXGBE_MAX_RING_DESC 4096 /* should be power of 2 */
#define AXGBE_TX_DESC_MIN_FREE (AXGBE_MAX_RING_DESC >> 3)
#define AXGBE_TX_DESC_MAX_PROC (AXGBE_MAX_RING_DESC >> 1)
#define AXGBE_MIN_RING_DESC 32
#define RTE_AXGBE_DESCS_PER_LOOP 4
#define RTE_AXGBE_MAX_RX_BURST 32

#define AXGBE_RX_FREE_THRESH 32
#define AXGBE_TX_FREE_THRESH 32

#define AXGBE_DESC_ALIGN 128
#define AXGBE_DESC_OWN 0x80000000
#define AXGBE_ERR_STATUS 0x000f0000
#define AXGBE_L3_CSUM_ERR 0x00050000
#define AXGBE_L4_CSUM_ERR 0x00060000
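/* Illustrative sketch (an assumption, not the driver's exact Rx path): the
 * masks above apply to the write-back desc3 word of a completed Rx
 * descriptor, e.g.
 *
 *	uint32_t err = desc->write.desc3 & AXGBE_ERR_STATUS;
 *
 *	if (err == AXGBE_L3_CSUM_ERR)
 *		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
 *	else if (err == AXGBE_L4_CSUM_ERR)
 *		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
 *
 * RTE_MBUF_F_RX_IP_CKSUM_BAD and RTE_MBUF_F_RX_L4_CKSUM_BAD are standard
 * rte_mbuf offload flags; whether the receive routines set exactly these
 * flags is an assumption.
 */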
#include "axgbe_common.h"

#define AXGBE_GET_DESC_PT(_queue, _idx) \
	(((_queue)->desc) + \
	((_idx) & ((_queue)->nb_desc - 1)))

#define AXGBE_GET_DESC_IDX(_queue, _idx) \
	((_idx) & ((_queue)->nb_desc - 1))
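/* Usage sketch (illustrative only): nb_desc is a power of two, so the two
 * macros above reduce the free-running cur/dirty counters to ring slots by
 * masking, e.g. with nb_desc = 4096 a counter value of 4097 maps to slot 1:
 *
 *	volatile union axgbe_rx_desc *d = AXGBE_GET_DESC_PT(rxq, rxq->cur);
 *	uint16_t idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
 *
 * 'rxq' stands for any struct axgbe_rx_queue pointer; the macros work equally
 * on a struct axgbe_tx_queue, since both expose 'desc' and 'nb_desc'.
 */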
/* Rx desc format */
union axgbe_rx_desc {
	struct {
		uint64_t baddr;
		uint32_t desc2;
		uint32_t desc3;
	} read;
	struct {
		uint32_t desc0;
		uint32_t desc1;
		uint32_t desc2;
		uint32_t desc3;
	} write;
};
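/* The 'read' layout is what software programs before handing a descriptor to
 * the hardware (buffer address plus control words); 'write' is the layout the
 * hardware writes back on completion. A minimal refill sketch, assuming a
 * populated rxq and a freshly allocated mbuf (an illustration, not the
 * driver's exact refill code):
 *
 *	volatile union axgbe_rx_desc *d = AXGBE_GET_DESC_PT(rxq, rxq->dirty);
 *
 *	d->read.baddr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 *	d->read.desc3 |= AXGBE_DESC_OWN;
 *
 * The last line hands ownership of the descriptor back to the hardware.
 * rte_mbuf_data_iova_default() and rte_cpu_to_le_64() are standard DPDK
 * helpers; setting the OWN bit by OR-ing AXGBE_DESC_OWN directly is an
 * assumption made for brevity.
 */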
struct axgbe_rx_queue {
	/* mbuf pool for Rx buffers */
	struct rte_mempool *mb_pool;
	/* H/w Rx buffer size configured in DMA */
	unsigned int buf_size;
	/* CRC h/w offload */
	uint16_t crc_len;
	/* Addresses of s/w Rx buffers */
	struct rte_mbuf **sw_ring;

	/* For segmented packets - save the current state
	 * of the packet if the next descriptor is not ready yet
	 */
	struct rte_mbuf *saved_mbuf;

	/* Port private data */
	struct axgbe_port *pdata;
	/* Number of Rx descriptors in queue */
	uint16_t nb_desc;
	/* Max free Rx descriptors to hold */
	uint16_t free_thresh;
	/* Index of descriptor to check for packet availability */
	uint64_t cur;
	/* Index of descriptor to check for buffer reallocation */
	uint64_t dirty;
	/* Software Rx descriptor ring */
	volatile union axgbe_rx_desc *desc;
	/* Ring physical address */
	uint64_t ring_phys_addr;
	/* DMA channel register address */
	void *dma_regs;
	/* DMA channel tail register address */
	volatile uint32_t *dma_tail_reg;
	/* DPDK queue index */
	uint16_t queue_id;
	/* DPDK port id */
	uint16_t port_id;
	/* Queue stats */
	uint64_t pkts;
	uint64_t bytes;
	uint64_t errors;
	uint64_t rx_mbuf_alloc_failed;
	/* Number of mbufs allocated from pool */
	uint64_t mbuf_alloc;

} __rte_cache_aligned;

/* Tx descriptor format */
struct axgbe_tx_desc {
	phys_addr_t baddr;
	uint32_t desc2;
	uint32_t desc3;
};

struct axgbe_tx_queue {
	/* Port private data reference */
	struct axgbe_port *pdata;
	/* Number of Tx descriptors in queue */
	uint16_t nb_desc;
	/* Start freeing Tx buffers if there are fewer free descriptors than
	 * this value
	 */
	uint16_t free_thresh;
	/* Available descriptors for Tx processing */
	uint16_t nb_desc_free;
	/* Batch of mbufs/descriptors to release */
	uint16_t free_batch_cnt;
	/* Flag for vector support */
	uint16_t vector_disable;
	/* Index of descriptor to be used for the current transfer */
	uint64_t cur;
	/* Index of descriptor to check for transfer complete */
	uint64_t dirty;
	/* Virtual address of ring */
	volatile struct axgbe_tx_desc *desc;
	/* Physical address of ring */
	uint64_t ring_phys_addr;
	/* DMA channel register space */
	void *dma_regs;
	/* DMA tail register address of ring */
	volatile uint32_t *dma_tail_reg;
	/* Tx queue index/id */
	uint16_t queue_id;
	/* Reference to hold Tx mbufs mapped to Tx descriptors, freed
	 * after transmission confirmation
	 */
	struct rte_mbuf **sw_ring;
	/* DPDK port id */
	uint16_t port_id;
	/* Queue stats */
	uint64_t pkts;
	uint64_t bytes;
	uint64_t errors;

} __rte_cache_aligned;
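/* Note on the index fields of both queue structures above (an inference from
 * the types and macros in this header): cur and dirty are free-running 64-bit
 * counters rather than wrapped ring indices, so the number of descriptors
 * currently in flight is (cur - dirty), and AXGBE_GET_DESC_IDX() or
 * AXGBE_GET_DESC_PT() reduce either counter to an actual ring slot through
 * the power-of-two mask.
 */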
/* Queue related APIs */

/*
 * RX/TX function prototypes
 */
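/* None of these are meant to be called directly by applications: the
 * receive/transmit routines below are typically installed as the ethdev
 * burst handlers (dev->rx_pkt_burst / dev->tx_pkt_burst) while the
 * setup/start/stop/release functions back the matching eth_dev_ops
 * callbacks, so traffic ultimately flows through rte_eth_rx_burst() and
 * rte_eth_tx_burst(). The exact wiring lives in the PMD's ethdev code and
 * is assumed here rather than shown.
 */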
void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			     uint16_t nb_tx_desc, unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf);
void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
			     char *fw_version, size_t fw_size);

uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts);

uint16_t axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
			     uint16_t nb_pkts);

uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			     uint16_t nb_pkts);

void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			     uint16_t nb_rx_desc, unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mb_pool);
void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts);
uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
				       struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
int axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

#endif /* _AXGBE_RXTX_H_ */