net/dpaa: support scatter offload
This patch implements scatter-gather (SG) support, which can be enabled/disabled via configuration. Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
This commit is contained in:
parent
deeec8ef57
commit
55576ac26e
@ -47,15 +47,15 @@
|
||||
|
||||
/* Supported Rx offloads */
|
||||
static uint64_t dev_rx_offloads_sup =
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME |
|
||||
DEV_RX_OFFLOAD_SCATTER;
|
||||
|
||||
/* Rx offloads which cannot be disabled */
|
||||
static uint64_t dev_rx_offloads_nodis =
|
||||
DEV_RX_OFFLOAD_IPV4_CKSUM |
|
||||
DEV_RX_OFFLOAD_UDP_CKSUM |
|
||||
DEV_RX_OFFLOAD_TCP_CKSUM |
|
||||
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
|
||||
DEV_RX_OFFLOAD_SCATTER;
|
||||
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
|
||||
|
||||
/* Supported Tx offloads */
|
||||
static uint64_t dev_tx_offloads_sup;
|
||||
@ -147,11 +147,30 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
|
||||
struct dpaa_if *dpaa_intf = dev->data->dev_private;
|
||||
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
|
||||
+ VLAN_TAG_SIZE;
|
||||
uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
|
||||
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
|
||||
if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
|
||||
return -EINVAL;
|
||||
/*
|
||||
* Refuse mtu that requires the support of scattered packets
|
||||
* when this feature has not been enabled before.
|
||||
*/
|
||||
if (dev->data->min_rx_buf_size &&
|
||||
!dev->data->scattered_rx && frame_size > buffsz) {
|
||||
DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* check <seg size> * <max_seg> >= max_frame */
|
||||
if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
|
||||
(frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
|
||||
DPAA_PMD_ERR("Too big to fit for Max SG list %d",
|
||||
buffsz * DPAA_SGT_MAX_ENTRIES);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (frame_size > ETHER_MAX_LEN)
|
||||
dev->data->dev_conf.rxmode.offloads &=
|
||||
DEV_RX_OFFLOAD_JUMBO_FRAME;
|
||||
@ -212,6 +231,13 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
|
||||
dev->data->mtu = max_len
|
||||
- ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
|
||||
}
|
||||
|
||||
if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
|
||||
DPAA_PMD_DEBUG("enabling scatter mode");
|
||||
fman_if_set_sg(dpaa_intf->fif, 1);
|
||||
dev->data->scattered_rx = 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -309,7 +335,6 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
|
||||
|
||||
dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
|
||||
dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
|
||||
dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
|
||||
dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
|
||||
dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
|
||||
dev_info->max_hash_mac_addrs = 0;
|
||||
@ -523,6 +548,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
|
||||
struct qm_mcc_initfq opts = {0};
|
||||
u32 flags = 0;
|
||||
int ret;
|
||||
u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
|
||||
|
||||
PMD_INIT_FUNC_TRACE();
|
||||
|
||||
@ -536,6 +562,28 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
|
||||
DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
|
||||
queue_idx, rxq->fqid);
|
||||
|
||||
/* Max packet can fit in single buffer */
|
||||
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
|
||||
;
|
||||
} else if (dev->data->dev_conf.rxmode.offloads &
|
||||
DEV_RX_OFFLOAD_SCATTER) {
|
||||
if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
|
||||
buffsz * DPAA_SGT_MAX_ENTRIES) {
|
||||
DPAA_PMD_ERR("max RxPkt size %d too big to fit "
|
||||
"MaxSGlist %d",
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len,
|
||||
buffsz * DPAA_SGT_MAX_ENTRIES);
|
||||
rte_errno = EOVERFLOW;
|
||||
return -rte_errno;
|
||||
}
|
||||
} else {
|
||||
DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
|
||||
" larger than a single mbuf (%u) and scattered"
|
||||
" mode has not been requested",
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len,
|
||||
buffsz - RTE_PKTMBUF_HEADROOM);
|
||||
}
|
||||
|
||||
if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
|
||||
struct fman_if_ic_params icp;
|
||||
uint32_t fd_offset;
|
||||
@ -566,6 +614,9 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
|
||||
dpaa_intf->name, fd_offset,
|
||||
fman_if_get_fdoff(dpaa_intf->fif));
|
||||
}
|
||||
DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
|
||||
fman_if_get_sg_enable(dpaa_intf->fif),
|
||||
dev->data->dev_conf.rxmode.max_rx_pkt_len);
|
||||
/* checking if push mode only, no error check for now */
|
||||
if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
|
||||
dpaa_push_queue_idx++;
|
||||
@ -1316,6 +1367,9 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
|
||||
fman_if_reset_mcast_filter_table(fman_intf);
|
||||
/* Reset interface statistics */
|
||||
fman_if_stats_reset(fman_intf);
|
||||
/* Disable SG by default */
|
||||
fman_if_set_sg(fman_intf, 0);
|
||||
fman_if_set_maxfrm(fman_intf, ETHER_MAX_LEN + VLAN_TAG_SIZE);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -39,9 +39,10 @@
|
||||
/* Alignment to use for cpu-local structs to avoid coherency problems. */
|
||||
#define MAX_CACHELINE 64
|
||||
|
||||
#define DPAA_MIN_RX_BUF_SIZE 512
|
||||
#define DPAA_MAX_RX_PKT_LEN 10240
|
||||
|
||||
#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
|
||||
|
||||
/* RX queue tail drop threshold (CGR Based) in frame count */
|
||||
#define CGR_RX_PERFQ_THRESH 256
|
||||
|
||||
|
@ -306,8 +306,6 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
|
||||
int i = 0;
|
||||
uint8_t fd_offset = fd->offset;
|
||||
|
||||
DPAA_DP_LOG(DEBUG, "Received an SG frame");
|
||||
|
||||
vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
|
||||
if (!vaddr) {
|
||||
DPAA_PMD_ERR("unable to convert physical address");
|
||||
@ -349,6 +347,8 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
|
||||
}
|
||||
prev_seg = cur_seg;
|
||||
}
|
||||
DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d",
|
||||
first_seg->pkt_len, first_seg->nb_segs);
|
||||
|
||||
dpaa_eth_packet_info(first_seg, vaddr);
|
||||
rte_pktmbuf_free_seg(temp);
|
||||
@ -367,8 +367,6 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
|
||||
uint16_t offset;
|
||||
uint32_t length;
|
||||
|
||||
DPAA_DP_LOG(DEBUG, " FD--->MBUF");
|
||||
|
||||
if (unlikely(format == qm_fd_sg))
|
||||
return dpaa_eth_sg_to_mbuf(fd, ifid);
|
||||
|
||||
@ -379,6 +377,8 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
|
||||
offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
|
||||
length = fd->opaque & DPAA_FD_LENGTH_MASK;
|
||||
|
||||
DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length);
|
||||
|
||||
/* Ignoring case when format != qm_fd_contig */
|
||||
dpaa_display_frame(fd);
|
||||
|
||||
|
@ -32,8 +32,6 @@
|
||||
/* L4 Type field: TCP */
|
||||
#define DPAA_L4_PARSE_RESULT_TCP 0x20
|
||||
|
||||
#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
|
||||
|
||||
#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
|
||||
/** <Maximum number of frames to be dequeued in a single rx call*/
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user