/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_net.h>
#include "qede_rxtx.h"

static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
{
        struct rte_mbuf *new_mb = NULL;
        struct eth_rx_bd *rx_bd;
        dma_addr_t mapping;
        uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);

        new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
        if (unlikely(!new_mb)) {
                PMD_RX_LOG(ERR, rxq,
                           "Failed to allocate rx buffer "
                           "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
                           idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
                           rte_mempool_avail_count(rxq->mb_pool),
                           rte_mempool_in_use_count(rxq->mb_pool));
                return -ENOMEM;
        }
        rxq->sw_rx_ring[idx] = new_mb;
        mapping = rte_mbuf_data_iova_default(new_mb);
        /* Advance PROD and get BD pointer */
        rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
        rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
        rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
        rxq->sw_rx_prod++;
        return 0;
}

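/*
 * Note (illustrative, not from the original comments): the software ring
 * index above relies on "sw_rx_prod & NUM_RX_BDS(rxq)" wrapping like a
 * modulo, which only holds when the mask spans a power-of-two ring size
 * (assuming NUM_RX_BDS() expands to nb_rx_desc - 1); qede_rx_queue_setup()
 * below rejects any nb_desc that is not a power of 2 for this reason.
 * For example, with 512 descriptors the producer values 511, 512 and 513
 * land in ring slots 511, 0 and 1.
 */
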
#define QEDE_MAX_BULK_ALLOC_COUNT 512

static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
{
        void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
        struct rte_mbuf *mbuf = NULL;
        struct eth_rx_bd *rx_bd;
        dma_addr_t mapping;
        int i, ret = 0;
        uint16_t idx;

        idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);

        ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
        if (unlikely(ret)) {
                PMD_RX_LOG(ERR, rxq,
                           "Failed to allocate %d rx buffers "
                           "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
                           count, idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
                           rte_mempool_avail_count(rxq->mb_pool),
                           rte_mempool_in_use_count(rxq->mb_pool));
                return -ENOMEM;
        }

        for (i = 0; i < count; i++) {
                mbuf = obj_p[i];
                if (likely(i < count - 1))
                        rte_prefetch0(obj_p[i + 1]);

                idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
                rxq->sw_rx_ring[idx] = mbuf;
                mapping = rte_mbuf_data_iova_default(mbuf);
                rx_bd = (struct eth_rx_bd *)
                        ecore_chain_produce(&rxq->rx_bd_ring);
                rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
                rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
                rxq->sw_rx_prod++;
        }

        return 0;
}

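/*
 * Note on the bulk refill above: rte_mempool_get_bulk() is all-or-nothing -
 * it returns 0 with all 'count' mbufs filled in, or a negative value with
 * none dequeued - so there is no partial state to unwind on failure. The
 * caller must also keep 'count' at or below QEDE_MAX_BULK_ALLOC_COUNT,
 * since obj_p is sized by that constant.
 */
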
/* Criteria for calculating Rx buffer size -
 * 1) rx_buf_size should not exceed the size of mbuf
 * 2) In scattered_rx mode - minimum rx_buf_size should be
 *    (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
 * 3) In regular mode - minimum rx_buf_size should be
 *    (MTU + Maximum L2 Header Size + 2)
 *    In the above cases +2 corresponds to 2 bytes of padding in front of
 *    the L2 header.
 * 4) rx_buf_size should be cacheline-size aligned. So considering
 *    criteria 1, we need to adjust the size to floor instead of ceil,
 *    so that we don't exceed mbuf size while ceiling rx_buf_size.
 */
int
qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
                      uint16_t max_frame_size)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rx_buf_size;

        if (dev->data->scattered_rx) {
                /* Per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT buffers can
                 * be used for a single packet, so make sure the mbuf size is
                 * sufficient for this.
                 */
                if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
                    (max_frame_size + QEDE_ETH_OVERHEAD)) {
                        DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
                               mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
                        return -EINVAL;
                }

                rx_buf_size = RTE_MAX(mbufsz,
                                      (max_frame_size + QEDE_ETH_OVERHEAD) /
                                      ETH_RX_MAX_BUFF_PER_PKT);
        } else {
                rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
        }

        /* Align to cache-line size if needed */
        return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
}

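/*
 * Worked example (illustrative figures; the real QEDE_ETH_OVERHEAD and
 * ETH_RX_MAX_BUFF_PER_PKT values come from qede_rxtx.h): with scattered Rx,
 * mbufsz = 2048, max_frame_size = 9018, an assumed overhead of 26 bytes and
 * 5 buffers per packet, the per-buffer requirement is (9018 + 26) / 5 = 1808,
 * so rx_buf_size = RTE_MAX(2048, 1808) = 2048 and the cache-line floor keeps
 * it at 2048. In non-scattered mode the same frame would need a single
 * 9044-byte buffer instead.
 */
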
static struct qede_rx_queue *
qede_alloc_rx_queue_mem(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
                        uint16_t nb_desc,
                        unsigned int socket_id,
                        struct rte_mempool *mp,
                        uint16_t bufsz)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_rx_queue *rxq;
        size_t size;
        int rc;

        /* First allocate the rx queue data structure */
        rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);

        if (!rxq) {
                DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
                       socket_id);
                return NULL;
        }

        rxq->qdev = qdev;
        rxq->mb_pool = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;

        rxq->rx_buf_size = bufsz;

        DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
                qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);

        /* Allocate the parallel driver ring for Rx buffers */
        size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
        rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
                                             RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq->sw_rx_ring) {
                DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
                       " socket %u\n", socket_id);
                rte_free(rxq);
                return NULL;
        }

        /* Allocate FW Rx ring */
        rc = qdev->ops->common->chain_alloc(edev,
                                            ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
                                            ECORE_CHAIN_MODE_NEXT_PTR,
                                            ECORE_CHAIN_CNT_TYPE_U16,
                                            rxq->nb_rx_desc,
                                            sizeof(struct eth_rx_bd),
                                            &rxq->rx_bd_ring,
                                            NULL);

        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev, "Memory allocation fails for RX BD ring"
                       " on socket %u\n", socket_id);
                rte_free(rxq->sw_rx_ring);
                rte_free(rxq);
                return NULL;
        }

        /* Allocate FW completion ring */
        rc = qdev->ops->common->chain_alloc(edev,
                                            ECORE_CHAIN_USE_TO_CONSUME,
                                            ECORE_CHAIN_MODE_PBL,
                                            ECORE_CHAIN_CNT_TYPE_U16,
                                            rxq->nb_rx_desc,
                                            sizeof(union eth_rx_cqe),
                                            &rxq->rx_comp_ring,
                                            NULL);

        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev, "Memory allocation fails for RX CQE ring"
                       " on socket %u\n", socket_id);
                qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
                rte_free(rxq->sw_rx_ring);
                rte_free(rxq);
                return NULL;
        }

        return rxq;
}

int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
                    uint16_t nb_desc, unsigned int socket_id,
                    __rte_unused const struct rte_eth_rxconf *rx_conf,
                    struct rte_mempool *mp)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
        struct qede_rx_queue *rxq;
        uint16_t max_rx_pktlen;
        uint16_t bufsz;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
        if (!rte_is_power_of_2(nb_desc)) {
                DP_ERR(edev, "Ring size %u is not power of 2\n",
                       nb_desc);
                return -EINVAL;
        }

        /* Free memory prior to re-allocation if needed... */
        if (dev->data->rx_queues[qid] != NULL) {
                qede_rx_queue_release(dev->data->rx_queues[qid]);
                dev->data->rx_queues[qid] = NULL;
        }

        max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        /* Fix up RX buffer size */
        bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
        /* cache align the mbuf size to simplify rx_buf_size calculation */
        bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
        if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
            (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
                if (!dev->data->scattered_rx) {
                        DP_INFO(edev, "Forcing scatter-gather mode\n");
                        dev->data->scattered_rx = 1;
                }
        }

        rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pktlen);
        if (rc < 0)
                return rc;

        bufsz = rc;

        if (ECORE_IS_CMT(edev)) {
                rxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc,
                                              socket_id, mp, bufsz);
                if (!rxq)
                        return -ENOMEM;

                qdev->fp_array[qid * 2].rxq = rxq;
                rxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc,
                                              socket_id, mp, bufsz);
                if (!rxq)
                        return -ENOMEM;

                qdev->fp_array[qid * 2 + 1].rxq = rxq;
                /* provide per engine fp struct as rx queue */
                dev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid];
        } else {
                rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
                                              socket_id, mp, bufsz);
                if (!rxq)
                        return -ENOMEM;

                dev->data->rx_queues[qid] = rxq;
                qdev->fp_array[qid].rxq = rxq;
        }

        DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
                qid, nb_desc, rxq->rx_buf_size, socket_id);

        return 0;
}

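/*
 * Usage note: this handler is reached through the generic ethdev call
 * rte_eth_rx_queue_setup(port_id, qid, nb_desc, socket_id, rx_conf, mp).
 * On CMT (two-engine) devices each ethdev queue 'qid' is backed by the two
 * hardware queues qid * 2 and qid * 2 + 1, and the per-engine
 * qede_fastpath_cmt struct is what gets stored as the ethdev queue handle,
 * which is why qede_rx_queue_release() checks ECORE_IS_CMT() before
 * deciding how to interpret the queue pointer it receives.
 */
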
static void
qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
                    struct qede_rx_queue *rxq)
{
        DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
        ecore_chain_reset(&rxq->rx_bd_ring);
        ecore_chain_reset(&rxq->rx_comp_ring);
        rxq->sw_rx_prod = 0;
        rxq->sw_rx_cons = 0;
        *rxq->hw_cons_ptr = 0;
}

static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
{
        uint16_t i;

        if (rxq->sw_rx_ring) {
                for (i = 0; i < rxq->nb_rx_desc; i++) {
                        if (rxq->sw_rx_ring[i]) {
                                rte_pktmbuf_free(rxq->sw_rx_ring[i]);
                                rxq->sw_rx_ring[i] = NULL;
                        }
                }
        }
}

static void _qede_rx_queue_release(struct qede_dev *qdev,
                                   struct ecore_dev *edev,
                                   struct qede_rx_queue *rxq)
{
        qede_rx_queue_release_mbufs(rxq);
        qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
        qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
        rte_free(rxq->sw_rx_ring);
        rte_free(rxq);
}

void qede_rx_queue_release(void *rx_queue)
{
        struct qede_rx_queue *rxq = rx_queue;
        struct qede_fastpath_cmt *fp_cmt;
        struct qede_dev *qdev;
        struct ecore_dev *edev;

        if (rxq) {
                qdev = rxq->qdev;
                edev = QEDE_INIT_EDEV(qdev);
                PMD_INIT_FUNC_TRACE(edev);
                if (ECORE_IS_CMT(edev)) {
                        fp_cmt = rx_queue;
                        _qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq);
                        _qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq);
                } else {
                        _qede_rx_queue_release(qdev, edev, rxq);
                }
        }
}

/* Stops a given RX queue in the HW */
static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_hwfn *p_hwfn;
        struct qede_rx_queue *rxq;
        int hwfn_index;
        int rc;

        if (rx_queue_id < qdev->num_rx_queues) {
                rxq = qdev->fp_array[rx_queue_id].rxq;
                hwfn_index = rx_queue_id % edev->num_hwfns;
                p_hwfn = &edev->hwfns[hwfn_index];
                rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
                                             true, false);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
                        return -1;
                }
                qede_rx_queue_release_mbufs(rxq);
                qede_rx_queue_reset(qdev, rxq);
                eth_dev->data->rx_queue_state[rx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STOPPED;
                DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
        } else {
                DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
                rc = -EINVAL;
        }

        return rc;
}

static struct qede_tx_queue *
qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
                        uint16_t nb_desc,
                        unsigned int socket_id,
                        const struct rte_eth_txconf *tx_conf)
{
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qede_tx_queue *txq;
        int rc;
        size_t sw_tx_ring_size;

        txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);

        if (txq == NULL) {
                DP_ERR(edev,
                       "Unable to allocate memory for txq on socket %u",
                       socket_id);
                return NULL;
        }

        txq->nb_tx_desc = nb_desc;
        txq->qdev = qdev;
        txq->port_id = dev->data->port_id;

        rc = qdev->ops->common->chain_alloc(edev,
                                            ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
                                            ECORE_CHAIN_MODE_PBL,
                                            ECORE_CHAIN_CNT_TYPE_U16,
                                            txq->nb_tx_desc,
                                            sizeof(union eth_tx_bd_types),
                                            &txq->tx_pbl,
                                            NULL);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev,
                       "Unable to allocate memory for txbd ring on socket %u",
                       socket_id);
                qede_tx_queue_release(txq);
                return NULL;
        }

        /* Allocate software ring */
        sw_tx_ring_size = sizeof(txq->sw_tx_ring) * txq->nb_tx_desc;
        txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
                                             sw_tx_ring_size,
                                             RTE_CACHE_LINE_SIZE, socket_id);

        if (!txq->sw_tx_ring) {
                DP_ERR(edev,
                       "Unable to allocate memory for txbd ring on socket %u",
                       socket_id);
                qdev->ops->common->chain_free(edev, &txq->tx_pbl);
                qede_tx_queue_release(txq);
                return NULL;
        }

        txq->queue_id = queue_idx;

        txq->nb_tx_avail = txq->nb_tx_desc;

        txq->tx_free_thresh =
                tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
                (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);

        DP_INFO(edev,
                "txq %u num_desc %u tx_free_thresh %u socket %u\n",
                queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
        return txq;
}

int
qede_tx_queue_setup(struct rte_eth_dev *dev,
                    uint16_t queue_idx,
                    uint16_t nb_desc,
                    unsigned int socket_id,
                    const struct rte_eth_txconf *tx_conf)
{
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qede_tx_queue *txq;

        PMD_INIT_FUNC_TRACE(edev);

        if (!rte_is_power_of_2(nb_desc)) {
                DP_ERR(edev, "Ring size %u is not power of 2\n",
                       nb_desc);
                return -EINVAL;
        }

        /* Free memory prior to re-allocation if needed... */
        if (dev->data->tx_queues[queue_idx] != NULL) {
                qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
                dev->data->tx_queues[queue_idx] = NULL;
        }

        if (ECORE_IS_CMT(edev)) {
                txq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc,
                                              socket_id, tx_conf);
                if (!txq)
                        return -ENOMEM;

                qdev->fp_array[queue_idx * 2].txq = txq;
                txq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc,
                                              socket_id, tx_conf);
                if (!txq)
                        return -ENOMEM;

                qdev->fp_array[(queue_idx * 2) + 1].txq = txq;
                dev->data->tx_queues[queue_idx] =
                        &qdev->fp_array_cmt[queue_idx];
        } else {
                txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
                                              socket_id, tx_conf);
                if (!txq)
                        return -ENOMEM;

                dev->data->tx_queues[queue_idx] = txq;
                qdev->fp_array[queue_idx].txq = txq;
        }

        return 0;
}

static void
qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
                    struct qede_tx_queue *txq)
{
        DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
        ecore_chain_reset(&txq->tx_pbl);
        txq->sw_tx_cons = 0;
        txq->sw_tx_prod = 0;
        *txq->hw_cons_ptr = 0;
}

static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
{
        uint16_t i;

        if (txq->sw_tx_ring) {
                for (i = 0; i < txq->nb_tx_desc; i++) {
                        if (txq->sw_tx_ring[i]) {
                                rte_pktmbuf_free(txq->sw_tx_ring[i]);
                                txq->sw_tx_ring[i] = NULL;
                        }
                }
        }
}

static void _qede_tx_queue_release(struct qede_dev *qdev,
                                   struct ecore_dev *edev,
                                   struct qede_tx_queue *txq)
{
        qede_tx_queue_release_mbufs(txq);
        qdev->ops->common->chain_free(edev, &txq->tx_pbl);
        rte_free(txq->sw_tx_ring);
        rte_free(txq);
}

void qede_tx_queue_release(void *tx_queue)
{
        struct qede_tx_queue *txq = tx_queue;
        struct qede_fastpath_cmt *fp_cmt;
        struct qede_dev *qdev;
        struct ecore_dev *edev;

        if (txq) {
                qdev = txq->qdev;
                edev = QEDE_INIT_EDEV(qdev);
                PMD_INIT_FUNC_TRACE(edev);

                if (ECORE_IS_CMT(edev)) {
                        fp_cmt = tx_queue;
                        _qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq);
                        _qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq);
                } else {
                        _qede_tx_queue_release(qdev, edev, txq);
                }
        }
}

/* This function allocates fast-path status block memory */
static int
qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
                  uint16_t sb_id)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct status_block *sb_virt;
        dma_addr_t sb_phys;
        int rc;

        sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
                                          sizeof(struct status_block));
        if (!sb_virt) {
                DP_ERR(edev, "Status block allocation failed\n");
                return -ENOMEM;
        }
        rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
                                        sb_phys, sb_id);
        if (rc) {
                DP_ERR(edev, "Status block initialization failed\n");
                OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
                                       sizeof(struct status_block));
                return rc;
        }

        return 0;
}

int qede_alloc_fp_resc(struct qede_dev *qdev)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_fastpath *fp;
        uint32_t num_sbs;
        uint16_t sb_idx;
        int i;

        PMD_INIT_FUNC_TRACE(edev);

        if (IS_VF(edev))
                ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
        else
                num_sbs = ecore_cxt_get_proto_cid_count
                          (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);

        if (num_sbs == 0) {
                DP_ERR(edev, "No status blocks available\n");
                return -EINVAL;
        }

        qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
                                    sizeof(*qdev->fp_array),
                                    RTE_CACHE_LINE_SIZE);

        if (!qdev->fp_array) {
                DP_ERR(edev, "fp array allocation failed\n");
                return -ENOMEM;
        }

        memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
               sizeof(*qdev->fp_array));

        if (ECORE_IS_CMT(edev)) {
                qdev->fp_array_cmt = rte_calloc("fp_cmt",
                                                QEDE_RXTX_MAX(qdev) / 2,
                                                sizeof(*qdev->fp_array_cmt),
                                                RTE_CACHE_LINE_SIZE);

                if (!qdev->fp_array_cmt) {
                        DP_ERR(edev, "fp array for CMT allocation failed\n");
                        return -ENOMEM;
                }

                memset((void *)qdev->fp_array_cmt, 0,
                       (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt));

                /* Establish the mapping of fp_array with fp_array_cmt */
                for (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) {
                        qdev->fp_array_cmt[i].qdev = qdev;
                        qdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2];
                        qdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1];
                }
        }

        for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
                fp = &qdev->fp_array[sb_idx];
                fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
                                         RTE_CACHE_LINE_SIZE);
                if (!fp->sb_info) {
                        DP_ERR(edev, "FP sb_info allocation fails\n");
                        return -1;
                }
                if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
                        DP_ERR(edev, "FP status block allocation fails\n");
                        return -1;
                }
                DP_INFO(edev, "sb_info idx 0x%x initialized\n",
                        fp->sb_info->igu_sb_id);
        }

        return 0;
}

void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_fastpath *fp;
        uint16_t sb_idx;
        uint8_t i;

        PMD_INIT_FUNC_TRACE(edev);

        for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
                fp = &qdev->fp_array[sb_idx];
                if (fp->sb_info) {
                        DP_INFO(edev, "Free sb_info index 0x%x\n",
                                fp->sb_info->igu_sb_id);
                        OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
                                               fp->sb_info->sb_phys,
                                               sizeof(struct status_block));
                        rte_free(fp->sb_info);
                        fp->sb_info = NULL;
                }
        }

        /* Free packet buffers and ring memories */
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                if (eth_dev->data->rx_queues[i]) {
                        qede_rx_queue_release(eth_dev->data->rx_queues[i]);
                        eth_dev->data->rx_queues[i] = NULL;
                }
        }

        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                if (eth_dev->data->tx_queues[i]) {
                        qede_tx_queue_release(eth_dev->data->tx_queues[i]);
                        eth_dev->data->tx_queues[i] = NULL;
                }
        }

        rte_free(qdev->fp_array);
        qdev->fp_array = NULL;

        rte_free(qdev->fp_array_cmt);
        qdev->fp_array_cmt = NULL;
}

static inline void
qede_update_rx_prod(__rte_unused struct qede_dev *edev,
                    struct qede_rx_queue *rxq)
{
        uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
        uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
        struct eth_rx_prod_data rx_prods;

        /* Update producers */
        memset(&rx_prods, 0, sizeof(rx_prods));
        rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
        rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);

        /* Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         */
        rte_wmb();

        internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
                        (uint32_t *)&rx_prods);

        /* mmiowb is needed to synchronize doorbell writes from more than one
         * processor. It guarantees that the write arrives to the device before
         * the napi lock is released and another qede_poll is called (possibly
         * on another CPU). Without this barrier, the next doorbell can bypass
         * this doorbell. This is applicable to IA64/Altix systems.
         */
        rte_wmb();

        PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
}

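/*
 * Note: both producers are published through a single internal_ram_wr() so
 * the device observes a consistent {bd_prod, cqe_prod} pair; the first
 * rte_wmb() orders the BD writes before that update and the second orders
 * the producer update against any doorbell write that follows.
 */
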
/* Starts a given RX queue in HW */
static int
qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_queue_start_common_params params;
        struct ecore_rxq_start_ret_params ret_params;
        struct qede_rx_queue *rxq;
        struct qede_fastpath *fp;
        struct ecore_hwfn *p_hwfn;
        dma_addr_t p_phys_table;
        uint16_t page_cnt;
        uint16_t j;
        int hwfn_index;
        int rc;

        if (rx_queue_id < qdev->num_rx_queues) {
                fp = &qdev->fp_array[rx_queue_id];
                rxq = fp->rxq;
                /* Allocate buffers for the Rx ring */
                for (j = 0; j < rxq->nb_rx_desc; j++) {
                        rc = qede_alloc_rx_buffer(rxq);
                        if (rc) {
                                DP_ERR(edev, "RX buffer allocation failed"
                                       " for rxq = %u\n", rx_queue_id);
                                return -ENOMEM;
                        }
                }
                /* disable interrupts */
                ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
                /* Prepare ramrod */
                memset(&params, 0, sizeof(params));
                params.queue_id = rx_queue_id / edev->num_hwfns;
                params.vport_id = 0;
                params.stats_id = params.vport_id;
                params.p_sb = fp->sb_info;
                DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
                        fp->rxq->queue_id, fp->sb_info->igu_sb_id);
                params.sb_idx = RX_PI;
                hwfn_index = rx_queue_id % edev->num_hwfns;
                p_hwfn = &edev->hwfns[hwfn_index];
                p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
                page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
                memset(&ret_params, 0, sizeof(ret_params));
                rc = ecore_eth_rx_queue_start(p_hwfn,
                                              p_hwfn->hw_info.opaque_fid,
                                              &params, fp->rxq->rx_buf_size,
                                              fp->rxq->rx_bd_ring.p_phys_addr,
                                              p_phys_table, page_cnt,
                                              &ret_params);
                if (rc) {
                        DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
                               rx_queue_id, rc);
                        return -1;
                }
                /* Update with the returned parameters */
                fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
                fp->rxq->handle = ret_params.p_handle;

                fp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI];
                qede_update_rx_prod(qdev, fp->rxq);
                eth_dev->data->rx_queue_state[rx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STARTED;
                DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
        } else {
                DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
                rc = -EINVAL;
        }

        return rc;
}

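/*
 * Summary of the start sequence above: fill every BD with an mbuf via
 * qede_alloc_rx_buffer(), mask the status-block interrupt, issue the Rx
 * start ramrod on the owning hwfn (rx_queue_id % num_hwfns), then publish
 * the initial producers with qede_update_rx_prod() so the firmware can
 * begin posting completions.
 */
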
static int
qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_queue_start_common_params params;
        struct ecore_txq_start_ret_params ret_params;
        struct ecore_hwfn *p_hwfn;
        dma_addr_t p_phys_table;
        struct qede_tx_queue *txq;
        struct qede_fastpath *fp;
        uint16_t page_cnt;
        int hwfn_index;
        int rc;

        if (tx_queue_id < qdev->num_tx_queues) {
                fp = &qdev->fp_array[tx_queue_id];
                txq = fp->txq;
                memset(&params, 0, sizeof(params));
                params.queue_id = tx_queue_id / edev->num_hwfns;
                params.vport_id = 0;
                params.stats_id = params.vport_id;
                params.p_sb = fp->sb_info;
                DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
                        fp->txq->queue_id, fp->sb_info->igu_sb_id);
                params.sb_idx = TX_PI(0); /* tc = 0 */
                p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
                page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
                hwfn_index = tx_queue_id % edev->num_hwfns;
                p_hwfn = &edev->hwfns[hwfn_index];
                if (qdev->dev_info.is_legacy)
                        fp->txq->is_legacy = true;
                rc = ecore_eth_tx_queue_start(p_hwfn,
                                              p_hwfn->hw_info.opaque_fid,
                                              &params, 0 /* tc */,
                                              p_phys_table, page_cnt,
                                              &ret_params);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
                               tx_queue_id, rc);
                        return -1;
                }
                txq->doorbell_addr = ret_params.p_doorbell;
                txq->handle = ret_params.p_handle;

                txq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)];
                SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
                          DB_DEST_XCM);
                SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
                          DB_AGG_CMD_SET);
                SET_FIELD(txq->tx_db.data.params,
                          ETH_DB_DATA_AGG_VAL_SEL,
                          DQ_XCM_ETH_TX_BD_PROD_CMD);
                txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
                eth_dev->data->tx_queue_state[tx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STARTED;
                DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
        } else {
                DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
                rc = -EINVAL;
        }

        return rc;
}

static inline void
qede_free_tx_pkt(struct qede_tx_queue *txq)
{
        struct rte_mbuf *mbuf;
        uint16_t nb_segs;
        uint16_t idx;

        idx = TX_CONS(txq);
        mbuf = txq->sw_tx_ring[idx];
        if (mbuf) {
                nb_segs = mbuf->nb_segs;
                PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
                while (nb_segs) {
                        /* It's like consuming rxbuf in recv() */
                        ecore_chain_consume(&txq->tx_pbl);
                        txq->nb_tx_avail++;
                        nb_segs--;
                }
                rte_pktmbuf_free(mbuf);
                txq->sw_tx_ring[idx] = NULL;
                txq->sw_tx_cons++;
                PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
        } else {
                ecore_chain_consume(&txq->tx_pbl);
                txq->nb_tx_avail++;
        }
}

static inline void
qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
                      struct qede_tx_queue *txq)
{
        uint16_t hw_bd_cons;
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
        uint16_t sw_tx_cons;
#endif

        hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
        /* read barrier prevents speculative execution on stale data */
        rte_rmb();

#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
        sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
        PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
                   abs(hw_bd_cons - sw_tx_cons));
#endif
        while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
                qede_free_tx_pkt(txq);
}

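/*
 * Note: hw_cons_ptr points at the Tx PI entry the firmware updates in the
 * status block, so the completion loop simply consumes BDs until the
 * software consumer catches up with it. sw_tx_ring slots holding NULL are
 * consumed without freeing anything; only the slot that owns the mbuf
 * triggers rte_pktmbuf_free() and advances sw_tx_cons.
 */
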
static int qede_drain_txq(struct qede_dev *qdev,
                          struct qede_tx_queue *txq, bool allow_drain)
{
        struct ecore_dev *edev = &qdev->edev;
        int rc, cnt = 1000;

        while (txq->sw_tx_cons != txq->sw_tx_prod) {
                qede_process_tx_compl(edev, txq);
                if (!cnt) {
                        if (allow_drain) {
                                DP_ERR(edev, "Tx queue[%u] is stuck,"
                                       "requesting MCP to drain\n",
                                       txq->queue_id);
                                rc = qdev->ops->common->drain(edev);
                                if (rc)
                                        return rc;
                                return qede_drain_txq(qdev, txq, false);
                        }
                        DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
                               "PROD=%d, CONS=%d\n",
                               txq->queue_id, txq->sw_tx_prod,
                               txq->sw_tx_cons);
                        return -1;
                }
                cnt--;
                DELAY(1000);
                rte_compiler_barrier();
        }

        /* FW finished processing, wait for HW to transmit all tx packets */
        DELAY(2000);

        return 0;
}

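/*
 * Timing note (assuming DELAY() is microsecond based, as with
 * rte_delay_us): the loop above polls for completions for roughly one
 * second - 1000 iterations of DELAY(1000) - before either requesting an
 * MCP drain on the first pass or reporting the queue as stuck.
 */
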
/* Stops a given TX queue in the HW */
static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_hwfn *p_hwfn;
        struct qede_tx_queue *txq;
        int hwfn_index;
        int rc;

        if (tx_queue_id < qdev->num_tx_queues) {
                txq = qdev->fp_array[tx_queue_id].txq;
                /* Drain txq */
                if (qede_drain_txq(qdev, txq, true))
                        return -1; /* For the lack of retcodes */
                /* Stop txq */
                hwfn_index = tx_queue_id % edev->num_hwfns;
                p_hwfn = &edev->hwfns[hwfn_index];
                rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
                        return -1;
                }
                qede_tx_queue_release_mbufs(txq);
                qede_tx_queue_reset(qdev, txq);
                eth_dev->data->tx_queue_state[tx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STOPPED;
                DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
        } else {
                DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
                rc = -EINVAL;
        }

        return rc;
}

int qede_start_queues(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        uint8_t id;
        int rc = -1;

        for (id = 0; id < qdev->num_rx_queues; id++) {
                rc = qede_rx_queue_start(eth_dev, id);
                if (rc != ECORE_SUCCESS)
                        return -1;
        }

        for (id = 0; id < qdev->num_tx_queues; id++) {
                rc = qede_tx_queue_start(eth_dev, id);
                if (rc != ECORE_SUCCESS)
                        return -1;
        }

        return rc;
}

void qede_stop_queues(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        uint8_t id;

        /* Stopping RX/TX queues */
        for (id = 0; id < qdev->num_tx_queues; id++)
                qede_tx_queue_stop(eth_dev, id);

        for (id = 0; id < qdev->num_rx_queues; id++)
                qede_rx_queue_stop(eth_dev, id);
}

static inline bool qede_tunn_exist(uint16_t flag)
{
        return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
                   PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
}

static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
{
        return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
                   PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
}

/*
 * qede_check_tunn_csum_l4:
 * Returns:
 * 1 : If L4 csum is enabled AND if the validation has failed.
 * 0 : Otherwise
 */
static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
{
        if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
             PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
                return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
                           PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);

        return 0;
}

static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
{
        if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
             PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
                return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
                           PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);

        return 0;
}

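/*
 * Note: each helper above isolates a PARSING_AND_ERR_FLAGS_* bit by shifting
 * its mask into position and testing it against the CQE flags. The L4
 * variants report an error only when the "checksum was calculated" bit is
 * also set, so packets the hardware never checksummed are not flagged as
 * bad.
 */
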
/* Returns outer L2, L3 and L4 packet_type for tunneled packets */
static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
{
        uint32_t packet_type = RTE_PTYPE_UNKNOWN;
        struct rte_ether_hdr *eth_hdr;
        struct rte_ipv4_hdr *ipv4_hdr;
        struct rte_ipv6_hdr *ipv6_hdr;
        struct rte_vlan_hdr *vlan_hdr;
        uint16_t ethertype;
        bool vlan_tagged = 0;
        uint16_t len;

        eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
        len = sizeof(struct rte_ether_hdr);
        ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);

        /* Note: Valid only if VLAN stripping is disabled */
        if (ethertype == RTE_ETHER_TYPE_VLAN) {
                vlan_tagged = 1;
                vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
                len += sizeof(struct rte_vlan_hdr);
                ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
        }

        if (ethertype == RTE_ETHER_TYPE_IPV4) {
                packet_type |= RTE_PTYPE_L3_IPV4;
                ipv4_hdr = rte_pktmbuf_mtod_offset(m,
                                                   struct rte_ipv4_hdr *, len);
                if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
                        packet_type |= RTE_PTYPE_L4_TCP;
                else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
                        packet_type |= RTE_PTYPE_L4_UDP;
        } else if (ethertype == RTE_ETHER_TYPE_IPV6) {
                packet_type |= RTE_PTYPE_L3_IPV6;
                ipv6_hdr = rte_pktmbuf_mtod_offset(m,
                                                   struct rte_ipv6_hdr *, len);
                if (ipv6_hdr->proto == IPPROTO_TCP)
                        packet_type |= RTE_PTYPE_L4_TCP;
                else if (ipv6_hdr->proto == IPPROTO_UDP)
                        packet_type |= RTE_PTYPE_L4_UDP;
        }

        if (vlan_tagged)
                packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
        else
                packet_type |= RTE_PTYPE_L2_ETHER;

        return packet_type;
}

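/*
 * The two lookup helpers below build their table index from the CQE parsing
 * flags: bits 0..3 carry the L3/L4 protocol type and bits 4..5 the
 * fragment/VLAN indication, as the in-function comments note. Illustrative
 * example, assuming the QEDE_PKT_TYPE_* encoding follows that layout: an
 * untagged, unfragmented IPv4/TCP packet indexes QEDE_PKT_TYPE_IPV4_TCP and
 * qede_rx_cqe_to_pkt_type() returns RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
 * RTE_PTYPE_L4_TCP.
 */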
|
|
|
|
|
|
|
|
static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
|
|
|
|
{
|
|
|
|
uint16_t val;
|
|
|
|
|
|
|
|
/* Lookup table */
|
|
|
|
static const uint32_t
|
|
|
|
ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
|
|
|
|
[QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 |
|
|
|
|
RTE_PTYPE_INNER_L4_TCP |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 |
|
|
|
|
RTE_PTYPE_INNER_L4_TCP |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 |
|
|
|
|
RTE_PTYPE_INNER_L4_UDP |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 |
|
|
|
|
RTE_PTYPE_INNER_L4_UDP |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER,
|
|
|
|
/* Frags with no VLAN */
|
|
|
|
[QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
|
|
|
|
RTE_PTYPE_INNER_L4_FRAG |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
|
|
|
|
RTE_PTYPE_INNER_L4_FRAG |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER,
|
|
|
|
/* VLANs */
|
|
|
|
[QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
|
|
|
|
RTE_PTYPE_INNER_L4_TCP |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
|
|
|
|
RTE_PTYPE_INNER_L4_TCP |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
|
|
|
|
RTE_PTYPE_INNER_L4_UDP |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
|
|
|
|
RTE_PTYPE_INNER_L4_UDP |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER_VLAN,
|
|
|
|
/* Frags with VLAN */
|
|
|
|
[QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
|
|
|
|
RTE_PTYPE_INNER_L4_FRAG |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
|
|
|
|
RTE_PTYPE_INNER_L4_FRAG |
|
|
|
|
RTE_PTYPE_INNER_L2_ETHER_VLAN,
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Bits (0..3) provides L3/L4 protocol type */
|
|
|
|
/* Bits (4,5) provide frag and VLAN info */
|
|
|
|
val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
|
|
|
|
PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
|
|
|
|
(PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
|
|
|
|
PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
|
|
|
|
(PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
|
|
|
|
PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
|
|
|
|
(PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
|
|
|
|
PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
|
|
|
|
|
|
|
|
if (val < QEDE_PKT_TYPE_MAX)
|
|
|
|
return ptype_lkup_tbl[val];
|
|
|
|
|
|
|
|
return RTE_PTYPE_UNKNOWN;
|
|
|
|
}
|
|
|
|
|
2017-04-25 07:28:46 +00:00
|
|
|
static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
|
|
|
|
{
|
|
|
|
uint16_t val;
|
|
|
|
|
|
|
|
/* Lookup table */
|
|
|
|
static const uint32_t
|
|
|
|
ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
|
2017-10-19 01:13:29 +00:00
|
|
|
[QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 |
|
|
|
|
RTE_PTYPE_L4_TCP |
|
|
|
|
RTE_PTYPE_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 |
|
|
|
|
RTE_PTYPE_L4_TCP |
|
|
|
|
RTE_PTYPE_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 |
|
|
|
|
RTE_PTYPE_L4_UDP |
|
|
|
|
RTE_PTYPE_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 |
|
|
|
|
RTE_PTYPE_L4_UDP |
|
|
|
|
RTE_PTYPE_L2_ETHER,
|
|
|
|
/* Frags with no VLAN */
|
|
|
|
[QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 |
|
|
|
|
RTE_PTYPE_L4_FRAG |
|
|
|
|
RTE_PTYPE_L2_ETHER,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 |
|
|
|
|
RTE_PTYPE_L4_FRAG |
|
|
|
|
RTE_PTYPE_L2_ETHER,
|
|
|
|
/* VLANs */
|
|
|
|
[QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 |
|
|
|
|
RTE_PTYPE_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 |
|
|
|
|
RTE_PTYPE_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 |
|
|
|
|
RTE_PTYPE_L4_TCP |
|
|
|
|
RTE_PTYPE_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 |
|
|
|
|
RTE_PTYPE_L4_TCP |
|
|
|
|
RTE_PTYPE_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 |
|
|
|
|
RTE_PTYPE_L4_UDP |
|
|
|
|
RTE_PTYPE_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 |
|
|
|
|
RTE_PTYPE_L4_UDP |
|
|
|
|
RTE_PTYPE_L2_ETHER_VLAN,
|
|
|
|
/* Frags with VLAN */
|
|
|
|
[QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 |
|
|
|
|
RTE_PTYPE_L4_FRAG |
|
|
|
|
RTE_PTYPE_L2_ETHER_VLAN,
|
|
|
|
[QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 |
|
|
|
|
RTE_PTYPE_L4_FRAG |
|
|
|
|
RTE_PTYPE_L2_ETHER_VLAN,
|
2017-04-25 07:28:46 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Bits (0..3) provide the L3/L4 protocol type */
|
2017-10-19 01:13:29 +00:00
|
|
|
/* Bits (4,5) provide frag and VLAN info */
|
2017-04-25 07:28:46 +00:00
|
|
|
val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
|
|
|
|
PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
|
|
|
|
(PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
|
2017-10-19 01:13:29 +00:00
|
|
|
PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
|
|
|
|
(PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
|
|
|
|
PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
|
|
|
|
(PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
|
|
|
|
PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
|
2017-04-25 07:28:46 +00:00
|
|
|
|
|
|
|
if (val < QEDE_PKT_TYPE_MAX)
|
2017-10-19 01:13:29 +00:00
|
|
|
return ptype_lkup_tbl[val];
|
|
|
|
|
|
|
|
return RTE_PTYPE_UNKNOWN;
|
2017-04-25 07:28:46 +00:00
|
|
|
}
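/*
 * Illustrative sketch only (not part of the driver): the mask-and-index
 * idea used by the two lookup-table helpers above, reduced to plain
 * integers.  The 6-bit layout assumed here (bits [3:0] = L3/L4 type,
 * bit 4 = frag, bit 5 = VLAN) follows the comments above; the shortened
 * table contents are an assumption for this example only.
 */
static inline uint32_t
qede_example_flags_to_ptype(uint16_t flags)
{
	static const uint32_t example_tbl[1 << 6] = {
		[0x1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
		[0x2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	};

	/* Entries left out of the initializer stay 0 == RTE_PTYPE_UNKNOWN */
	return example_tbl[flags & 0x3f];
}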
|
|
|
|
|
2016-12-23 00:48:08 +00:00
|
|
|
static inline uint8_t
|
|
|
|
qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
|
2016-04-27 14:18:37 +00:00
|
|
|
{
|
2019-05-21 16:13:10 +00:00
|
|
|
struct rte_ipv4_hdr *ip;
|
2016-12-23 00:48:08 +00:00
|
|
|
uint16_t pkt_csum;
|
|
|
|
uint16_t calc_csum;
|
|
|
|
uint16_t val;
|
|
|
|
|
|
|
|
val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
|
|
|
|
PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
|
|
|
|
|
|
|
|
if (unlikely(val)) {
|
|
|
|
m->packet_type = qede_rx_cqe_to_pkt_type(flag);
|
|
|
|
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
|
2019-05-21 16:13:10 +00:00
|
|
|
ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
|
2019-05-21 16:13:03 +00:00
|
|
|
sizeof(struct rte_ether_hdr));
|
2016-12-23 00:48:08 +00:00
|
|
|
pkt_csum = ip->hdr_checksum;
|
|
|
|
ip->hdr_checksum = 0;
|
|
|
|
calc_csum = rte_ipv4_cksum(ip);
|
|
|
|
ip->hdr_checksum = pkt_csum;
|
|
|
|
return (calc_csum != pkt_csum);
|
|
|
|
} else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
2016-04-27 14:18:37 +00:00
|
|
|
}
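/*
 * Illustrative sketch only (not part of the driver): the same software
 * IPv4 checksum verification as above, on a bare header instead of an
 * mbuf.  The save/zero/recompute/restore sequence is needed because
 * rte_ipv4_cksum() expects the checksum field to be zero while it sums
 * the header.
 */
static inline int
qede_example_ipv4_csum_ok(struct rte_ipv4_hdr *ip)
{
	uint16_t pkt_csum = ip->hdr_checksum;
	uint16_t calc_csum;

	ip->hdr_checksum = 0;
	calc_csum = rte_ipv4_cksum(ip);
	ip->hdr_checksum = pkt_csum;

	return calc_csum == pkt_csum; /* 1 when the header checksum is valid */
}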
|
|
|
|
|
|
|
|
static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
|
|
|
|
{
|
|
|
|
ecore_chain_consume(&rxq->rx_bd_ring);
|
|
|
|
rxq->sw_rx_cons++;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
2017-04-25 07:28:46 +00:00
|
|
|
qede_reuse_page(__rte_unused struct qede_dev *qdev,
|
2021-03-26 11:01:30 +00:00
|
|
|
struct qede_rx_queue *rxq, struct rte_mbuf *curr_cons)
|
2016-04-27 14:18:37 +00:00
|
|
|
{
|
|
|
|
struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
|
2019-01-18 10:29:29 +00:00
|
|
|
uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
|
2016-04-27 14:18:37 +00:00
|
|
|
dma_addr_t new_mapping;
|
|
|
|
|
2021-03-26 11:01:30 +00:00
|
|
|
rxq->sw_rx_ring[idx] = curr_cons;
|
2016-04-27 14:18:37 +00:00
|
|
|
|
2021-03-26 11:01:30 +00:00
|
|
|
new_mapping = rte_mbuf_data_iova_default(curr_cons);
|
2016-04-27 14:18:37 +00:00
|
|
|
|
|
|
|
rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
|
|
|
|
rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
|
|
|
|
|
|
|
|
rxq->sw_rx_prod++;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
|
|
|
|
struct qede_dev *qdev, uint8_t count)
|
|
|
|
{
|
2021-03-26 11:01:30 +00:00
|
|
|
struct rte_mbuf *curr_cons;
|
2016-04-27 14:18:37 +00:00
|
|
|
|
|
|
|
for (; count > 0; count--) {
|
2021-03-26 11:01:30 +00:00
|
|
|
curr_cons = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
|
2016-04-27 14:18:37 +00:00
|
|
|
qede_reuse_page(qdev, rxq, curr_cons);
|
|
|
|
qede_rx_bd_ring_consume(rxq);
|
|
|
|
}
|
|
|
|
}
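/*
 * Illustrative sketch only (not part of the driver): why sw_rx_prod and
 * sw_rx_cons above are masked rather than wrapped.  With a power-of-two
 * ring size the 16-bit counters are left free-running; "& (size - 1)"
 * turns a counter into a ring slot, and the raw difference between the
 * two counters is the fill level even across wrap-around.  The ring size
 * below is an assumption for this example.
 */
static inline uint16_t
qede_example_ring_slot(uint16_t counter)
{
	const uint16_t example_ring_size = 512;	/* must be a power of two */

	return counter & (example_ring_size - 1);
}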
|
|
|
|
|
2017-03-29 20:37:01 +00:00
|
|
|
static inline void
|
2017-04-25 07:28:46 +00:00
|
|
|
qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
|
2017-04-25 07:28:38 +00:00
|
|
|
struct qede_rx_queue *rxq,
|
|
|
|
uint8_t agg_index, uint16_t len)
|
2017-03-29 20:37:01 +00:00
|
|
|
{
|
|
|
|
struct qede_agg_info *tpa_info;
|
2017-04-25 07:28:38 +00:00
|
|
|
struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
|
2017-03-29 20:37:01 +00:00
|
|
|
uint16_t cons_idx;
|
|
|
|
|
2017-04-25 07:28:38 +00:00
|
|
|
/* Under certain conditions it is possible that FW may not consume
|
|
|
|
* an additional or new BD, so the decision to consume the BD must be made
|
|
|
|
* based on len_list[0].
|
|
|
|
*/
|
|
|
|
if (rte_le_to_cpu_16(len)) {
|
|
|
|
tpa_info = &rxq->tpa_info[agg_index];
|
2017-03-29 20:37:01 +00:00
|
|
|
cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
|
2021-03-26 11:01:30 +00:00
|
|
|
curr_frag = rxq->sw_rx_ring[cons_idx];
|
2017-04-25 07:28:38 +00:00
|
|
|
assert(curr_frag);
|
|
|
|
curr_frag->nb_segs = 1;
|
|
|
|
curr_frag->pkt_len = rte_le_to_cpu_16(len);
|
|
|
|
curr_frag->data_len = curr_frag->pkt_len;
|
|
|
|
tpa_info->tpa_tail->next = curr_frag;
|
|
|
|
tpa_info->tpa_tail = curr_frag;
|
2017-03-29 20:37:01 +00:00
|
|
|
qede_rx_bd_ring_consume(rxq);
|
|
|
|
if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
|
2017-04-25 07:28:38 +00:00
|
|
|
PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
|
|
|
|
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
|
|
|
|
rxq->rx_alloc_errors++;
|
2017-03-29 20:37:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-25 07:28:38 +00:00
|
|
|
static inline void
|
|
|
|
qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
|
|
|
|
struct qede_rx_queue *rxq,
|
|
|
|
struct eth_fast_path_rx_tpa_cont_cqe *cqe)
|
|
|
|
{
|
|
|
|
PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
|
|
|
|
cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
|
|
|
|
/* Only len_list[0] will have a value */
|
|
|
|
qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
|
|
|
|
cqe->len_list[0]);
|
|
|
|
}
|
|
|
|
|
2017-03-29 20:37:01 +00:00
|
|
|
static inline void
|
|
|
|
qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
|
|
|
|
struct qede_rx_queue *rxq,
|
|
|
|
struct eth_fast_path_rx_tpa_end_cqe *cqe)
|
|
|
|
{
|
2017-04-25 07:28:38 +00:00
|
|
|
struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
|
2017-03-29 20:37:01 +00:00
|
|
|
|
2017-04-25 07:28:38 +00:00
|
|
|
qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
|
|
|
|
cqe->len_list[0]);
|
2017-03-29 20:37:01 +00:00
|
|
|
/* Update total length and frags based on end TPA */
|
2017-04-25 07:28:38 +00:00
|
|
|
rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
|
|
|
|
/* TODO: Add Sanity Checks */
|
2017-03-29 20:37:01 +00:00
|
|
|
rx_mb->nb_segs = cqe->num_of_bds;
|
|
|
|
rx_mb->pkt_len = cqe->total_packet_len;
|
2017-04-25 07:28:38 +00:00
|
|
|
|
|
|
|
PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
|
|
|
|
" pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
|
|
|
|
rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
|
|
|
|
rx_mb->pkt_len);
|
2017-03-29 20:37:01 +00:00
|
|
|
}
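/*
 * Illustrative sketch only (not part of the driver): TPA segment chaining
 * in miniature.  Continuation buffers are linked behind a head mbuf much
 * like the cont/end handlers above do with tpa_head/tpa_tail.  This is a
 * simple accumulating variant; the driver itself takes the final nb_segs
 * and pkt_len from the TPA end CQE instead.
 */
static inline void
qede_example_chain_tpa_seg(struct rte_mbuf *head, struct rte_mbuf *tail,
			   struct rte_mbuf *seg, uint16_t seg_len)
{
	seg->data_len = seg_len;
	seg->pkt_len = seg_len;
	seg->nb_segs = 1;
	seg->next = NULL;

	tail->next = seg;	/* append behind the current tail */
	head->nb_segs++;	/* aggregate totals live in the head only */
	head->pkt_len += seg_len;
}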
|
|
|
|
|
2016-12-23 00:48:08 +00:00
|
|
|
static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
|
|
|
|
{
|
|
|
|
uint32_t val;
|
|
|
|
|
|
|
|
/* Lookup table */
|
|
|
|
static const uint32_t
|
|
|
|
ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
|
|
|
|
[QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
|
2018-01-27 21:15:27 +00:00
|
|
|
RTE_PTYPE_TUNNEL_GENEVE,
|
2016-12-23 00:48:08 +00:00
|
|
|
[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
|
2018-01-27 21:15:27 +00:00
|
|
|
RTE_PTYPE_TUNNEL_GRE,
|
2016-12-23 00:48:08 +00:00
|
|
|
[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
|
2018-01-27 21:15:27 +00:00
|
|
|
RTE_PTYPE_TUNNEL_VXLAN,
|
2016-12-23 00:48:08 +00:00
|
|
|
[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
|
2018-01-27 21:15:27 +00:00
|
|
|
RTE_PTYPE_TUNNEL_GENEVE,
|
2016-12-23 00:48:08 +00:00
|
|
|
[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
|
2018-01-27 21:15:27 +00:00
|
|
|
RTE_PTYPE_TUNNEL_GRE,
|
2016-12-23 00:48:08 +00:00
|
|
|
[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
|
2018-01-27 21:15:27 +00:00
|
|
|
RTE_PTYPE_TUNNEL_VXLAN,
|
2016-12-23 00:48:08 +00:00
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
|
|
|
|
RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
|
|
|
|
RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
|
|
|
|
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
|
|
|
|
RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
|
|
|
|
RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
|
|
|
|
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
|
|
|
|
RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
|
|
|
|
RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
|
|
|
|
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
|
|
|
|
RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
|
|
|
|
RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
|
|
|
|
[QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
|
|
|
|
RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Cover bits [4:0] to include tunn_type and next protocol */
|
|
|
|
val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
|
|
|
|
ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
|
|
|
|
(ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
|
|
|
|
ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
|
|
|
|
|
|
|
|
if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
|
|
|
|
return ptype_tunn_lkup_tbl[val];
|
|
|
|
else
|
|
|
|
return RTE_PTYPE_UNKNOWN;
|
2016-04-27 14:18:37 +00:00
|
|
|
}
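/*
 * Illustrative sketch only (not part of the driver): how the tunnel lookup
 * above combines with the inner and outer classifications in the receive
 * path.  The ptype groups occupy disjoint bit-fields, so the final
 * packet_type is simply their bitwise OR.
 */
static inline uint32_t
qede_example_combine_ptype(uint32_t tunn_ptype, uint32_t inner_ptype,
			   uint32_t outer_ptype)
{
	return tunn_ptype | inner_ptype | outer_ptype;
}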
|
|
|
|
|
2017-01-06 08:16:45 +00:00
|
|
|
static inline int
|
|
|
|
qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
|
|
|
|
uint8_t num_segs, uint16_t pkt_len)
|
2016-10-19 04:11:37 +00:00
|
|
|
{
|
|
|
|
struct qede_rx_queue *rxq = p_rxq;
|
|
|
|
struct qede_dev *qdev = rxq->qdev;
|
|
|
|
register struct rte_mbuf *seg1 = NULL;
|
|
|
|
register struct rte_mbuf *seg2 = NULL;
|
2017-01-06 08:16:45 +00:00
|
|
|
uint16_t sw_rx_index;
|
|
|
|
uint16_t cur_size;
|
2016-10-19 04:11:37 +00:00
|
|
|
|
|
|
|
seg1 = rx_mb;
|
2016-10-19 04:11:43 +00:00
|
|
|
while (num_segs) {
|
2017-01-06 08:16:45 +00:00
|
|
|
cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
|
|
|
|
pkt_len;
|
|
|
|
if (unlikely(!cur_size)) {
|
|
|
|
PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
|
2017-07-01 19:29:58 +00:00
|
|
|
" left for mapping jumbo\n", num_segs);
|
2016-10-19 04:11:43 +00:00
|
|
|
qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
|
2016-10-19 04:11:37 +00:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
|
2021-03-26 11:01:30 +00:00
|
|
|
seg2 = rxq->sw_rx_ring[sw_rx_index];
|
2016-10-19 04:11:37 +00:00
|
|
|
qede_rx_bd_ring_consume(rxq);
|
|
|
|
pkt_len -= cur_size;
|
|
|
|
seg2->data_len = cur_size;
|
|
|
|
seg1->next = seg2;
|
|
|
|
seg1 = seg1->next;
|
2016-10-19 04:11:43 +00:00
|
|
|
num_segs--;
|
|
|
|
rxq->rx_segs++;
|
2016-10-19 04:11:37 +00:00
|
|
|
}
|
|
|
|
|
2017-01-06 08:16:45 +00:00
|
|
|
return 0;
|
2016-10-19 04:11:37 +00:00
|
|
|
}
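/*
 * Illustrative sketch only (not part of the driver): the per-segment
 * length math behind qede_process_sg_pkts() above.  Each extra BD carries
 * at most one Rx buffer worth of data, so the number of extra BDs is a
 * ceiling division of the remaining length.  Assumes pkt_len covers at
 * least len_on_first_bd and a non-zero buffer size.
 */
static inline uint16_t
qede_example_extra_bds(uint16_t pkt_len, uint16_t len_on_first_bd,
		       uint16_t rx_buf_size)
{
	uint16_t remaining = pkt_len - len_on_first_bd;

	return (remaining + rx_buf_size - 1) / rx_buf_size;
}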
|
|
|
|
|
2017-10-19 01:13:29 +00:00
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
|
|
|
|
static inline void
|
|
|
|
print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
|
|
|
|
uint8_t bitfield)
|
|
|
|
{
|
|
|
|
PMD_RX_LOG(INFO, rxq,
|
2018-01-27 21:15:27 +00:00
|
|
|
"len 0x%04x bf 0x%04x hash_val 0x%x"
|
2017-10-19 01:13:29 +00:00
|
|
|
" ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
|
|
|
|
" inner_l2=%s inner_l3=%s inner_l4=%s\n",
|
|
|
|
m->data_len, bitfield, m->hash.rss,
|
|
|
|
(unsigned long)m->ol_flags,
|
|
|
|
rte_get_ptype_l2_name(m->packet_type),
|
|
|
|
rte_get_ptype_l3_name(m->packet_type),
|
|
|
|
rte_get_ptype_l4_name(m->packet_type),
|
|
|
|
rte_get_ptype_tunnel_name(m->packet_type),
|
|
|
|
rte_get_ptype_inner_l2_name(m->packet_type),
|
|
|
|
rte_get_ptype_inner_l3_name(m->packet_type),
|
|
|
|
rte_get_ptype_inner_l4_name(m->packet_type));
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2020-01-02 17:59:02 +00:00
|
|
|
uint16_t
|
|
|
|
qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
|
|
|
|
{
|
|
|
|
struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
|
|
|
|
register struct rte_mbuf *rx_mb = NULL;
|
|
|
|
struct qede_rx_queue *rxq = p_rxq;
|
|
|
|
struct qede_dev *qdev = rxq->qdev;
|
|
|
|
struct ecore_dev *edev = &qdev->edev;
|
|
|
|
union eth_rx_cqe *cqe;
|
|
|
|
uint64_t ol_flags;
|
|
|
|
enum eth_rx_cqe_type cqe_type;
|
|
|
|
int rss_enable = qdev->rss_enable;
|
|
|
|
int rx_alloc_count = 0;
|
|
|
|
uint32_t packet_type;
|
|
|
|
uint32_t rss_hash;
|
|
|
|
uint16_t vlan_tci, port_id;
|
|
|
|
uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index, num_rx_bds;
|
|
|
|
uint16_t rx_pkt = 0;
|
|
|
|
uint16_t pkt_len = 0;
|
|
|
|
uint16_t len; /* Length of first BD */
|
|
|
|
uint16_t preload_idx;
|
|
|
|
uint16_t parse_flag;
|
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
|
|
|
|
uint8_t bitfield_val;
|
|
|
|
#endif
|
|
|
|
uint8_t offset, flags, bd_num;
|
2022-03-04 12:08:32 +00:00
|
|
|
uint16_t count = 0;
|
2020-01-02 17:59:02 +00:00
|
|
|
|
|
|
|
/* Allocate buffers that were used in the previous loop */
|
|
|
|
if (rxq->rx_alloc_count) {
|
2022-03-04 12:08:32 +00:00
|
|
|
count = rxq->rx_alloc_count > QEDE_MAX_BULK_ALLOC_COUNT ?
|
|
|
|
QEDE_MAX_BULK_ALLOC_COUNT : rxq->rx_alloc_count;
|
|
|
|
|
|
|
|
if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, count))) {
|
2020-01-02 17:59:02 +00:00
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
|
|
|
PMD_RX_LOG(ERR, rxq,
|
2022-03-04 12:08:32 +00:00
|
|
|
"New buffers allocation failed,"
|
|
|
|
"dropping incoming packets\n");
|
2020-01-02 17:59:02 +00:00
|
|
|
dev = &rte_eth_devices[rxq->port_id];
|
2022-03-04 12:08:32 +00:00
|
|
|
dev->data->rx_mbuf_alloc_failed += count;
|
|
|
|
rxq->rx_alloc_errors += count;
|
2020-01-02 17:59:02 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
qede_update_rx_prod(qdev, rxq);
|
2022-03-04 12:08:32 +00:00
|
|
|
rxq->rx_alloc_count -= count;
|
2020-01-02 17:59:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
|
|
|
|
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
|
|
|
|
|
|
|
|
rte_rmb();
|
|
|
|
|
|
|
|
if (hw_comp_cons == sw_comp_cons)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
num_rx_bds = NUM_RX_BDS(rxq);
|
|
|
|
port_id = rxq->port_id;
|
|
|
|
|
|
|
|
while (sw_comp_cons != hw_comp_cons) {
|
|
|
|
ol_flags = 0;
|
|
|
|
packet_type = RTE_PTYPE_UNKNOWN;
|
|
|
|
vlan_tci = 0;
|
|
|
|
rss_hash = 0;
|
|
|
|
|
|
|
|
/* Get the CQE from the completion ring */
|
|
|
|
cqe =
|
|
|
|
(union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
|
|
|
|
cqe_type = cqe->fast_path_regular.type;
|
|
|
|
PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
|
|
|
|
|
|
|
|
if (likely(cqe_type == ETH_RX_CQE_TYPE_REGULAR)) {
|
|
|
|
fp_cqe = &cqe->fast_path_regular;
|
|
|
|
} else {
|
|
|
|
if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
|
|
|
|
PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
|
|
|
|
ecore_eth_cqe_completion
|
|
|
|
(&edev->hwfns[rxq->queue_id %
|
|
|
|
edev->num_hwfns],
|
|
|
|
(struct eth_slow_path_rx_cqe *)cqe);
|
|
|
|
}
|
|
|
|
goto next_cqe;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get the data from the SW ring */
|
|
|
|
sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
|
2021-03-26 11:01:30 +00:00
|
|
|
rx_mb = rxq->sw_rx_ring[sw_rx_index];
|
2020-01-02 17:59:02 +00:00
|
|
|
assert(rx_mb != NULL);
|
|
|
|
|
|
|
|
parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
|
|
|
|
offset = fp_cqe->placement_offset;
|
|
|
|
len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
|
|
|
|
pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
|
|
|
|
vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
|
|
|
|
rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
|
|
|
|
bd_num = fp_cqe->bd_num;
|
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
|
|
|
|
bitfield_val = fp_cqe->bitfields;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (unlikely(qede_tunn_exist(parse_flag))) {
|
|
|
|
PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
|
|
|
|
if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
|
|
|
|
PMD_RX_LOG(ERR, rxq,
|
|
|
|
"L4 csum failed, flags = 0x%x\n",
|
|
|
|
parse_flag);
|
|
|
|
rxq->rx_hw_errors++;
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
|
2020-01-02 17:59:02 +00:00
|
|
|
} else {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
|
2020-01-02 17:59:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
|
|
|
|
PMD_RX_LOG(ERR, rxq,
|
|
|
|
"Outer L3 csum failed, flags = 0x%x\n",
|
|
|
|
parse_flag);
|
|
|
|
rxq->rx_hw_errors++;
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
|
2020-01-02 17:59:02 +00:00
|
|
|
} else {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
|
2020-01-02 17:59:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
flags = fp_cqe->tunnel_pars_flags.flags;
|
|
|
|
|
|
|
|
/* Tunnel_type */
|
|
|
|
packet_type =
|
|
|
|
qede_rx_cqe_to_tunn_pkt_type(flags);
|
|
|
|
|
|
|
|
/* Inner header */
|
|
|
|
packet_type |=
|
|
|
|
qede_rx_cqe_to_pkt_type_inner(parse_flag);
|
|
|
|
|
|
|
|
/* Outer L3/L4 types are not available in the CQE */
|
|
|
|
packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
|
|
|
|
|
|
|
|
/* Outer L3/L4 types are not available in the CQE.
|
|
|
|
* Need to add the offset to parse correctly.
|
|
|
|
*/
|
|
|
|
rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
|
|
|
|
packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
|
|
|
|
} else {
|
|
|
|
packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Common handling for non-tunnel packets and for inner
|
|
|
|
* headers in the case of tunnel.
|
|
|
|
*/
|
|
|
|
if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
|
|
|
|
PMD_RX_LOG(ERR, rxq,
|
|
|
|
"L4 csum failed, flags = 0x%x\n",
|
|
|
|
parse_flag);
|
|
|
|
rxq->rx_hw_errors++;
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
|
2020-01-02 17:59:02 +00:00
|
|
|
} else {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
|
2020-01-02 17:59:02 +00:00
|
|
|
}
|
|
|
|
if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
|
|
|
|
PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
|
|
|
|
parse_flag);
|
|
|
|
rxq->rx_hw_errors++;
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
|
2020-01-02 17:59:02 +00:00
|
|
|
} else {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
|
2020-01-02 17:59:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unlikely(CQE_HAS_VLAN(parse_flag) ||
|
|
|
|
CQE_HAS_OUTER_VLAN(parse_flag))) {
|
|
|
|
/* Note: FW doesn't indicate Q-in-Q packet */
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_VLAN;
|
2020-01-02 17:59:02 +00:00
|
|
|
if (qdev->vlan_strip_flg) {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
|
2020-01-02 17:59:02 +00:00
|
|
|
rx_mb->vlan_tci = vlan_tci;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rss_enable) {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
|
2020-01-02 17:59:02 +00:00
|
|
|
rx_mb->hash.rss = rss_hash;
|
|
|
|
}
|
|
|
|
|
|
|
|
rx_alloc_count++;
|
|
|
|
qede_rx_bd_ring_consume(rxq);
|
|
|
|
|
|
|
|
/* Prefetch next mbuf while processing current one. */
|
|
|
|
preload_idx = rxq->sw_rx_cons & num_rx_bds;
|
2021-03-26 11:01:30 +00:00
|
|
|
rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
|
2020-01-02 17:59:02 +00:00
|
|
|
|
|
|
|
/* Update rest of the MBUF fields */
|
|
|
|
rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
|
|
|
|
rx_mb->port = port_id;
|
|
|
|
rx_mb->ol_flags = ol_flags;
|
|
|
|
rx_mb->data_len = len;
|
|
|
|
rx_mb->packet_type = packet_type;
|
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
|
|
|
|
print_rx_bd_info(rx_mb, rxq, bitfield_val);
|
|
|
|
#endif
|
|
|
|
rx_mb->nb_segs = bd_num;
|
|
|
|
rx_mb->pkt_len = pkt_len;
|
|
|
|
|
|
|
|
rx_pkts[rx_pkt] = rx_mb;
|
|
|
|
rx_pkt++;
|
|
|
|
|
|
|
|
next_cqe:
|
|
|
|
ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
|
|
|
|
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
|
|
|
|
if (rx_pkt == nb_pkts) {
|
|
|
|
PMD_RX_LOG(DEBUG, rxq,
|
|
|
|
"Budget reached nb_pkts=%u received=%u",
|
|
|
|
rx_pkt, nb_pkts);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-29 16:08:02 +00:00
|
|
|
/* Request the number of buffers to be allocated in the next loop */
|
2022-03-04 12:08:32 +00:00
|
|
|
rxq->rx_alloc_count += rx_alloc_count;
|
2020-01-02 17:59:02 +00:00
|
|
|
|
|
|
|
rxq->rcv_pkts += rx_pkt;
|
|
|
|
rxq->rx_segs += rx_pkt;
|
|
|
|
PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
|
|
|
|
|
|
|
|
return rx_pkt;
|
|
|
|
}
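/*
 * Illustrative sketch only (not part of the driver): how the receive loop
 * above bounds its work.  The driver simply walks the software index up to
 * the hardware index (with an rte_rmb() between reading the hardware index
 * and reading the CQEs); the equivalent up-front bound below assumes both
 * indices are free-running 16-bit counters.
 */
static inline uint16_t
qede_example_cqe_budget(uint16_t hw_cons, uint16_t sw_cons, uint16_t budget)
{
	uint16_t avail = (uint16_t)(hw_cons - sw_cons);	/* valid across wrap */

	return avail < budget ? avail : budget;
}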
|
|
|
|
|
2016-04-27 14:18:37 +00:00
|
|
|
uint16_t
|
|
|
|
qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
|
|
|
|
{
|
|
|
|
struct qede_rx_queue *rxq = p_rxq;
|
|
|
|
struct qede_dev *qdev = rxq->qdev;
|
|
|
|
struct ecore_dev *edev = &qdev->edev;
|
|
|
|
uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
|
|
|
|
uint16_t rx_pkt = 0;
|
|
|
|
union eth_rx_cqe *cqe;
|
2017-04-25 07:28:46 +00:00
|
|
|
struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
|
2016-04-27 14:18:37 +00:00
|
|
|
register struct rte_mbuf *rx_mb = NULL;
|
2016-10-19 04:11:37 +00:00
|
|
|
register struct rte_mbuf *seg1 = NULL;
|
2016-04-27 14:18:37 +00:00
|
|
|
enum eth_rx_cqe_type cqe_type;
|
2017-04-25 07:28:46 +00:00
|
|
|
uint16_t pkt_len = 0; /* Sum of all BD segments */
|
2017-01-06 08:16:45 +00:00
|
|
|
uint16_t len; /* Length of first BD */
|
|
|
|
uint8_t num_segs = 1;
|
|
|
|
uint16_t preload_idx;
|
|
|
|
uint16_t parse_flag;
|
2017-04-25 07:28:46 +00:00
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
|
|
|
|
uint8_t bitfield_val;
|
|
|
|
#endif
|
2016-12-23 00:48:08 +00:00
|
|
|
uint8_t tunn_parse_flag;
|
2017-03-29 20:37:01 +00:00
|
|
|
struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
|
|
|
|
uint64_t ol_flags;
|
|
|
|
uint32_t packet_type;
|
|
|
|
uint16_t vlan_tci;
|
|
|
|
bool tpa_start_flg;
|
|
|
|
uint8_t offset, tpa_agg_idx, flags;
|
2017-04-25 07:28:46 +00:00
|
|
|
struct qede_agg_info *tpa_info = NULL;
|
2017-04-25 07:28:39 +00:00
|
|
|
uint32_t rss_hash;
|
2019-01-18 10:29:29 +00:00
|
|
|
int rx_alloc_count = 0;
|
2022-03-04 12:08:32 +00:00
|
|
|
uint16_t count = 0;
|
2016-04-27 14:18:37 +00:00
|
|
|
|
2019-01-18 10:29:29 +00:00
|
|
|
/* Allocate buffers that were used in the previous loop */
|
|
|
|
if (rxq->rx_alloc_count) {
|
2022-03-04 12:08:32 +00:00
|
|
|
count = rxq->rx_alloc_count > QEDE_MAX_BULK_ALLOC_COUNT ?
|
|
|
|
QEDE_MAX_BULK_ALLOC_COUNT : rxq->rx_alloc_count;
|
|
|
|
|
|
|
|
if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, count))) {
|
2019-01-18 10:29:29 +00:00
|
|
|
struct rte_eth_dev *dev;
|
|
|
|
|
|
|
|
PMD_RX_LOG(ERR, rxq,
|
2022-03-04 12:08:32 +00:00
|
|
|
"New buffers allocation failed,"
|
|
|
|
"dropping incoming packets\n");
|
2019-01-18 10:29:29 +00:00
|
|
|
dev = &rte_eth_devices[rxq->port_id];
|
2022-03-04 12:08:32 +00:00
|
|
|
dev->data->rx_mbuf_alloc_failed += count;
|
|
|
|
rxq->rx_alloc_errors += count;
|
2019-01-18 10:29:29 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
qede_update_rx_prod(qdev, rxq);
|
2022-03-04 12:08:32 +00:00
|
|
|
rxq->rx_alloc_count -= count;
|
2019-01-18 10:29:29 +00:00
|
|
|
}
|
|
|
|
|
2019-03-12 16:51:14 +00:00
|
|
|
hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
|
|
|
|
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
|
|
|
|
|
|
|
|
rte_rmb();
|
|
|
|
|
|
|
|
if (hw_comp_cons == sw_comp_cons)
|
|
|
|
return 0;
|
|
|
|
|
2016-04-27 14:18:37 +00:00
|
|
|
while (sw_comp_cons != hw_comp_cons) {
|
2017-03-29 20:37:01 +00:00
|
|
|
ol_flags = 0;
|
|
|
|
packet_type = RTE_PTYPE_UNKNOWN;
|
|
|
|
vlan_tci = 0;
|
|
|
|
tpa_start_flg = false;
|
2017-04-25 07:28:39 +00:00
|
|
|
rss_hash = 0;
|
2017-03-29 20:37:01 +00:00
|
|
|
|
2016-04-27 14:18:37 +00:00
|
|
|
/* Get the CQE from the completion ring */
|
|
|
|
cqe =
|
|
|
|
(union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
|
|
|
|
cqe_type = cqe->fast_path_regular.type;
|
2017-03-29 20:37:01 +00:00
|
|
|
PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
|
|
|
|
|
|
|
|
switch (cqe_type) {
|
|
|
|
case ETH_RX_CQE_TYPE_REGULAR:
|
|
|
|
fp_cqe = &cqe->fast_path_regular;
|
|
|
|
break;
|
|
|
|
case ETH_RX_CQE_TYPE_TPA_START:
|
|
|
|
cqe_start_tpa = &cqe->fast_path_tpa_start;
|
|
|
|
tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
|
|
|
|
tpa_start_flg = true;
|
2017-04-25 07:28:38 +00:00
|
|
|
/* Mark it as an LRO packet */
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_LRO;
|
2017-04-25 07:28:38 +00:00
|
|
|
/* In split mode, seg_len is the same as len_on_first_bd
|
2019-10-06 20:14:07 +00:00
|
|
|
* and bw_ext_bd_len_list will be empty since there are
|
2017-04-25 07:28:38 +00:00
|
|
|
* no additional buffers
|
|
|
|
*/
|
2017-03-29 20:37:01 +00:00
|
|
|
PMD_RX_LOG(INFO, rxq,
|
2019-10-06 20:14:07 +00:00
|
|
|
"TPA start[%d] - len_on_first_bd %d header %d"
|
|
|
|
" [bd_list[0] %d], [seg_len %d]\n",
|
|
|
|
cqe_start_tpa->tpa_agg_index,
|
|
|
|
rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
|
|
|
|
cqe_start_tpa->header_len,
|
|
|
|
rte_le_to_cpu_16(cqe_start_tpa->bw_ext_bd_len_list[0]),
|
|
|
|
rte_le_to_cpu_16(cqe_start_tpa->seg_len));
|
2017-03-29 20:37:01 +00:00
|
|
|
|
|
|
|
break;
|
|
|
|
case ETH_RX_CQE_TYPE_TPA_CONT:
|
|
|
|
qede_rx_process_tpa_cont_cqe(qdev, rxq,
|
|
|
|
&cqe->fast_path_tpa_cont);
|
2017-04-25 07:28:38 +00:00
|
|
|
goto next_cqe;
|
2017-03-29 20:37:01 +00:00
|
|
|
case ETH_RX_CQE_TYPE_TPA_END:
|
|
|
|
qede_rx_process_tpa_end_cqe(qdev, rxq,
|
|
|
|
&cqe->fast_path_tpa_end);
|
|
|
|
tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
|
2017-04-25 07:28:38 +00:00
|
|
|
tpa_info = &rxq->tpa_info[tpa_agg_idx];
|
|
|
|
rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
|
2017-03-29 20:37:01 +00:00
|
|
|
goto tpa_end;
|
|
|
|
case ETH_RX_CQE_TYPE_SLOW_PATH:
|
|
|
|
PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
|
2017-06-07 07:42:19 +00:00
|
|
|
ecore_eth_cqe_completion(
|
|
|
|
&edev->hwfns[rxq->queue_id % edev->num_hwfns],
|
2016-04-27 14:18:37 +00:00
|
|
|
(struct eth_slow_path_rx_cqe *)cqe);
|
2017-03-29 20:37:01 +00:00
|
|
|
/* fall-thru */
|
|
|
|
default:
|
2016-04-27 14:18:37 +00:00
|
|
|
goto next_cqe;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get the data from the SW ring */
|
|
|
|
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
|
2021-03-26 11:01:30 +00:00
|
|
|
rx_mb = rxq->sw_rx_ring[sw_rx_index];
|
2016-04-27 14:18:37 +00:00
|
|
|
assert(rx_mb != NULL);
|
|
|
|
|
2017-03-29 20:37:01 +00:00
|
|
|
/* Handle regular CQE or TPA start CQE */
|
|
|
|
if (!tpa_start_flg) {
|
|
|
|
parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
|
|
|
|
offset = fp_cqe->placement_offset;
|
|
|
|
len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
|
|
|
|
pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
|
2017-04-25 07:28:39 +00:00
|
|
|
vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
|
|
|
|
rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
|
2017-04-25 07:28:46 +00:00
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
|
|
|
|
bitfield_val = fp_cqe->bitfields;
|
|
|
|
#endif
|
2017-03-29 20:37:01 +00:00
|
|
|
} else {
|
|
|
|
parse_flag =
|
|
|
|
rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
|
|
|
|
offset = cqe_start_tpa->placement_offset;
|
|
|
|
/* seg_len = len_on_first_bd */
|
|
|
|
len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
|
2017-04-25 07:28:39 +00:00
|
|
|
vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
|
2017-04-25 07:28:46 +00:00
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
|
|
|
|
bitfield_val = cqe_start_tpa->bitfields;
|
|
|
|
#endif
|
2017-04-25 07:28:39 +00:00
|
|
|
rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
|
2017-03-29 20:37:01 +00:00
|
|
|
}
|
2016-12-23 00:48:08 +00:00
|
|
|
if (qede_tunn_exist(parse_flag)) {
|
2017-03-29 20:37:01 +00:00
|
|
|
PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
|
2016-12-23 00:48:08 +00:00
|
|
|
if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
|
|
|
|
PMD_RX_LOG(ERR, rxq,
|
2017-03-29 20:37:01 +00:00
|
|
|
"L4 csum failed, flags = 0x%x\n",
|
2016-12-23 00:48:08 +00:00
|
|
|
parse_flag);
|
|
|
|
rxq->rx_hw_errors++;
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
|
2016-12-23 00:48:08 +00:00
|
|
|
} else {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
|
2016-12-23 00:48:08 +00:00
|
|
|
}
|
2018-01-27 21:15:27 +00:00
|
|
|
|
|
|
|
if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
|
2016-12-23 00:48:08 +00:00
|
|
|
PMD_RX_LOG(ERR, rxq,
|
2018-01-27 21:15:27 +00:00
|
|
|
"Outer L3 csum failed, flags = 0x%x\n",
|
|
|
|
parse_flag);
|
|
|
|
rxq->rx_hw_errors++;
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
|
2016-12-23 00:48:08 +00:00
|
|
|
} else {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
|
2016-12-23 00:48:08 +00:00
|
|
|
}
|
2018-01-27 21:15:27 +00:00
|
|
|
|
|
|
|
if (tpa_start_flg)
|
|
|
|
flags = cqe_start_tpa->tunnel_pars_flags.flags;
|
|
|
|
else
|
|
|
|
flags = fp_cqe->tunnel_pars_flags.flags;
|
|
|
|
tunn_parse_flag = flags;
|
|
|
|
|
|
|
|
/* Tunnel_type */
|
|
|
|
packet_type =
|
|
|
|
qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
|
|
|
|
|
|
|
|
/* Inner header */
|
|
|
|
packet_type |=
|
|
|
|
qede_rx_cqe_to_pkt_type_inner(parse_flag);
|
|
|
|
|
|
|
|
/* Outer L3/L4 types are not available in the CQE */
|
|
|
|
packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
|
|
|
|
|
|
|
|
/* Outer L3/L4 types are not available in the CQE.
|
|
|
|
* Need to add the offset to parse correctly.
|
|
|
|
*/
|
|
|
|
rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
|
|
|
|
packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
|
2018-03-28 00:15:55 +00:00
|
|
|
} else {
|
|
|
|
packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
|
2018-01-27 21:15:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Common handling for non-tunnel packets and for inner
|
|
|
|
* headers in the case of tunnel.
|
|
|
|
*/
|
|
|
|
if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
|
|
|
|
PMD_RX_LOG(ERR, rxq,
|
|
|
|
"L4 csum failed, flags = 0x%x\n",
|
|
|
|
parse_flag);
|
|
|
|
rxq->rx_hw_errors++;
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
|
2018-01-27 21:15:27 +00:00
|
|
|
} else {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
|
2018-01-27 21:15:27 +00:00
|
|
|
}
|
|
|
|
if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
|
|
|
|
PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
|
|
|
|
parse_flag);
|
|
|
|
rxq->rx_hw_errors++;
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
|
2018-01-27 21:15:27 +00:00
|
|
|
} else {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
|
2016-04-27 14:18:37 +00:00
|
|
|
}
|
|
|
|
|
2017-10-19 01:13:29 +00:00
|
|
|
if (CQE_HAS_VLAN(parse_flag) ||
|
|
|
|
CQE_HAS_OUTER_VLAN(parse_flag)) {
|
|
|
|
/* Note: FW doesn't indicate Q-in-Q packet */
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_VLAN;
|
2017-04-25 07:28:40 +00:00
|
|
|
if (qdev->vlan_strip_flg) {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
|
2017-04-25 07:28:40 +00:00
|
|
|
rx_mb->vlan_tci = vlan_tci;
|
|
|
|
}
|
2017-03-29 20:37:01 +00:00
|
|
|
}
|
2017-10-19 01:13:29 +00:00
|
|
|
|
2017-03-29 20:37:01 +00:00
|
|
|
/* RSS Hash */
|
2017-04-25 07:28:39 +00:00
|
|
|
if (qdev->rss_enable) {
|
2021-10-15 19:24:08 +00:00
|
|
|
ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
|
2017-04-25 07:28:39 +00:00
|
|
|
rx_mb->hash.rss = rss_hash;
|
2017-03-29 20:37:01 +00:00
|
|
|
}
|
2016-12-23 00:48:08 +00:00
|
|
|
|
2019-01-18 10:29:29 +00:00
|
|
|
rx_alloc_count++;
|
2016-04-27 14:18:37 +00:00
|
|
|
qede_rx_bd_ring_consume(rxq);
|
2017-03-29 20:37:01 +00:00
|
|
|
|
|
|
|
if (!tpa_start_flg && fp_cqe->bd_num > 1) {
|
2017-01-06 08:16:45 +00:00
|
|
|
PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
|
2017-01-27 15:16:32 +00:00
|
|
|
" len on first: %04x Total Len: %04x",
|
2017-01-06 08:16:45 +00:00
|
|
|
fp_cqe->bd_num, len, pkt_len);
|
2016-10-19 04:11:43 +00:00
|
|
|
num_segs = fp_cqe->bd_num - 1;
|
2016-10-19 04:11:37 +00:00
|
|
|
seg1 = rx_mb;
|
2017-01-06 08:16:45 +00:00
|
|
|
if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
|
|
|
|
pkt_len - len))
|
2016-10-19 04:11:37 +00:00
|
|
|
goto next_cqe;
|
2019-01-18 10:29:29 +00:00
|
|
|
|
|
|
|
rx_alloc_count += num_segs;
|
|
|
|
rxq->rx_segs += num_segs;
|
2016-10-19 04:11:37 +00:00
|
|
|
}
|
2017-01-06 08:16:45 +00:00
|
|
|
rxq->rx_segs++; /* for the first segment */
|
2016-10-19 04:11:37 +00:00
|
|
|
|
2016-04-27 14:18:37 +00:00
|
|
|
/* Prefetch next mbuf while processing current one. */
|
|
|
|
preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
|
2021-03-26 11:01:30 +00:00
|
|
|
rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
|
2016-04-27 14:18:37 +00:00
|
|
|
|
2016-12-23 00:48:08 +00:00
|
|
|
/* Update rest of the MBUF fields */
|
2017-03-29 20:37:01 +00:00
|
|
|
rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
|
2016-04-27 14:18:37 +00:00
|
|
|
rx_mb->port = rxq->port_id;
|
2017-03-29 20:37:01 +00:00
|
|
|
rx_mb->ol_flags = ol_flags;
|
|
|
|
rx_mb->data_len = len;
|
|
|
|
rx_mb->packet_type = packet_type;
|
2017-10-19 01:13:29 +00:00
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
|
|
|
|
print_rx_bd_info(rx_mb, rxq, bitfield_val);
|
|
|
|
#endif
|
2017-03-29 20:37:01 +00:00
|
|
|
if (!tpa_start_flg) {
|
|
|
|
rx_mb->nb_segs = fp_cqe->bd_num;
|
|
|
|
rx_mb->pkt_len = pkt_len;
|
2017-04-25 07:28:38 +00:00
|
|
|
} else {
|
|
|
|
/* store ref to the updated mbuf */
|
|
|
|
tpa_info->tpa_head = rx_mb;
|
|
|
|
tpa_info->tpa_tail = tpa_info->tpa_head;
|
2016-04-27 14:18:37 +00:00
|
|
|
}
|
|
|
|
rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
|
2017-03-29 20:37:01 +00:00
|
|
|
tpa_end:
|
|
|
|
if (!tpa_start_flg) {
|
|
|
|
rx_pkts[rx_pkt] = rx_mb;
|
|
|
|
rx_pkt++;
|
2016-04-27 14:18:37 +00:00
|
|
|
}
|
|
|
|
next_cqe:
|
|
|
|
ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
|
|
|
|
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
|
|
|
|
if (rx_pkt == nb_pkts) {
|
|
|
|
PMD_RX_LOG(DEBUG, rxq,
|
2017-01-27 15:16:32 +00:00
|
|
|
"Budget reached nb_pkts=%u received=%u",
|
2016-04-27 14:18:37 +00:00
|
|
|
rx_pkt, nb_pkts);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-29 16:08:02 +00:00
|
|
|
/* Request the number of buffers to be allocated in the next loop */
|
2022-03-04 12:08:32 +00:00
|
|
|
rxq->rx_alloc_count += rx_alloc_count;
|
2016-04-27 14:18:37 +00:00
|
|
|
|
2016-10-19 04:11:43 +00:00
|
|
|
rxq->rcv_pkts += rx_pkt;
|
|
|
|
|
2017-01-27 15:16:32 +00:00
|
|
|
PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
|
2016-04-27 14:18:37 +00:00
|
|
|
|
|
|
|
return rx_pkt;
|
|
|
|
}
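/*
 * Illustrative sketch only (not part of the driver): the deferred-refill
 * scheme shared by both receive functions above.  Consumed buffers are only
 * counted during the burst; replacements are bulk-allocated at the start of
 * the next call, capped per call so a large backlog cannot stall one poll.
 */
static inline uint16_t
qede_example_refill_quota(uint16_t pending_alloc)
{
	return pending_alloc > QEDE_MAX_BULK_ALLOC_COUNT ?
	       QEDE_MAX_BULK_ALLOC_COUNT : pending_alloc;
}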
|
|
|
|
|
2019-09-12 15:24:13 +00:00
|
|
|
uint16_t
|
|
|
|
qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
|
|
|
|
{
|
|
|
|
struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
|
|
|
|
uint16_t eng0_pkts, eng1_pkts;
|
|
|
|
|
|
|
|
eng0_pkts = nb_pkts / 2;
|
|
|
|
|
|
|
|
eng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts);
|
|
|
|
|
|
|
|
eng1_pkts = nb_pkts - eng0_pkts;
|
|
|
|
|
|
|
|
eng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts,
|
|
|
|
eng1_pkts);
|
|
|
|
|
|
|
|
return eng0_pkts + eng1_pkts;
|
|
|
|
}
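/*
 * Illustrative sketch only (not part of the driver): the budget split used
 * by qede_recv_pkts_cmt() above.  Engine 0 is offered half of the caller's
 * budget and engine 1 may use everything engine 0 did not fill, so two
 * hardware functions share a single rx_burst call.
 */
static inline uint16_t
qede_example_eng1_budget(uint16_t nb_pkts, uint16_t eng0_rcvd)
{
	return nb_pkts - eng0_rcvd;	/* leftover budget for engine 1 */
}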
|
2016-04-27 14:18:37 +00:00
|
|
|
|
2016-10-19 04:11:37 +00:00
|
|
|
/* Populate scatter gather buffer descriptor fields */
|
2017-11-09 13:54:09 +00:00
|
|
|
static inline uint16_t
|
2017-01-06 08:16:45 +00:00
|
|
|
qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
|
2018-01-27 21:15:28 +00:00
|
|
|
struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
|
|
|
|
uint16_t start_seg)
|
2016-10-19 04:11:37 +00:00
|
|
|
{
|
|
|
|
struct qede_tx_queue *txq = p_txq;
|
|
|
|
struct eth_tx_bd *tx_bd = NULL;
|
|
|
|
dma_addr_t mapping;
|
2017-11-09 13:54:09 +00:00
|
|
|
uint16_t nb_segs = 0;
|
2016-10-19 04:11:37 +00:00
|
|
|
|
|
|
|
/* Check for scattered buffers */
|
|
|
|
while (m_seg) {
|
2018-01-27 21:15:28 +00:00
|
|
|
if (start_seg == 0) {
|
2017-03-29 20:37:01 +00:00
|
|
|
if (!*bd2) {
|
|
|
|
*bd2 = (struct eth_tx_2nd_bd *)
|
|
|
|
ecore_chain_produce(&txq->tx_pbl);
|
|
|
|
memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
|
|
|
|
nb_segs++;
|
|
|
|
}
|
2017-11-05 23:22:55 +00:00
|
|
|
mapping = rte_mbuf_data_iova(m_seg);
|
2017-03-29 20:37:01 +00:00
|
|
|
QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
|
|
|
|
PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
|
2018-01-27 21:15:28 +00:00
|
|
|
} else if (start_seg == 1) {
|
2017-03-29 20:37:01 +00:00
|
|
|
if (!*bd3) {
|
|
|
|
*bd3 = (struct eth_tx_3rd_bd *)
|
|
|
|
ecore_chain_produce(&txq->tx_pbl);
|
|
|
|
memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
|
|
|
|
nb_segs++;
|
|
|
|
}
|
2017-11-05 23:22:55 +00:00
|
|
|
mapping = rte_mbuf_data_iova(m_seg);
|
2017-03-29 20:37:01 +00:00
|
|
|
QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
|
|
|
|
PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
|
2016-10-19 04:11:37 +00:00
|
|
|
} else {
|
|
|
|
tx_bd = (struct eth_tx_bd *)
|
|
|
|
ecore_chain_produce(&txq->tx_pbl);
|
|
|
|
memset(tx_bd, 0, sizeof(*tx_bd));
|
2017-03-29 20:37:01 +00:00
|
|
|
nb_segs++;
|
2017-11-05 23:22:55 +00:00
|
|
|
mapping = rte_mbuf_data_iova(m_seg);
|
2017-01-06 08:16:45 +00:00
|
|
|
QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
|
2017-03-29 20:37:01 +00:00
|
|
|
PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
|
2016-10-19 04:11:37 +00:00
|
|
|
}
|
2018-05-20 00:15:44 +00:00
|
|
|
start_seg++;
|
2016-10-19 04:11:37 +00:00
|
|
|
m_seg = m_seg->next;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return total scattered buffers */
|
|
|
|
return nb_segs;
|
|
|
|
}
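/*
 * Illustrative sketch only (not part of the driver): the mbuf chain walk
 * behind qede_encode_sg_bd() above, with the descriptor writes replaced by
 * a byte count.  Each non-first segment of a multi-segment mbuf gets its
 * own Tx BD and its own DMA mapping via rte_mbuf_data_iova().
 */
static inline uint32_t
qede_example_count_sg_bytes(struct rte_mbuf *first_seg)
{
	struct rte_mbuf *m_seg = first_seg->next; /* BD1 holds the first segment */
	uint32_t bytes = 0;

	while (m_seg != NULL) {
		bytes += m_seg->data_len;	/* one extra BD per segment */
		m_seg = m_seg->next;
	}

	return bytes;
}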
|
|
|
|
|
2017-03-29 20:37:01 +00:00
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
|
|
|
|
static inline void
|
|
|
|
print_tx_bd_info(struct qede_tx_queue *txq,
|
|
|
|
struct eth_tx_1st_bd *bd1,
|
|
|
|
struct eth_tx_2nd_bd *bd2,
|
|
|
|
struct eth_tx_3rd_bd *bd3,
|
|
|
|
uint64_t tx_ol_flags)
|
|
|
|
{
|
|
|
|
char ol_buf[256] = { 0 }; /* for verbose prints */
|
|
|
|
|
|
|
|
if (bd1)
|
|
|
|
PMD_TX_LOG(INFO, txq,
|
2018-01-27 21:15:28 +00:00
|
|
|
"BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
|
|
|
|
rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
|
|
|
|
bd1->data.bd_flags.bitfields,
|
|
|
|
rte_cpu_to_le_16(bd1->data.bitfields));
|
2017-03-29 20:37:01 +00:00
|
|
|
if (bd2)
|
|
|
|
PMD_TX_LOG(INFO, txq,
|
2018-01-27 21:15:28 +00:00
|
|
|
"BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
|
|
|
|
rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
|
|
|
|
bd2->data.bitfields2, bd2->data.tunn_ip_size);
|
2017-03-29 20:37:01 +00:00
|
|
|
if (bd3)
|
|
|
|
PMD_TX_LOG(INFO, txq,
|
2018-01-27 21:15:28 +00:00
|
|
|
"BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
|
|
|
|
"tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
|
|
|
|
rte_cpu_to_le_16(bd3->nbytes),
|
|
|
|
rte_cpu_to_le_16(bd3->data.bitfields),
|
|
|
|
rte_cpu_to_le_16(bd3->data.lso_mss),
|
|
|
|
bd3->data.tunn_l4_hdr_start_offset_w,
|
|
|
|
bd3->data.tunn_hdr_size_w);
|
2017-03-29 20:37:01 +00:00
|
|
|
|
|
|
|
rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
|
|
|
|
PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* TX prepare to check that packets meet TX conditions */
|
|
|
|
uint16_t
|
2017-04-25 07:28:46 +00:00
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
|
2017-03-29 20:37:01 +00:00
|
|
|
qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
|
|
|
|
uint16_t nb_pkts)
|
|
|
|
{
|
|
|
|
struct qede_tx_queue *txq = p_txq;
|
2017-04-25 07:28:46 +00:00
|
|
|
#else
|
|
|
|
qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
|
|
|
|
uint16_t nb_pkts)
|
|
|
|
{
|
|
|
|
#endif
|
2017-03-29 20:37:01 +00:00
|
|
|
uint64_t ol_flags;
|
|
|
|
struct rte_mbuf *m;
|
|
|
|
uint16_t i;
|
2017-06-27 07:21:32 +00:00
|
|
|
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
|
2017-03-29 20:37:01 +00:00
|
|
|
int ret;
|
2017-06-27 07:21:32 +00:00
|
|
|
#endif
|
2017-03-29 20:37:01 +00:00
|
|
|
|
|
|
|
for (i = 0; i < nb_pkts; i++) {
|
|
|
|
m = tx_pkts[i];
|
|
|
|
ol_flags = m->ol_flags;
|
2021-10-15 19:24:08 +00:00
|
|
|
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
|
2017-03-29 20:37:01 +00:00
|
|
|
if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
|
2019-06-05 08:16:03 +00:00
|
|
|
rte_errno = EINVAL;
|
2017-03-29 20:37:01 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* TBD: confirm it's ~9700B for both? */
|
|
|
|
if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
|
2019-06-05 08:16:03 +00:00
|
|
|
rte_errno = EINVAL;
|
2017-03-29 20:37:01 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
|
2019-06-05 08:16:03 +00:00
|
|
|
rte_errno = EINVAL;
|
2017-03-29 20:37:01 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
|
2018-11-12 18:19:38 +00:00
|
|
|
/* We support only a limited set of tunnel protocols */
|
2021-10-15 19:24:08 +00:00
|
|
|
if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
|
2018-11-12 18:19:38 +00:00
|
|
|
uint64_t temp;
|
|
|
|
|
2021-10-15 19:24:08 +00:00
|
|
|
temp = ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
|
|
|
|
if (temp == RTE_MBUF_F_TX_TUNNEL_VXLAN ||
|
|
|
|
temp == RTE_MBUF_F_TX_TUNNEL_GENEVE ||
|
|
|
|
temp == RTE_MBUF_F_TX_TUNNEL_MPLSINUDP ||
|
|
|
|
temp == RTE_MBUF_F_TX_TUNNEL_GRE)
|
2019-02-13 17:53:33 +00:00
|
|
|
continue;
|
2018-11-12 18:19:38 +00:00
|
|
|
}
|
|
|
|
|
2019-06-05 08:16:03 +00:00
|
|
|
rte_errno = ENOTSUP;
|
2017-03-29 20:37:01 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
|
|
|
|
ret = rte_validate_tx_offload(m);
|
|
|
|
if (ret != 0) {
|
2019-06-05 08:16:03 +00:00
|
|
|
rte_errno = -ret;
|
2017-03-29 20:37:01 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2017-04-25 07:28:46 +00:00
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
|
2017-03-29 20:37:01 +00:00
|
|
|
if (unlikely(i != nb_pkts))
|
|
|
|
PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
|
|
|
|
nb_pkts - i);
|
2017-04-25 07:28:46 +00:00
|
|
|
#endif
|
2017-03-29 20:37:01 +00:00
|
|
|
return i;
|
|
|
|
}
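/*
 * Illustrative sketch only (not part of the driver): the tx_prepare
 * contract the function above follows.  Validation stops at the first
 * offending packet, rte_errno records the reason and the return value
 * tells the caller how many leading packets are safe to transmit.  The
 * segment limit below is an assumption for this example.
 */
static inline uint16_t
qede_example_prep_pkts(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	const uint16_t example_max_segs = 18;	/* assumed per-packet BD limit */
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		if (tx_pkts[i]->nb_segs > example_max_segs) {
			rte_errno = EINVAL;	/* first bad packet stops the scan */
			break;
		}
	}

	return i;
}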
|
|
|
|
|
2017-06-27 07:21:32 +00:00
|
|
|
#define MPLSINUDP_HDR_SIZE (12)
|
|
|
|
|
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
|
|
|
|
static inline void
|
|
|
|
qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
|
|
|
|
struct qede_tx_queue *txq)
|
|
|
|
{
|
|
|
|
if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
|
|
|
|
PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
|
|
|
|
if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
|
|
|
|
MPLSINUDP_HDR_SIZE) / 2) > 0xff)
|
|
|
|
PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
|
|
|
|
if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
|
|
|
|
ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
|
|
|
|
PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
|
|
|
|
if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
|
|
|
|
ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
|
|
|
|
PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2020-01-02 17:59:03 +00:00
|
|
|
uint16_t
|
|
|
|
qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
|
|
|
|
{
|
|
|
|
struct qede_tx_queue *txq = p_txq;
|
|
|
|
struct qede_dev *qdev = txq->qdev;
|
|
|
|
struct ecore_dev *edev = &qdev->edev;
|
|
|
|
struct eth_tx_1st_bd *bd1;
|
|
|
|
struct eth_tx_2nd_bd *bd2;
|
|
|
|
struct eth_tx_3rd_bd *bd3;
|
|
|
|
struct rte_mbuf *m_seg = NULL;
|
|
|
|
struct rte_mbuf *mbuf;
|
2021-03-26 11:01:24 +00:00
|
|
|
struct rte_mbuf **sw_tx_ring;
|
2020-01-02 17:59:03 +00:00
|
|
|
uint16_t nb_tx_pkts;
|
|
|
|
uint16_t bd_prod;
|
|
|
|
uint16_t idx;
|
|
|
|
uint16_t nb_frags = 0;
|
|
|
|
uint16_t nb_pkt_sent = 0;
|
|
|
|
uint8_t nbds;
|
|
|
|
uint64_t tx_ol_flags;
|
|
|
|
/* BD1 */
|
|
|
|
uint16_t bd1_bf;
|
|
|
|
uint8_t bd1_bd_flags_bf;
|
|
|
|
|
|
|
|
if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
|
|
|
|
PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
|
|
|
|
nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
|
|
|
|
qede_process_tx_compl(edev, txq);
|
|
|
|
}
|
|
|
|
|
|
|
|
nb_tx_pkts = nb_pkts;
|
|
|
|
bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
|
|
|
|
sw_tx_ring = txq->sw_tx_ring;
|
|
|
|
|
|
|
|
while (nb_tx_pkts--) {
|
|
|
|
/* Init flags/values */
|
|
|
|
nbds = 0;
|
|
|
|
bd1 = NULL;
|
|
|
|
bd2 = NULL;
|
|
|
|
bd3 = NULL;
|
|
|
|
bd1_bf = 0;
|
|
|
|
bd1_bd_flags_bf = 0;
|
|
|
|
nb_frags = 0;
|
|
|
|
|
|
|
|
mbuf = *tx_pkts++;
|
|
|
|
assert(mbuf);
|
|
|
|
|
|
|
|
|
|
|
|
/* Check minimum TX BDS availability against available BDs */
|
|
|
|
if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
|
|
|
|
break;
|
|
|
|
|
|
|
|
tx_ol_flags = mbuf->ol_flags;
|
|
|
|
bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
|
|
|
|
|
|
|
|
if (unlikely(txq->nb_tx_avail <
|
|
|
|
ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
|
|
|
|
break;
|
|
|
|
bd1_bf |=
|
|
|
|
(mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
|
|
|
|
<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
|
|
|
|
|
|
|
|
/* Offload the IP checksum in the hardware */
|
2021-10-15 19:24:08 +00:00
|
|
|
if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
|
2020-01-02 17:59:03 +00:00
|
|
|
bd1_bd_flags_bf |=
|
|
|
|
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
|
|
|
|
|
|
|
|
/* L4 checksum offload (tcp or udp) */
|
2021-10-15 19:24:08 +00:00
|
|
|
if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
|
|
|
|
(tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM)))
|
2020-01-02 17:59:03 +00:00
|
|
|
bd1_bd_flags_bf |=
|
|
|
|
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
|
|
|
|
|
|
|
|
/* Fill the entry in the SW ring and the BDs in the FW ring */
|
|
|
|
idx = TX_PROD(txq);
|
2021-03-26 11:01:24 +00:00
|
|
|
sw_tx_ring[idx] = mbuf;
|
2020-01-02 17:59:03 +00:00
|
|
|
|
|
|
|
/* BD1 */
|
|
|
|
bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
|
|
|
|
memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
|
|
|
|
nbds++;
|
|
|
|
|
|
|
|
/* Map MBUF linear data for DMA and set in the BD1 */
|
|
|
|
QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
|
|
|
|
mbuf->data_len);
|
|
|
|
bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
|
|
|
|
bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
|
|
|
|
|
|
|
|
/* Handle fragmented MBUF */
|
|
|
|
if (unlikely(mbuf->nb_segs > 1)) {
|
|
|
|
m_seg = mbuf->next;
|
|
|
|
|
|
|
|
/* Encode scatter gather buffer descriptors */
|
|
|
|
nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3,
|
|
|
|
nbds - 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
bd1->data.nbds = nbds + nb_frags;
|
|
|
|
|
|
|
|
txq->nb_tx_avail -= bd1->data.nbds;
|
|
|
|
txq->sw_tx_prod++;
|
|
|
|
bd_prod =
|
|
|
|
rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
|
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
|
|
|
|
print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
|
|
|
|
#endif
|
|
|
|
nb_pkt_sent++;
|
|
|
|
txq->xmit_pkts++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Write value of prod idx into bd_prod */
|
|
|
|
txq->tx_db.data.bd_prod = bd_prod;
|
|
|
|
rte_wmb();
|
|
|
|
rte_compiler_barrier();
|
|
|
|
DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
|
|
|
|
rte_wmb();
|
|
|
|
|
|
|
|
/* Check again for Tx completions */
|
|
|
|
qede_process_tx_compl(edev, txq);
|
|
|
|
|
|
|
|
PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
|
|
|
|
nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
|
|
|
|
|
|
|
|
return nb_pkt_sent;
|
|
|
|
}
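/*
 * Illustrative sketch only (not part of the driver): the ordering rule
 * behind the doorbell sequence above.  All BD stores must be visible
 * before the producer value reaches the device, hence the write barrier
 * between filling the ring and ringing the doorbell.  The doorbell write
 * is shown here as a plain volatile store to an assumed register pointer;
 * the driver goes through DIRECT_REG_WR_RELAXED() instead.
 */
static inline void
qede_example_ring_doorbell(volatile uint32_t *db_reg, uint32_t db_val)
{
	rte_wmb();		/* publish the BD writes first */
	*db_reg = db_val;	/* the device now sees a consistent ring */
}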
|
|
|
|
|
2016-04-27 14:18:37 +00:00
|
|
|
uint16_t
|
|
|
|
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
|
|
|
|
{
|
|
|
|
struct qede_tx_queue *txq = p_txq;
|
|
|
|
struct qede_dev *qdev = txq->qdev;
|
|
|
|
struct ecore_dev *edev = &qdev->edev;
|
2017-01-06 08:16:45 +00:00
|
|
|
struct rte_mbuf *mbuf;
|
2016-10-19 04:11:37 +00:00
|
|
|
struct rte_mbuf *m_seg = NULL;
|
2016-04-27 14:18:37 +00:00
|
|
|
uint16_t nb_tx_pkts;
|
|
|
|
uint16_t bd_prod;
|
|
|
|
uint16_t idx;
|
2017-01-06 08:16:45 +00:00
|
|
|
uint16_t nb_frags;
|
|
|
|
uint16_t nb_pkt_sent = 0;
|
2017-03-29 20:37:01 +00:00
|
|
|
uint8_t nbds;
|
|
|
|
bool lso_flg;
|
2017-06-27 07:21:32 +00:00
|
|
|
bool mplsoudp_flg;
|
2017-06-07 07:42:22 +00:00
|
|
|
__rte_unused bool tunn_flg;
|
2017-06-27 07:21:32 +00:00
|
|
|
bool tunn_ipv6_ext_flg;
|
2017-03-29 20:37:01 +00:00
|
|
|
struct eth_tx_1st_bd *bd1;
|
|
|
|
struct eth_tx_2nd_bd *bd2;
|
|
|
|
struct eth_tx_3rd_bd *bd3;
|
|
|
|
uint64_t tx_ol_flags;
|
|
|
|
uint16_t hdr_size;
|
2017-06-07 07:42:22 +00:00
|
|
|
/* BD1 */
|
|
|
|
uint16_t bd1_bf;
|
|
|
|
uint8_t bd1_bd_flags_bf;
|
|
|
|
uint16_t vlan;
|
|
|
|
/* BD2 */
|
|
|
|
uint16_t bd2_bf1;
|
|
|
|
uint16_t bd2_bf2;
|
|
|
|
/* BD3 */
|
|
|
|
uint16_t mss;
|
|
|
|
uint16_t bd3_bf;
|
|
|
|
|
2017-06-27 07:21:32 +00:00
|
|
|
uint8_t tunn_l4_hdr_start_offset;
|
|
|
|
uint8_t tunn_hdr_size;
|
|
|
|
uint8_t inner_l2_hdr_size;
|
|
|
|
uint16_t inner_l4_hdr_offset;
|
2016-10-19 04:11:29 +00:00
|
|
|
|
2016-04-27 14:18:37 +00:00
|
|
|
if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
|
2017-01-27 15:16:32 +00:00
|
|
|
PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
|
2016-04-27 14:18:37 +00:00
|
|
|
nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
|
2017-03-29 20:37:01 +00:00
|
|
|
qede_process_tx_compl(edev, txq);
|
2016-04-27 14:18:37 +00:00
|
|
|
}
|
|
|
|
|
2017-03-29 20:37:01 +00:00
|
|
|
nb_tx_pkts = nb_pkts;
|
|
|
|
bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
|
2016-04-27 14:18:37 +00:00
|
|
|
while (nb_tx_pkts--) {
|
2017-03-29 20:37:01 +00:00
|
|
|
/* Init flags/values */
|
|
|
|
tunn_flg = false;
|
|
|
|
lso_flg = false;
|
|
|
|
nbds = 0;
|
2017-06-07 07:42:22 +00:00
|
|
|
vlan = 0;
|
2017-03-29 20:37:01 +00:00
|
|
|
bd1 = NULL;
|
|
|
|
bd2 = NULL;
|
|
|
|
bd3 = NULL;
|
|
|
|
hdr_size = 0;
|
2017-06-07 07:42:22 +00:00
|
|
|
bd1_bf = 0;
|
|
|
|
bd1_bd_flags_bf = 0;
|
|
|
|
bd2_bf1 = 0;
|
|
|
|
bd2_bf2 = 0;
|
|
|
|
mss = 0;
|
|
|
|
bd3_bf = 0;
|
2017-06-27 07:21:32 +00:00
|
|
|
mplsoudp_flg = false;
|
|
|
|
tunn_ipv6_ext_flg = false;
|
|
|
|
tunn_hdr_size = 0;
|
|
|
|
tunn_l4_hdr_start_offset = 0;
|
2017-03-29 20:37:01 +00:00
|
|
|
|
2017-04-25 07:28:43 +00:00
|
|
|
mbuf = *tx_pkts++;
|
2017-03-29 20:37:01 +00:00
|
|
|
assert(mbuf);
|
|
|
|
|
|
|
|
/* Check minimum TX BDS availability against available BDs */
|
|
|
|
if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
|
|
|
|
break;
|
|
|
|
|
|
|
|
tx_ol_flags = mbuf->ol_flags;
|
2017-06-07 07:42:22 +00:00
|
|
|
bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
|
2017-03-29 20:37:01 +00:00
|
|
|
|
2017-06-27 07:21:32 +00:00
|
|
|
/* TX prepare would have already checked supported tunnel Tx
|
|
|
|
* offloads. Don't rely on pkt_type marked by Rx, instead use
|
|
|
|
* tx_ol_flags to decide.
|
|
|
|
*/
|
2021-10-15 19:24:08 +00:00
|
|
|
tunn_flg = !!(tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
|
2018-03-28 00:15:52 +00:00
|
|
|
|
|
|
|
if (tunn_flg) {
|
2017-06-27 07:21:32 +00:00
|
|
|
/* Check against max which is Tunnel IPv6 + ext */
|
2017-03-29 20:37:01 +00:00
|
|
|
if (unlikely(txq->nb_tx_avail <
|
2017-06-27 07:21:32 +00:00
|
|
|
ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
|
2017-06-07 07:42:22 +00:00
|
|
|
break;
|
2018-03-28 00:15:52 +00:00
|
|
|
|
2016-12-23 00:48:08 +00:00
|
|
|
/* First indicate that it's a tunnel pkt */
|
2017-06-07 07:42:22 +00:00
|
|
|
bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
|
|
|
|
ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
|
2016-12-23 00:48:08 +00:00
|
|
|
/* Legacy FW had flipped behavior in regard to this bit
|
|
|
|
* i.e. it needed to be set to prevent FW from touching
|
|
|
|
* encapsulated packets when it didn't need to.
|
|
|
|
*/
|
2017-06-07 07:42:22 +00:00
|
|
|
if (unlikely(txq->is_legacy)) {
|
|
|
|
bd1_bf ^= 1 <<
|
|
|
|
ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
|
|
|
|
}
|
2017-06-27 07:21:32 +00:00
|
|
|
|
2016-12-23 00:48:08 +00:00
|
|
|
/* Outer IP checksum offload */
|
2021-10-15 19:24:08 +00:00
|
|
|
if (tx_ol_flags & (RTE_MBUF_F_TX_OUTER_IP_CKSUM |
|
|
|
|
RTE_MBUF_F_TX_OUTER_IPV4)) {
|
2017-06-07 07:42:22 +00:00
|
|
|
bd1_bd_flags_bf |=
|
2016-12-23 00:48:08 +00:00
|
|
|
ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
|
|
|
|
ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
|
|
|
|
}
|
2017-06-27 07:21:32 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Currently, only inner checksum offload in MPLS-in-UDP
|
|
|
|
* tunnel with one MPLS label is supported. Both outer
|
|
|
|
* and inner layer lengths need to be provided in the
|
|
|
|
* mbuf.
|
|
|
|
*/
|
2021-10-15 19:24:08 +00:00
|
|
|
if ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ==
|
|
|
|
RTE_MBUF_F_TX_TUNNEL_MPLSINUDP) {
|
2017-06-27 07:21:32 +00:00
|
|
|
mplsoudp_flg = true;
|
|
|
|
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
|
|
|
|
qede_mpls_tunn_tx_sanity_check(mbuf, txq);
|
|
|
|
#endif
|
|
|
|
/* Outer L4 offset in two byte words */
|
|
|
|
tunn_l4_hdr_start_offset =
|
|
|
|
(mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
|
|
|
|
/* Tunnel header size in two byte words */
|
|
|
|
tunn_hdr_size = (mbuf->outer_l2_len +
|
|
|
|
mbuf->outer_l3_len +
|
|
|
|
MPLSINUDP_HDR_SIZE) / 2;
|
|
|
|
/* Inner L2 header size in two byte words */
|
|
|
|
inner_l2_hdr_size = (mbuf->l2_len -
|
|
|
|
MPLSINUDP_HDR_SIZE) / 2;
|
2021-11-29 16:08:02 +00:00
|
|
|
/* Inner L4 header offset from the beginning
|
2017-06-27 07:21:32 +00:00
|
|
|
* of inner packet in two byte words
|
|
|
|
*/
|
|
|
|
inner_l4_hdr_offset = (mbuf->l2_len -
|
|
|
|
MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;

				/* Inner L2 size and address type */
				bd2_bf1 |= (inner_l2_hdr_size &
					ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
					ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
				bd2_bf1 |= (UNICAST_ADDRESS &
					ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
					ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
				/* Treated as IPv6+Ext */
				bd2_bf1 |=
					1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;

				/* Mark inner IPv6 if present */
				if (tx_ol_flags & RTE_MBUF_F_TX_IPV6)
					bd2_bf1 |=
						1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;

				/* Inner L4 offsets */
				if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
				     (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM |
							RTE_MBUF_F_TX_TCP_CKSUM))) {
					/* Determines if BD3 is needed */
					tunn_ipv6_ext_flg = true;
					if ((tx_ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
							RTE_MBUF_F_TX_UDP_CKSUM) {
						bd2_bf1 |=
							1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
					}

					/* TODO other pseudo checksum modes are
					 * not supported
					 */
					bd2_bf1 |=
						ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
						ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
					bd2_bf2 |= (inner_l4_hdr_offset &
						ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
						ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
				}
			} /* End MPLSoUDP */
		} /* End Tunnel handling */
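
		/* Note on the MPLSoUDP path above: besides setting
		 * RTE_MBUF_F_TX_TUNNEL_MPLSINUDP in ol_flags, the caller is
		 * expected to fill outer_l2_len/outer_l3_len and an l2_len
		 * that spans MPLSINUDP_HDR_SIZE plus the inner L2 header, as
		 * implied by the offset calculations above. This is a summary
		 * of the code, not an additional requirement.
		 */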

		if (tx_ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
			lso_flg = true;
			if (unlikely(txq->nb_tx_avail <
					ETH_TX_MIN_BDS_PER_LSO_PKT))
				break;
			/* For LSO, packet header and payload must reside on
			 * buffers pointed to by different BDs. Using BD1 for
			 * HDR and BD2 onwards for data.
			 */
			hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
			if (tunn_flg)
				hdr_size += mbuf->outer_l2_len +
					    mbuf->outer_l3_len;
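			/* Worked example (assuming an untunneled TCPv4 packet
			 * with standard headers): l2_len 14 + l3_len 20 +
			 * l4_len 20 gives hdr_size 54, so BD1 below maps only
			 * the first 54 bytes and BD2 maps the rest of the
			 * first segment.
			 */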

			bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
			bd1_bd_flags_bf |=
					1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			/* RTE_MBUF_F_TX_TCP_SEG implies RTE_MBUF_F_TX_TCP_CKSUM */
			bd1_bd_flags_bf |=
					1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
			mss = rte_cpu_to_le_16(mbuf->tso_segsz);
			/* Using one header BD */
			bd3_bf |= rte_cpu_to_le_16(1 <<
					ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
		} else {
			if (unlikely(txq->nb_tx_avail <
					ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
				break;
			bd1_bf |=
			       (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
				<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
		}

		/* Descriptor based VLAN insertion */
		if (tx_ol_flags & RTE_MBUF_F_TX_VLAN) {
			vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
			bd1_bd_flags_bf |=
				1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
		}
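
		/* Illustrative application-side usage for the VLAN insertion
		 * offload handled above (standard DPDK convention, shown here
		 * only as an example):
		 *	mbuf->vlan_tci = 100;
		 *	mbuf->ol_flags |= RTE_MBUF_F_TX_VLAN;
		 * The tag itself is written into bd1->data.vlan further down.
		 */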

		/* Offload the IP checksum in the hardware */
		if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
			bd1_bd_flags_bf |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			/* There's no DPDK flag to request outer-L4 csum
			 * offload. But in the tunnel case, if inner L3 or L4
			 * csum offload is requested, we also need to force
			 * recalculation of the tunnel (outer) L4 header csum.
			 */
			if (tunn_flg && ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) !=
							RTE_MBUF_F_TX_TUNNEL_GRE)) {
				bd1_bd_flags_bf |=
					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
			}
		}

		/* L4 checksum offload (tcp or udp) */
		if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
		    (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM))) {
			bd1_bd_flags_bf |=
				1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
			/* There's no DPDK flag to request outer-L4 csum
			 * offload. But in the tunnel case, if inner L3 or L4
			 * csum offload is requested, we also need to force
			 * recalculation of the tunnel (outer) L4 header csum.
			 */
			if (tunn_flg) {
				bd1_bd_flags_bf |=
					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
			}
		}
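
		/* One plausible ol_flags combination exercising the checksum
		 * paths above for a VXLAN-encapsulated TCPv4 packet (example
		 * only, not the only valid set):
		 *	RTE_MBUF_F_TX_TUNNEL_VXLAN | RTE_MBUF_F_TX_OUTER_IPV4 |
		 *	RTE_MBUF_F_TX_OUTER_IP_CKSUM | RTE_MBUF_F_TX_IPV4 |
		 *	RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM
		 * together with the corresponding outer and inner header
		 * lengths in the mbuf.
		 */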

		/* Fill the entry in the SW ring and the BDs in the FW ring */
		idx = TX_PROD(txq);
		txq->sw_tx_ring[idx] = mbuf;

		/* BD1 */
		bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
		memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
		nbds++;

		/* Map MBUF linear data for DMA and set in the BD1 */
		QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
				     mbuf->data_len);
		bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
		bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
		bd1->data.vlan = vlan;

		if (lso_flg || mplsoudp_flg) {
			bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
							(&txq->tx_pbl);
			memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
			nbds++;

			/* BD1 */
			QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
					     hdr_size);
			/* BD2 */
			QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
					     rte_mbuf_data_iova(mbuf)),
					     mbuf->data_len - hdr_size);
			bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
			if (mplsoudp_flg) {
				bd2->data.bitfields2 =
					rte_cpu_to_le_16(bd2_bf2);
				/* Outer L3 size */
				bd2->data.tunn_ip_size =
					rte_cpu_to_le_16(mbuf->outer_l3_len);
			}
			/* BD3 */
			if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
				bd3 = (struct eth_tx_3rd_bd *)
					ecore_chain_produce(&txq->tx_pbl);
				memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
				nbds++;
				bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
				if (lso_flg)
					bd3->data.lso_mss = mss;
				if (mplsoudp_flg) {
					bd3->data.tunn_l4_hdr_start_offset_w =
						tunn_l4_hdr_start_offset;
					bd3->data.tunn_hdr_size_w =
						tunn_hdr_size;
				}
			}
		}
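
		/* Recap of the BD layout built above (descriptive only):
		 * BD1 covers the whole linear data for plain packets, or just
		 * the header for LSO/MPLSoUDP; BD2 then covers the remainder
		 * of the first segment; BD3 carries the LSO MSS and/or tunnel
		 * offsets. Any further mbuf segments get additional BDs below.
		 */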

		/* Handle fragmented MBUF */
		m_seg = mbuf->next;

		/* Encode scatter gather buffer descriptors if required */
		nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
		bd1->data.nbds = nbds + nb_frags;
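		/* Example of the accounting (illustrative): a two-segment
		 * non-LSO packet uses BD1 for the first segment and one BD
		 * from qede_encode_sg_bd() for the second, so nbds == 1,
		 * nb_frags == 1 and bd1->data.nbds == 2.
		 */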

		txq->nb_tx_avail -= bd1->data.nbds;
		txq->sw_tx_prod++;
		bd_prod =
		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
		print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
#endif
		nb_pkt_sent++;
		txq->xmit_pkts++;
	}

	/* Write value of prod idx into bd_prod */
	txq->tx_db.data.bd_prod = bd_prod;
	rte_wmb();
	rte_compiler_barrier();
	DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
	rte_wmb();
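
	/* Ordering note (descriptive, based on the barriers above): the first
	 * rte_wmb() makes the BD writes globally visible before the doorbell
	 * value is written, and the trailing rte_wmb() keeps the relaxed
	 * doorbell write from being reordered after whatever follows.
	 */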

	/* Check again for Tx completions */
	qede_process_tx_compl(edev, txq);

	PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
		   nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());

	return nb_pkt_sent;
}

uint16_t
qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
	uint16_t eng0_pkts, eng1_pkts;

	eng0_pkts = nb_pkts / 2;

	eng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts);

	eng1_pkts = nb_pkts - eng0_pkts;

	eng1_pkts = qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts,
				   eng1_pkts);

	return eng0_pkts + eng1_pkts;
}
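
/* Illustrative call path, assuming this routine is installed as the device's
 * tx_pkt_burst handler on two-engine (CMT) adapters:
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, 0, pkts, nb_pkts);
 *
 * lands here and the burst is simply split between the two per-engine Tx
 * queues as shown above.
 */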

/* This function does a dry walk through the completion queue to calculate the
 * number of BDs used by HW. A single CQE may account for more than one BD
 * (e.g. scattered Rx or the end of a TPA aggregation), so the BD count cannot
 * be derived from the CQE count alone.
 * At the end, it restores the completion queue to its original state.
 */
static uint16_t
qede_parse_fp_cqe(struct qede_rx_queue *rxq)
{
	uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
	union eth_rx_cqe *cqe, *orig_cqe = NULL;

	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

	if (hw_comp_cons == sw_comp_cons)
		return 0;

	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
	orig_cqe = cqe;

	while (sw_comp_cons != hw_comp_cons) {
		switch (cqe->fast_path_regular.type) {
		case ETH_RX_CQE_TYPE_REGULAR:
			bd_count += cqe->fast_path_regular.bd_num;
			break;
		case ETH_RX_CQE_TYPE_TPA_END:
			bd_count += cqe->fast_path_tpa_end.num_of_bds;
			break;
		default:
			break;
		}

		cqe =
		(union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
	}

	/* Revert comp_ring to its original state */
	ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);

	return bd_count;
}

int
qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
{
	uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
	uint16_t produced, consumed;
	struct qede_rx_queue *rxq = p_rxq;

	if (offset > rxq->nb_rx_desc)
		return -EINVAL;

	sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
	sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);

	/* Find BDs used by HW from completion queue elements */
	hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);

	if (hw_bd_cons < sw_bd_cons)
		/* wraparound case */
		consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
	else
		consumed = hw_bd_cons - sw_bd_cons;

	if (offset <= consumed)
		return RTE_ETH_RX_DESC_DONE;

	if (sw_bd_prod < sw_bd_cons)
		/* wraparound case */
		produced = (0xffff - sw_bd_cons) + sw_bd_prod;
	else
		produced = sw_bd_prod - sw_bd_cons;

	if (offset <= produced)
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}
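
/* Illustrative application-side usage, assuming this routine is wired up as
 * the PMD's rx_descriptor_status callback:
 *
 *	int st = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
 *
 * RTE_ETH_RX_DESC_DONE then means the descriptor at 'offset' (counted from
 * the software consumer) has already been filled by hardware.
 */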