i40e: fix alignment of HW descriptors number
According to the XL710 datasheet: RX QLEN restrictions: When the PXE_MODE flag in the GLLAN_RCTL_0 register is cleared, the QLEN must be a whole multiple of 32 descriptors. TX QLEN restrictions: When the PXE_MODE flag in the GLLAN_RCTL_0 register is cleared, the QLEN must be a whole multiple of 32 descriptors. So make sure that for both RX and TX queues the number of HW descriptors is a multiple of 32. Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com> Acked-by: Remy Horton <remy.horton@intel.com>
This commit is contained in:
parent
55587b01cc
commit
d0510c2a25
@ -57,9 +57,6 @@
|
||||
#include "i40e_ethdev.h"
|
||||
#include "i40e_rxtx.h"
|
||||
|
||||
#define I40E_MIN_RING_DESC 64
|
||||
#define I40E_MAX_RING_DESC 4096
|
||||
#define I40E_ALIGN 128
|
||||
#define DEFAULT_TX_RS_THRESH 32
|
||||
#define DEFAULT_TX_FREE_THRESH 32
|
||||
#define I40E_MAX_PKT_TYPE 256
|
||||
@ -68,6 +65,9 @@
|
||||
|
||||
#define I40E_DMA_MEM_ALIGN 4096
|
||||
|
||||
/* Base address of the HW descriptor ring should be 128B aligned. */
|
||||
#define I40E_RING_BASE_ALIGN 128
|
||||
|
||||
#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
|
||||
ETH_TXQ_FLAGS_NOOFFLOADS)
|
||||
|
||||
@ -2126,9 +2126,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
|
||||
"index exceeds the maximum");
|
||||
return I40E_ERR_PARAM;
|
||||
}
|
||||
if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
|
||||
(nb_desc > I40E_MAX_RING_DESC) ||
|
||||
(nb_desc < I40E_MIN_RING_DESC)) {
|
||||
if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
|
||||
(nb_desc > I40E_MAX_RING_DESC) ||
|
||||
(nb_desc < I40E_MIN_RING_DESC)) {
|
||||
PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
|
||||
"invalid", nb_desc);
|
||||
return I40E_ERR_PARAM;
|
||||
@ -2352,9 +2352,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
|
||||
return I40E_ERR_PARAM;
|
||||
}
|
||||
|
||||
if (((nb_desc * sizeof(struct i40e_tx_desc)) % I40E_ALIGN) != 0 ||
|
||||
(nb_desc > I40E_MAX_RING_DESC) ||
|
||||
(nb_desc < I40E_MIN_RING_DESC)) {
|
||||
if (nb_desc % I40E_ALIGN_RING_DESC != 0 ||
|
||||
(nb_desc > I40E_MAX_RING_DESC) ||
|
||||
(nb_desc < I40E_MIN_RING_DESC)) {
|
||||
PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
|
||||
"invalid", nb_desc);
|
||||
return I40E_ERR_PARAM;
|
||||
@ -2557,10 +2557,10 @@ i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
|
||||
|
||||
#ifdef RTE_LIBRTE_XEN_DOM0
|
||||
return rte_memzone_reserve_bounded(z_name, ring_size,
|
||||
socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
|
||||
socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
|
||||
#else
|
||||
return rte_memzone_reserve_aligned(z_name, ring_size,
|
||||
socket_id, 0, I40E_ALIGN);
|
||||
socket_id, 0, I40E_RING_BASE_ALIGN);
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -2574,10 +2574,10 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
|
||||
return mz;
|
||||
#ifdef RTE_LIBRTE_XEN_DOM0
|
||||
mz = rte_memzone_reserve_bounded(name, len,
|
||||
socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
|
||||
socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
|
||||
#else
|
||||
mz = rte_memzone_reserve_aligned(name, len,
|
||||
socket_id, 0, I40E_ALIGN);
|
||||
socket_id, 0, I40E_RING_BASE_ALIGN);
|
||||
#endif
|
||||
return mz;
|
||||
}
|
||||
|
@ -57,6 +57,12 @@
|
||||
#define I40E_RXBUF_SZ_1024 1024
|
||||
#define I40E_RXBUF_SZ_2048 2048
|
||||
|
||||
/* In non-PXE mode the QLEN must be a whole multiple of 32 descriptors. */
|
||||
#define I40E_ALIGN_RING_DESC 32
|
||||
|
||||
#define I40E_MIN_RING_DESC 64
|
||||
#define I40E_MAX_RING_DESC 4096
|
||||
|
||||
#undef container_of
|
||||
#define container_of(ptr, type, member) ({ \
|
||||
typeof(((type *)0)->member)(*__mptr) = (ptr); \
|
||||
|
Loading…
x
Reference in New Issue
Block a user