ixgbe: remove Rx bulk allocation option

The RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC config option is not really
necessary: the bulk-allocation Rx function can be used anyway, as long
as the necessary preconditions are satisfied, and these are already
checked at run time in the library.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Author: Pablo de Lara  2015-07-23 15:29:41 +01:00
Committed by: Thomas Monjalon
commit 7c574623ba (parent 0369957f27)
6 changed files with 3 additions and 55 deletions
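
For context, the run-time check the commit message refers to is check_rx_burst_bulk_alloc_preconditions() in ixgbe_rxtx.c, which is also touched by this diff. Below is a minimal standalone sketch of the idea, not the verbatim driver code: the struct and function names are invented for illustration, and the real function checks further conditions (for example an upper bound on the ring size).

#include <stdint.h>
#include <stdio.h>

#define RTE_PMD_IXGBE_RX_MAX_BURST 32	/* same value as in ixgbe_rxtx.h */

/* Hypothetical stand-in for the few fields of struct ixgbe_rx_queue used here. */
struct rxq_cfg {
	uint16_t nb_rx_desc;		/* size of the Rx descriptor ring */
	uint16_t rx_free_thresh;	/* free threshold chosen by the application */
};

/* Return 0 when the bulk-alloc Rx burst path may be used for this queue. */
static int
bulk_alloc_preconditions(const struct rxq_cfg *q)
{
	/* the threshold must cover at least one full burst refill */
	if (q->rx_free_thresh < RTE_PMD_IXGBE_RX_MAX_BURST)
		return -1;
	/* the ring must be an exact multiple of the threshold */
	if (q->nb_rx_desc % q->rx_free_thresh != 0)
		return -1;
	return 0;
}

int
main(void)
{
	struct rxq_cfg q = { .nb_rx_desc = 512, .rx_free_thresh = 32 };

	printf("bulk-alloc Rx %s\n",
	       bulk_alloc_preconditions(&q) == 0 ? "usable" : "not usable");
	return 0;
}

Only when such a check passes does the PMD select ixgbe_recv_pkts_bulk_alloc, which is why a separate compile-time switch adds nothing.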


@@ -173,7 +173,6 @@ CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n
 CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
 CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
 CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
-CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC=y
 CONFIG_RTE_IXGBE_INC_VECTOR=n
 CONFIG_RTE_IXGBE_RX_OLFLAGS_DISABLE=n


@@ -171,7 +171,6 @@ CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n
 CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
 CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
 CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
-CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC=y
 CONFIG_RTE_IXGBE_INC_VECTOR=y
 CONFIG_RTE_IXGBE_RX_OLFLAGS_ENABLE=y


@@ -57,8 +57,6 @@ The following prerequisites apply:
-* To enable vPMD to work for RX, bulk allocation for Rx must be allowed.
-* The RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC=y configuration MACRO must be set before compiling the code.
 Ensure that the following pre-conditions are satisfied:
 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
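
Aside, not part of the diff: the remaining pre-condition above is controlled by the application through the Rx queue configuration. A hedged sketch of how an application keeps the bulk-alloc path selectable follows; it assumes an otherwise normal DPDK setup, and port 0, 512 descriptors and the "rx_pool" mempool name are arbitrary example values.

#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

int
main(int argc, char **argv)
{
	static struct rte_eth_conf port_conf;	/* zero-initialized defaults */
	/* rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST (32) and a ring size
	 * that is a multiple of it satisfy the bulk-alloc preconditions. */
	struct rte_eth_rxconf rx_conf = { .rx_free_thresh = 32 };
	struct rte_mempool *pool;
	uint8_t port = 0;	/* example port id */

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	pool = rte_pktmbuf_pool_create("rx_pool", 4096, 256, 0,
				       RTE_MBUF_DEFAULT_BUF_SIZE,
				       rte_socket_id());
	if (pool == NULL)
		return -1;

	if (rte_eth_dev_configure(port, 1, 1, &port_conf) < 0)
		return -1;

	/* 512 descriptors: 512 % 32 == 0, so the preconditions hold. */
	if (rte_eth_rx_queue_setup(port, 0, 512,
				   rte_eth_dev_socket_id(port),
				   &rx_conf, pool) < 0)
		return -1;

	return 0;
}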


@@ -119,8 +119,4 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether
 DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
 DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net
-ifeq ($(CONFIG_RTE_IXGBE_INC_VECTOR)$(CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC),yn)
-$(error The ixgbe vpmd depends on Rx bulk alloc)
-endif
 include $(RTE_SDK)/mk/rte.lib.mk


@@ -1034,7 +1034,6 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 		IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
 }
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 /*
  * LOOK_AHEAD defines how many desc statuses to check beyond the
  * current descriptor.
@@ -1305,24 +1304,6 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx;
 }
-#else
-/* Stub to avoid extra ifdefs */
-static uint16_t
-ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
-		__rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
-{
-	return 0;
-}
-static inline int
-ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
-		__rte_unused bool reset_mbuf)
-{
-	return -ENOMEM;
-}
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
 uint16_t
 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
@@ -1708,7 +1689,6 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
 				break;
 			}
 		}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 		else if (nb_hold > rxq->rx_free_thresh) {
 			uint16_t next_rdt = rxq->rx_free_trigger;
@@ -1727,7 +1707,6 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
 				break;
 			}
 		}
-#endif
 		nb_hold++;
 		rxe = &sw_ring[rx_id];
@@ -2285,7 +2264,6 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 				rxq->sw_ring[i].mbuf = NULL;
 			}
 		}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 		if (rxq->rx_nb_avail) {
 			for (i = 0; i < rxq->rx_nb_avail; ++i) {
 				struct rte_mbuf *mb;
@@ -2294,7 +2272,6 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 			}
 			rxq->rx_nb_avail = 0;
 		}
-#endif
 	}
 	if (rxq->sw_sc_ring)
@@ -2331,11 +2308,7 @@ ixgbe_dev_rx_queue_release(void *rxq)
  * function must be used.
  */
 static inline int __attribute__((cold))
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
-#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
-#endif
 {
 	int ret = 0;
@@ -2348,7 +2321,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 	 * Scattered packets are not supported. This should be checked
 	 * outside of this function.
 	 */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
 			     "rxq->rx_free_thresh=%d, "
@@ -2377,9 +2349,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 			     RTE_PMD_IXGBE_RX_MAX_BURST);
 		ret = -EINVAL;
 	}
-#else
-	ret = -EINVAL;
-#endif
 	return ret;
 }
@@ -2415,7 +2384,6 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 		rxq->rx_ring[i] = zeroed_desc;
 	}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	/*
 	 * initialize extra software ring entries. Space for these extra
 	 * entries is always allocated
@@ -2428,7 +2396,6 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 	rxq->rx_nb_avail = 0;
 	rxq->rx_next_avail = 0;
 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
 	rxq->rx_tail = 0;
 	rxq->nb_rx_hold = 0;
 	rxq->pkt_first_seg = NULL;
@@ -4015,9 +3982,8 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
 	} else {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
-				    "satisfied, or Scattered Rx is requested, "
-				    "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
-				    "is not enabled (port=%d).",
+				    "satisfied, or Scattered Rx is requested "
+				    "(port=%d).",
 			     dev->data->port_id);
 		dev->rx_pkt_burst = ixgbe_recv_pkts;


@@ -38,13 +38,7 @@
 #define RTE_PMD_IXGBE_TX_MAX_BURST 32
 #define RTE_PMD_IXGBE_RX_MAX_BURST 32
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-#define RTE_IXGBE_DESCS_PER_LOOP 4
-#elif defined(RTE_IXGBE_INC_VECTOR)
-#define RTE_IXGBE_DESCS_PER_LOOP 4
-#else
-#define RTE_IXGBE_DESCS_PER_LOOP 1
-#endif
+#define RTE_IXGBE_DESCS_PER_LOOP 4
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
 	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
@@ -116,11 +110,9 @@ struct ixgbe_rx_queue {
 	uint16_t nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t rx_tail; /**< current value of RDT register. */
 	uint16_t nb_rx_hold; /**< number of held free RX desc. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
 	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
 	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
-#endif
 	uint16_t rx_using_sse;
 	/**< indicates that vector RX is in use */
 #ifdef RTE_IXGBE_INC_VECTOR
@@ -134,12 +126,10 @@ struct ixgbe_rx_queue {
 	uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
 	uint8_t rx_deferred_start; /**< not in global dev start. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
 	struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
-#endif
 };
 /**