i40e: allow vector Rx and Tx usage
To support FVL, the PMD can select which Rx and Tx functions should be used according to the queue configuration.

Signed-off-by: Zhe Tao <zhe.tao@intel.com>
Acked-by: Cunming Liang <cunming.liang@intel.com>
commit 8e109464c0
parent 0e0da28cd8
--- a/doc/guides/rel_notes/release_2_2.rst
+++ b/doc/guides/rel_notes/release_2_2.rst
@@ -18,6 +18,8 @@ New Features
 * Add 2 new flow director modes on x550.
   One is MAC VLAN mode, the other is tunnel mode.
 
+* **Added i40e vector RX/TX.**
+
 * **Added i40e flow control support.**
 
 * **Added fm10k TSO support for both PF and VF.**
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -451,8 +451,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	 * has already done this work. Only check we don't need a different
 	 * RX function */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-		if (dev->data->scattered_rx)
-			dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+		i40e_set_rx_function(dev);
+		i40e_set_tx_function(dev);
 		return 0;
 	}
 	pci_dev = dev->pci_dev;
@@ -724,10 +724,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
 static int
 i40e_dev_configure(struct rte_eth_dev *dev)
 {
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
 	int ret;
 
+	/* Initialize to TRUE. If any of Rx queues doesn't meet the
+	 * bulk allocation or vector Rx preconditions we will reset it.
+	 */
+	ad->rx_bulk_alloc_allowed = true;
+	ad->rx_vec_allowed = true;
+	ad->tx_simple_allowed = true;
+	ad->tx_vec_allowed = true;
+
 	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
 		ret = i40e_fdir_setup(pf);
 		if (ret != I40E_SUCCESS) {
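The configure step seeds all four capability flags to true; every later check (per-queue setup, device-level preconditions) may only clear them. A minimal standalone sketch of this assume-then-veto pattern, with hypothetical names and thresholds rather than the driver's real ones:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical capability flags, mirroring i40e_adapter above. */
struct caps {
	bool rx_vec_allowed;
	bool rx_bulk_alloc_allowed;
};

static void
configure(struct caps *c)
{
	/* Assume every fast path is usable until a queue proves otherwise. */
	c->rx_vec_allowed = true;
	c->rx_bulk_alloc_allowed = true;
}

static void
queue_setup(struct caps *c, uint16_t nb_desc)
{
	/* Checks only ever downgrade, so queue setup order doesn't matter. */
	if (nb_desc % 4 != 0)	/* hypothetical vector batch size */
		c->rx_vec_allowed = false;
	if (nb_desc < 32)	/* hypothetical bulk-alloc minimum */
		c->rx_bulk_alloc_allowed = false;
}

Because the flags only move from true to false, the final device-wide choice is effectively the AND of all per-queue verdicts.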
@@ -3901,6 +3911,9 @@ i40e_dev_tx_init(struct i40e_pf *pf)
 		if (ret != I40E_SUCCESS)
 			break;
 	}
+	if (ret == I40E_SUCCESS)
+		i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
+				     ->eth_dev);
 
 	return ret;
 }
@@ -3927,6 +3940,9 @@ i40e_dev_rx_init(struct i40e_pf *pf)
 			break;
 		}
 	}
+	if (ret == I40E_SUCCESS)
+		i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
+				     ->eth_dev);
 
 	return ret;
 }
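Both init paths reach the eth_dev back-pointer via container_of(pf, struct i40e_adapter, pf): the PF structure is embedded in the adapter, so subtracting the member offset recovers the enclosing object. A self-contained illustration of the idiom (the classic macro definition; the driver carries its own equivalent):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pf { int id; };

struct adapter {
	const char *name;	/* stands in for the eth_dev back-pointer */
	struct pf pf;		/* embedded member, as in i40e_adapter */
};

int main(void)
{
	struct adapter ad = { .name = "port0", .pf = { .id = 7 } };
	struct pf *p = &ad.pf;

	/* Walk back from the embedded member to the enclosing adapter. */
	struct adapter *owner = container_of(p, struct adapter, pf);
	printf("%s, pf id %d\n", owner->name, owner->pf.id);
	return 0;
}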
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -479,6 +479,12 @@ struct i40e_adapter {
 		struct i40e_pf pf;
 		struct i40e_vf vf;
 	};
+
+	/* for vector PMD */
+	bool rx_bulk_alloc_allowed;
+	bool rx_vec_allowed;
+	bool tx_simple_allowed;
+	bool tx_vec_allowed;
 };
 
 int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1197,8 +1197,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 	 * has already done this work.
 	 */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-		if (eth_dev->data->scattered_rx)
-			eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+		i40e_set_rx_function(eth_dev);
+		i40e_set_tx_function(eth_dev);
 		return 0;
 	}
 
@@ -1292,6 +1292,17 @@ PMD_REGISTER_DRIVER(rte_i40evf_driver);
 static int
 i40evf_dev_configure(struct rte_eth_dev *dev)
 {
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	/* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
+	 * allocation or vector Rx preconditions we will reset it.
+	 */
+	ad->rx_bulk_alloc_allowed = true;
+	ad->rx_vec_allowed = true;
+	ad->tx_simple_allowed = true;
+	ad->tx_vec_allowed = true;
+
 	return i40evf_init_vlan(dev);
 }
 
@@ -1523,7 +1534,6 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
 	if (dev_data->dev_conf.rxmode.enable_scatter ||
 	    (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
-		dev->rx_pkt_burst = i40e_recv_scattered_pkts;
 	}
 
 	return 0;
@@ -1534,6 +1544,7 @@ i40evf_rx_init(struct rte_eth_dev *dev)
 {
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	uint16_t i;
+	int ret = I40E_SUCCESS;
 	struct i40e_rx_queue **rxq =
 		(struct i40e_rx_queue **)dev->data->rx_queues;
 
@@ -1541,11 +1552,14 @@ i40evf_rx_init(struct rte_eth_dev *dev)
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		if (!rxq[i] || !rxq[i]->q_set)
 			continue;
-		if (i40evf_rxq_init(dev, rxq[i]) < 0)
-			return -EFAULT;
+		ret = i40evf_rxq_init(dev, rxq[i]);
+		if (ret != I40E_SUCCESS)
+			break;
 	}
+	if (ret == I40E_SUCCESS)
+		i40e_set_rx_function(dev);
 
-	return 0;
+	return ret;
 }
 
 static void
@@ -1558,6 +1572,8 @@ i40evf_tx_init(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+
+	i40e_set_tx_function(dev);
 }
 
 static inline void
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2105,6 +2105,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct i40e_vsi *vsi;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct i40e_rx_queue *rxq;
 	const struct rte_memzone *rz;
 	uint32_t ring_size;
@@ -2213,13 +2215,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
 
-	if (!use_def_burst_func && !dev->data->scattered_rx) {
+	if (!use_def_burst_func) {
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 			     "satisfied. Rx Burst Bulk Alloc function will be "
 			     "used on port=%d, queue=%d.",
 			     rxq->port_id, rxq->queue_id);
-		dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
 	} else {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -2227,6 +2228,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
 			     "not enabled on port=%d, queue=%d.",
 			     rxq->port_id, rxq->queue_id);
+		ad->rx_bulk_alloc_allowed = false;
 	}
 
 	return 0;
@@ -2488,14 +2490,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->data->tx_queues[queue_idx] = txq;
 
 	/* Use a simple TX queue without offloads or multi segs if possible */
-	if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
-	    (txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
-		PMD_INIT_LOG(INFO, "Using simple tx path");
-		dev->tx_pkt_burst = i40e_xmit_pkts_simple;
-	} else {
-		PMD_INIT_LOG(INFO, "Using full-featured tx path");
-		dev->tx_pkt_burst = i40e_xmit_pkts;
-	}
+	i40e_set_tx_function_flag(dev, txq);
 
 	return 0;
 }
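Tx selection is now two-phase: i40e_set_tx_function_flag() runs per queue at setup time and may only veto the simple and vector paths, while i40e_set_tx_function() (added further below) makes the single device-wide choice once all queues are known, since dev->tx_pkt_burst is one pointer shared by every queue. A condensed sketch of the per-queue phase, with hypothetical constants standing in for I40E_SIMPLE_FLAGS, RTE_PMD_I40E_TX_MAX_BURST and RTE_I40E_TX_MAX_FREE_BUF_SZ:

#include <stdbool.h>
#include <stdint.h>

struct tx_caps { bool simple_allowed; bool vec_allowed; };

static void
txq_setup_flag(struct tx_caps *c, uint32_t offload_flags,
	       uint16_t tx_rs_thresh)
{
	if (offload_flags != 0 || tx_rs_thresh < 32) {
		/* Queue needs offloads or multi-segment: full path only. */
		c->simple_allowed = false;
	} else if (tx_rs_thresh > 64) {
		/* Too many descriptors to free per burst for the vPMD. */
		c->vec_allowed = false;
	}
}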
@@ -2564,6 +2559,12 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
 {
 	uint16_t i;
 
+	/* SSE Vector driver has a different way of releasing mbufs. */
+	if (rxq->rx_using_sse) {
+		i40e_rx_queue_release_mbufs_vec(rxq);
+		return;
+	}
+
 	if (!rxq || !rxq->sw_ring) {
 		PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
 		return;
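The branch added here exists because the vector Rx path manages sw_ring differently: it rearms descriptors in batches and keeps rxrearm_nb slots temporarily unarmed, so only a sliding window of sw_ring entries holds live mbufs. A hedged sketch of what the vector release (implemented in the companion vPMD file, not shown in this diff) has to do, using the driver's field names but simplified logic:

/* Sketch only: free the window of nb_rx_desc - rxrearm_nb live mbufs
 * starting at rx_tail, wrapping around the ring. */
static void
release_mbufs_vec_sketch(struct i40e_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i, n;

	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return; /* nothing armed, nothing to free */

	i = rxq->rx_tail;
	for (n = 0; n < (unsigned int)(rxq->nb_rx_desc - rxq->rxrearm_nb); n++) {
		rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		rxq->sw_ring[i].mbuf = NULL;
		i = (i + 1) & mask;
	}
}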
@@ -2837,7 +2838,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 	int err = I40E_SUCCESS;
 	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
 	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
-	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
 	uint16_t pf_q = rxq->reg_idx;
 	uint16_t buf_size;
 	struct i40e_hmc_obj_rxq rx_ctx;
@@ -2893,7 +2893,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 	/* Check if scattered RX needs to be used. */
 	if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
 		dev_data->scattered_rx = 1;
-		dev->rx_pkt_burst = i40e_recv_scattered_pkts;
 	}
 
 	/* Init the RX tail regieter. */
@@ -3064,7 +3063,159 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
 	return I40E_SUCCESS;
 }
 
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	uint16_t rx_using_sse, i;
+	/* In order to allow Vector Rx there are a few configuration
+	 * conditions to be met and Rx Bulk Allocation should be allowed.
+	 */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (i40e_rx_vec_dev_conf_condition_check(dev) ||
+		    !ad->rx_bulk_alloc_allowed) {
+			PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+				     " Vector Rx preconditions",
+				     dev->data->port_id);
+
+			ad->rx_vec_allowed = false;
+		}
+		if (ad->rx_vec_allowed) {
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				struct i40e_rx_queue *rxq =
+					dev->data->rx_queues[i];
+
+				if (i40e_rxq_vec_setup(rxq)) {
+					ad->rx_vec_allowed = false;
+					break;
+				}
+			}
+		}
+	}
+
+	if (dev->data->scattered_rx) {
+		/* Set the non-LRO scattered callback: there are Vector and
+		 * single allocation versions.
+		 */
+		if (ad->rx_vec_allowed) {
+			PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+				     "callback (port=%d).",
+				     dev->data->port_id);
+
+			dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
+				     "allocation callback (port=%d).",
+				     dev->data->port_id);
+			dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+		}
+	/* If parameters allow we are going to choose between the following
+	 * callbacks:
+	 *    - Vector
+	 *    - Bulk Allocation
+	 *    - Single buffer allocation (the simplest one)
+	 */
+	} else if (ad->rx_vec_allowed) {
+		PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+			     "burst size no less than %d (port=%d).",
+			     RTE_I40E_DESCS_PER_LOOP,
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = i40e_recv_pkts_vec;
+	} else if (ad->rx_bulk_alloc_allowed) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+			     "satisfied. Rx Burst Bulk Alloc function "
+			     "will be used on port=%d.",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+	} else {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+			     "satisfied, or Scattered Rx is requested "
+			     "(port=%d).",
+			     dev->data->port_id);
+
+		dev->rx_pkt_burst = i40e_recv_pkts;
+	}
+
+	/* Propagate information about RX function choice through all queues. */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rx_using_sse =
+			(dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+			 dev->rx_pkt_burst == i40e_recv_pkts_vec);
+
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+			rxq->rx_using_sse = rx_using_sse;
+		}
+	}
+}
+
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
+	if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
+	    && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
+		if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
+			PMD_INIT_LOG(DEBUG, "Vector tx"
+				     " can be enabled on this txq.");
+
+		} else {
+			ad->tx_vec_allowed = false;
+		}
+	} else {
+		ad->tx_simple_allowed = false;
+	}
+}
+
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int i;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (ad->tx_vec_allowed) {
+			for (i = 0; i < dev->data->nb_tx_queues; i++) {
+				struct i40e_tx_queue *txq =
+					dev->data->tx_queues[i];
+
+				if (i40e_txq_vec_setup(txq)) {
+					ad->tx_vec_allowed = false;
+					break;
+				}
+			}
+		}
+	}
+
+	if (ad->tx_simple_allowed) {
+		if (ad->tx_vec_allowed) {
+			PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
+			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
+			dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+		}
+	} else {
+		PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
+		dev->tx_pkt_burst = i40e_xmit_pkts;
+	}
+}
+
 /* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+	return -1;
+}
+
 uint16_t __attribute__((weak))
 i40e_recv_pkts_vec(
 	void __rte_unused *rx_queue,
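Condensed, i40e_set_rx_function() is a strict priority ladder: scattered Rx is decided first because it switches the callback family, then the vector, bulk-allocation, and single-buffer paths in that order. Restated as a sketch (it reuses the driver's burst symbols, so it illustrates the ladder rather than compiling standalone, and it omits the logging and the rx_using_sse propagation):

typedef uint16_t (*rx_burst_t)(void *rxq, struct rte_mbuf **pkts,
			       uint16_t nb_pkts);

static rx_burst_t
pick_rx_burst(bool scattered, bool vec_allowed, bool bulk_allowed)
{
	if (scattered)
		return vec_allowed ? i40e_recv_scattered_pkts_vec
				   : i40e_recv_scattered_pkts;
	if (vec_allowed)
		return i40e_recv_pkts_vec;	/* fastest non-scattered path */
	if (bulk_allowed)
		return i40e_recv_pkts_bulk_alloc;
	return i40e_recv_pkts;			/* single-buffer fallback */
}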
@@ -3089,6 +3240,12 @@ i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
 	return -1;
 }
 
+int __attribute__((weak))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+	return -1;
+}
+
 void __attribute__((weak))
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
 {
@@ -3102,3 +3259,4 @@ i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
 {
 	return 0;
 }
+
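The __attribute__((weak)) stubs above are a pure linkage device: when CONFIG_RTE_I40E_INC_VECTOR is 'n', i40e_rxtx_vec.c is not built, and the weak definitions satisfy the symbol references while returning values (-1, 0) that steer the selection logic away from the vector paths. When the vector file is compiled in, its strong definitions override the weak ones at link time. A minimal two-file illustration with hypothetical names:

/* core.c -- always linked; weak fallback. */
int __attribute__((weak))
fast_path_setup(void)
{
	return -1; /* "unavailable": callers fall back to the scalar path */
}

/* fast.c -- optional object; its strong definition silently
 * replaces the weak one when this file is linked in. */
int
fast_path_setup(void)
{
	return 0; /* vector path usable */
}

Linking core.c alone yields the -1 stub; linking core.c together with fast.c yields the real implementation, with no #ifdefs at the call sites.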
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -132,6 +132,7 @@ struct i40e_rx_queue {
 	uint8_t hs_mode; /* Header Split mode */
 	bool q_set; /**< indicate if rx queue has been configured */
 	bool rx_deferred_start; /**< don't start this queue in dev start */
+	uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
 };
 
 struct i40e_tx_entry {
@@ -234,10 +235,15 @@ uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
 				      struct rte_mbuf **rx_pkts,
 				      uint16_t nb_pkts);
+int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
 int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
 void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
 uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 			    uint16_t nb_pkts);
+void i40e_set_rx_function(struct rte_eth_dev *dev);
+void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
+			       struct i40e_tx_queue *txq);
+void i40e_set_tx_function(struct rte_eth_dev *dev);
 
 #endif /* _I40E_RXTX_H_ */
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -743,3 +743,35 @@ i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
 {
 	return 0;
 }
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+	/* whithout rx ol_flags, no VP flag report */
+	if (rxmode->hw_vlan_strip != 0 ||
+	    rxmode->hw_vlan_extend != 0)
+		return -1;
+#endif
+
+	/* no fdir support */
+	if (fconf->mode != RTE_FDIR_MODE_NONE)
+		return -1;
+
+	/* - no csum error report support
+	 * - no header split support
+	 */
+	if (rxmode->hw_ip_checksum == 1 ||
+	    rxmode->header_split == 1)
+		return -1;
+
+	return 0;
+#else
+	RTE_SET_USED(dev);
+	return -1;
+#endif
+}
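For reference, a minimal sketch of a port configuration that passes these checks (DPDK 2.2-era rte_eth_conf fields; an illustrative assumption, not part of the commit): flow director off, no checksum error reporting, no header split, and, when RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE is disabled, no VLAN stripping either. Builds with RTE_LIBRTE_IEEE1588 enabled always fail the check regardless of configuration.

#include <rte_ethdev.h>

static const struct rte_eth_conf vec_friendly_conf = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_NONE,
		.header_split   = 0, /* vector Rx: no header split */
		.hw_ip_checksum = 0, /* no csum error reporting */
		.hw_vlan_strip  = 0, /* required without Rx ol_flags */
		.hw_vlan_extend = 0,
	},
	.fdir_conf = {
		.mode = RTE_FDIR_MODE_NONE, /* vector path has no fdir */
	},
};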