net/ixgbe: add Tx preparation

Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Authored by Tomasz Kulasek on 2016-12-23 19:40:51 +01:00; committed by Thomas Monjalon
parent 2b76648872
commit 7829b8d52b
4 changed files with 66 additions and 1 deletion
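
For context: the tx_pkt_prepare callback wired up in this patch is driven by the
generic rte_eth_tx_prepare() API added in the same patch series. Below is a
minimal application-side sketch of the intended call sequence; send_burst is a
hypothetical helper, and port_id, queue_id and pkts[] stand in for an already
configured port and a burst built by the caller.

#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Validate and prepare a burst before handing it to the hardware. */
static uint16_t
send_burst(uint8_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* The PMD checks segment counts and offload flags and fixes up
	 * pseudo-header checksums; it returns how many leading packets
	 * are ready to be transmitted. */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep < nb_pkts) {
		/* pkts[nb_prep] failed validation; rte_errno says why
		 * (e.g. an unsupported offload or too many segments). */
		printf("tx_prepare stopped at %u, rte_errno %d\n",
		       (unsigned)nb_prep, rte_errno);
	}

	/* Transmit only the packets that passed preparation. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}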

drivers/net/ixgbe/ixgbe_ethdev.c

@@ -518,6 +518,8 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
 	.nb_max = IXGBE_MAX_RING_DESC,
 	.nb_min = IXGBE_MIN_RING_DESC,
 	.nb_align = IXGBE_TXD_ALIGN,
+	.nb_seg_max = IXGBE_TX_MAX_SEG,
+	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
 };
 
 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
@@ -1105,6 +1107,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
 	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
 	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
 
 	/*
 	 * For secondary processes, we don't initialise any further as primary

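As a side note, the nb_seg_max/nb_mtu_seg_max limits set in tx_desc_lim above
are reported to applications through rte_eth_dev_info_get(), so a sender can
discover the 40-segment ceiling instead of hard-coding it. A small sketch,
assuming the ethdev API of the same release; print_tx_seg_limits is a
hypothetical helper.

#include <stdio.h>

#include <rte_ethdev.h>

static void
print_tx_seg_limits(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* nb_seg_max: max segments per TSO packet;
	 * nb_mtu_seg_max: max segments per non-TSO packet. */
	printf("port %u: nb_seg_max=%u nb_mtu_seg_max=%u\n",
	       (unsigned)port_id,
	       (unsigned)dev_info.tx_desc_lim.nb_seg_max,
	       (unsigned)dev_info.tx_desc_lim.nb_mtu_seg_max);
}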
drivers/net/ixgbe/ixgbe_ethdev.h

@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -399,6 +399,9 @@ uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
 
+uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
+
 int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_conf *rss_conf);

drivers/net/ixgbe/ixgbe_rxtx.c

@@ -70,6 +70,7 @@
 #include <rte_string_fns.h>
 #include <rte_errno.h>
 #include <rte_ip.h>
+#include <rte_net.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -87,6 +88,9 @@
 		PKT_TX_TCP_SEG | \
 		PKT_TX_OUTER_IP_CKSUM)
 
+#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
+		(PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
+
 #if 1
 #define RTE_PMD_USE_PREFETCH
 #endif
@@ -903,6 +907,57 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
+/*********************************************************************
+ *
+ *  TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	int i, ret;
+	uint64_t ol_flags;
+	struct rte_mbuf *m;
+	struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+	for (i = 0; i < nb_pkts; i++) {
+		m = tx_pkts[i];
+		ol_flags = m->ol_flags;
+
+		/**
+		 * Check if packet meets requirements for number of segments
+		 *
+		 * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
+		 *       non-TSO
+		 */
+		if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
+			rte_errno = -EINVAL;
+			return i;
+		}
+
+		if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = -ENOTSUP;
+			return i;
+		}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+#endif
+		ret = rte_net_intel_cksum_prepare(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+	}
+
+	return i;
+}
+
 /*********************************************************************
  *
  *  RX functions
@@ -2282,6 +2337,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
 			&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
 		PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+		dev->tx_pkt_prepare = NULL;
 #ifdef RTE_IXGBE_INC_VECTOR
 		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
 				(rte_eal_process_type() != RTE_PROC_PRIMARY ||
@@ -2302,6 +2358,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 				(unsigned long)txq->tx_rs_thresh,
 				(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
 		dev->tx_pkt_burst = ixgbe_xmit_pkts;
+		dev->tx_pkt_prepare = ixgbe_prep_pkts;
 	}
 }

drivers/net/ixgbe/ixgbe_rxtx.h

@@ -80,6 +80,8 @@
 #define RTE_IXGBE_WAIT_100_US 100
 #define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2
 
+#define IXGBE_TX_MAX_SEG 40
+
 #define IXGBE_PACKET_TYPE_MASK_82599 0X7F
 #define IXGBE_PACKET_TYPE_MASK_X550 0X10FF
 #define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF