net/ixgbe: fix build without security library

Fixes: 9a0752f498 ("net/ixgbe: enable inline IPsec")

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Tested-by: David Marchand <david.marchand@6wind.com>
Authored by Radu Nicolau on 2017-10-26 15:15:12 +01:00, committed by Thomas Monjalon
parent 289ba0c0f5
commit 71864623ba
7 changed files with 61 additions and 12 deletions
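
The breakage comes from commit 9a0752f498 referencing the rte_security API unconditionally; the fix wraps every such reference in #ifdef RTE_LIBRTE_SECURITY so the PMD still builds when CONFIG_RTE_LIBRTE_SECURITY is disabled (see the Makefile hunk below). As a minimal, self-contained illustration of the pattern from the application side, the helper below is hypothetical and not part of the patch:

#include <stdint.h>
#include <rte_ethdev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security.h>	/* mirrors the guarded rte_security_driver.h include in ixgbe_ethdev.c */
#endif

/* hypothetical helper: does this port advertise inline IPsec offload? */
static int
port_has_inline_ipsec(uint16_t port_id)
{
#ifdef RTE_LIBRTE_SECURITY
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	return (info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) &&
	       (info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY);
#else
	(void)port_id;
	return 0;	/* non-security builds never advertise the SECURITY offloads */
#endif
}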

drivers/net/ixgbe/Makefile

@@ -124,7 +124,9 @@ ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_BYPASS),y)
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
endif
ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c

drivers/net/ixgbe/ixgbe_ethdev.c

@@ -61,7 +61,9 @@
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -1168,10 +1170,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
#ifdef RTE_LIBRTE_SECURITY
/* Initialize security_ctx only for primary process*/
eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
if (eth_dev->security_ctx == NULL)
return -ENOMEM;
#endif
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1406,7 +1410,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
/* Remove all Traffic Manager configuration */
ixgbe_tm_conf_uninit(eth_dev);
#ifdef RTE_LIBRTE_SECURITY
rte_free(eth_dev->security_ctx);
#endif
return 0;
}
@@ -3707,8 +3713,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
hw->mac.type == ixgbe_mac_X550EM_a)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
#ifdef RTE_LIBRTE_SECURITY
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
#endif
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
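
The DEV_RX_OFFLOAD_SECURITY and DEV_TX_OFFLOAD_SECURITY capability bits above are only advertised in security-enabled builds, and the Rx/Tx queue setup code later in this patch keys off the same flags in the port configuration. A rough, hypothetical sketch of requesting the offload when configuring a port (not taken from the patch):

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/* hypothetical helper: request inline IPsec offload in both directions */
static int
configure_port_with_inline_ipsec(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
#ifdef RTE_LIBRTE_SECURITY
	conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;	/* later read back as rxq->using_ipsec */
	conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;	/* later read back as txq->using_ipsec */
#endif

	/* a single Rx and Tx queue keeps the sketch short */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}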

drivers/net/ixgbe/ixgbe_ethdev.h

@@ -38,7 +38,9 @@
#include "base/ixgbe_dcb_82599.h"
#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
#ifdef RTE_LIBRTE_SECURITY
#include "ixgbe_ipsec.h"
#endif
#include <rte_time.h>
#include <rte_hash.h>
#include <rte_pci.h>
@@ -487,7 +489,9 @@ struct ixgbe_adapter {
struct ixgbe_filter_info filter;
struct ixgbe_l2_tn_info l2_tn;
struct ixgbe_bw_conf bw_conf;
#ifdef RTE_LIBRTE_SECURITY
struct ixgbe_ipsec ipsec;
#endif
bool rx_bulk_alloc_allowed;
bool rx_vec_allowed;
struct rte_timecounter systime_tc;

drivers/net/ixgbe/ixgbe_flow.c

@@ -229,6 +229,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
#ifdef RTE_LIBRTE_SECURITY
/**
* Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
*/
@@ -263,6 +264,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
item->type == RTE_FLOW_ITEM_TYPE_IPV6);
}
#endif
/* the first not void item can be MAC or IPv4 */
item = next_no_void_pattern(pattern, NULL);
@@ -557,9 +559,11 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
#ifdef RTE_LIBRTE_SECURITY
/* ESP flow not really a flow*/
if (filter->proto == IPPROTO_ESP)
return 0;
#endif
/* Ixgbe doesn't support tcp flags. */
if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
@@ -2801,9 +2805,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
#ifdef RTE_LIBRTE_SECURITY
/* ESP flow not really a flow*/
if (ntuple_filter.proto == IPPROTO_ESP)
return flow;
#endif
if (!ret) {
ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
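
The guarded branches above handle flows that carry RTE_FLOW_ACTION_TYPE_SECURITY, where the ESP "flow" only installs an ingress SA rather than a regular ntuple filter. For orientation, a rough sketch of such a flow, loosely following the ipsec-secgw example application (all names here are illustrative, not from the patch):

#include <stdint.h>
#include <rte_flow.h>
#include <rte_security.h>

/* hypothetical helper: steer ingress ESP traffic into an inline IPsec session */
static struct rte_flow *
create_inline_esp_flow(uint16_t port_id, struct rte_security_session *sess)
{
	static struct rte_flow_item_esp esp_spec;	/* fill in the SA's SPI as needed */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP, .spec = &esp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}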

drivers/net/ixgbe/ixgbe_rxtx.c

@@ -397,7 +397,7 @@ static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
union ixgbe_crypto_tx_desc_md *mdata)
__rte_unused uint64_t *mdata)
{
uint32_t type_tucmd_mlhl;
uint32_t mss_l4len_idx = 0;
@@ -481,17 +481,21 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
seqnum_seed |= tx_offload.l2_len
<< IXGBE_ADVTXD_TUNNEL_LEN;
}
#ifdef RTE_LIBRTE_SECURITY
if (ol_flags & PKT_TX_SEC_OFFLOAD) {
union ixgbe_crypto_tx_desc_md *md =
(union ixgbe_crypto_tx_desc_md *)mdata;
seqnum_seed |=
(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & mdata->sa_idx);
type_tucmd_mlhl |= mdata->enc ?
(IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & md->sa_idx);
type_tucmd_mlhl |= md->enc ?
(IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
type_tucmd_mlhl |=
(mdata->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
(md->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
tx_offload_mask.sa_idx |= ~0;
tx_offload_mask.sec_pad_len |= ~0;
}
#endif
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].tx_offload.data[0] =
@@ -670,7 +674,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint32_t ctx = 0;
uint32_t new_ctx;
union ixgbe_tx_offload tx_offload;
#ifdef RTE_LIBRTE_SECURITY
uint8_t use_ipsec;
#endif
tx_offload.data[0] = 0;
tx_offload.data[1] = 0;
@@ -698,7 +704,9 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
#ifdef RTE_LIBRTE_SECURITY
use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
#endif
/* If hardware offload required */
tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
@@ -710,6 +718,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_offload.tso_segsz = tx_pkt->tso_segsz;
tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
#ifdef RTE_LIBRTE_SECURITY
if (use_ipsec) {
union ixgbe_crypto_tx_desc_md *ipsec_mdata =
(union ixgbe_crypto_tx_desc_md *)
@@ -717,6 +726,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_offload.sa_idx = ipsec_mdata->sa_idx;
tx_offload.sec_pad_len = ipsec_mdata->pad_len;
}
#endif
/* If new context need be built or reuse the exist ctx. */
ctx = what_advctx_update(txq, tx_ol_req,
@@ -877,9 +887,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
tx_offload,
(union ixgbe_crypto_tx_desc_md *)
&tx_pkt->udata64);
tx_offload, &tx_pkt->udata64);
txe->last_id = tx_last;
tx_id = txe->next_id;
@@ -897,8 +905,10 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
#ifdef RTE_LIBRTE_SECURITY
if (use_ipsec)
olinfo_status |= IXGBE_ADVTXD_POPTS_IPSEC;
#endif
m_seg = tx_pkt;
do {
@@ -1473,11 +1483,13 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
}
#ifdef RTE_LIBRTE_SECURITY
if (rx_status & IXGBE_RXD_STAT_SECP) {
pkt_flags |= PKT_RX_SEC_OFFLOAD;
if (rx_status & IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG)
pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
}
#endif
return pkt_flags;
}
@@ -2397,9 +2409,10 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST) &&
!(dev->data->dev_conf.txmode.offloads
& DEV_TX_OFFLOAD_SECURITY)) {
#ifdef RTE_LIBRTE_SECURITY
!(txq->using_ipsec) &&
#endif
(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
dev->tx_pkt_prepare = NULL;
#ifdef RTE_IXGBE_INC_VECTOR
@@ -2569,8 +2582,10 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->txq_flags = tx_conf->txq_flags;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIBRTE_SECURITY
txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
DEV_TX_OFFLOAD_SECURITY);
#endif
/*
* Modification to set VFTDT for virtual function if vf is detected
@@ -4555,8 +4570,10 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
rxq->rx_using_sse = rx_using_sse;
#ifdef RTE_LIBRTE_SECURITY
rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SECURITY);
#endif
}
}
@@ -5044,6 +5061,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
ixgbe_setup_loopback_link_82599(hw);
#ifdef RTE_LIBRTE_SECURITY
if ((dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SECURITY) ||
(dev->data->dev_conf.txmode.offloads &
@@ -5056,6 +5074,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
return ret;
}
}
#endif
return 0;
}
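
The prototype change at the top of this file is the crux of the build fix for the Tx path: union ixgbe_crypto_tx_desc_md comes from the driver's IPsec header, which non-security builds no longer pull in, so ixgbe_set_xmit_ctx() now takes the mbuf's raw udata64 pointer and only casts it to the union inside the guarded block. A small standalone sketch of that pattern, with crypto_md as a hypothetical stand-in for the driver's union:

#include <stdint.h>

#ifdef RTE_LIBRTE_SECURITY
/* hypothetical stand-in for union ixgbe_crypto_tx_desc_md */
union crypto_md {
	uint64_t u64;
	struct {
		uint32_t sa_idx;	/* SA table index */
		uint8_t pad_len;	/* ESP trailer padding length */
		uint8_t enc;		/* encryption enabled for this SA */
	} sec;
};
#endif

/* the prototype mentions only uint64_t *, so it compiles with or without the union */
static uint32_t
ctx_sa_index(uint64_t *mdata)
{
#ifdef RTE_LIBRTE_SECURITY
	const union crypto_md *md = (const union crypto_md *)mdata;

	return md->sec.sa_idx;	/* security builds read the SA index from the metadata */
#else
	(void)mdata;		/* the real code marks the parameter __rte_unused instead */
	return 0;
#endif
}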

drivers/net/ixgbe/ixgbe_rxtx.h

@@ -140,8 +140,10 @@ struct ixgbe_rx_queue {
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
uint8_t rx_using_sse;
/**< indicates that vector RX is in use */
#ifdef RTE_LIBRTE_SECURITY
uint8_t using_ipsec;
/**< indicates that IPsec RX feature is in use */
#endif
#ifdef RTE_IXGBE_INC_VECTOR
uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
uint16_t rxrearm_start; /**< the idx we start the re-arming from */
@@ -185,10 +187,11 @@ union ixgbe_tx_offload {
/* fields for TX offloading of tunnels */
uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
#ifdef RTE_LIBRTE_SECURITY
/* inline ipsec related*/
uint64_t sa_idx:8; /**< TX SA database entry index */
uint64_t sec_pad_len:4; /**< padding length */
#endif
};
};
@@ -253,9 +256,10 @@ struct ixgbe_tx_queue {
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
const struct ixgbe_txq_ops *ops; /**< txq ops */
uint8_t tx_deferred_start; /**< not in global dev start. */
#ifdef RTE_LIBRTE_SECURITY
uint8_t using_ipsec;
/**< indicates that IPsec TX feature is in use */
#endif
};
struct ixgbe_txq_ops {

drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c

@@ -122,6 +122,7 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
#ifdef RTE_LIBRTE_SECURITY
static inline void
desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
@@ -174,6 +175,7 @@ desc_to_olflags_v_ipsec(__m128i descs[4], struct rte_mbuf **rx_pkts)
_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}
#endif
static inline void
desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
@@ -363,7 +365,9 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
volatile union ixgbe_adv_rx_desc *rxdp;
struct ixgbe_rx_entry *sw_ring;
uint16_t nb_pkts_recd;
#ifdef RTE_LIBRTE_SECURITY
uint8_t use_ipsec = rxq->using_ipsec;
#endif
int pos;
uint64_t var;
__m128i shuf_msk;
@@ -527,8 +531,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* set ol_flags with vlan packet type */
desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
#ifdef RTE_LIBRTE_SECURITY
if (unlikely(use_ipsec))
desc_to_olflags_v_ipsec(descs, rx_pkts);
#endif
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);