net/txgbe: add security offload in Rx and Tx

Add security offload in the Rx and Tx process.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
Jiawen Wu <jiawenwu@trustnetic.com>, 2020-12-18 17:37:01 +08:00, committed by Ferruh Yigit
parent 87d8a2a4a8
commit d51a133cc7
4 changed files with 212 additions and 2 deletions
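For context, a minimal application-side sketch of how this offload is requested, assuming the DPDK 20.11-era ethdev API (the port and queue counts are placeholders, not taken from the patch):

#include <rte_ethdev.h>

/* Illustrative sketch: configure a port with the inline security
 * offload that this patch wires up in the txgbe PMD. */
static int
configure_inline_ipsec_port(uint16_t port_id)
{
	struct rte_eth_conf port_conf = {
		.rxmode = { .offloads = DEV_RX_OFFLOAD_SECURITY },
		.txmode = { .offloads = DEV_TX_OFFLOAD_SECURITY },
	};

	/* One Rx queue and one Tx queue, arbitrary for this sketch. */
	return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
}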

drivers/net/txgbe/txgbe_ipsec.c

@@ -16,6 +16,55 @@
(a).ipv6[2] == (b).ipv6[2] && \
(a).ipv6[3] == (b).ipv6[3])
static void
txgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
int i = 0;
/* clear Rx IP table */
for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
uint16_t index = i << 3;
uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
TXGBE_IPSRXIDX_TB_IP | index;
wr32(hw, TXGBE_IPSRXADDR(0), 0);
wr32(hw, TXGBE_IPSRXADDR(1), 0);
wr32(hw, TXGBE_IPSRXADDR(2), 0);
wr32(hw, TXGBE_IPSRXADDR(3), 0);
wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
}
/* clear Rx SPI and Rx/Tx SA tables */
for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
uint32_t index = i << 3;
uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
TXGBE_IPSRXIDX_TB_SPI | index;
wr32(hw, TXGBE_IPSRXSPI, 0);
wr32(hw, TXGBE_IPSRXADDRIDX, 0);
wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_KEY | index;
wr32(hw, TXGBE_IPSRXKEY(0), 0);
wr32(hw, TXGBE_IPSRXKEY(1), 0);
wr32(hw, TXGBE_IPSRXKEY(2), 0);
wr32(hw, TXGBE_IPSRXKEY(3), 0);
wr32(hw, TXGBE_IPSRXSALT, 0);
wr32(hw, TXGBE_IPSRXMODE, 0);
wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
reg_val = TXGBE_IPSTXIDX_WRITE | index;
wr32(hw, TXGBE_IPSTXKEY(0), 0);
wr32(hw, TXGBE_IPSTXKEY(1), 0);
wr32(hw, TXGBE_IPSTXKEY(2), 0);
wr32(hw, TXGBE_IPSTXKEY(3), 0);
wr32(hw, TXGBE_IPSTXSALT, 0);
wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
}
memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl));
memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));
memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));
}
static int
txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)
{
@@ -549,6 +598,63 @@ txgbe_crypto_capabilities_get(void *device __rte_unused)
return txgbe_security_capabilities;
}
int
txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
uint32_t reg;
uint64_t rx_offloads;
uint64_t tx_offloads;
rx_offloads = dev->data->dev_conf.rxmode.offloads;
tx_offloads = dev->data->dev_conf.txmode.offloads;
/* sanity checks */
if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
}
if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
return -1;
}
/* Set TXGBE_SECTXBUFAF to 0x14 as required in the datasheet */
wr32(hw, TXGBE_SECTXBUFAF, 0x14);
/* IFG needs to be set to 3 when we are using security. Otherwise a Tx
* hang will occur with heavy traffic.
*/
reg = rd32(hw, TXGBE_SECTXIFG);
reg = (reg & ~TXGBE_SECTXIFG_MIN_MASK) | TXGBE_SECTXIFG_MIN(0x3);
wr32(hw, TXGBE_SECTXIFG, reg);
reg = rd32(hw, TXGBE_SECRXCTL);
reg |= TXGBE_SECRXCTL_CRCSTRIP;
wr32(hw, TXGBE_SECRXCTL, reg);
if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
if (reg != 0) {
PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
return -1;
}
}
if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
reg = rd32(hw, TXGBE_SECTXCTL);
if (reg != TXGBE_SECTXCTL_STFWD) {
PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
return -1;
}
}
txgbe_crypto_clear_ipsec_tables(dev);
return 0;
}
static struct rte_security_ops txgbe_security_ops = {
.session_create = txgbe_crypto_create_session,
.session_get_size = txgbe_crypto_session_get_size,

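An aside on the register idiom used above (my reading of the txgbe accessors, not part of the patch): wr32w() appears to write a value and then poll until hardware clears the handshake bits in the given mask, e.g. TXGBE_IPSRXIDX_WRITE once a table write has been committed. A rough equivalent, assuming the driver's rd32()/wr32() register accessors and struct txgbe_hw:

#include <rte_cycles.h>

/* Sketch of the assumed wr32w() semantics: write 'val' to 'reg', then
 * busy-wait up to 'timeout' microseconds for the bits in 'mask' to be
 * cleared by hardware. */
static inline void
wr32w_sketch(struct txgbe_hw *hw, uint32_t reg, uint32_t val,
	     uint32_t mask, uint32_t timeout)
{
	wr32(hw, reg, val);
	while (timeout-- > 0 && (rd32(hw, reg) & mask))
		rte_delay_us(1);
}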
drivers/net/txgbe/txgbe_ipsec.h

@@ -89,4 +89,6 @@ struct txgbe_ipsec {
struct txgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
};
int txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
#endif /* TXGBE_IPSEC_H_ */

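On the Tx side, the SA index kept in these tables reaches the descriptor path through the mbuf's security dynamic field. A hedged sketch of the application half, assuming the generic rte_security API; sec_ctx and sess would come from rte_eth_dev_get_sec_ctx() and rte_security_session_create(), and the helper name is hypothetical:

#include <rte_mbuf.h>
#include <rte_security.h>

/* Mark an outgoing mbuf for inline IPsec and attach the session
 * metadata that the PMD later reads back via rte_security_dynfield(). */
static int
prepare_ipsec_tx(struct rte_security_ctx *sec_ctx,
		 struct rte_security_session *sess, struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	return rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL);
}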
drivers/net/txgbe/txgbe_rxtx.c

@@ -20,6 +20,7 @@
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_security_driver.h>
#include <rte_memzone.h>
#include <rte_atomic.h>
#include <rte_mempool.h>
@@ -57,6 +58,9 @@ static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
PKT_TX_TCP_SEG |
PKT_TX_TUNNEL_MASK |
PKT_TX_OUTER_IP_CKSUM |
#ifdef RTE_LIB_SECURITY
PKT_TX_SEC_OFFLOAD |
#endif
TXGBE_TX_IEEE1588_TMST);
#define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -311,7 +315,8 @@ txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
static inline void
txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
volatile struct txgbe_tx_ctx_desc *ctx_txd,
uint64_t ol_flags, union txgbe_tx_offload tx_offload)
uint64_t ol_flags, union txgbe_tx_offload tx_offload,
__rte_unused uint64_t *mdata)
{
union txgbe_tx_offload tx_offload_mask;
uint32_t type_tucmd_mlhl;
@@ -405,6 +410,19 @@ txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
}
#ifdef RTE_LIB_SECURITY
if (ol_flags & PKT_TX_SEC_OFFLOAD) {
union txgbe_crypto_tx_desc_md *md =
(union txgbe_crypto_tx_desc_md *)mdata;
tunnel_seed |= TXGBE_TXD_IPSEC_SAIDX(md->sa_idx);
type_tucmd_mlhl |= md->enc ?
(TXGBE_TXD_IPSEC_ESP | TXGBE_TXD_IPSEC_ESPENC) : 0;
type_tucmd_mlhl |= TXGBE_TXD_IPSEC_ESPLEN(md->pad_len);
tx_offload_mask.sa_idx |= ~0;
tx_offload_mask.sec_pad_len |= ~0;
}
#endif
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].tx_offload.data[0] =
tx_offload_mask.data[0] & tx_offload.data[0];
@@ -701,6 +719,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint32_t ctx = 0;
uint32_t new_ctx;
union txgbe_tx_offload tx_offload;
#ifdef RTE_LIB_SECURITY
uint8_t use_ipsec;
#endif
tx_offload.data[0] = 0;
tx_offload.data[1] = 0;
@@ -727,6 +748,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
#ifdef RTE_LIB_SECURITY
use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
#endif
/* If hardware offload required */
tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
@@ -742,6 +766,16 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
#ifdef RTE_LIB_SECURITY
if (use_ipsec) {
union txgbe_crypto_tx_desc_md *ipsec_mdata =
(union txgbe_crypto_tx_desc_md *)
rte_security_dynfield(tx_pkt);
tx_offload.sa_idx = ipsec_mdata->sa_idx;
tx_offload.sec_pad_len = ipsec_mdata->pad_len;
}
#endif
/* If a new context needs to be built or the existing ctx can be reused */
ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
/* Only allocate context descriptor if required */
@@ -895,7 +929,8 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
tx_offload);
tx_offload,
rte_security_dynfield(tx_pkt));
txe->last_id = tx_last;
tx_id = txe->next_id;
@@ -914,6 +949,10 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);
#ifdef RTE_LIB_SECURITY
if (use_ipsec)
olinfo_status |= TXGBE_TXD_IPSEC;
#endif
m_seg = tx_pkt;
do {
@@ -1098,6 +1137,14 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
}
#ifdef RTE_LIB_SECURITY
if (rx_status & TXGBE_RXD_STAT_SECP) {
pkt_flags |= PKT_RX_SEC_OFFLOAD;
if (rx_status & TXGBE_RXD_ERR_SECERR)
pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
}
#endif
return pkt_flags;
}
@@ -1926,6 +1973,11 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
#ifdef RTE_LIB_SECURITY
if (dev->security_ctx)
offloads |= DEV_RX_OFFLOAD_SECURITY;
#endif
return offloads;
}
@@ -2027,6 +2079,9 @@ txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
{
struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
if (txq->offloads == 0 &&
#ifdef RTE_LIB_SECURITY
!(txq->using_ipsec) &&
#endif
txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)
return txgbe_tx_done_cleanup_simple(txq, free_cnt);
@@ -2110,6 +2165,9 @@ txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (txq->offloads == 0 &&
#ifdef RTE_LIB_SECURITY
!(txq->using_ipsec) &&
#endif
txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
@@ -2164,6 +2222,10 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
#ifdef RTE_LIB_SECURITY
if (dev->security_ctx)
tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
#endif
return tx_offload_capa;
}
@@ -2262,6 +2324,10 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIB_SECURITY
txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
DEV_TX_OFFLOAD_SECURITY);
#endif
/* Modification to set tail pointer for virtual function
* if vf is detected.
@@ -4062,6 +4128,7 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
void __rte_cold
txgbe_set_rx_function(struct rte_eth_dev *dev)
{
uint16_t i;
struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
/*
@@ -4122,6 +4189,15 @@ txgbe_set_rx_function(struct rte_eth_dev *dev)
dev->rx_pkt_burst = txgbe_recv_pkts;
}
#ifdef RTE_LIB_SECURITY
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SECURITY);
}
#endif
}
/*
@@ -4392,6 +4468,19 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
dev->data->dev_conf.lpbk_mode)
txgbe_setup_loopback_link_raptor(hw);
#ifdef RTE_LIB_SECURITY
if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
(dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
ret = txgbe_crypto_enable_ipsec(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR,
"txgbe_crypto_enable_ipsec fails with %d.",
ret);
return ret;
}
}
#endif
return 0;
}

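On receive, the new bits in rx_desc_error_to_pkt_flags() surface to the application as mbuf ol_flags. A minimal consumer-side sketch (handle_decrypted() is a hypothetical application hook):

#include <rte_mbuf.h>

static void handle_decrypted(struct rte_mbuf *m); /* hypothetical app hook */

/* Check the security flags the PMD sets on received packets. */
static void
check_rx_security(struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
			rte_pktmbuf_free(m); /* authentication/decryption failed */
		else
			handle_decrypted(m); /* payload was decrypted inline */
	}
}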
drivers/net/txgbe/txgbe_rxtx.h

@@ -293,6 +293,10 @@ struct txgbe_rx_queue {
uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
#ifdef RTE_LIB_SECURITY
uint8_t using_ipsec;
/**< indicates that IPsec RX feature is in use */
#endif
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
uint16_t reg_idx; /**< RX queue register index. */
@@ -336,6 +340,11 @@ union txgbe_tx_offload {
uint64_t outer_tun_len:8; /**< Outer TUN (Tunnel) Hdr Length. */
uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
uint64_t outer_l3_len:16; /**< Outer L3 (IP) Hdr Length. */
#ifdef RTE_LIB_SECURITY
/* inline ipsec related */
uint64_t sa_idx:8; /**< TX SA database entry index */
uint64_t sec_pad_len:4; /**< padding length */
#endif
};
};
@@ -388,6 +397,10 @@ struct txgbe_tx_queue {
struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];
const struct txgbe_txq_ops *ops; /**< txq ops */
uint8_t tx_deferred_start; /**< not in global dev start. */
#ifdef RTE_LIB_SECURITY
uint8_t using_ipsec;
/**< indicates that IPsec TX feature is in use */
#endif
};
struct txgbe_txq_ops {
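A closing note on the bitfield placement above (editorial, not part of the patch): the new sa_idx and sec_pad_len fields live inside the packed 64-bit words that the driver compares against its per-queue context cache, which is why txgbe_set_xmit_ctx() ORs ~0 into the corresponding mask fields. A sketch of that comparison, mirroring the data[0]/data[1] usage visible in txgbe_set_xmit_ctx() (the helper name is hypothetical):

/* Decide whether a cached context descriptor already covers the
 * current packet's offload fields, using the packed words of
 * union txgbe_tx_offload from txgbe_rxtx.h above. */
static inline int
tx_ctx_matches(const union txgbe_tx_offload *cached,
	       const union txgbe_tx_offload *cur,
	       const union txgbe_tx_offload *mask)
{
	return cached->data[0] == (cur->data[0] & mask->data[0]) &&
	       cached->data[1] == (cur->data[1] & mask->data[1]);
}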