qat: Add Intel® 4xxx Series platform support

Overview:
Intel(R) QuickAssist Technology (Intel(R) QAT) provides hardware
acceleration for offloading security, authentication and compression
services from the CPU, thus significantly increasing the performance and
efficiency of standard platform solutions.

This commit introduces:
- Intel® 4xxx Series platform support.
- QuickAssist kernel API implementation update for Generation 4 device.
  Enabled services: symmetric cryptography and data compression.
- Increased default number of crypto instances in static configuration
  for performance purposes.

OCF backend changes:
- changed GCM/CCM MAC validation policy to generate MAC by HW
  and validate by SW due to the QAT HW limitations.

Patch co-authored by: Krzysztof Zdziarski <krzysztofx.zdziarski@intel.com>
Patch co-authored by: Michal Jaraczewski <michalx.jaraczewski@intel.com>
Patch co-authored by: Michal Gulbicki <michalx.gulbicki@intel.com>
Patch co-authored by: Julian Grajkowski <julianx.grajkowski@intel.com>
Patch co-authored by: Piotr Kasierski <piotrx.kasierski@intel.com>
Patch co-authored by: Adam Czupryna <adamx.czupryna@intel.com>
Patch co-authored by: Konrad Zelazny <konradx.zelazny@intel.com>
Patch co-authored by: Katarzyna Rucinska <katarzynax.kargol@intel.com>
Patch co-authored by: Lukasz Kolodzinski <lukaszx.kolodzinski@intel.com>
Patch co-authored by: Zbigniew Jedlinski <zbigniewx.jedlinski@intel.com>

Sponsored by:	Intel Corporation
Reviewed by:	markj, jhb
Differential Revision:	https://reviews.freebsd.org/D36254
This commit is contained in:
Michal Gulbicki 2023-01-24 09:31:38 -05:00 committed by Mark Johnston
parent 2d3515d61e
commit a977168c48
101 changed files with 8939 additions and 2453 deletions

View File

@ -1,7 +1,7 @@
.\" SPDX-License-Identifier: BSD-3-Clause
.\" Copyright(c) 2007-2022 Intel Corporation
.\" $FreeBSD$
.Dd June 30, 2022
.Dd September 1, 2022
.Dt QAT 4
.Os
.Sh NAME
@ -30,6 +30,8 @@ qat_c62x_fw_load="YES"
.It
qat_dh895xcc_fw_load="YES"
.It
qat_4xxx_fw_load="YES"
.It
qat_load="YES"
.El
.Sh DESCRIPTION
@ -53,6 +55,8 @@ Intel (R) QuickAssist Adapter 8960/Intel (R) QuickAssist Adapter 8970
Intel (R) Communications Chipset 8925 to 8955 Series
.It
Intel (R) Atom P5300 processor product family
.It
Intel (R) QAT 4xxx Series
.El
.Pp
The

Binary file not shown.

Binary file not shown.

View File

@ -28,7 +28,7 @@
#define ADF_CFG_STATIC_CONF_NUM_DC_ACCEL_UNITS 2
#define ADF_CFG_STATIC_CONF_NUM_INLINE_ACCEL_UNITS 0
#define ADF_CFG_STATIC_CONF_INST_NUM_DC 2
#define ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL 2
#define ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL 6
#define ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ 2
#define ADF_CFG_FW_STRING_TO_ID(str, acc, id) \

View File

@ -19,6 +19,7 @@
#define ADF_200XXVF_DEVICE_NAME "200xxvf"
#define ADF_C4XXX_DEVICE_NAME "c4xxx"
#define ADF_C4XXXVF_DEVICE_NAME "c4xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
#define ADF_C62X_PCI_DEVICE_ID 0x37c8
@ -31,8 +32,17 @@
#define ADF_D15XXIOV_PCI_DEVICE_ID 0x6f55
#define ADF_C4XXX_PCI_DEVICE_ID 0x18a0
#define ADF_C4XXXIOV_PCI_DEVICE_ID 0x18a1
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_401XX_PCI_DEVICE_ID 0x4942
/*
 * Generation checks keyed on the PCI device ID.
 *
 * IS_QAT_GEN3: parenthesize the macro argument so expression arguments
 * (e.g. IS_QAT_GEN3(a ? b : c)) bind correctly, and drop the GNU
 * statement-expression wrapper — a plain parenthesized comparison is
 * equivalent and portable.
 */
#define IS_QAT_GEN3(ID) ((ID) == ADF_C4XXX_PCI_DEVICE_ID)
/* Gen4 covers both the 4xxx and 401xx device IDs. */
static inline bool
IS_QAT_GEN4(const unsigned int id)
{
	return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID);
}
#define IS_QAT_GEN3_OR_GEN4(ID) (IS_QAT_GEN3(ID) || IS_QAT_GEN4(ID))
#define ADF_VF2PF_SET_SIZE 32
#define ADF_MAX_VF2PF_SET 4
#define ADF_VF2PF_SET_OFFSET(set_nr) ((set_nr)*ADF_VF2PF_SET_SIZE)
@ -50,7 +60,7 @@
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 16
#define ADF_MAX_MSIX_VECTOR_NAME 32
#define ADF_DEVICE_NAME_PREFIX "qat_"
#define ADF_STOP_RETRY 50
#define ADF_NUM_THREADS_PER_AE (8)
@ -58,7 +68,6 @@
#define ADF_NUM_PKE_STRAND (2)
#define ADF_AE_STRAND0_THREAD (8)
#define ADF_AE_STRAND1_THREAD (9)
#define ADF_NUM_HB_CNT_PER_AE (ADF_NUM_THREADS_PER_AE + ADF_NUM_PKE_STRAND)
#define ADF_CFG_NUM_SERVICES 4
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7
@ -75,6 +84,8 @@
#define GET_SRV_TYPE(ena_srv_mask, srv) \
(((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops)
#define ADF_DEFAULT_RING_TO_SRV_MAP \
(CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
@ -156,7 +167,9 @@ enum adf_accel_unit_services {
ADF_ACCEL_SERVICE_NULL = 0,
ADF_ACCEL_INLINE_CRYPTO = 1,
ADF_ACCEL_CRYPTO = 2,
ADF_ACCEL_COMPRESSION = 4
ADF_ACCEL_COMPRESSION = 4,
ADF_ACCEL_ASYM = 8,
ADF_ACCEL_ADMIN = 16
};
struct adf_ae_info {
@ -182,6 +195,7 @@ struct adf_accel_unit_info {
u32 dc_ae_msk;
u8 num_cy_au;
u8 num_dc_au;
u8 num_asym_au;
u8 num_inline_au;
struct adf_accel_unit *au;
const struct adf_ae_info *ae_info;
@ -231,6 +245,60 @@ struct admin_info {
u32 mailbox_offset;
} __packed;
/*
 * Per-generation ring CSR accessors.  Gen2 and Gen4 devices lay out the
 * transport-ring CSRs differently, so each device's hw_data installs the
 * matching callbacks (adf_gen2_init_hw_csr_info()/adf_gen4_init_hw_csr_info())
 * and callers reach them through GET_CSR_OPS(accel_dev).
 */
struct adf_hw_csr_ops {
/* Encode a DMA ring base address into the CSR register format. */
u64 (*build_csr_ring_base_addr)(bus_addr_t addr, u32 size);
/* Ring head/tail pointer access for one ring within a bank. */
u32 (*read_csr_ring_head)(struct resource *csr_base_addr,
u32 bank,
u32 ring);
void (*write_csr_ring_head)(struct resource *csr_base_addr,
u32 bank,
u32 ring,
u32 value);
u32 (*read_csr_ring_tail)(struct resource *csr_base_addr,
u32 bank,
u32 ring);
void (*write_csr_ring_tail)(struct resource *csr_base_addr,
u32 bank,
u32 ring,
u32 value);
/* Read the bank's empty-status register (e_stat CSR). */
u32 (*read_csr_e_stat)(struct resource *csr_base_addr, u32 bank);
/* Ring configuration and base-address registers. */
void (*write_csr_ring_config)(struct resource *csr_base_addr,
u32 bank,
u32 ring,
u32 value);
void (*write_csr_ring_base)(struct resource *csr_base_addr,
u32 bank,
u32 ring,
bus_addr_t addr);
/* Per-bank interrupt flag / source-select / coalescing controls. */
void (*write_csr_int_flag)(struct resource *csr_base_addr,
u32 bank,
u32 value);
void (*write_csr_int_srcsel)(struct resource *csr_base_addr, u32 bank);
void (*write_csr_int_col_en)(struct resource *csr_base_addr,
u32 bank,
u32 value);
void (*write_csr_int_col_ctl)(struct resource *csr_base_addr,
u32 bank,
u32 value);
void (*write_csr_int_flag_and_col)(struct resource *csr_base_addr,
u32 bank,
u32 value);
/* Per-bank arbiter (service arbitration) enable register. */
u32 (*read_csr_ring_srv_arb_en)(struct resource *csr_base_addr,
u32 bank);
void (*write_csr_ring_srv_arb_en)(struct resource *csr_base_addr,
u32 bank,
u32 value);
};
/*
 * Generation-specific CSR layout parameters plus the accessor callbacks.
 * Embedded in adf_hw_device_data (see GET_CSR_OPS()).
 */
struct adf_hw_csr_info {
struct adf_hw_csr_ops csr_ops;
/* Base offset of the ring CSR window (e.g. 0x100000 on Gen4, 0 on Gen2). */
u32 csr_addr_offset;
/* Size of one bank's CSR bundle (0x1000 on Gen2, 0x2000 on Gen4). */
u32 ring_bundle_size;
/* Mask written to clear a bank's interrupt flags. */
u32 bank_int_flag_clear_mask;
/* Rings covered by a single interrupt source-select register. */
u32 num_rings_per_int_srcsel;
/* Default mask for the ring service-arbiter enable register. */
u32 arb_enable_mask;
};
struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
@ -282,8 +350,10 @@ struct adf_hw_device_data {
void (*exit_arb)(struct adf_accel_dev *accel_dev);
void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
const uint32_t **cfg);
int (*init_device)(struct adf_accel_dev *accel_dev);
int (*get_heartbeat_status)(struct adf_accel_dev *accel_dev);
uint32_t (*get_ae_clock)(struct adf_hw_device_data *self);
uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
@ -298,6 +368,8 @@ struct adf_hw_device_data {
void (*restore_device)(struct adf_accel_dev *accel_dev);
uint32_t (*get_obj_cfg_ae_mask)(struct adf_accel_dev *accel_dev,
enum adf_accel_unit_services services);
enum adf_accel_unit_services (
*get_service_type)(struct adf_accel_dev *accel_dev, s32 obj_num);
int (*add_pke_stats)(struct adf_accel_dev *accel_dev);
void (*remove_pke_stats)(struct adf_accel_dev *accel_dev);
int (*add_misc_error)(struct adf_accel_dev *accel_dev);
@ -311,6 +383,14 @@ struct adf_hw_device_data {
enum adf_accel_unit_services services);
void (*pre_reset)(struct adf_accel_dev *accel_dev);
void (*post_reset)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
void (*get_ring_svc_map_data)(int ring_pair_index,
u16 ring_to_svc_map,
u8 *serv_type,
int *ring_index,
int *num_rings_per_srv,
int bundle_num);
struct adf_hw_csr_info csr_info;
const char *fw_name;
const char *fw_mmp_name;
bool reset_ack;
@ -320,7 +400,10 @@ struct adf_hw_device_data {
uint16_t accel_mask;
u32 aerucm_mask;
u32 ae_mask;
u32 admin_ae_mask;
u32 service_mask;
u32 service_to_load_mask;
u32 heartbeat_ctr_num;
uint16_t tx_rings_mask;
uint8_t tx_rx_gap;
uint8_t num_banks;

View File

@ -76,4 +76,14 @@ int adf_cfg_get_services_enabled(struct adf_accel_dev *accel_dev,
int adf_cfg_restore_section(struct adf_accel_dev *accel_dev,
struct adf_cfg_section *section);
void adf_cfg_keyval_del_all(struct list_head *head);
/*
 * Return 1 when a crypto instance must span two ring banks, 0 otherwise.
 * With only two rings per bank a single bank cannot hold all four crypto
 * rings, so the instance is split across banks.
 */
static inline int
adf_cy_inst_cross_banks(struct adf_accel_dev *accel_dev)
{
	return (accel_dev->hw_device->num_rings_per_bank == 2);
}
#endif

View File

@ -27,7 +27,7 @@
#define ADF_MAX_ACCELENGINES 12
#define ADF_CFG_STORAGE_ENABLED 1
#define ADF_DEVS_ARRAY_SIZE BITS_TO_LONGS(ADF_MAX_DEVICES)
#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x3000000
#define ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE 0x3000000
#define ADF_WDT_TIMER_SYM_COMP_MS 3
#define ADF_MIN_HB_TIMER_MS 100
#define ADF_CFG_MAX_NUM_OF_SECTIONS 16
@ -87,7 +87,8 @@ enum adf_device_type {
DEV_200XX,
DEV_200XXVF,
DEV_C4XXX,
DEV_C4XXXVF
DEV_C4XXXVF,
DEV_4XXX
};
enum adf_cfg_fw_image_type {
@ -158,6 +159,7 @@ struct adf_cfg_bundle {
/* contains all the info about rings */
struct adf_cfg_ring **rings;
u16 in_use;
u16 max_cfg_svc_num;
};
struct adf_cfg_instance {

View File

@ -22,6 +22,8 @@
#define ADF_RING_DC_RX "RingRx"
#define ADF_ETRMGR_BANK "Bank"
#define ADF_RING_BANK_NUM "BankNumber"
#define ADF_RING_BANK_NUM_ASYM "BankNumberAsym"
#define ADF_RING_BANK_NUM_SYM "BankNumberSym"
#define ADF_CY "Cy"
#define ADF_DC "Dc"
#define ADF_DC_EXTENDED_FEATURES "Device_DcExtendedFeatures"
@ -112,6 +114,8 @@
#define ADF_CY_CORE_AFFINITY_FORMAT ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY
#define ADF_DC_CORE_AFFINITY_FORMAT ADF_DC "%d" ADF_ETRMGR_CORE_AFFINITY
#define ADF_CY_BANK_NUM_FORMAT ADF_CY "%d" ADF_RING_BANK_NUM
#define ADF_CY_ASYM_BANK_NUM_FORMAT ADF_CY "%d" ADF_RING_BANK_NUM_ASYM
#define ADF_CY_SYM_BANK_NUM_FORMAT ADF_CY "%d" ADF_RING_BANK_NUM_SYM
#define ADF_DC_BANK_NUM_FORMAT ADF_DC "%d" ADF_RING_BANK_NUM
#define ADF_CY_ASYM_TX_FORMAT ADF_CY "%d" ADF_RING_ASYM_TX
#define ADF_CY_SYM_TX_FORMAT ADF_CY "%d" ADF_RING_SYM_TX

View File

@ -203,10 +203,14 @@ int adf_init_gen2_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_disable_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
void
adf_enable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask);
void
adf_disable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask);
void adf_enable_ring_arb(struct adf_accel_dev *accel_dev,
void *csr_addr,
unsigned int bank_nr,
unsigned int mask);
void adf_disable_ring_arb(struct adf_accel_dev *accel_dev,
void *csr_addr,
unsigned int bank_nr,
unsigned int mask);
int adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
struct adf_accel_dev *adf_devmgr_get_dev_by_bdf(struct adf_pci_address *addr);
struct adf_accel_dev *adf_devmgr_get_dev_by_pci_bus(u8 bus);
@ -238,9 +242,7 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
int qat_hal_init(struct adf_accel_dev *accel_dev);
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
void qat_hal_start(struct icp_qat_fw_loader_handle *handle,
unsigned char ae,
unsigned int ctx_mask);
int qat_hal_start(struct icp_qat_fw_loader_handle *handle);
void qat_hal_stop(struct icp_qat_fw_loader_handle *handle,
unsigned char ae,
unsigned int ctx_mask);

View File

@ -0,0 +1,172 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2021 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_GEN2_HW_DATA_H_
#define ADF_GEN2_HW_DATA_H_
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"
/* Transport access */
#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
#define ADF_RING_CSR_RING_CONFIG 0x000
#define ADF_RING_CSR_RING_LBASE 0x040
#define ADF_RING_CSR_RING_UBASE 0x080
#define ADF_RING_CSR_RING_HEAD 0x0C0
#define ADF_RING_CSR_RING_TAIL 0x100
#define ADF_RING_CSR_E_STAT 0x14C
#define ADF_RING_CSR_INT_FLAG 0x170
#define ADF_RING_CSR_INT_SRCSEL 0x174
#define ADF_RING_CSR_INT_SRCSEL_2 0x178
#define ADF_RING_CSR_INT_COL_EN 0x17C
#define ADF_RING_CSR_INT_COL_CTL 0x180
#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
#define ADF_RING_BUNDLE_SIZE 0x1000
#define ADF_GEN2_RX_RINGS_OFFSET 8
#define ADF_GEN2_TX_RINGS_MASK 0xFF
#define BUILD_RING_BASE_ADDR(addr, size) \
(((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
ADF_CSR_RD(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_RING_HEAD + \
((ring) << 2))
#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
ADF_CSR_RD(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_RING_TAIL + \
((ring) << 2))
#define READ_CSR_E_STAT(csr_base_addr, bank) \
ADF_CSR_RD(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_E_STAT)
#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
ADF_CSR_WR(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + \
ADF_RING_CSR_RING_CONFIG + ((ring) << 2), \
value)
#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
do { \
u32 l_base = 0, u_base = 0; \
l_base = (u32)((value)&0xFFFFFFFF); \
u_base = (u32)(((value)&0xFFFFFFFF00000000ULL) >> 32); \
ADF_CSR_WR(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + \
ADF_RING_CSR_RING_LBASE + ((ring) << 2), \
l_base); \
ADF_CSR_WR(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + \
ADF_RING_CSR_RING_UBASE + ((ring) << 2), \
u_base); \
} while (0)
#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
ADF_CSR_WR(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_RING_HEAD + \
((ring) << 2), \
value)
#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
ADF_CSR_WR(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_RING_TAIL + \
((ring) << 2), \
value)
#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
ADF_CSR_WR(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_INT_FLAG, \
value)
#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
do { \
ADF_CSR_WR(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + \
ADF_RING_CSR_INT_SRCSEL, \
ADF_BANK_INT_SRC_SEL_MASK_0); \
ADF_CSR_WR(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + \
ADF_RING_CSR_INT_SRCSEL_2, \
ADF_BANK_INT_SRC_SEL_MASK_X); \
} while (0)
#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
ADF_CSR_WR(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_INT_COL_EN, \
value)
#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
ADF_CSR_WR(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + ADF_RING_CSR_INT_COL_CTL, \
ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
ADF_CSR_WR(csr_base_addr, \
(ADF_RING_BUNDLE_SIZE * (bank)) + \
ADF_RING_CSR_INT_FLAG_AND_COL, \
value)
/* AE to function map */
#define AE2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190)
#define AE2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310)
#define AE2FUNCTION_MAP_REG_SIZE 4
#define AE2FUNCTION_MAP_VALID BIT(7)
#define READ_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index) \
ADF_CSR_RD(pmisc_bar_addr, \
AE2FUNCTION_MAP_A_OFFSET + \
AE2FUNCTION_MAP_REG_SIZE * (index))
#define WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_bar_addr, index, value) \
ADF_CSR_WR(pmisc_bar_addr, \
AE2FUNCTION_MAP_A_OFFSET + \
AE2FUNCTION_MAP_REG_SIZE * (index), \
value)
#define READ_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index) \
ADF_CSR_RD(pmisc_bar_addr, \
AE2FUNCTION_MAP_B_OFFSET + \
AE2FUNCTION_MAP_REG_SIZE * (index))
#define WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_bar_addr, index, value) \
ADF_CSR_WR(pmisc_bar_addr, \
AE2FUNCTION_MAP_B_OFFSET + \
AE2FUNCTION_MAP_REG_SIZE * (index), \
value)
/* Admin Interface Offsets */
#define ADF_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
#define ADF_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
#define ADF_MAILBOX_BASE_OFFSET 0x20970
/* Arbiter configuration */
#define ADF_ARB_OFFSET 0x30000
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
#define ADF_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
#define ADF_ARB_REG_SLOT 0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
#define READ_CSR_RING_SRV_ARB_EN(csr_addr, index) \
ADF_CSR_RD(csr_addr, \
ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)))
#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \
ADF_CSR_WR(csr_addr, \
ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)), \
value)
/* Power gating */
#define ADF_POWERGATE_DC BIT(23)
#define ADF_POWERGATE_PKE BIT(24)
/* Default ring mapping */
#define ADF_GEN2_DEFAULT_RING_TO_SRV_MAP \
(CRYPTO << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \
CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
UNUSED << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
/* Error detection and correction */
#define ADF_GEN2_AE_CTX_ENABLES(i) ((i)*0x1000 + 0x20818)
#define ADF_GEN2_AE_MISC_CONTROL(i) ((i)*0x1000 + 0x20960)
#define ADF_GEN2_ENABLE_AE_ECC_ERR BIT(28)
#define ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
#define ADF_GEN2_UERRSSMSH(i) ((i)*0x4000 + 0x18)
#define ADF_GEN2_CERRSSMSH(i) ((i)*0x4000 + 0x10)
#define ADF_GEN2_ERRSSMSH_EN BIT(3)
#define ADF_NUM_HB_CNT_PER_AE (ADF_NUM_THREADS_PER_AE + ADF_NUM_PKE_STRAND)
void adf_gen2_init_hw_csr_info(struct adf_hw_csr_info *csr_info);
#endif

View File

@ -0,0 +1,132 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2021 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_GEN4_HW_CSR_DATA_H_
#define ADF_GEN4_HW_CSR_DATA_H_
#include "adf_accel_devices.h"
/* Transport access */
#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
#define ADF_RING_CSR_RING_CONFIG 0x1000
#define ADF_RING_CSR_RING_LBASE 0x1040
#define ADF_RING_CSR_RING_UBASE 0x1080
#define ADF_RING_CSR_RING_HEAD 0x0C0
#define ADF_RING_CSR_RING_TAIL 0x100
#define ADF_RING_CSR_E_STAT 0x14C
#define ADF_RING_CSR_INT_FLAG 0x170
#define ADF_RING_CSR_INT_SRCSEL 0x174
#define ADF_RING_CSR_INT_COL_CTL 0x180
#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
#define ADF_RING_CSR_INT_COL_EN 0x17C
#define ADF_RING_CSR_ADDR_OFFSET 0x100000
#define ADF_RING_BUNDLE_SIZE 0x2000
#define BUILD_RING_BASE_ADDR(addr, size) \
((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
ADF_CSR_RD((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_HEAD + ((ring) << 2))
#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
ADF_CSR_RD((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_TAIL + ((ring) << 2))
#define READ_CSR_E_STAT(csr_base_addr, bank) \
ADF_CSR_RD((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_E_STAT)
#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
ADF_CSR_WR((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_CONFIG + ((ring) << 2), \
value)
#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
do { \
struct resource *_csr_base_addr = csr_base_addr; \
u32 _bank = bank; \
u32 _ring = ring; \
dma_addr_t _value = value; \
u32 l_base = 0, u_base = 0; \
l_base = lower_32_bits(_value); \
u_base = upper_32_bits(_value); \
ADF_CSR_WR((_csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + \
ADF_RING_BUNDLE_SIZE * (_bank) + \
ADF_RING_CSR_RING_LBASE + ((_ring) << 2), \
l_base); \
ADF_CSR_WR((_csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + \
ADF_RING_BUNDLE_SIZE * (_bank) + \
ADF_RING_CSR_RING_UBASE + ((_ring) << 2), \
u_base); \
} while (0)
#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
ADF_CSR_WR((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_HEAD + ((ring) << 2), \
value)
#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
ADF_CSR_WR((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_TAIL + ((ring) << 2), \
value)
#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
ADF_CSR_WR((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_INT_FLAG, \
(value))
#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
ADF_CSR_WR((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_INT_SRCSEL, \
ADF_BANK_INT_SRC_SEL_MASK)
#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
ADF_CSR_WR((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_INT_COL_EN, \
(value))
#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
ADF_CSR_WR((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_INT_COL_CTL, \
ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
ADF_CSR_WR((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_INT_FLAG_AND_COL, \
(value))
/* Arbiter configuration */
#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
#define READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank) \
ADF_CSR_RD((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_SRV_ARB_EN)
#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
ADF_CSR_WR((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_SRV_ARB_EN, \
(value))
/* WDT timers
*
* Timeout is in cycles. Clock speed may vary across products but this
* value should be a few milli-seconds.
*/
#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL
#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000
#define ADF_SSMWDTL_OFFSET 0x54
#define ADF_SSMWDTH_OFFSET 0x5C
#define ADF_SSMWDTPKEL_OFFSET 0x58
#define ADF_SSMWDTPKEH_OFFSET 0x60
#define ADF_NUM_HB_CNT_PER_AE (ADF_NUM_THREADS_PER_AE)
int adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_info(struct adf_hw_csr_info *csr_info);
#endif

View File

@ -16,6 +16,7 @@ struct icp_qat_fw_loader_ae_data {
struct icp_qat_fw_loader_hal_handle {
struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
unsigned int ae_mask;
unsigned int admin_ae_mask;
unsigned int slice_mask;
unsigned int revision_id;
unsigned int ae_max_num;

View File

@ -52,23 +52,32 @@ enum hal_ae_csr {
};
enum fcu_csr {
FCU_CONTROL = 0x0,
FCU_STATUS = 0x4,
FCU_DRAM_ADDR_LO = 0xc,
FCU_CONTROL = 0x00,
FCU_STATUS = 0x04,
FCU_DRAM_ADDR_LO = 0x0c,
FCU_DRAM_ADDR_HI = 0x10,
FCU_RAMBASE_ADDR_HI = 0x14,
FCU_RAMBASE_ADDR_LO = 0x18
};
enum fcu_csr_c4xxx {
FCU_CONTROL_C4XXX = 0x0,
FCU_STATUS_C4XXX = 0x4,
FCU_STATUS1_C4XXX = 0xc,
FCU_CONTROL_C4XXX = 0x00,
FCU_STATUS_C4XXX = 0x04,
FCU_STATUS1_C4XXX = 0x0c,
FCU_AE_LOADED_C4XXX = 0x10,
FCU_DRAM_ADDR_LO_C4XXX = 0x14,
FCU_DRAM_ADDR_HI_C4XXX = 0x18,
};
/*
 * Gen4 (4xxx) FCU register offsets, applied relative to FCU_OFFSET_4XXX
 * via SET_FCU_CSR()/GET_FCU_CSR().  Layout differs from the legacy and
 * c4xxx variants above; FCU_ME_BROADCAST_MASK_TYPE is new on this
 * generation (used with the broadcast firmware-load path).
 */
enum fcu_csr_4xxx {
FCU_CONTROL_4XXX = 0x00,
FCU_STATUS_4XXX = 0x04,
FCU_ME_BROADCAST_MASK_TYPE = 0x08,
FCU_AE_LOADED_4XXX = 0x10,
FCU_DRAM_ADDR_LO_4XXX = 0x14,
FCU_DRAM_ADDR_HI_4XXX = 0x18,
};
enum fcu_cmd {
FCU_CTRL_CMD_NOOP = 0,
FCU_CTRL_CMD_AUTH = 1,
@ -104,6 +113,7 @@ enum fcu_sts {
#define LCS_STATUS (0x1)
#define MMC_SHARE_CS_BITPOS 2
#define GLOBAL_CSR 0xA00
#define FCU_CTRL_BROADCAST_POS 0x4
#define FCU_CTRL_AE_POS 0x8
#define FCU_AUTH_STS_MASK 0x7
#define FCU_STS_DONE_POS 0x9
@ -111,20 +121,26 @@ enum fcu_sts {
#define FCU_LOADED_AE_POS 0x16
#define FW_AUTH_WAIT_PERIOD 10
#define FW_AUTH_MAX_RETRY 300
#define FW_BROADCAST_MAX_RETRY 300
#define FCU_OFFSET 0x8c0
#define FCU_OFFSET_C4XXX 0x1000
#define FCU_OFFSET_4XXX 0x1000
#define MAX_CPP_NUM 2
#define AE_CPP_NUM 2
#define AES_PER_CPP 16
#define SLICES_PER_CPP 6
#define ICP_QAT_AE_OFFSET 0x20000
#define ICP_QAT_AE_OFFSET_C4XXX 0x40000
#define ICP_QAT_AE_OFFSET_4XXX 0x600000
#define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000)
#define ICP_QAT_CAP_OFFSET_C4XXX 0x70000
#define ICP_QAT_CAP_OFFSET_4XXX 0x640000
#define LOCAL_TO_XFER_REG_OFFSET 0x800
#define ICP_QAT_EP_OFFSET 0x3a000
#define ICP_QAT_EP_OFFSET_C4XXX 0x60000
#define ICP_QAT_EP_OFFSET_4XXX 0x200000 /* HI MMIO CSRs */
#define MEM_CFG_ERR_BIT 0x20
#define AE_TG_NUM_CPM2X 4
#define CAP_CSR_ADDR(csr) (csr + handle->hal_cap_g_ctl_csr_addr_v)
#define SET_CAP_CSR(handle, csr, val) \
@ -133,20 +149,17 @@ enum fcu_sts {
ADF_CSR_RD(handle->hal_misc_addr_v, CAP_CSR_ADDR(csr))
#define SET_GLB_CSR(handle, csr, val) \
({ \
typeof(handle) handle_ = (handle); \
typeof(csr) csr_ = (csr); \
typeof(val) val_ = (val); \
(IS_QAT_GEN3(pci_get_device(GET_DEV(handle_->accel_dev)))) ? \
SET_CAP_CSR(handle_, (csr_), (val_)) : \
SET_CAP_CSR(handle_, csr_ + GLOBAL_CSR, val_); \
u32 dev_id = pci_get_device(GET_DEV((handle)->accel_dev)); \
(IS_QAT_GEN3_OR_GEN4(dev_id)) ? \
SET_CAP_CSR((handle), (csr), (val)) : \
SET_CAP_CSR((handle), (csr) + GLOBAL_CSR, val); \
})
#define GET_GLB_CSR(handle, csr) \
({ \
typeof(handle) handle_ = (handle); \
typeof(csr) csr_ = (csr); \
(IS_QAT_GEN3(pci_get_device(GET_DEV(handle_->accel_dev)))) ? \
(GET_CAP_CSR(handle_, (csr_))) : \
(GET_CAP_CSR(handle_, (GLOBAL_CSR + (csr_)))); \
u32 dev_id = pci_get_device(GET_DEV((handle)->accel_dev)); \
(IS_QAT_GEN3_OR_GEN4(dev_id)) ? \
GET_CAP_CSR((handle), (csr)) : \
GET_CAP_CSR((handle), (csr) + GLOBAL_CSR); \
})
#define SET_FCU_CSR(handle, csr, val) \
({ \
@ -157,7 +170,12 @@ enum fcu_sts {
SET_CAP_CSR(handle_, \
((csr_) + FCU_OFFSET_C4XXX), \
(val_)) : \
SET_CAP_CSR(handle_, ((csr_) + FCU_OFFSET), (val_)); \
((IS_QAT_GEN4( \
pci_get_device(GET_DEV(handle_->accel_dev)))) ? \
SET_CAP_CSR(handle_, \
((csr_) + FCU_OFFSET_4XXX), \
(val_)) : \
SET_CAP_CSR(handle_, ((csr_) + FCU_OFFSET), (val_))); \
})
#define GET_FCU_CSR(handle, csr) \
({ \
@ -165,7 +183,10 @@ enum fcu_sts {
typeof(csr) csr_ = (csr); \
(IS_QAT_GEN3(pci_get_device(GET_DEV(handle_->accel_dev)))) ? \
GET_CAP_CSR(handle_, (FCU_OFFSET_C4XXX + (csr_))) : \
GET_CAP_CSR(handle_, (FCU_OFFSET + (csr_))); \
((IS_QAT_GEN4( \
pci_get_device(GET_DEV(handle_->accel_dev)))) ? \
GET_CAP_CSR(handle_, (FCU_OFFSET_4XXX + (csr_))) : \
GET_CAP_CSR(handle_, (FCU_OFFSET + (csr_)))); \
})
#define AE_CSR(handle, ae) \
((handle)->hal_cap_ae_local_csr_addr_v + ((ae) << 12))
@ -184,13 +205,19 @@ enum fcu_sts {
ADF_CSR_WR((handle)->hal_sram_addr_v, addr, val)
#define GET_CSR_OFFSET(device_id, cap_offset_, ae_offset_, ep_offset_) \
({ \
int gen3 = IS_QAT_GEN3(device_id); \
cap_offset_ = \
(gen3 ? ICP_QAT_CAP_OFFSET_C4XXX : ICP_QAT_CAP_OFFSET); \
ae_offset_ = \
(gen3 ? ICP_QAT_AE_OFFSET_C4XXX : ICP_QAT_AE_OFFSET); \
ep_offset_ = \
(gen3 ? ICP_QAT_EP_OFFSET_C4XXX : ICP_QAT_EP_OFFSET); \
if (IS_QAT_GEN3(device_id)) { \
cap_offset_ = ICP_QAT_CAP_OFFSET_C4XXX; \
ae_offset_ = ICP_QAT_AE_OFFSET_C4XXX; \
ep_offset_ = ICP_QAT_EP_OFFSET_C4XXX; \
} else if (IS_QAT_GEN4(device_id)) { \
cap_offset_ = ICP_QAT_CAP_OFFSET_4XXX; \
ae_offset_ = ICP_QAT_AE_OFFSET_4XXX; \
ep_offset_ = ICP_QAT_EP_OFFSET_4XXX; \
} else { \
cap_offset_ = ICP_QAT_CAP_OFFSET; \
ae_offset_ = ICP_QAT_AE_OFFSET; \
ep_offset_ = ICP_QAT_EP_OFFSET; \
} \
})
#endif

View File

@ -9,6 +9,7 @@
#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000
#define ICP_QAT_AC_200XX_DEV_TYPE 0x02000000
#define ICP_QAT_AC_C4XXX_DEV_TYPE 0x04000000
#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
#define ICP_QAT_UCLO_MAX_AE 32
#define ICP_QAT_UCLO_MAX_CTX 8
#define ICP_QAT_UCLO_MAX_CPPNUM 2
@ -17,6 +18,7 @@
#define ICP_QAT_UCLO_MAX_XFER_REG 128
#define ICP_QAT_UCLO_MAX_GPR_REG 128
#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
#define ICP_QAT_UCLO_MAX_LMEM_REG_2X 1280
#define ICP_QAT_UCLO_AE_ALL_CTX 0xff
#define ICP_QAT_UOF_OBJID_LEN 8
#define ICP_QAT_UOF_FID 0xc6c2
@ -46,22 +48,42 @@
#define ICP_QAT_SUOF_IMAG "SUF_IMAG"
#define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long))
#define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long))
#define ICP_QAT_CSS_FWSK_MODULUS_LEN 256
#define ICP_QAT_CSS_FWSK_EXPONENT_LEN 4
#define ICP_QAT_CSS_FWSK_PAD_LEN 252
#define ICP_QAT_CSS_FWSK_PUB_LEN \
(ICP_QAT_CSS_FWSK_MODULUS_LEN + ICP_QAT_CSS_FWSK_EXPONENT_LEN + \
ICP_QAT_CSS_FWSK_PAD_LEN)
#define ICP_QAT_CSS_SIGNATURE_LEN 256
#define DSS_FWSK_MODULUS_LEN 384 // RSA3K
#define DSS_FWSK_EXPONENT_LEN 4
#define DSS_FWSK_PADDING_LEN 380
#define DSS_SIGNATURE_LEN 384 // RSA3K
#define CSS_FWSK_MODULUS_LEN 256 // RSA2K
#define CSS_FWSK_EXPONENT_LEN 4
#define CSS_FWSK_PADDING_LEN 252
#define CSS_SIGNATURE_LEN 256 // RSA2K
#define ICP_QAT_CSS_FWSK_MODULUS_LEN(ID) \
(IS_QAT_GEN4(ID) ? DSS_FWSK_MODULUS_LEN : CSS_FWSK_MODULUS_LEN)
#define ICP_QAT_CSS_FWSK_EXPONENT_LEN(ID) \
(IS_QAT_GEN4(ID) ? DSS_FWSK_EXPONENT_LEN : CSS_FWSK_EXPONENT_LEN)
#define ICP_QAT_CSS_FWSK_PAD_LEN(ID) \
(IS_QAT_GEN4(ID) ? DSS_FWSK_PADDING_LEN : CSS_FWSK_PADDING_LEN)
#define ICP_QAT_CSS_FWSK_PUB_LEN(ID) \
(ICP_QAT_CSS_FWSK_MODULUS_LEN(ID) + \
ICP_QAT_CSS_FWSK_EXPONENT_LEN(ID) + ICP_QAT_CSS_FWSK_PAD_LEN(ID))
#define ICP_QAT_CSS_SIGNATURE_LEN(ID) \
(IS_QAT_GEN4(ID) ? DSS_SIGNATURE_LEN : CSS_SIGNATURE_LEN)
#define ICP_QAT_CSS_AE_IMG_LEN \
(sizeof(struct icp_qat_simg_ae_mode) + ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \
ICP_QAT_SIMG_AE_INSTS_LEN)
#define ICP_QAT_CSS_AE_SIMG_LEN \
(sizeof(struct icp_qat_css_hdr) + ICP_QAT_CSS_FWSK_PUB_LEN + \
ICP_QAT_CSS_SIGNATURE_LEN + ICP_QAT_CSS_AE_IMG_LEN)
#define ICP_QAT_AE_IMG_OFFSET \
(sizeof(struct icp_qat_css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN + \
ICP_QAT_CSS_FWSK_EXPONENT_LEN + ICP_QAT_CSS_SIGNATURE_LEN)
#define ICP_QAT_CSS_AE_SIMG_LEN(ID) \
(sizeof(struct icp_qat_css_hdr) + ICP_QAT_CSS_FWSK_PUB_LEN(ID) + \
ICP_QAT_CSS_SIGNATURE_LEN(ID) + ICP_QAT_CSS_AE_IMG_LEN)
#define ICP_QAT_AE_IMG_OFFSET(ID) \
(sizeof(struct icp_qat_css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(ID) + \
ICP_QAT_CSS_FWSK_EXPONENT_LEN(ID) + ICP_QAT_CSS_SIGNATURE_LEN(ID))
#define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000
#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode)&0xf)

View File

@ -200,10 +200,6 @@ struct icp_qat_fw_init_admin_hb_cnt {
u16 req_heartbeat_cnt;
};
struct icp_qat_fw_init_admin_hb_stats {
struct icp_qat_fw_init_admin_hb_cnt stats[ADF_NUM_HB_CNT_PER_AE];
};
#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0

View File

@ -45,8 +45,11 @@ static inline CpaBoolean
is_use_sep_digest(const struct crypto_session_params *csp)
{
/* Use separated digest for all digest/hash operations,
* including GMAC */
if (CSP_MODE_DIGEST == csp->csp_mode || CSP_MODE_ETA == csp->csp_mode)
* including GMAC. ETA and AEAD use separated digest
* due to FW limitation to specify offset to digest
* appended to pay-load buffer. */
if (CSP_MODE_DIGEST == csp->csp_mode || CSP_MODE_ETA == csp->csp_mode ||
CSP_MODE_AEAD == csp->csp_mode)
return CPA_TRUE;
return CPA_FALSE;

View File

@ -29,6 +29,9 @@
#include "icp_adf_accel_mgr.h"
#include "lac_sal_types.h"
/* To disable AEAD HW MAC verification */
#include "icp_sal_user.h"
/* QAT OCF specific headers */
#include "qat_ocf_mem_pool.h"
#include "qat_ocf_utils.h"
@ -423,24 +426,6 @@ qat_ocf_session_init(device_t dev,
switch (csp->csp_mode) {
case CSP_MODE_AEAD:
sessionSetupData.symOperation =
CPA_CY_SYM_OP_ALGORITHM_CHAINING;
/* Place the digest result in a buffer unrelated to srcBuffer */
sessionSetupData.digestIsAppended = CPA_TRUE;
/* For GCM and CCM driver forces to verify digest on HW */
sessionSetupData.verifyDigest = CPA_TRUE;
if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
sessionSetupData.cipherSetupData.cipherDirection =
CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
sessionSetupData.algChainOrder =
CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
} else {
sessionSetupData.cipherSetupData.cipherDirection =
CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
sessionSetupData.algChainOrder =
CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
}
break;
case CSP_MODE_ETA:
sessionSetupData.symOperation =
CPA_CY_SYM_OP_ALGORITHM_CHAINING;
@ -1086,6 +1071,15 @@ qat_ocf_start_instances(struct qat_ocf_softc *qat_softc, device_t dev)
goto fail;
}
/* Disable forcing HW MAC validation for AEAD */
status = icp_sal_setForceAEADMACVerify(cyInstHandle, CPA_FALSE);
if (CPA_STATUS_SUCCESS != status) {
device_printf(
qat_softc->sc_dev,
"unable to disable AEAD HW MAC verification\n");
goto fail;
}
qat_ocf_instance->driver_id = qat_softc->cryptodev_id;
startedInstances++;
@ -1222,6 +1216,7 @@ MODULE_DEPEND(qat, qat_200xx, 1, 1, 1);
MODULE_DEPEND(qat, qat_c3xxx, 1, 1, 1);
MODULE_DEPEND(qat, qat_c4xxx, 1, 1, 1);
MODULE_DEPEND(qat, qat_dh895xcc, 1, 1, 1);
MODULE_DEPEND(qat, qat_4xxx, 1, 1, 1);
MODULE_DEPEND(qat, crypto, 1, 1, 1);
MODULE_DEPEND(qat, qat_common, 1, 1, 1);
MODULE_DEPEND(qat, qat_api, 1, 1, 1);

View File

@ -26,9 +26,14 @@
#include "sal_types_compression.h"
#include "icp_qat_fw_comp.h"
#include "sal_hw_gen.h"
#define CPA_DC_CEIL_DIV(x, y) (((x) + (y)-1) / (y))
#define DC_DEST_BUFF_EXTRA_DEFLATE_GEN2 (55)
#define DC_DEST_BUFF_EXTRA_DEFLATE_GEN4_STATIC (1029)
#define DC_DEST_BUFF_EXTRA_DEFLATE_GEN4_DYN (512)
#define DC_DEST_BUFF_MIN_EXTRA_BYTES(x) ((x < 8) ? (8 - x) : 0)
#define DC_BUF_MAX_SIZE (0xFFFFFFFF)
CpaStatus
cpaDcBufferListGetMetaSize(const CpaInstanceHandle instanceHandle,
@ -72,13 +77,60 @@ cpaDcBnpBufferListGetMetaSize(const CpaInstanceHandle instanceHandle,
static inline CpaStatus
dcDeflateBoundGen2(CpaDcHuffType huffType, Cpa32U inputSize, Cpa32U *outputSize)
{
Cpa64U inBufferSize = inputSize;
Cpa64U outBufferSize = 0;
/* Formula for GEN2 deflate:
* ceil(9 * Total input bytes / 8) + 55 bytes.
* 55 bytes is the skid pad value for GEN2 devices.
* Adding extra bytes = `DC_DEST_BUFF_MIN_EXTRA_BYTES(inputSize)`
* when calculated value from `CPA_DC_CEIL_DIV(9 * inputSize, 8) +
* DC_DEST_BUFF_EXTRA_DEFLATE_GEN2` is less than 64 bytes to
* achieve a safer output buffer size of 64 bytes.
*/
*outputSize =
CPA_DC_CEIL_DIV(9 * inputSize, 8) + DC_DEST_BUFF_EXTRA_DEFLATE_GEN2;
outBufferSize = CPA_DC_CEIL_DIV(9 * inBufferSize, 8) +
DC_DEST_BUFF_EXTRA_DEFLATE_GEN2 +
DC_DEST_BUFF_MIN_EXTRA_BYTES(inputSize);
if (outBufferSize > DC_BUF_MAX_SIZE)
*outputSize = DC_BUF_MAX_SIZE;
else
*outputSize = (Cpa32U)outBufferSize;
return CPA_STATUS_SUCCESS;
}
/*
 * Compute the worst-case (bound) destination buffer size for a deflate
 * request on GEN4 (QAT 2.0) devices.
 *
 * huffType    - static or full-dynamic Huffman encoding
 * inputSize   - number of source bytes to compress
 * outputSize  - out: required destination buffer size in bytes
 *
 * Returns CPA_STATUS_SUCCESS, or CPA_STATUS_INVALID_PARAM for an
 * unsupported Huffman type or when the bound does not fit in 32 bits.
 */
static inline CpaStatus
dcDeflateBoundGen4(CpaDcHuffType huffType, Cpa32U inputSize, Cpa32U *outputSize)
{
	const Cpa64U srcLen = (Cpa64U)inputSize;
	Cpa64U bound;

	if (CPA_DC_HT_STATIC == huffType) {
		/* GEN4 static deflate bound:
		 * ceil((9 * sourceLen) / 8) + 5 + 1024. */
		bound = CPA_DC_CEIL_DIV(9 * srcLen, 8) +
		    DC_DEST_BUFF_EXTRA_DEFLATE_GEN4_STATIC;
	} else if (CPA_DC_HT_FULL_DYNAMIC == huffType) {
		/* GEN4 dynamic deflate bound:
		 * ceil((9 * sourceLen) / 8) +
		 * ((((8/7) * sourceLen) / 16KB) * (150 + 5)) + 512. */
		bound = CPA_DC_CEIL_DIV(9 * srcLen, 8) +
		    ((8 * srcLen * 155) / 7) / (16 * 1024) +
		    DC_DEST_BUFF_EXTRA_DEFLATE_GEN4_DYN;
	} else {
		return CPA_STATUS_INVALID_PARAM;
	}

	/* The bound must fit in the caller's 32-bit output size. */
	if (bound & 0xffffffff00000000UL)
		return CPA_STATUS_INVALID_PARAM;

	*outputSize = (Cpa32U)bound;
	return CPA_STATUS_SUCCESS;
}
@ -88,6 +140,7 @@ cpaDcDeflateCompressBound(const CpaInstanceHandle dcInstance,
Cpa32U inputSize,
Cpa32U *outputSize)
{
sal_compression_service_t *pService = NULL;
CpaInstanceHandle insHandle = NULL;
if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) {
@ -112,5 +165,10 @@ cpaDcDeflateCompressBound(const CpaInstanceHandle dcInstance,
return CPA_STATUS_INVALID_PARAM;
}
return dcDeflateBoundGen2(huffType, inputSize, outputSize);
pService = (sal_compression_service_t *)insHandle;
if (isDcGen4x(pService)) {
return dcDeflateBoundGen4(huffType, inputSize, outputSize);
} else {
return dcDeflateBoundGen2(huffType, inputSize, outputSize);
}
}

View File

@ -42,6 +42,7 @@
#include "lac_sync.h"
#include "sal_service_state.h"
#include "sal_qat_cmn_msg.h"
#include "sal_hw_gen.h"
#include "dc_error_counter.h"
#define DC_COMP_MAX_BUFF_SIZE (1024 * 64)
@ -71,6 +72,28 @@ getDcErrorCounter(CpaDcReqStatus dcError)
return 0;
}
/*
 * On a GEN4 translator (XLT) overflow, refresh the legacy checksum in the
 * user-visible results structure from the firmware response parameters,
 * according to the session's configured checksum type.
 */
static inline void
dcUpdateXltOverflowChecksumsGen4(const dc_compression_cookie_t *pCookie,
				 const icp_qat_fw_resp_comp_pars_t *pRespPars,
				 CpaDcRqResults *pDcResults)
{
	const dc_session_desc_t *pSessDesc =
	    DC_SESSION_DESC_FROM_CTX_GET(pCookie->pSessionHandle);

	switch (pSessDesc->checksumType) {
	case CPA_DC_CRC32:
		/* swCrc64I is not recomputed here; that is handled later
		 * in dcHandleIntegrityChecksumsGen4. */
		pDcResults->checksum = pRespPars->crc.legacy.curr_crc32;
		break;
	case CPA_DC_ADLER32:
		pDcResults->checksum = pRespPars->crc.legacy.curr_adler_32;
		break;
	default:
		/* Other checksum types: leave the result untouched. */
		break;
	}
}
void
dcCompression_ProcessCallback(void *pRespMsg)
{
@ -86,6 +109,8 @@ dcCompression_ProcessCallback(void *pRespMsg)
dc_compression_cookie_t *pCookie = NULL;
CpaDcOpData *pOpData = NULL;
CpaBoolean cmpPass = CPA_TRUE, xlatPass = CPA_TRUE;
CpaBoolean isDcDp = CPA_FALSE;
CpaBoolean integrityCrcCheck = CPA_FALSE;
CpaBoolean verifyHwIntegrityCrcs = CPA_FALSE;
Cpa8U cmpErr = ERR_CODE_NO_ERROR, xlatErr = ERR_CODE_NO_ERROR;
dc_request_dir_t compDecomp = DC_COMPRESSION_REQUEST;
@ -102,17 +127,20 @@ dcCompression_ProcessCallback(void *pRespMsg)
pCookie = (dc_compression_cookie_t *)pReqData;
if (!pCookie)
return;
pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pCookie->pSessionHandle);
if (CPA_TRUE == pSessionDesc->isDcDp) {
pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pCookie->pSessionHandle);
pService = (sal_compression_service_t *)(pCookie->dcInstance);
isDcDp = pSessionDesc->isDcDp;
if (CPA_TRUE == isDcDp) {
pResponse = (CpaDcDpOpData *)pReqData;
pResults = &(pResponse->results);
if (CPA_DC_DIR_DECOMPRESS == pSessionDesc->sessDirection) {
compDecomp = DC_DECOMPRESSION_REQUEST;
}
pCookie = NULL;
} else {
pSessionDesc = pCookie->pSessionDesc;
pResults = pCookie->pResults;
callbackTag = pCookie->callbackTag;
pCbFunc = pCookie->pSessionDesc->pCompressionCb;
@ -120,8 +148,6 @@ dcCompression_ProcessCallback(void *pRespMsg)
pOpData = pCookie->pDcOpData;
}
pService = (sal_compression_service_t *)(pCookie->dcInstance);
opStatus = pCompRespMsg->comn_resp.comn_status;
if (NULL != pOpData) {
@ -142,15 +168,17 @@ dcCompression_ProcessCallback(void *pRespMsg)
pResults->status = (Cpa8S)cmpErr;
pResults->consumed = 0;
pResults->produced = 0;
if (CPA_TRUE == pSessionDesc->isDcDp) {
if (CPA_TRUE == isDcDp) {
if (pResponse)
pResponse->responseStatus =
CPA_STATUS_UNSUPPORTED;
(pService->pDcDpCb)(pResponse);
} else {
/* Free the memory pool */
Lac_MemPoolEntryFree(pCookie);
pCookie = NULL;
if (NULL != pCookie) {
Lac_MemPoolEntryFree(pCookie);
pCookie = NULL;
}
if (NULL != pCbFunc) {
pCbFunc(callbackTag, status);
}
@ -169,9 +197,23 @@ dcCompression_ProcessCallback(void *pRespMsg)
ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(opStatus));
}
if (CPA_DC_INCOMPLETE_FILE_ERR == (Cpa8S)cmpErr) {
cmpPass = CPA_TRUE;
cmpErr = ERR_CODE_NO_ERROR;
if (isDcGen2x(pService)) {
/* QAT1.7 and QAT 1.8 hardware */
if (CPA_DC_INCOMPLETE_FILE_ERR == (Cpa8S)cmpErr) {
cmpPass = CPA_TRUE;
cmpErr = ERR_CODE_NO_ERROR;
}
} else {
/* QAT2.0 hardware cancels the incomplete file errors
* only for DEFLATE algorithm.
* Decompression direction is not tested in the callback as
* the request does not allow it.
*/
if ((pSessionDesc->compType == CPA_DC_DEFLATE) &&
(CPA_DC_INCOMPLETE_FILE_ERR == (Cpa8S)cmpErr)) {
cmpPass = CPA_TRUE;
cmpErr = ERR_CODE_NO_ERROR;
}
}
/* log the slice hang and endpoint push/pull error inside the response
*/
@ -199,8 +241,7 @@ dcCompression_ProcessCallback(void *pRespMsg)
xlatErr = pCompRespMsg->comn_resp.comn_error.s1.xlat_err_code;
/* Return a fatal error or a potential error in the translator
* slice
* if the compression slice did not return any error */
* slice if the compression slice did not return any error */
if ((CPA_DC_OK == pResults->status) ||
(CPA_DC_FATALERR == (Cpa8S)xlatErr)) {
pResults->status = (Cpa8S)xlatErr;
@ -209,7 +250,7 @@ dcCompression_ProcessCallback(void *pRespMsg)
/* Update dc error counter */
dcErrorLog(pResults->status);
if (CPA_FALSE == pSessionDesc->isDcDp) {
if (CPA_FALSE == isDcDp) {
/* In case of any error for an end of packet request, we need to
* update
* the request type for the following request */
@ -223,17 +264,27 @@ dcCompression_ProcessCallback(void *pRespMsg)
((CPA_DC_STATELESS == pSessionDesc->sessState) &&
(DC_COMPRESSION_REQUEST == compDecomp))) {
/* Overflow is a valid use case for Traditional API
* only.
* Stateful Overflow is supported in both compression
* and
* decompression direction.
* Stateless Overflow is supported only in compression
* direction.
* only. Stateful Overflow is supported in both
* compression and decompression direction. Stateless
* Overflow is supported only in compression direction.
*/
if (CPA_DC_OVERFLOW == (Cpa8S)cmpErr)
cmpPass = CPA_TRUE;
if (CPA_DC_OVERFLOW == (Cpa8S)xlatErr) {
if (isDcGen4x(pService) &&
(CPA_TRUE ==
pService->comp_device_data
.translatorOverflow)) {
pResults->consumed =
pCompRespMsg->comp_resp_pars
.input_byte_counter;
dcUpdateXltOverflowChecksumsGen4(
pCookie,
&pCompRespMsg->comp_resp_pars,
pResults);
}
xlatPass = CPA_TRUE;
}
}
@ -242,6 +293,7 @@ dcCompression_ProcessCallback(void *pRespMsg)
cmpPass = CPA_FALSE;
}
if (CPA_DC_OVERFLOW == (Cpa8S)xlatErr) {
/* XLT overflow is not valid for Data Plane requests */
xlatPass = CPA_FALSE;
}
}
@ -254,7 +306,13 @@ dcCompression_ProcessCallback(void *pRespMsg)
pCompRespMsg->comp_resp_pars.output_byte_counter;
pSessionDesc->cumulativeConsumedBytes += pResults->consumed;
if (CPA_DC_OVERFLOW != (Cpa8S)xlatErr) {
/* Handle Checksum for end to end data integrity. */
if (CPA_TRUE ==
pService->generic_service_info.integrityCrcCheck &&
CPA_TRUE == integrityCrcCheck) {
pSessionDesc->previousChecksum =
pSessionDesc->seedSwCrc.swCrc32I;
} else if (CPA_DC_OVERFLOW != (Cpa8S)xlatErr) {
if (CPA_DC_CRC32 == pSessionDesc->checksumType) {
pResults->checksum =
pCompRespMsg->comp_resp_pars.crc.legacy
@ -279,7 +337,7 @@ dcCompression_ProcessCallback(void *pRespMsg)
if ((CPA_DC_OVERFLOW != (Cpa8S)xlatErr) &&
(CPA_TRUE == verifyHwIntegrityCrcs)) {
pSessionDesc->previousChecksum =
pSessionDesc->seedSwCrc.swCrcI;
pSessionDesc->seedSwCrc.swCrc32I;
}
/* Check if a CNV recovery happened and
@ -292,7 +350,7 @@ dcCompression_ProcessCallback(void *pRespMsg)
pService);
}
if (CPA_TRUE == pSessionDesc->isDcDp) {
if (CPA_TRUE == isDcDp) {
if (pResponse)
pResponse->responseStatus = CPA_STATUS_SUCCESS;
} else {
@ -305,8 +363,26 @@ dcCompression_ProcessCallback(void *pRespMsg)
}
}
} else {
#ifdef ICP_DC_RETURN_COUNTERS_ON_ERROR
/* Extract the response from the firmware */
pResults->consumed =
pCompRespMsg->comp_resp_pars.input_byte_counter;
pResults->produced =
pCompRespMsg->comp_resp_pars.output_byte_counter;
if (CPA_DC_STATEFUL == pSessionDesc->sessState) {
pSessionDesc->cumulativeConsumedBytes +=
pResults->consumed;
} else {
/* In the stateless case all requests have both SOP and
* EOP set */
pSessionDesc->cumulativeConsumedBytes =
pResults->consumed;
}
#else
pResults->consumed = 0;
pResults->produced = 0;
#endif
if (CPA_DC_OVERFLOW == pResults->status &&
CPA_DC_STATELESS == pSessionDesc->sessState) {
/* This error message will be returned by Data Plane API
@ -319,7 +395,7 @@ dcCompression_ProcessCallback(void *pRespMsg)
"Unrecoverable error: stateless overflow. You may need to increase the size of your destination buffer.\n");
}
if (CPA_TRUE == pSessionDesc->isDcDp) {
if (CPA_TRUE == isDcDp) {
if (pResponse)
pResponse->responseStatus = CPA_STATUS_FAIL;
} else {
@ -338,7 +414,7 @@ dcCompression_ProcessCallback(void *pRespMsg)
}
}
if (CPA_TRUE == pSessionDesc->isDcDp) {
if (CPA_TRUE == isDcDp) {
/* Decrement number of stateless pending callbacks for session
*/
pSessionDesc->pendingDpStatelessCbCount--;
@ -383,7 +459,7 @@ dcCompression_ProcessCallback(void *pRespMsg)
* @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in
*
*****************************************************************************/
static CpaStatus
CpaStatus
dcCheckOpData(sal_compression_service_t *pService, CpaDcOpData *pOpData)
{
CpaDcSkipMode skipMode = 0;
@ -440,6 +516,14 @@ dcCheckOpData(sal_compression_service_t *pService, CpaDcOpData *pOpData)
"supported on this device");
return CPA_STATUS_INVALID_PARAM;
}
if (CPA_TRUE == pOpData->integrityCrcCheck &&
NULL == pOpData->pCrcData) {
LAC_INVALID_PARAM_LOG("Integrity CRC data structure "
"not intialized in CpaDcOpData");
return CPA_STATUS_INVALID_PARAM;
}
return CPA_STATUS_SUCCESS;
}
@ -583,8 +667,9 @@ dcCheckDestinationData(sal_compression_service_t *pService,
if (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType) {
/* Check if intermediate buffers are supported */
if ((0 == pService->pInterBuffPtrsArrayPhyAddr) ||
(NULL == pService->pInterBuffPtrsArray)) {
if ((isDcGen2x(pService)) &&
((0 == pService->pInterBuffPtrsArrayPhyAddr) ||
(NULL == pService->pInterBuffPtrsArray))) {
LAC_LOG_ERROR(
"No intermediate buffer defined for this instance "
"- see cpaDcStartInstance");
@ -702,6 +787,7 @@ dcCreateRequest(dc_compression_cookie_t *pCookie,
Cpa8U crcMode = ICP_QAT_FW_COMP_CRC_MODE_LEGACY;
Cpa8U cnvDecompReq = ICP_QAT_FW_COMP_NO_CNV;
Cpa8U cnvRecovery = ICP_QAT_FW_COMP_NO_CNV_RECOVERY;
CpaBoolean cnvErrorInjection = ICP_QAT_FW_COMP_NO_CNV_DFX;
CpaBoolean integrityCrcCheck = CPA_FALSE;
CpaStatus status = CPA_STATUS_SUCCESS;
CpaDcFlush flush = CPA_DC_FLUSH_NONE;
@ -757,7 +843,7 @@ dcCreateRequest(dc_compression_cookie_t *pCookie,
* bigger as allocated by the user. We ensure that this is not the case
* in dcCheckSourceData and cast the values to Cpa32U here */
pCookie->srcTotalDataLenInBytes = (Cpa32U)srcTotalDataLenInBytes;
if ((DC_COMPRESSION_REQUEST == compDecomp) &&
if ((isDcGen2x(pService)) && (DC_COMPRESSION_REQUEST == compDecomp) &&
(CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType)) {
if (pService->minInterBuffSizeInBytes <
(Cpa32U)dstTotalDataLenInBytes) {
@ -813,9 +899,9 @@ dcCreateRequest(dc_compression_cookie_t *pCookie,
initial_crc32 = 0;
if (CPA_DC_ADLER32 == pSessionDesc->checksumType) {
pSessionDesc->previousChecksum = 1;
pSessionDesc->previousChecksum = initial_adler;
} else {
pSessionDesc->previousChecksum = 0;
pSessionDesc->previousChecksum = initial_crc32;
}
} else if (CPA_DC_STATELESS == pSessionDesc->sessState) {
pSessionDesc->previousChecksum = pResults->checksum;
@ -906,7 +992,6 @@ dcCreateRequest(dc_compression_cookie_t *pCookie,
eop = ICP_QAT_FW_COMP_NOT_EOP;
}
} else {
if (DC_REQUEST_FIRST == pSessionDesc->requestType) {
/* Reinitialise the cumulative amount of consumed bytes
*/
@ -928,6 +1013,9 @@ dcCreateRequest(dc_compression_cookie_t *pCookie,
* cnvDecompReq also needs to be set */
case DC_CNV:
cnvDecompReq = ICP_QAT_FW_COMP_CNV;
if (isDcGen4x(pService)) {
cnvErrorInjection = pSessionDesc->cnvErrorInjection;
}
break;
case DC_NO_CNV:
cnvDecompReq = ICP_QAT_FW_COMP_NO_CNV;
@ -936,8 +1024,14 @@ dcCreateRequest(dc_compression_cookie_t *pCookie,
}
/* LW 18 */
rpCmdFlags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
sop, eop, bFinal, cnvDecompReq, cnvRecovery, crcMode);
rpCmdFlags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop,
eop,
bFinal,
cnvDecompReq,
cnvRecovery,
cnvErrorInjection,
crcMode);
pMsg->comp_pars.req_par_flags = rpCmdFlags;
/* Populates the QAT common request middle part of the message
@ -1244,10 +1338,7 @@ dcZeroLengthRequests(sal_compression_service_t *pService,
COMPRESSION_STAT_INC(numDecompCompleted, pService);
}
if (CPA_STATUS_SUCCESS !=
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock))) {
LAC_LOG_ERROR("Cannot unlock session lock");
}
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock));
if ((NULL != pCbFunc) &&
(LacSync_GenWakeupSyncCaller != pCbFunc)) {
@ -1371,7 +1462,7 @@ cpaDcCompressData(CpaInstanceHandle dcInstance,
return dcCompDecompData(pService,
pSessionDesc,
dcInstance,
insHandle,
pSessionHandle,
pSrcBuff,
pDestBuff,
@ -1506,10 +1597,7 @@ cpaDcCompressData2(CpaInstanceHandle dcInstance,
if (CPA_DC_STATEFUL == pSessionDesc->sessState) {
/* Lock the session to check if there are in-flight stateful
* requests */
if (CPA_STATUS_SUCCESS !=
LAC_SPINLOCK(&(pSessionDesc->sessionLock))) {
LAC_LOG_ERROR("Cannot unlock session lock");
}
LAC_SPINLOCK(&(pSessionDesc->sessionLock));
/* Check if there is already one in-flight stateful request */
if (0 !=
@ -1517,10 +1605,7 @@ cpaDcCompressData2(CpaInstanceHandle dcInstance,
&(pSessionDesc->pendingStatefulCbCount))) {
LAC_LOG_ERROR(
"Only one in-flight stateful request supported");
if (CPA_STATUS_SUCCESS !=
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock))) {
LAC_LOG_ERROR("Cannot unlock session lock");
}
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock));
return CPA_STATUS_RETRY;
}
@ -1537,10 +1622,7 @@ cpaDcCompressData2(CpaInstanceHandle dcInstance,
}
qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount));
if (CPA_STATUS_SUCCESS !=
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock))) {
LAC_LOG_ERROR("Cannot unlock session lock");
}
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock));
}
if (CPA_TRUE == pOpData->compressAndVerify) {
@ -1549,7 +1631,7 @@ cpaDcCompressData2(CpaInstanceHandle dcInstance,
return dcCompDecompData(pService,
pSessionDesc,
dcInstance,
insHandle,
pSessionHandle,
pSrcBuff,
pDestBuff,
@ -1659,16 +1741,50 @@ cpaDcDecompressData(CpaInstanceHandle dcInstance,
pService = (sal_compression_service_t *)insHandle;
/* Check if SAL is initialised otherwise return an error */
SAL_RUNNING_CHECK(insHandle);
/* This check is outside the parameter checking as it is needed to
* manage zero length requests */
if (CPA_STATUS_SUCCESS !=
LacBuffDesc_BufferListVerifyNull(pSrcBuff,
&srcBuffSize,
LAC_NO_ALIGNMENT_SHIFT)) {
QAT_UTILS_LOG("Invalid source buffer list parameter");
return CPA_STATUS_INVALID_PARAM;
}
/* Ensure this is a compression instance */
SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION);
if (dcCheckSourceData(pSessionHandle,
pSrcBuff,
pDestBuff,
pResults,
flushFlag,
srcBuffSize,
NULL) != CPA_STATUS_SUCCESS) {
return CPA_STATUS_INVALID_PARAM;
}
if (dcCheckDestinationData(pService,
pSessionHandle,
pDestBuff,
DC_DECOMPRESSION_REQUEST) !=
CPA_STATUS_SUCCESS) {
return CPA_STATUS_INVALID_PARAM;
}
pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle);
if (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) {
QAT_UTILS_LOG("Invalid sessDirection value");
return CPA_STATUS_INVALID_PARAM;
}
if (CPA_DC_STATEFUL == pSessionDesc->sessState) {
/* Lock the session to check if there are in-flight stateful
* requests */
if (CPA_STATUS_SUCCESS !=
LAC_SPINLOCK(&(pSessionDesc->sessionLock))) {
LAC_LOG_ERROR("Cannot lock session lock");
return CPA_STATUS_RESOURCE;
}
LAC_SPINLOCK(&(pSessionDesc->sessionLock));
/* Check if there is already one in-flight stateful request */
if (0 !=
@ -1676,37 +1792,36 @@ cpaDcDecompressData(CpaInstanceHandle dcInstance,
&(pSessionDesc->pendingStatefulCbCount))) {
LAC_LOG_ERROR(
"Only one in-flight stateful request supported");
if (CPA_STATUS_SUCCESS !=
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock))) {
LAC_LOG_ERROR("Cannot unlock session lock");
}
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock));
return CPA_STATUS_RETRY;
}
if ((0 == srcBuffSize) ||
((1 == srcBuffSize) && (CPA_DC_FLUSH_FINAL != flushFlag) &&
(CPA_DC_FLUSH_FULL != flushFlag))) {
if (CPA_TRUE ==
dcZeroLengthRequests(pService,
pSessionDesc,
pResults,
flushFlag,
callbackTag,
DC_DECOMPRESSION_REQUEST)) {
return CPA_STATUS_SUCCESS;
/* Gen 4 handle 0 len requests in FW */
if (isDcGen2x(pService)) {
if ((0 == srcBuffSize) ||
((1 == srcBuffSize) &&
(CPA_DC_FLUSH_FINAL != flushFlag) &&
(CPA_DC_FLUSH_FULL != flushFlag))) {
if (CPA_TRUE ==
dcZeroLengthRequests(
pService,
pSessionDesc,
pResults,
flushFlag,
callbackTag,
DC_DECOMPRESSION_REQUEST)) {
return CPA_STATUS_SUCCESS;
}
}
}
qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount));
if (CPA_STATUS_SUCCESS !=
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock))) {
LAC_LOG_ERROR("Cannot unlock session lock");
}
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock));
}
return dcCompDecompData(pService,
pSessionDesc,
dcInstance,
insHandle,
pSessionHandle,
pSrcBuff,
pDestBuff,
@ -1768,12 +1883,89 @@ cpaDcDecompressData2(CpaInstanceHandle dcInstance,
pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle);
if (CPA_DC_STATEFUL == pSessionDesc->sessState) {
LAC_INVALID_PARAM_LOG("Invalid session: Stateful session is "
"not supported");
LAC_CHECK_NULL_PARAM(insHandle);
/* Check if SAL is initialised otherwise return an error */
SAL_RUNNING_CHECK(insHandle);
/* This check is outside the parameter checking as it is needed to
* manage zero length requests */
if (CPA_STATUS_SUCCESS !=
LacBuffDesc_BufferListVerifyNull(pSrcBuff,
&srcBuffSize,
LAC_NO_ALIGNMENT_SHIFT)) {
QAT_UTILS_LOG("Invalid source buffer list parameter");
return CPA_STATUS_INVALID_PARAM;
}
/* Ensure this is a compression instance */
SAL_CHECK_INSTANCE_TYPE(insHandle, SAL_SERVICE_TYPE_COMPRESSION);
if (CPA_STATUS_SUCCESS !=
dcCheckSourceData(pSessionHandle,
pSrcBuff,
pDestBuff,
pResults,
CPA_DC_FLUSH_NONE,
srcBuffSize,
NULL)) {
return CPA_STATUS_INVALID_PARAM;
}
if (CPA_STATUS_SUCCESS !=
dcCheckDestinationData(pService,
pSessionHandle,
pDestBuff,
DC_DECOMPRESSION_REQUEST)) {
return CPA_STATUS_INVALID_PARAM;
}
if (CPA_STATUS_SUCCESS != dcCheckOpData(pService, pOpData)) {
return CPA_STATUS_INVALID_PARAM;
}
if (CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) {
QAT_UTILS_LOG("Invalid sessDirection value");
return CPA_STATUS_INVALID_PARAM;
}
if (CPA_DC_STATEFUL == pSessionDesc->sessState) {
/* Lock the session to check if there are in-flight stateful
* requests */
LAC_SPINLOCK(&(pSessionDesc->sessionLock));
/* Check if there is already one in-flight stateful request */
if (0 !=
qatUtilsAtomicGet(
&(pSessionDesc->pendingStatefulCbCount))) {
LAC_LOG_ERROR(
"Only one in-flight stateful request supported");
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock));
return CPA_STATUS_RETRY;
}
/* Gen 4 handle 0 len requests in FW */
if (isDcGen2x(pService)) {
if ((0 == srcBuffSize) ||
((1 == srcBuffSize) &&
(CPA_DC_FLUSH_FINAL != pOpData->flushFlag) &&
(CPA_DC_FLUSH_FULL != pOpData->flushFlag))) {
if (CPA_TRUE ==
dcZeroLengthRequests(
pService,
pSessionDesc,
pResults,
pOpData->flushFlag,
callbackTag,
DC_DECOMPRESSION_REQUEST)) {
return CPA_STATUS_SUCCESS;
}
}
}
qatUtilsAtomicInc(&(pSessionDesc->pendingStatefulCbCount));
LAC_SPINUNLOCK(&(pSessionDesc->sessionLock));
}
return dcCompDecompData(pService,
pSessionDesc,
insHandle,

View File

@ -41,6 +41,7 @@
#include "sal_service_state.h"
#include "sal_qat_cmn_msg.h"
#include "icp_sal_poll.h"
#include "sal_hw_gen.h"
/**
*****************************************************************************
@ -87,8 +88,8 @@ dcDataPlaneParamCheck(const CpaDcDpOpData *pOpData)
/* Compressing zero byte is not supported */
if ((CPA_DC_DIR_COMPRESS == pSessionDesc->sessDirection) &&
(0 == pOpData->bufferLenToCompress)) {
QAT_UTILS_LOG(
"The source buffer length to compress needs to be greater than zero byte.\n");
QAT_UTILS_LOG("The source buffer length to compress needs to "
"be greater than zero byte.\n");
return CPA_STATUS_INVALID_PARAM;
}
@ -171,8 +172,7 @@ dcDataPlaneParamCheck(const CpaDcDpOpData *pOpData)
} else {
/* We are assuming that there is enough memory in the source and
* destination buffer lists. We only receive physical addresses
* of the
* buffers so we are unable to test it here */
* of the buffers so we are unable to test it here */
LAC_CHECK_8_BYTE_ALIGNMENT(pOpData->srcBuffer);
LAC_CHECK_8_BYTE_ALIGNMENT(pOpData->destBuffer);
}
@ -183,8 +183,9 @@ dcDataPlaneParamCheck(const CpaDcDpOpData *pOpData)
(CPA_DC_DIR_COMBINED == pSessionDesc->sessDirection)) {
if (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType) {
/* Check if Intermediate Buffer Array pointer is NULL */
if ((0 == pService->pInterBuffPtrsArrayPhyAddr) ||
(NULL == pService->pInterBuffPtrsArray)) {
if (isDcGen2x(pService) &&
((0 == pService->pInterBuffPtrsArrayPhyAddr) ||
(NULL == pService->pInterBuffPtrsArray))) {
QAT_UTILS_LOG(
"No intermediate buffer defined for this instance - see cpaDcStartInstance.\n");
return CPA_STATUS_INVALID_PARAM;
@ -312,7 +313,10 @@ dcDpWriteRingMsg(CpaDcDpOpData *pOpData, icp_qat_fw_comp_req_t *pCurrentQatMsg)
Cpa8U cnvDecompReq = ICP_QAT_FW_COMP_NO_CNV;
Cpa8U cnvnrCompReq = ICP_QAT_FW_COMP_NO_CNV_RECOVERY;
CpaBoolean cnvErrorInjection = ICP_QAT_FW_COMP_NO_CNV_DFX;
sal_compression_service_t *pService = NULL;
pService = (sal_compression_service_t *)(pOpData->dcInstance);
pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pOpData->pSessionHandle);
if (CPA_DC_DIR_COMPRESS == pOpData->sessDirection) {
@ -320,6 +324,11 @@ dcDpWriteRingMsg(CpaDcDpOpData *pOpData, icp_qat_fw_comp_req_t *pCurrentQatMsg)
/* CNV check */
if (CPA_TRUE == pOpData->compressAndVerify) {
cnvDecompReq = ICP_QAT_FW_COMP_CNV;
if (isDcGen4x(pService)) {
cnvErrorInjection =
pSessionDesc->cnvErrorInjection;
}
/* CNVNR check */
if (CPA_TRUE == pOpData->compressAndVerifyAndRecover) {
cnvnrCompReq = ICP_QAT_FW_COMP_CNV_RECOVERY;
@ -343,7 +352,13 @@ dcDpWriteRingMsg(CpaDcDpOpData *pOpData, icp_qat_fw_comp_req_t *pCurrentQatMsg)
pCurrentQatMsg->comp_pars.req_par_flags |=
ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
0, 0, 0, cnvDecompReq, cnvnrCompReq, 0);
ICP_QAT_FW_COMP_NOT_SOP,
ICP_QAT_FW_COMP_NOT_EOP,
ICP_QAT_FW_COMP_NOT_BFINAL,
cnvDecompReq,
cnvnrCompReq,
cnvErrorInjection,
ICP_QAT_FW_COMP_CRC_MODE_LEGACY);
SalQatMsg_CmnMidWrite((icp_qat_fw_la_bulk_req_t *)pCurrentQatMsg,
pOpData,

View File

@ -23,6 +23,7 @@
#include "icp_qat_fw.h"
#include "icp_qat_fw_comp.h"
#include "icp_qat_hw.h"
#include "icp_qat_hw_20_comp.h"
/*
*******************************************************************************
@ -36,6 +37,7 @@
#include "lac_buffer_desc.h"
#include "sal_service_state.h"
#include "sal_qat_cmn_msg.h"
#include "sal_hw_gen.h"
/**
*****************************************************************************
@ -54,7 +56,7 @@
* @retval CPA_STATUS_UNSUPPORTED Unsupported algorithm/feature
*
*****************************************************************************/
static CpaStatus
CpaStatus
dcCheckSessionData(const CpaDcSessionSetupData *pSessionData,
CpaInstanceHandle dcInstance)
{
@ -67,6 +69,7 @@ dcCheckSessionData(const CpaDcSessionSetupData *pSessionData,
QAT_UTILS_LOG("Invalid compLevel value\n");
return CPA_STATUS_INVALID_PARAM;
}
if ((pSessionData->autoSelectBestHuffmanTree < CPA_DC_ASB_DISABLED) ||
(pSessionData->autoSelectBestHuffmanTree >
CPA_DC_ASB_UNCOMP_STATIC_DYNAMIC_WITH_NO_HDRS)) {
@ -122,10 +125,10 @@ dcCheckSessionData(const CpaDcSessionSetupData *pSessionData,
*
*****************************************************************************/
static void
dcCompHwBlockPopulate(dc_session_desc_t *pSessionDesc,
dcCompHwBlockPopulate(sal_compression_service_t *pService,
dc_session_desc_t *pSessionDesc,
icp_qat_hw_compression_config_t *pCompConfig,
dc_request_dir_t compDecomp,
icp_qat_hw_compression_delayed_match_t enableDmm)
dc_request_dir_t compDecomp)
{
icp_qat_hw_compression_direction_t dir =
ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;
@ -134,6 +137,7 @@ dcCompHwBlockPopulate(dc_session_desc_t *pSessionDesc,
icp_qat_hw_compression_depth_t depth = ICP_QAT_HW_COMPRESSION_DEPTH_1;
icp_qat_hw_compression_file_type_t filetype =
ICP_QAT_HW_COMPRESSION_FILE_TYPE_0;
icp_qat_hw_compression_delayed_match_t dmm;
/* Set the direction */
if (DC_COMPRESSION_REQUEST == compDecomp) {
@ -148,6 +152,13 @@ dcCompHwBlockPopulate(dc_session_desc_t *pSessionDesc,
QAT_UTILS_LOG("Algorithm not supported for Compression\n");
}
/* Set delay match mode */
if (CPA_TRUE == pService->comp_device_data.enableDmm) {
dmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED;
} else {
dmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED;
}
/* Set the depth */
if (DC_DECOMPRESSION_REQUEST == compDecomp) {
depth = ICP_QAT_HW_COMPRESSION_DEPTH_1;
@ -162,8 +173,13 @@ dcCompHwBlockPopulate(dc_session_desc_t *pSessionDesc,
case CPA_DC_L3:
depth = ICP_QAT_HW_COMPRESSION_DEPTH_8;
break;
default:
case CPA_DC_L4:
depth = ICP_QAT_HW_COMPRESSION_DEPTH_16;
break;
default:
depth = pService->comp_device_data
.highestHwCompressionDepth;
break;
}
}
@ -171,10 +187,138 @@ dcCompHwBlockPopulate(dc_session_desc_t *pSessionDesc,
* modes will be used in the future for precompiled huffman trees */
filetype = ICP_QAT_HW_COMPRESSION_FILE_TYPE_0;
pCompConfig->val = ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
dir, enableDmm, algo, depth, filetype);
pCompConfig->lower_val = ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
dir, dmm, algo, depth, filetype);
pCompConfig->reserved = 0;
/* Upper 32-bits of the configuration word do not need to be
* configured with legacy devices.
*/
pCompConfig->upper_val = 0;
}
/*
 * Populate the compression hardware configuration words (lower/upper CSR
 * values) of the content descriptor for GEN4 (QAT 2.0) devices.
 *
 * pService     - compression service instance (supplies device tuning data)
 * pSessionDesc - session descriptor (algorithm, Huffman type, level, state)
 * pCompConfig  - out: hardware configuration words to program
 * compDecomp   - direction: compression or decompression request
 *
 * The CSR field values below are hardware-defined; their exact settings
 * come from the GEN4 compression slice programming model.
 */
static void
dcCompHwBlockPopulateGen4(sal_compression_service_t *pService,
			  dc_session_desc_t *pSessionDesc,
			  icp_qat_hw_compression_config_t *pCompConfig,
			  dc_request_dir_t compDecomp)
{
	/* Compression related */
	if (DC_COMPRESSION_REQUEST == compDecomp) {
		icp_qat_hw_comp_20_config_csr_upper_t hw_comp_upper_csr;
		icp_qat_hw_comp_20_config_csr_lower_t hw_comp_lower_csr;

		/* Start from all-zero CSRs; only set what the session needs. */
		memset(&hw_comp_upper_csr, 0, sizeof hw_comp_upper_csr);
		memset(&hw_comp_lower_csr, 0, sizeof hw_comp_lower_csr);

		/* Disable Literal + Length Limit Block Drop by default and
		 * enable it only for dynamic deflate compression.
		 */
		hw_comp_lower_csr.lllbd =
		    ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED;

		switch (pSessionDesc->compType) {
		case CPA_DC_DEFLATE:
			/* DEFLATE algorithm settings */
			hw_comp_lower_csr.skip_ctrl =
			    ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;

			if (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType) {
				/* Dynamic Huffman uses the iLZ77 format. */
				hw_comp_lower_csr.algo =
				    ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
			} else /* Static DEFLATE */
			{
				hw_comp_lower_csr.algo =
				    ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE;
				hw_comp_upper_csr.scb_ctrl =
				    ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE;
			}

			if (CPA_DC_STATEFUL == pSessionDesc->sessState) {
				/* Stateful sessions need start-of-message
				 * replay mode. */
				hw_comp_upper_csr.som_ctrl =
				    ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE;
			}
			break;
		default:
			QAT_UTILS_LOG("Compression algorithm not supported\n");
			break;
		}

		/* Set the search depth */
		switch (pSessionDesc->compLevel) {
		case CPA_DC_L1:
		case CPA_DC_L2:
		case CPA_DC_L3:
		case CPA_DC_L4:
		case CPA_DC_L5:
			hw_comp_lower_csr.sd =
			    ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
			hw_comp_lower_csr.hash_col =
			    ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW;
			break;
		case CPA_DC_L6:
		case CPA_DC_L7:
		case CPA_DC_L8:
			hw_comp_lower_csr.sd =
			    ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6;
			break;
		case CPA_DC_L9:
			hw_comp_lower_csr.sd =
			    ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9;
			break;
		default:
			/* Levels beyond L9 fall back to the deepest search
			 * depth this device supports. */
			hw_comp_lower_csr.sd = pService->comp_device_data
						   .highestHwCompressionDepth;
			if ((CPA_DC_HT_FULL_DYNAMIC ==
			     pSessionDesc->huffType) &&
			    (CPA_DC_DEFLATE == pSessionDesc->compType)) {
				/* Enable Literal + Length Limit Block Drop
				 * with dynamic deflate compression when
				 * highest compression levels are selected.
				 */
				hw_comp_lower_csr.lllbd =
				    ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
			}
			break;
		}

		/* Same for all algorithms */
		hw_comp_lower_csr.abd = ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED;
		hw_comp_lower_csr.hash_update =
		    ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
		/* Extended delay match mode follows the per-device DMM
		 * capability flag. */
		hw_comp_lower_csr.edmm =
		    (CPA_TRUE == pService->comp_device_data.enableDmm) ?
		    ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED :
		    ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED;

		/* Hard-coded HW-specific values */
		hw_comp_upper_csr.nice =
		    ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
		hw_comp_upper_csr.lazy =
		    ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;

		pCompConfig->upper_val =
		    ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
		pCompConfig->lower_val =
		    ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
	} else /* Decompress */
	{
		icp_qat_hw_decomp_20_config_csr_lower_t hw_decomp_lower_csr;

		memset(&hw_decomp_lower_csr, 0, sizeof hw_decomp_lower_csr);

		/* Set the algorithm */
		if (CPA_DC_DEFLATE == pSessionDesc->compType) {
			hw_decomp_lower_csr.algo =
			    ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
		} else {
			QAT_UTILS_LOG("Algorithm not supported for "
				      "Decompression\n");
		}

		/* Decompression only programs the lower configuration word. */
		pCompConfig->upper_val = 0;
		pCompConfig->lower_val =
		    ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(
			hw_decomp_lower_csr);
	}
}
/**
@ -268,10 +412,19 @@ dcCompContentDescPopulate(sal_compression_service_t *pService,
pCompControlBlock->resrvd = 0;
/* Populate Compression Hardware Setup Block */
dcCompHwBlockPopulate(pSessionDesc,
pCompConfig,
compDecomp,
pService->comp_device_data.enableDmm);
if (isDcGen4x(pService)) {
dcCompHwBlockPopulateGen4(pService,
pSessionDesc,
pCompConfig,
compDecomp);
} else if (isDcGen2x(pService)) {
dcCompHwBlockPopulate(pService,
pSessionDesc,
pCompConfig,
compDecomp);
} else {
QAT_UTILS_LOG("Invalid QAT generation value\n");
}
}
/**
@ -286,7 +439,7 @@ dcCompContentDescPopulate(sal_compression_service_t *pService,
* @param[in] nextSlice Next slice
*
*****************************************************************************/
static void
void
dcTransContentDescPopulate(icp_qat_fw_comp_req_t *pMsg,
icp_qat_fw_slice_t nextSlice)
{
@ -333,14 +486,70 @@ dcGetContextSize(CpaInstanceHandle dcInstance,
*pContextSize = 0;
if ((CPA_DC_STATEFUL == pSessionData->sessState) &&
(CPA_DC_DEFLATE == pSessionData->compType) &&
(CPA_DC_DIR_COMPRESS != pSessionData->sessDirection)) {
*pContextSize =
pCompService->comp_device_data.inflateContextSize;
switch (pSessionData->compType) {
case CPA_DC_DEFLATE:
*pContextSize =
pCompService->comp_device_data.inflateContextSize;
break;
default:
QAT_UTILS_LOG("Invalid compression algorithm.");
return CPA_STATUS_FAIL;
}
}
return CPA_STATUS_SUCCESS;
}
/**
 * Get the firmware compression command id for the given session setup data.
 *
 * @param[in]  pService      Pointer to the compression service
 * @param[in]  pSessionData  Session setup data (algorithm and Huffman type)
 * @param[out] pDcCmdId      Returned firmware command id
 *
 * @retval CPA_STATUS_SUCCESS      Command id was selected
 * @retval CPA_STATUS_UNSUPPORTED  Algorithm not supported for compression
 */
CpaStatus
dcGetCompressCommandId(sal_compression_service_t *pService,
		       CpaDcSessionSetupData *pSessionData,
		       Cpa8U *pDcCmdId)
{
	CpaStatus status = CPA_STATUS_SUCCESS;

	LAC_CHECK_NULL_PARAM(pService);
	LAC_CHECK_NULL_PARAM(pSessionData);
	LAC_CHECK_NULL_PARAM(pDcCmdId);

	if (CPA_DC_DEFLATE == pSessionData->compType) {
		/* Dynamic Huffman trees need the dynamic firmware command;
		 * everything else uses the static one. */
		if (CPA_DC_HT_FULL_DYNAMIC == pSessionData->huffType)
			*pDcCmdId = ICP_QAT_FW_COMP_CMD_DYNAMIC;
		else
			*pDcCmdId = ICP_QAT_FW_COMP_CMD_STATIC;
	} else {
		QAT_UTILS_LOG("Algorithm not supported for "
			      "compression\n");
		status = CPA_STATUS_UNSUPPORTED;
	}

	return status;
}
/**
 * Get the firmware decompression command id for the given session setup data.
 *
 * @param[in]  pService      Pointer to the compression service
 * @param[in]  pSessionData  Session setup data (algorithm)
 * @param[out] pDcCmdId      Returned firmware command id
 *
 * @retval CPA_STATUS_SUCCESS      Command id was selected
 * @retval CPA_STATUS_UNSUPPORTED  Algorithm not supported for decompression
 */
CpaStatus
dcGetDecompressCommandId(sal_compression_service_t *pService,
			 CpaDcSessionSetupData *pSessionData,
			 Cpa8U *pDcCmdId)
{
	CpaStatus status = CPA_STATUS_SUCCESS;

	LAC_CHECK_NULL_PARAM(pService);
	LAC_CHECK_NULL_PARAM(pSessionData);
	LAC_CHECK_NULL_PARAM(pDcCmdId);

	if (CPA_DC_DEFLATE == pSessionData->compType) {
		*pDcCmdId = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
	} else {
		QAT_UTILS_LOG("Algorithm not supported for "
			      "decompression\n");
		status = CPA_STATUS_UNSUPPORTED;
	}

	return status;
}
CpaStatus
dcInitSession(CpaInstanceHandle dcInstance,
CpaDcSessionHandle pSessionHandle,
@ -394,7 +603,17 @@ dcInitSession(CpaInstanceHandle dcInstance,
return CPA_STATUS_UNSUPPORTED;
}
if (CPA_DC_HT_FULL_DYNAMIC == pSessionData->huffType) {
/* Check for Gen4 and stateful, return error if both exist */
if ((isDcGen4x(pService)) &&
(CPA_DC_STATEFUL == pSessionData->sessState &&
CPA_DC_DIR_DECOMPRESS != pSessionData->sessDirection)) {
QAT_UTILS_LOG("Stateful sessions are not supported for "
"compression direction");
return CPA_STATUS_UNSUPPORTED;
}
if ((isDcGen2x(pService)) &&
(CPA_DC_HT_FULL_DYNAMIC == pSessionData->huffType)) {
/* Test if DRAM is available for the intermediate buffers */
if ((NULL == pService->pInterBuffPtrsArray) &&
(0 == pService->pInterBuffPtrsArrayPhyAddr)) {
@ -404,7 +623,8 @@ dcInitSession(CpaInstanceHandle dcInstance,
pSessionData->huffType = CPA_DC_HT_STATIC;
} else {
QAT_UTILS_LOG(
"No buffer defined for this instance - see cpaDcStartInstance.\n");
"No buffer defined for this instance - "
"see cpaDcStartInstance.\n");
return CPA_STATUS_RESOURCE;
}
}
@ -541,7 +761,8 @@ dcInitSession(CpaInstanceHandle dcInstance,
pSessionDesc->pendingDpStatelessCbCount = 0;
if (CPA_DC_DIR_DECOMPRESS != pSessionData->sessDirection) {
if (CPA_DC_HT_FULL_DYNAMIC == pSessionData->huffType) {
if ((isDcGen2x(pService)) &&
CPA_DC_HT_FULL_DYNAMIC == pSessionData->huffType) {
/* Populate the compression section of the content
* descriptor */
dcCompContentDescPopulate(pService,
@ -607,17 +828,29 @@ dcInitSession(CpaInstanceHandle dcInstance,
pDataIntegrityCrcs = &pSessionDesc->dataIntegrityCrcs;
pDataIntegrityCrcs->crc32 = 0;
pDataIntegrityCrcs->adler32 = 1;
pDataIntegrityCrcs->oCrc32Cpr = DC_INVALID_CRC;
pDataIntegrityCrcs->iCrc32Cpr = DC_INVALID_CRC;
pDataIntegrityCrcs->oCrc32Xlt = DC_INVALID_CRC;
pDataIntegrityCrcs->iCrc32Xlt = DC_INVALID_CRC;
pDataIntegrityCrcs->xorFlags = DC_XOR_FLAGS_DEFAULT;
pDataIntegrityCrcs->crcPoly = DC_CRC_POLY_DEFAULT;
pDataIntegrityCrcs->xorOut = DC_XOR_OUT_DEFAULT;
/* Initialise seed checksums */
pSessionDesc->seedSwCrc.swCrcI = 0;
pSessionDesc->seedSwCrc.swCrcO = 0;
if (isDcGen2x(pService)) {
pDataIntegrityCrcs->oCrc32Cpr = DC_INVALID_CRC;
pDataIntegrityCrcs->iCrc32Cpr = DC_INVALID_CRC;
pDataIntegrityCrcs->oCrc32Xlt = DC_INVALID_CRC;
pDataIntegrityCrcs->iCrc32Xlt = DC_INVALID_CRC;
pDataIntegrityCrcs->xorFlags = DC_XOR_FLAGS_DEFAULT;
pDataIntegrityCrcs->crcPoly = DC_CRC_POLY_DEFAULT;
pDataIntegrityCrcs->xorOut = DC_XOR_OUT_DEFAULT;
} else {
pDataIntegrityCrcs->iCrc64Cpr = DC_INVALID_CRC;
pDataIntegrityCrcs->oCrc64Cpr = DC_INVALID_CRC;
pDataIntegrityCrcs->iCrc64Xlt = DC_INVALID_CRC;
pDataIntegrityCrcs->oCrc64Xlt = DC_INVALID_CRC;
pDataIntegrityCrcs->crc64Poly = DC_CRC64_POLY_DEFAULT;
pDataIntegrityCrcs->xor64Out = DC_XOR64_OUT_DEFAULT;
}
/* Initialise seed checksums.
 * Because of the union, this also initializes swCrc32I and swCrc32O.
 */
pSessionDesc->seedSwCrc.swCrc64I = 0;
pSessionDesc->seedSwCrc.swCrc64O = 0;
/* Populate the cmdFlags */
switch (pSessionDesc->autoSelectBestHuffmanTree) {
@ -646,6 +879,7 @@ dcInitSession(CpaInstanceHandle dcInstance,
ICP_QAT_FW_COMP_BFINAL,
ICP_QAT_FW_COMP_NO_CNV,
ICP_QAT_FW_COMP_NO_CNV_RECOVERY,
ICP_QAT_FW_COMP_NO_CNV_DFX,
ICP_QAT_FW_COMP_CRC_MODE_LEGACY);
cmdFlags =
@ -656,11 +890,16 @@ dcInitSession(CpaInstanceHandle dcInstance,
secureRam);
if (CPA_DC_DIR_DECOMPRESS != pSessionData->sessDirection) {
if (CPA_DC_HT_FULL_DYNAMIC == pSessionDesc->huffType) {
dcCmdId = (icp_qat_fw_la_cmd_id_t)(
ICP_QAT_FW_COMP_CMD_DYNAMIC);
}
status = dcGetCompressCommandId(pService,
pSessionData,
(Cpa8U *)&dcCmdId);
if (CPA_STATUS_SUCCESS != status) {
QAT_UTILS_LOG(
"Couldn't get compress command ID for current "
"session data.");
return status;
}
pReqCache = &(pSessionDesc->reqCacheComp);
pReqCache->comp_pars.req_par_flags = rpCmdFlags;
pReqCache->comp_pars.crc.legacy.initial_adler = 1;
@ -675,8 +914,16 @@ dcInitSession(CpaInstanceHandle dcInstance,
}
if (CPA_DC_DIR_COMPRESS != pSessionData->sessDirection) {
dcCmdId =
(icp_qat_fw_la_cmd_id_t)(ICP_QAT_FW_COMP_CMD_DECOMPRESS);
status = dcGetDecompressCommandId(pService,
pSessionData,
(Cpa8U *)&dcCmdId);
if (CPA_STATUS_SUCCESS != status) {
QAT_UTILS_LOG(
"Couldn't get decompress command ID for current "
"session data.");
return status;
}
pReqCache = &(pSessionDesc->reqCacheDecomp);
pReqCache->comp_pars.req_par_flags = rpCmdFlags;
pReqCache->comp_pars.crc.legacy.initial_adler = 1;
@ -730,10 +977,14 @@ cpaDcResetSession(const CpaInstanceHandle dcInstance,
{
CpaStatus status = CPA_STATUS_SUCCESS;
CpaInstanceHandle insHandle = NULL;
sal_compression_service_t *pService = NULL;
dc_session_desc_t *pSessionDesc = NULL;
Cpa64U numPendingStateless = 0;
Cpa64U numPendingStateful = 0;
icp_comms_trans_handle trans_handle = NULL;
dc_integrity_crc_fw_t *pDataIntegrityCrcs = NULL;
dc_sw_checksums_t *pSwCrcs = NULL;
LAC_CHECK_NULL_PARAM(pSessionHandle);
pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle);
LAC_CHECK_NULL_PARAM(pSessionDesc);
@ -752,7 +1003,7 @@ cpaDcResetSession(const CpaInstanceHandle dcInstance,
/* Check if SAL is running otherwise return an error */
SAL_RUNNING_CHECK(insHandle);
if (CPA_TRUE == pSessionDesc->isDcDp) {
trans_handle = ((sal_compression_service_t *)dcInstance)
trans_handle = ((sal_compression_service_t *)insHandle)
->trans_handle_compression_tx;
if (CPA_TRUE == icp_adf_queueDataToSend(trans_handle)) {
/* Process the remaining messages on the ring */
@ -798,7 +1049,32 @@ cpaDcResetSession(const CpaInstanceHandle dcInstance,
} else {
pSessionDesc->previousChecksum = 0;
}
pSessionDesc->cnvErrorInjection = ICP_QAT_FW_COMP_NO_CNV_DFX;
/* Reset integrity CRCs to default parameters. */
pDataIntegrityCrcs = &pSessionDesc->dataIntegrityCrcs;
memset(pDataIntegrityCrcs, 0, sizeof(dc_integrity_crc_fw_t));
pDataIntegrityCrcs->adler32 = 1;
pService = (sal_compression_service_t *)insHandle;
if (isDcGen2x(pService)) {
pDataIntegrityCrcs->xorFlags = DC_XOR_FLAGS_DEFAULT;
pDataIntegrityCrcs->crcPoly = DC_CRC_POLY_DEFAULT;
pDataIntegrityCrcs->xorOut = DC_XOR_OUT_DEFAULT;
} else {
pDataIntegrityCrcs->crc64Poly = DC_CRC64_POLY_DEFAULT;
pDataIntegrityCrcs->xor64Out = DC_XOR64_OUT_DEFAULT;
}
/* Reset seed SW checksums. */
pSwCrcs = &pSessionDesc->seedSwCrc;
memset(pSwCrcs, 0, sizeof(dc_sw_checksums_t));
/* Reset integrity SW checksums. */
pSwCrcs = &pSessionDesc->integritySwCrc;
memset(pSwCrcs, 0, sizeof(dc_sw_checksums_t));
}
/* Reset the pending callback counters */
qatUtilsAtomicSet(0, &pSessionDesc->pendingStatelessCbCount);
qatUtilsAtomicSet(0, &pSessionDesc->pendingStatefulCbCount);
@ -886,12 +1162,7 @@ cpaDcRemoveSession(const CpaInstanceHandle dcInstance,
}
if ((CPA_DC_STATEFUL == pSessionDesc->sessState) &&
(CPA_STATUS_SUCCESS == status)) {
if (CPA_STATUS_SUCCESS !=
LAC_SPINLOCK_DESTROY(
&(pSessionDesc->sessionLock))) {
QAT_UTILS_LOG(
"Failed to destory session lock.\n");
}
LAC_SPINLOCK_DESTROY(&(pSessionDesc->sessionLock));
}
}
@ -955,3 +1226,33 @@ cpaDcGetSessionSize(CpaInstanceHandle dcInstance,
pSessionSize,
pContextSize);
}
/**
 * Enable CnV (Compress-and-Verify) error injection for a session.
 *
 * Sets the session descriptor's cnvErrorInjection flag so subsequent
 * compression requests in the session carry the CnV DFX setting.
 * Not supported on Gen2 devices.
 *
 * @param[in] dcInstance      Instance handle (or CPA_INSTANCE_HANDLE_SINGLE)
 * @param[in] pSessionHandle  Session handle
 *
 * @retval CPA_STATUS_SUCCESS      Flag was set
 * @retval CPA_STATUS_UNSUPPORTED  Feature not available on this device
 */
CpaStatus
dcSetCnvError(CpaInstanceHandle dcInstance, CpaDcSessionHandle pSessionHandle)
{
	CpaInstanceHandle insHandle = NULL;
	sal_compression_service_t *pService = NULL;
	dc_session_desc_t *pSessionDesc = NULL;

	LAC_CHECK_NULL_PARAM(pSessionHandle);

	/* Resolve the "single instance" shorthand to a concrete handle. */
	insHandle = (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) ?
	    dcGetFirstHandle() :
	    dcInstance;
	pService = (sal_compression_service_t *)insHandle;

	if (isDcGen2x(pService)) {
		QAT_UTILS_LOG("Unsupported compression feature.\n");
		return CPA_STATUS_UNSUPPORTED;
	}

	pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pSessionHandle);
	LAC_CHECK_NULL_PARAM(pSessionDesc);
	pSessionDesc->cnvErrorInjection = ICP_QAT_FW_COMP_CNV_DFX;

	return CPA_STATUS_SUCCESS;
}

View File

@ -26,6 +26,8 @@
* the management of skid buffers in the firmware */
#define DC_DEST_BUFFER_DYN_MIN_SIZE (128)
#define DC_DEST_BUFFER_STA_MIN_SIZE (64)
#define DC_DEST_BUFFER_DYN_MIN_SIZE_GEN4 (512)
#define DC_DEST_BUFFER_STA_MIN_SIZE_GEN4 (1024)
/* C62x and C3xxx pcie rev0 devices require an additional 32bytes */
#define DC_DEST_BUFFER_STA_ADDITIONAL_SIZE (32)
@ -93,8 +95,10 @@
* those are used to inform hardware of specifying CRC parameters to be used
* when calculating CRCs */
#define DC_CRC_POLY_DEFAULT 0x04c11db7
#define DC_CRC64_POLY_DEFAULT 0x42f0e1eba9ea3693ULL
#define DC_XOR_FLAGS_DEFAULT 0xe0000
#define DC_XOR_OUT_DEFAULT 0xffffffff
#define DC_XOR64_OUT_DEFAULT 0x0ULL
#define DC_INVALID_CRC 0x0
/**
@ -147,6 +151,13 @@ typedef struct dc_compression_cookie_s {
/**< virtual userspace ptr to source SGL */
CpaBufferList *pUserDestBuff;
/**< virtual userspace ptr to destination SGL */
CpaDcCallbackFn pCbFunc;
/**< Callback function defined for the traditional sessionless API */
CpaDcChecksum checksumType;
/**< Type of checksum */
dc_integrity_crc_fw_t dataIntegrityCrcs;
/**< Data integrity table */
} dc_compression_cookie_t;
/**
@ -165,6 +176,9 @@ typedef struct dc_compression_cookie_s {
*****************************************************************************/
void dcCompression_ProcessCallback(void *pRespMsg);
CpaStatus dcCheckOpData(sal_compression_service_t *pService,
CpaDcOpData *pOpData);
/**
*****************************************************************************
* @ingroup Dc_DataCompression

View File

@ -17,6 +17,7 @@
#include "cpa_dc_dp.h"
#include "icp_qat_fw_comp.h"
#include "sal_qat_cmn_msg.h"
#include "sal_types_compression.h"
/* Maximum number of intermediate buffers SGLs for devices
* with a maximum of 6 compression slices */
@ -35,6 +36,7 @@
/* Size of the history window.
* Base 2 logarithm of maximum window size minus 8 */
#define DC_4K_WINDOW_SIZE (4)
#define DC_8K_WINDOW_SIZE (5)
#define DC_16K_WINDOW_SIZE (6)
#define DC_32K_WINDOW_SIZE (7)
@ -95,35 +97,81 @@ typedef struct dc_integrity_crc_fw_s {
/* CRC32 checksum returned for compressed data */
Cpa32U adler32;
/* ADLER32 checksum returned for compressed data */
Cpa32U oCrc32Cpr;
/* CRC32 checksum returned for data output by compression accelerator */
Cpa32U iCrc32Cpr;
/* CRC32 checksum returned for input data to compression accelerator */
Cpa32U oCrc32Xlt;
/* CRC32 checksum returned for data output by translator accelerator */
Cpa32U iCrc32Xlt;
/* CRC32 checksum returned for input data to translator accelerator */
Cpa32U xorFlags;
/* Initialise transactor pCRC controls in state register */
Cpa32U crcPoly;
/* CRC32 polynomial used by hardware */
Cpa32U xorOut;
/* CRC32 from XOR stage (Input CRC is xor'ed with value in the state) */
Cpa32U deflateBlockType;
/* Bit 1 - Bit 0
* 0 0 -> RAW DATA + Deflate header.
* This will not produced any CRC check because
* the output will not come from the slices.
* It will be a simple copy from input to output
* buffers list.
* 0 1 -> Static deflate block type
* 1 0 -> Dynamic deflate block type
* 1 1 -> Invalid type */
union {
struct {
Cpa32U oCrc32Cpr;
/* CRC32 checksum returned for data output by
* compression accelerator */
Cpa32U iCrc32Cpr;
/* CRC32 checksum returned for input data to compression
* accelerator
*/
Cpa32U oCrc32Xlt;
/* CRC32 checksum returned for data output by translator
* accelerator
*/
Cpa32U iCrc32Xlt;
/* CRC32 checksum returned for input data to translator
* accelerator
*/
Cpa32U xorFlags;
/* Initialise transactor pCRC controls in state register
*/
Cpa32U crcPoly;
/* CRC32 polynomial used by hardware */
Cpa32U xorOut;
/* CRC32 from XOR stage (Input CRC is xor'ed with value
* in the state) */
Cpa32U deflateBlockType;
/* Bit 1 - Bit 0
* 0 0 -> RAW DATA + Deflate header.
* This will not produced any CRC check
* because the output will not come
* from the slices. It will be a simple
* copy from input to output buffer
* list. 0 1 -> Static deflate block type 1 0 ->
* Dynamic deflate block type 1 1 -> Invalid type
*/
};
struct {
Cpa64U iCrc64Cpr;
/* CRC64 checksum returned for input data to compression
* accelerator
*/
Cpa64U oCrc64Cpr;
/* CRC64 checksum returned for data output by
* compression accelerator */
Cpa64U iCrc64Xlt;
/* CRC64 checksum returned for input data to translator
* accelerator
*/
Cpa64U oCrc64Xlt;
/* CRC64 checksum returned for data output by translator
* accelerator
*/
Cpa64U crc64Poly;
/* CRC64 polynomial used by hardware */
Cpa64U xor64Out;
/* CRC64 from XOR stage (Input CRC is xor'ed with value
* in the state) */
};
};
} dc_integrity_crc_fw_t;
/* Software-computed seed/integrity checksums for a compression session.
 * The union overlays the 32-bit (CRC32) and 64-bit (CRC64) views; writing
 * the 64-bit fields also initializes the 32-bit ones. */
typedef struct dc_sw_checksums_s {
	/* NOTE(review): swCrcI/swCrcO appear to be legacy fields superseded
	 * by swCrc32I/swCrc32O in the union below - confirm before use */
	Cpa32U swCrcI;
	Cpa32U swCrcO;
	union {
		struct {
			/* Software CRC32 over input data */
			Cpa32U swCrc32I;
			/* Software CRC32 over output data */
			Cpa32U swCrc32O;
		};
		struct {
			/* Software CRC64 over input data */
			Cpa64U swCrc64I;
			/* Software CRC64 over output data */
			Cpa64U swCrc64O;
		};
	};
} dc_sw_checksums_t;
/* Session descriptor structure for compression */
@ -211,6 +259,8 @@ typedef struct dc_session_desc_s {
dc_sw_checksums_t seedSwCrc;
/* Driver calculated integrity software CRC */
dc_sw_checksums_t integritySwCrc;
/* Flag to disable or enable CnV Error Injection mechanism */
CpaBoolean cnvErrorInjection;
} dc_session_desc_t;
/**
@ -275,4 +325,106 @@ CpaStatus dcGetSessionSize(CpaInstanceHandle dcInstance,
Cpa32U *pSessionSize,
Cpa32U *pContextSize);
/**
*****************************************************************************
* @ingroup Dc_DataCompression
* Set the cnvErrorInjection flag in session descriptor
*
* @description
* This function enables the CnVError injection for the session
* passed in. All Compression requests sent within the session
* are injected with CnV errors. This error injection is for the
* duration of the session. Resetting the session results in
* setting being cleared. CnV error injection does not apply to
* Data Plane API.
*
* @param[in] dcInstance Instance Handle
* @param[in] pSessionHandle Pointer to a session handle
*
* @retval CPA_STATUS_SUCCESS Function executed successfully
* @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in
* @retval CPA_STATUS_UNSUPPORTED Unsupported feature
*****************************************************************************/
CpaStatus dcSetCnvError(CpaInstanceHandle dcInstance,
CpaDcSessionHandle pSessionHandle);
/**
*****************************************************************************
* @ingroup Dc_DataCompression
* Check that pSessionData is valid
*
* @description
* Check that all the parameters defined in the pSessionData are valid
*
* @param[in] pSessionData Pointer to a user instantiated structure
* containing session data
*
* @retval CPA_STATUS_SUCCESS Function executed successfully
* @retval CPA_STATUS_FAIL Function failed to find device
* @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in
* @retval CPA_STATUS_UNSUPPORTED Unsupported algorithm/feature
*
*****************************************************************************/
CpaStatus dcCheckSessionData(const CpaDcSessionSetupData *pSessionData,
CpaInstanceHandle dcInstance);
/**
*****************************************************************************
* @ingroup Dc_DataCompression
* Get the compression command id for the given session setup data.
*
* @description
* This function will get the compression command id based on parameters
* passed in the given session setup data.
*
* @param[in] pService Pointer to the service
* @param[in] pSessionData Pointer to a user instantiated
* structure containing session data
* @param[out] pDcCmdId Pointer to the command id
*
* @retval CPA_STATUS_SUCCESS Function executed successfully
* @retval CPA_STATUS_UNSUPPORTED Unsupported algorithm/feature
*
*****************************************************************************/
CpaStatus dcGetCompressCommandId(sal_compression_service_t *pService,
CpaDcSessionSetupData *pSessionData,
Cpa8U *pDcCmdId);
/**
*****************************************************************************
* @ingroup Dc_DataCompression
* Get the decompression command id for the given session setup data.
*
* @description
* This function will get the decompression command id based on parameters
* passed in the given session setup data.
*
* @param[in] pService Pointer to the service
* @param[in] pSessionData Pointer to a user instantiated
* structure containing session data
* @param[out] pDcCmdId Pointer to the command id
*
* @retval CPA_STATUS_SUCCESS Function executed successfully
* @retval CPA_STATUS_UNSUPPORTED Unsupported algorithm/feature
*
*****************************************************************************/
CpaStatus dcGetDecompressCommandId(sal_compression_service_t *pService,
CpaDcSessionSetupData *pSessionData,
Cpa8U *pDcCmdId);
/**
*****************************************************************************
* @ingroup Dc_DataCompression
* Populate the translator content descriptor
*
* @description
* This function will populate the translator content descriptor
*
* @param[out] pMsg Pointer to the compression message
* @param[in] nextSlice Next slice
*
*****************************************************************************/
void dcTransContentDescPopulate(icp_qat_fw_comp_req_t *pMsg,
icp_qat_fw_slice_t nextSlice);
#endif /* DC_SESSION_H */

View File

@ -107,6 +107,21 @@
* Include private header files
*******************************************************************************
*/
/**
*****************************************************************************
* @ingroup LacSym
* Spc state
*
* @description
* This enum is used to indicate the Spc state.
*
*****************************************************************************/
typedef enum lac_single_pass_state_e {
	NON_SPC,    /* Algorithms other than CHACHA-POLY and AES-GCM */
	LIKELY_SPC, /* AES-GCM - likely to be handled as single pass */
	SPC	    /* CHACHA-POLY and AES-GCM */
} lac_single_pass_state_t;
/**
*******************************************************************************
* @ingroup LacSym_Session
@ -214,15 +229,17 @@ typedef struct lac_session_desc_s {
/**< Flag indicating whether the SymConstantsTable can be used or not */
CpaBoolean useOptimisedContentDesc : 1;
/**< Flag indicating whether to use the optimised CD or not */
CpaBoolean isPartialSupported : 1;
/**< Flag indicating whether symOperation support partial packet */
CpaBoolean useStatefulSha3ContentDesc : 1;
/**< Flag indicating whether to use the stateful SHA3 CD or not */
icp_qat_la_bulk_req_hdr_t shramReqCacheHdr;
icp_qat_fw_la_key_gen_common_t shramReqCacheMid;
icp_qat_la_bulk_req_ftr_t shramReqCacheFtr;
/**< Alternative pre-built request (header, mid & footer)
* for use with symConstantsTable. */
CpaBoolean isPartialSupported : 1;
/**< Flag indicating whether symOperation support partial packet */
CpaBoolean isSinglePass : 1;
/**< Flag indicating whether symOperation is single pass operation */
lac_single_pass_state_t singlePassState;
/**< Flag indicating whether symOperation support single pass */
icp_qat_fw_serv_specif_flags laCmdFlags;
/**< Common request - Service specific flags type */
icp_qat_fw_comn_flags cmnRequestFlags;
@ -236,6 +253,23 @@ typedef struct lac_session_desc_s {
/**< Hash Mode for the qat slices. Not to be confused with QA-API
* hashMode
*/
Cpa32U cipherSliceType;
/**< Cipher slice type to be used, set at init session time */
Cpa8U cipherAesXtsKey1Forward[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< Cached AES XTS Forward key.
 * For CPM2.0, AES XTS key conversion needs to be done in SW.
 * Because the user can update the session direction at any time,
 * the forward key also needs to be cached.
 */
Cpa8U cipherAesXtsKey1Reverse[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< AES XTS Reverse key.
 * For CPM2.0, AES XTS key conversion needs to be done in SW.
 * The reverse key will always be calculated at session setup time and
 * cached to be used when needed. */
Cpa8U cipherAesXtsKey2[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< For AES XTS session need to store Key2 value in order to generate
* tweak
*/
void *writeRingMsgFunc;
/**< function which will be called to write ring message */
Cpa32U aadLenInBytes;
@ -325,8 +359,7 @@ typedef struct lac_session_desc_d1_s {
* a decrypt operation. */
CpaCySymPacketType partialState;
/**< state of the partial packet. This can be written to by the perform
* because the SpinLock pPartialInFlightSpinlock guarantees that that
* the
* because the SpinLock pPartialInFlightSpinlock guarantees that the
* state is accessible in only one place at a time. */
icp_qat_la_bulk_req_hdr_t reqCacheHdr;
icp_qat_fw_la_key_gen_common_t reqCacheMid;
@ -382,15 +415,17 @@ typedef struct lac_session_desc_d1_s {
/**< Flag indicating whether the SymConstantsTable can be used or not */
CpaBoolean useOptimisedContentDesc : 1;
/**< Flag indicating whether to use the optimised CD or not */
CpaBoolean isPartialSupported : 1;
/**< Flag indicating whether symOperation support partial packet */
CpaBoolean useStatefulSha3ContentDesc : 1;
/**< Flag indicating whether to use the stateful SHA3 CD or not */
icp_qat_la_bulk_req_hdr_t shramReqCacheHdr;
icp_qat_fw_la_key_gen_common_t shramReqCacheMid;
icp_qat_la_bulk_req_ftr_t shramReqCacheFtr;
/**< Alternative pre-built request (header, mid & footer)
* for use with symConstantsTable. */
CpaBoolean isPartialSupported : 1;
/**< Flag indicating whether symOperation support partial packet */
CpaBoolean isSinglePass : 1;
/**< Flag indicating whether symOperation is single pass operation */
lac_single_pass_state_t singlePassState;
/**< Flag indicating whether symOperation support single pass */
icp_qat_fw_serv_specif_flags laCmdFlags;
/**< Common request - Service specific flags type */
icp_qat_fw_comn_flags cmnRequestFlags;
@ -404,6 +439,23 @@ typedef struct lac_session_desc_d1_s {
/**< Hash Mode for the qat slices. Not to be confused with QA-API
* hashMode
*/
Cpa32U cipherSliceType;
/**< Cipher slice type to be used, set at init session time */
Cpa8U cipherAesXtsKey1Forward[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< Cached AES XTS Forward key.
 * For CPM2.0, AES XTS key conversion needs to be done in SW.
 * Because the user can update the session direction at any time,
 * the forward key also needs to be cached.
 */
Cpa8U cipherAesXtsKey1Reverse[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< AES XTS Reverse key.
 * For CPM2.0, AES XTS key conversion needs to be done in SW.
 * The reverse key will always be calculated at session setup time and
 * cached to be used when needed. */
Cpa8U cipherAesXtsKey2[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< For AES XTS session need to store Key2 value in order to generate
* tweak
*/
void *writeRingMsgFunc;
/**< function which will be called to write ring message */
} lac_session_desc_d1_t;
@ -444,8 +496,8 @@ typedef struct lac_session_desc_d2_s {
/**< info on the hash state prefix buffer */
CpaCySymHashAlgorithm hashAlgorithm;
/**< hash algorithm */
Cpa32U authKeyLenInBytes;
/**< Authentication key length in bytes */
Cpa32U authKeyLenInBytes;
/**< Authentication key length in bytes */
CpaCySymHashMode hashMode;
/**< Mode of the hash operation. plain, auth or nested */
Cpa32U hashResultSize;
@ -459,8 +511,7 @@ typedef struct lac_session_desc_d2_s {
* a decrypt operation. */
CpaCySymPacketType partialState;
/**< state of the partial packet. This can be written to by the perform
* because the SpinLock pPartialInFlightSpinlock guarantees that that
* the
* because the SpinLock pPartialInFlightSpinlock guarantees that the
* state is accessible in only one place at a time. */
icp_qat_la_bulk_req_hdr_t reqCacheHdr;
icp_qat_fw_la_key_gen_common_t reqCacheMid;
@ -516,15 +567,17 @@ typedef struct lac_session_desc_d2_s {
/**< Flag indicating whether the SymConstantsTable can be used or not */
CpaBoolean useOptimisedContentDesc : 1;
/**< Flag indicating whether to use the optimised CD or not */
CpaBoolean isPartialSupported : 1;
/**< Flag indicating whether symOperation support partial packet */
CpaBoolean useStatefulSha3ContentDesc : 1;
/**< Flag indicating whether to use the stateful SHA3 CD or not */
icp_qat_la_bulk_req_hdr_t shramReqCacheHdr;
icp_qat_fw_la_key_gen_common_t shramReqCacheMid;
icp_qat_la_bulk_req_ftr_t shramReqCacheFtr;
/**< Alternative pre-built request (header. mid & footer)
* for use with symConstantsTable. */
CpaBoolean isPartialSupported : 1;
/**< Flag indicating whether symOperation support partial packet */
CpaBoolean isSinglePass : 1;
/**< Flag indicating whether symOperation is single pass operation */
lac_single_pass_state_t singlePassState;
/**< Flag indicating whether symOperation support single pass */
icp_qat_fw_serv_specif_flags laCmdFlags;
/**< Common request - Service specific flags type */
icp_qat_fw_comn_flags cmnRequestFlags;
@ -538,6 +591,23 @@ typedef struct lac_session_desc_d2_s {
/**< Hash Mode for the qat slices. Not to be confused with QA-API
* hashMode
*/
Cpa32U cipherSliceType;
/**< Cipher slice type to be used, set at init session time */
Cpa8U cipherAesXtsKey1Forward[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< Cached AES XTS Forward key.
 * For CPM2.0, AES XTS key conversion needs to be done in SW.
 * Because the user can update the session direction at any time,
 * the forward key also needs to be cached.
 */
Cpa8U cipherAesXtsKey1Reverse[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< AES XTS Reverse key.
 * For CPM2.0, AES XTS key conversion needs to be done in SW.
 * The reverse key will always be calculated at session setup time and
 * cached to be used when needed. */
Cpa8U cipherAesXtsKey2[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< For AES XTS session need to store Key2 value in order to generate
* tweak
*/
void *writeRingMsgFunc;
/**< function which will be called to write ring message */
Cpa32U aadLenInBytes;

View File

@ -228,6 +228,7 @@
#include "lac_session.h"
#include "lac_sym.h"
#include "lac_sal_types_crypto.h"
/*
* WARNING: There are no checks done on the parameters of the functions in
@ -253,8 +254,9 @@
* @retval CPA_STATUS_INVALID_PARAM Invalid parameter.
*
*****************************************************************************/
CpaStatus LacCipher_SessionSetupDataCheck(
const CpaCySymCipherSetupData *pCipherSetupData);
CpaStatus
LacCipher_SessionSetupDataCheck(const CpaCySymCipherSetupData *pCipherSetupData,
Cpa32U capabilitiesMask);
/**
*******************************************************************************
@ -309,4 +311,22 @@ CpaStatus LacCipher_PerformIvCheck(sal_service_t *pService,
Cpa32U qatPacketType,
Cpa8U **ppIvBuffer);
/**
*****************************************************************************
* @ingroup LacCipher
* Return cipher slice type for given algorithm
*
* @description
* This function will check what cipher slice type should be used for given
* algorithms and CPM generation combination.
* Since CPM2.0 there is new UCS cipher slice available.
*
* @param[in] pService Pointer to service struct
* @param[in] cipherAlgorithm cipher algorithm
* @param[in] hashAlgorithm hash algorithm
*
*****************************************************************************/
Cpa32U LacCipher_GetCipherSliceType(sal_crypto_service_t *pService,
CpaCySymCipherAlgorithm algorithm,
CpaCySymHashAlgorithm hash);
#endif /* LAC_SYM_CIPHER_H */

View File

@ -45,6 +45,14 @@
/* ARC4 256 bytes for key matrix, 2 for i and j and 6 bytes for padding */
#define LAC_CIPHER_ARC4_STATE_LEN_BYTES 264
/*
* Constant values for CCM AAD buffer
*/
#define LAC_CIPHER_CCM_B0_SIZE 16
#define LAC_CIPHER_CCM_ENCODED_AAD_LEN_SIZE 2
#define LAC_CIPHER_CCM_AAD_OFFSET \
(LAC_CIPHER_CCM_B0_SIZE + LAC_CIPHER_CCM_ENCODED_AAD_LEN_SIZE)
#define LAC_SYM_SNOW3G_CIPHER_CONFIG_FOR_HASH_SZ 40
/* Snow3g cipher config required for performing a Snow3g hash operation.
* It contains 8 Bytes of config for hardware, 16 Bytes of Key and requires
@ -64,6 +72,9 @@
/* The IV length for AES F8 is 16 bytes */
#define LAC_CIPHER_AES_F8_IV_LENGTH 16
/* The max key length for AES XTS 32 is bytes*/
#define LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH 32
/* For Snow3G UEA2, need to make sure last 8 Bytes of IV buffer are
* zero. */
#define LAC_CIPHER_SNOW3G_UEA2_IV_BUFFER_ZERO_LENGTH 8
@ -171,12 +182,17 @@
/* Macro to check if the Algorithm Mode is XTS */
#define LAC_CIPHER_IS_XTS_MODE(algo) (algo == CPA_CY_SYM_CIPHER_AES_XTS)
/* Macro to check if the accelerator has AES V2 capability */
#define LAC_CIPHER_AES_V2(mask) ((mask)&ICP_ACCEL_CAPABILITIES_AES_V2)
/* Macro to check if the Algorithm is single pass */
#define LAC_CIPHER_IS_SPC(cipher, hash, mask) \
((LAC_CIPHER_IS_CHACHA(cipher) && (CPA_CY_SYM_HASH_POLY == hash) && \
((mask)&ICP_ACCEL_CAPABILITIES_CHACHA_POLY)) || \
(LAC_CIPHER_IS_GCM(cipher) && ((CPA_CY_SYM_HASH_AES_GCM == hash) || \
(CPA_CY_SYM_HASH_AES_GMAC == hash)) && \
((mask)&ICP_ACCEL_CAPABILITIES_AESGCM_SPC)))
(((mask)&ICP_ACCEL_CAPABILITIES_CHACHA_POLY && \
LAC_CIPHER_IS_CHACHA(cipher) && (CPA_CY_SYM_HASH_POLY == hash)) || \
(((mask)&ICP_ACCEL_CAPABILITIES_AESGCM_SPC) && \
LAC_CIPHER_IS_GCM(cipher) && \
((CPA_CY_SYM_HASH_AES_GCM == hash) || \
(CPA_CY_SYM_HASH_AES_GMAC == hash))) || \
(LAC_CIPHER_IS_CCM(cipher) && LAC_CIPHER_AES_V2(mask)))
#endif /* LAC_CIPHER_DEFS_H */

View File

@ -108,51 +108,26 @@
/* Constants for SHA3_384 algorithm */
#define LAC_HASH_SHA3_384_BLOCK_SIZE 104
/**< @ingroup LacHashDefs
* * SHA3_384 block size in bytes */
* SHA3_384 block size in bytes */
#define LAC_HASH_SHA3_384_DIGEST_SIZE 48
/**< @ingroup LacHashDefs
* * SHA3_384 digest length in bytes */
* SHA3_384 digest length in bytes */
#define LAC_HASH_SHA3_384_STATE_SIZE 48
/**< @ingroup LacHashDefs
* * SHA3_384 state size */
* SHA3_384 state size */
/* Constants for SHA3_512 algorithm */
#define LAC_HASH_SHA3_512_BLOCK_SIZE 72
/**< @ingroup LacHashDefs
* * * SHA3_512 block size in bytes */
* SHA3_512 block size in bytes */
#define LAC_HASH_SHA3_512_DIGEST_SIZE 64
/**< @ingroup LacHashDefs
* * * SHA3_512 digest length in bytes */
* SHA3_512 digest length in bytes */
#define LAC_HASH_SHA3_512_STATE_SIZE 64
/**< @ingroup LacHashDefs
* * * SHA3_512 state size */
* SHA3_512 state size */
/* Constants for SHAKE_128 algorithm */
#define LAC_HASH_SHAKE_128_BLOCK_SIZE 168
/**< @ingroup LacHashDefs
* * * SHAKE_128 block size in bytes */
#define LAC_HASH_SHAKE_128_DIGEST_SIZE 0xFFFFFFFF
/**< @ingroup LacHashDefs
* * * SHAKE_128 digest length in bytes ((2^32)-1)*/
/* Constants for SHAKE_256 algorithm */
#define LAC_HASH_SHAKE_256_BLOCK_SIZE 136
/**< @ingroup LacHashDefs
* * * SHAKE_256 block size in bytes */
#define LAC_HASH_SHAKE_256_DIGEST_SIZE 0xFFFFFFFF
/**< @ingroup LacHashDefs
* * * SHAKE_256 digest length in bytes ((2^ 32)-1)*/
/* Constants for POLY algorithm */
#define LAC_HASH_POLY_BLOCK_SIZE 64
/**< @ingroup LacHashDefs
* POLY block size in bytes */
#define LAC_HASH_POLY_DIGEST_SIZE 16
/**< @ingroup LacHashDefs
* POLY digest length */
#define LAC_HASH_POLY_STATE_SIZE 0
/**< @ingroup LacHashDefs
* POLY state size */
#define LAC_HASH_SHA3_STATEFUL_STATE_SIZE 200
/* Constants for SM3 algorithm */
#define LAC_HASH_SM3_BLOCK_SIZE 64
@ -165,6 +140,17 @@
/**< @ingroup LacHashDefs
* SM3 state size */
/* Constants for POLY algorithm */
#define LAC_HASH_POLY_BLOCK_SIZE 64
/**< @ingroup LacHashDefs
* POLY block size in bytes */
#define LAC_HASH_POLY_DIGEST_SIZE 16
/**< @ingroup LacHashDefs
* POLY digest length */
#define LAC_HASH_POLY_STATE_SIZE 0
/**< @ingroup LacHashDefs
* POLY state size */
/* Constants for XCBC precompute algorithm */
#define LAC_HASH_XCBC_PRECOMP_KEY_NUM 3
/**< @ingroup LacHashDefs
@ -285,7 +271,7 @@
* the size in an 8bit field */
#define LAC_MAX_HASH_STATE_STORAGE_SIZE \
(sizeof(icp_qat_hw_auth_counter_t) + LAC_HASH_SHA512_STATE_SIZE)
(sizeof(icp_qat_hw_auth_counter_t) + LAC_HASH_SHA3_STATEFUL_STATE_SIZE)
/**< Maximum size of the hash state storage section of the hash state prefix
* buffer */
@ -307,11 +293,8 @@
(algorithm == CPA_CY_SYM_HASH_SHA256) || \
(algorithm == CPA_CY_SYM_HASH_SHA384) || \
(algorithm == CPA_CY_SYM_HASH_SHA512) || \
(algorithm == CPA_CY_SYM_HASH_SHA3_224) || \
(algorithm == CPA_CY_SYM_HASH_SHA3_256) || \
(algorithm == CPA_CY_SYM_HASH_SHA3_384) || \
(algorithm == CPA_CY_SYM_HASH_SHA3_512) || \
(algorithm == CPA_CY_SYM_HASH_SM3))
(algorithm == CPA_CY_SYM_HASH_SM3)) || \
(LAC_HASH_IS_SHA3(algorithm))
/**< @ingroup LacSymQatHash
* Macro to detect if the hash algorithm is a HMAC algorithm */
@ -341,4 +324,12 @@
* Nested. This applies to TLS. This is used to differentiate between
* TLS and HMAC */
#define LAC_HASH_IS_SHA3(algo) \
((algo == CPA_CY_SYM_HASH_SHA3_224) || \
(algo == CPA_CY_SYM_HASH_SHA3_256) || \
(algo == CPA_CY_SYM_HASH_SHA3_384) || \
(algo == CPA_CY_SYM_HASH_SHA3_512))
/**< @ingroup LacSymQatHash
* Macro to check if the hash algorithm is SHA3 */
#endif /* LAC_SYM_HASH_DEFS_H */

View File

@ -206,4 +206,43 @@ void LacSymQat_LaPacketCommandFlagSet(Cpa32U qatPacketType,
void LacSymQat_LaSetDefaultFlags(icp_qat_fw_serv_specif_flags *laCmdFlags,
CpaCySymOp symOp);
/**
******************************************************************************
* @ingroup LacSymQat
*
*
* @description
* this function defines whether the shared constants table can be used
* for a particular cipher and hash algorithm
*
* @param[in] ptr to session
* @param[in] ptr to return offset into table for cipher config
* @param[in] ptr to return offset into table for hash config
*
* @return CPA_TRUE if Constants table is available for use, CPA_FALSE if it's
* not.
*
*****************************************************************************/
CpaBoolean LacSymQat_UseSymConstantsTable(lac_session_desc_t *pSession,
Cpa8U *cipherOffset,
Cpa8U *hashOffset);
/**
******************************************************************************
* @ingroup LacSymQat
*
*
* @description
* this function calculates whether the optimized content descriptor can
* be used for a particular chained cipher and hash algorithm
*
* @param[in] ptr to session
*
* @return CPA_TRUE if optimized CD can be used, CPA_FALSE if it's not.
*
*****************************************************************************/
CpaBoolean LacSymQat_UseOptimisedContentDesc(lac_session_desc_t *pSession);
#endif /* LAC_SYM_QAT_H */

View File

@ -42,9 +42,11 @@
#define LAC_SYM_QAT_CIPHER_NEXT_ID_BIT_OFFSET 24
#define LAC_SYM_QAT_CIPHER_CURR_ID_BIT_OFFSET 16
#define LAC_SYM_QAT_CIPHER_STATE_SIZE_BIT_OFFSET 8
#define LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_GCM_SPC 9
#define LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_CHACHA_SPC 2
#define LAC_SYM_QAT_CIPHER_STATE_SIZE_SPC 48
#define LAC_SYM_QAT_CIPHER_GCM_SPC_OFFSET_IN_DRAM 9
#define LAC_SYM_QAT_CIPHER_CCM_SPC_OFFSET_IN_DRAM 8
#define LAC_SYM_QAT_CIPHER_CHACHA_SPC_OFFSET_IN_DRAM 2
#define LAC_SYM_QAT_CIPHER_SPC_STATE_SIZE 48
/**
******************************************************************************
* @ingroup LacSymQat_Cipher
@ -111,11 +113,13 @@ Cpa32U LacSymQat_CipherIvSizeBytesGet(CpaCySymCipherAlgorithm cipherAlgorithm);
* @retval void
*
*****************************************************************************/
CpaStatus LacSymQat_CipherRequestParamsPopulate(icp_qat_fw_la_bulk_req_t *pReq,
Cpa32U cipherOffsetInBytes,
Cpa32U cipherLenInBytes,
Cpa64U ivBufferPhysAddr,
Cpa8U *pIvBufferVirt);
CpaStatus
LacSymQat_CipherRequestParamsPopulate(lac_session_desc_t *pSessionDesc,
icp_qat_fw_la_bulk_req_t *pReq,
Cpa32U cipherOffsetInBytes,
Cpa32U cipherLenInBytes,
Cpa64U ivBufferPhysAddr,
Cpa8U *pIvBufferVirt);
/**
******************************************************************************
@ -194,6 +198,8 @@ void LacSymQat_CipherCtrlBlockInitialize(icp_qat_fw_la_bulk_req_t *pMsg);
* @param[in] targetKeyLenInBytes cipher key length in bytes of selected
* algorithm
*
* @param[in] sliceType Cipher slice type to be used
*
* @param[out] nextSlice SliceID for next control block
* entry. This value is known only by
* the calling component
@ -206,6 +212,7 @@ void LacSymQat_CipherCtrlBlockInitialize(icp_qat_fw_la_bulk_req_t *pMsg);
void LacSymQat_CipherCtrlBlockWrite(icp_qat_la_bulk_req_ftr_t *pMsg,
Cpa32U cipherAlgorithm,
Cpa32U targetKeyLenInBytes,
Cpa32U sliceType,
icp_qat_fw_slice_t nextSlice,
Cpa8U cipherCfgOffsetInQuadWord);
@ -274,6 +281,8 @@ void LacSymQat_CipherGetCfgData(lac_session_desc_t *pSession,
* key length MUST match the key length
* in the cipher setup data.
*
* @param[in] sliceType Cipher slice type to be used
*
* @param[in] pCipherHwBlock Pointer to the cipher hardware block
*
* @param[out] pCipherHwBlockSizeBytes Size in bytes of cipher setup block
@ -283,8 +292,10 @@ void LacSymQat_CipherGetCfgData(lac_session_desc_t *pSession,
*
*****************************************************************************/
void LacSymQat_CipherHwBlockPopulateKeySetup(
lac_session_desc_t *pSessionDesc,
const CpaCySymCipherSetupData *pCipherSetupData,
Cpa32U targetKeyLenInBytes,
Cpa32U sliceType,
const void *pCipherHwBlock,
Cpa32U *pCipherHwBlockSizeBytes);

View File

@ -0,0 +1,111 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
/**
*****************************************************************************
* @file lac_sym_qat_constants_table.h
*
* @ingroup LacSymQat
*
* API to be used for the CySym constants table.
*
*****************************************************************************/
#ifndef LAC_SYM_QAT_CONSTANTS_TABLE_H
#define LAC_SYM_QAT_CONSTANTS_TABLE_H
#include "cpa.h"
#include "icp_qat_fw_la.h"
typedef struct lac_sym_qat_constants_s {
/* Note these arrays must match the tables in lac_sym_qat_constants.c
* icp_qat_hw_cipher_lookup_tbl and icp_qat_hw_auth_lookup_tbl */
uint8_t cipher_offset[ICP_QAT_HW_CIPHER_DELIMITER]
[ICP_QAT_HW_CIPHER_MODE_DELIMITER][2][2];
uint8_t auth_offset[ICP_QAT_HW_AUTH_ALGO_DELIMITER]
[ICP_QAT_HW_AUTH_MODE_DELIMITER][2];
} lac_sym_qat_constants_t;
/**
*******************************************************************************
* @ingroup LacSymQat
* LacSymQat_ConstantsInitLookupTables
*
*
* @description
* The SymCy constants table is 1K of static data which is passed down
* to the FW to be stored in SHRAM for use by the FW.
* This function populates the associated lookup tables which the IA
* driver uses.
* Where there is config data available in the constants table the lookup
* table stores the offset into the constants table.
* Where there's no suitable config data available in the constants table
* zero is stored in the lookup table.
*
* @return none
*
*****************************************************************************/
void LacSymQat_ConstantsInitLookupTables(CpaInstanceHandle instanceHandle);
/**
*******************************************************************************
* @ingroup LacSymQat
* LacSymQat_ConstantsGetCipherOffset
*
* @description
* This function looks up the cipher constants lookup array for
* a specific cipher algorithm, mode, direction and convert flag.
* If the lookup table value is zero then there's no suitable config data
* available in the constants table.
* If the value > zero, then there is config data available in the constants
* table which is stored in SHRAM for use by the FW. The value is the offset
* into the constants table, it is returned to the caller in poffset.
*
*
* @param[in] Cipher Algorithm
* @param[in] Cipher Mode
* @param[in] Direction - encrypt/decrypt
* @param[in] convert / no convert
* @param[out] offset into constants table
*
* @return none
*
*****************************************************************************/
void LacSymQat_ConstantsGetCipherOffset(CpaInstanceHandle instanceHandle,
uint8_t algo,
uint8_t mode,
uint8_t direction,
uint8_t convert,
uint8_t *poffset);
/**
*******************************************************************************
* @ingroup LacSymQat
* LacSymQat_ConstantsGetAuthOffset
*
* @description
* This function looks up the auth constants lookup array for
* a specific auth algorithm, mode, direction and convert flag.
* If the lookup table value is zero then there's no suitable config data
* available in the constants table.
* If the value > zero, then there is config data available in the constants
* table which is stored in SHRAM for use by the FW. The value is the offset
* into the constants table, it is returned to the caller in poffset.
*
*
* @param[in] auth Algorithm
* @param[in] auth Mode
* @param[in] nested / no nested
* @param[out] offset into constants table
*
* @return none
*
*****************************************************************************/
void LacSymQat_ConstantsGetAuthOffset(CpaInstanceHandle instanceHandle,
uint8_t algo,
uint8_t mode,
uint8_t nested,
uint8_t *poffset);
#endif /* LAC_SYM_QAT_SHRAM_CONSTANTS_TABLE_H */

View File

@ -126,6 +126,10 @@ typedef struct lac_sym_qat_hash_state_buffer_info_s {
* @param[in] useOptimisedContentDesc Indicate if optimised content desc
* is used for this session.
*
* @param[in] useStatefulSha3ContentDesc
* Indicate if stateful SHA3 content desc
* is used for this session.
*
* @param[in] pPrecompute For auth mode, this is the pointer
* to the precompute data. Otherwise this
* should be set to NULL
@ -145,6 +149,7 @@ LacSymQat_HashContentDescInit(icp_qat_la_bulk_req_ftr_t *pMsg,
icp_qat_hw_auth_mode_t qatHashMode,
CpaBoolean useSymConstantsTable,
CpaBoolean useOptimisedContentDesc,
CpaBoolean useStatefulSha3ContentDesc,
lac_sym_qat_hash_precompute_info_t *pPrecompute,
Cpa32U *pHashBlkSizeInBytes);

View File

@ -798,8 +798,9 @@ LacSymKey_MgfCommon(const CpaInstanceHandle instanceHandle,
ICP_QAT_FW_SLICE_DRAM_WR,
ICP_QAT_HW_AUTH_MODE0, /* just a plain hash */
CPA_FALSE, /* Not using sym Constants Table in Shared SRAM
*/
*/
CPA_FALSE, /* not using the optimised Content Desc */
CPA_FALSE, /* Not using the stateful SHA3 Content Desc */
NULL,
&hashBlkSizeInBytes);
@ -1550,9 +1551,11 @@ LacSymKey_KeyGenSslTls_GenCommon(CpaInstanceHandle instanceHandle,
LAC_SYM_KEY_NO_HASH_BLK_OFFSET_QW,
ICP_QAT_FW_SLICE_DRAM_WR,
qatHashMode,
CPA_FALSE, /* Not using sym Constants Table in SRAM */
CPA_FALSE, /* Not using the optimised content Desc */
NULL, /* Precompute data */
CPA_FALSE, /* Not using sym Constants Table in Shared SRAM
*/
CPA_FALSE, /* not using the optimised content Desc */
CPA_FALSE, /* Not using the stateful SHA3 Content Desc */
NULL, /* precompute data */
&hashBlkSizeInBytes);
/* SSL3 */

File diff suppressed because it is too large Load Diff

View File

@ -55,8 +55,9 @@
#include "lac_sal_types_crypto.h"
#include "sal_service_state.h"
#define IS_EXT_ALG_CHAIN_UNSUPPORTED( \
cipherAlgorithm, hashAlgorithm, extAlgchainSupported) \
#define IS_EXT_ALG_CHAIN_UNSUPPORTED(cipherAlgorithm, \
hashAlgorithm, \
extAlgchainSupported) \
((((CPA_CY_SYM_CIPHER_ZUC_EEA3 == cipherAlgorithm || \
CPA_CY_SYM_CIPHER_SNOW3G_UEA2 == cipherAlgorithm) && \
CPA_CY_SYM_HASH_AES_CMAC == hashAlgorithm) || \
@ -77,6 +78,10 @@ LacSymPerform_BufferParamCheck(const CpaBufferList *const pSrcBuffer,
const lac_session_desc_t *const pSessionDesc,
const CpaCySymOpData *const pOpData);
void LacDp_WriteRingMsgFull(CpaCySymDpOpData *pRequest,
icp_qat_fw_la_bulk_req_t *pCurrentQatMsg);
void LacDp_WriteRingMsgOpt(CpaCySymDpOpData *pRequest,
icp_qat_fw_la_bulk_req_t *pCurrentQatMsg);
void getCtxSize(const CpaCySymSessionSetupData *pSessionSetupData,
Cpa32U *pSessionCtxSizeInBytes);
@ -151,45 +156,40 @@ LacSymSession_ParamCheck(const CpaInstanceHandle instanceHandle,
/* Protect against value of cipher outside the bitmap
* and check if cipher algorithm is correct
*/
if ((pCipherSetupData->cipherAlgorithm >=
CPA_CY_SYM_CIPHER_CAP_BITMAP_SIZE) ||
(!CPA_BITMAP_BIT_TEST(capInfo.ciphers,
pCipherSetupData->cipherAlgorithm))) {
if (pCipherSetupData->cipherAlgorithm >=
CPA_CY_SYM_CIPHER_CAP_BITMAP_SIZE) {
LAC_INVALID_PARAM_LOG("cipherAlgorithm");
return CPA_STATUS_INVALID_PARAM;
}
if (!CPA_BITMAP_BIT_TEST(capInfo.ciphers,
pCipherSetupData->cipherAlgorithm)) {
LAC_UNSUPPORTED_PARAM_LOG(
"UnSupported cipherAlgorithm");
return CPA_STATUS_UNSUPPORTED;
}
}
/* Ensure hash algorithm is correct and supported */
if ((CPA_CY_SYM_OP_ALGORITHM_CHAINING ==
pSessionSetupData->symOperation) ||
(CPA_CY_SYM_OP_HASH == pSessionSetupData->symOperation)) {
/* Ensure SHAKE algorithms are not supported */
if ((CPA_CY_SYM_HASH_SHAKE_128 ==
pHashSetupData->hashAlgorithm) ||
(CPA_CY_SYM_HASH_SHAKE_256 ==
pHashSetupData->hashAlgorithm)) {
LAC_INVALID_PARAM_LOG(
"Hash algorithms SHAKE-128 and SHAKE-256 "
"are not supported.");
return CPA_STATUS_UNSUPPORTED;
}
/* Protect against value of hash outside the bitmap
* and check if hash algorithm is correct
*/
if ((pHashSetupData->hashAlgorithm >=
CPA_CY_SYM_HASH_CAP_BITMAP_SIZE) ||
(!CPA_BITMAP_BIT_TEST(capInfo.hashes,
pHashSetupData->hashAlgorithm))) {
if (pHashSetupData->hashAlgorithm >=
CPA_CY_SYM_HASH_CAP_BITMAP_SIZE) {
LAC_INVALID_PARAM_LOG("hashAlgorithm");
return CPA_STATUS_INVALID_PARAM;
}
if (!CPA_BITMAP_BIT_TEST(capInfo.hashes,
pHashSetupData->hashAlgorithm)) {
LAC_UNSUPPORTED_PARAM_LOG("UnSupported hashAlgorithm");
return CPA_STATUS_UNSUPPORTED;
}
}
/* ensure CCM, GCM, Kasumi, Snow3G and ZUC cipher and hash algorithms
* are
* selected together for Algorithm Chaining */
* are selected together for Algorithm Chaining */
if (CPA_CY_SYM_OP_ALGORITHM_CHAINING ==
pSessionSetupData->symOperation) {
/* ensure both hash and cipher algorithms are POLY and CHACHA */
@ -435,7 +435,11 @@ LacSymPerform_BufferParamCheck(const CpaBufferList *const pSrcBuffer,
}
}
/* Check that src Buffer and dst Buffer Lengths are equal */
if (srcBufferLen != dstBufferLen) {
/* CCM output needs to be longer than input buffer for appending
* tag*/
if (srcBufferLen != dstBufferLen &&
pSessionDesc->cipherAlgorithm !=
CPA_CY_SYM_CIPHER_AES_CCM) {
LAC_INVALID_PARAM_LOG(
"Source and Dest buffer lengths need to be equal ");
return CPA_STATUS_INVALID_PARAM;
@ -451,8 +455,7 @@ LacSymPerform_BufferParamCheck(const CpaBufferList *const pSrcBuffer,
return CPA_STATUS_INVALID_PARAM;
} else {
/* This function checks to see if the partial packet
* sequence
* is correct */
* sequence is correct */
if (CPA_STATUS_SUCCESS !=
LacSym_PartialPacketStateCheck(
pOpData->packetType,
@ -551,7 +554,7 @@ LacSym_InitSession(const CpaInstanceHandle instanceHandle,
const CpaCySymCipherSetupData *pCipherSetupData = NULL;
const CpaCySymHashSetupData *pHashSetupData = NULL;
/* Instance param checking done by calling function */
/* Instance param checking done by calling function */
LAC_CHECK_NULL_PARAM(pSessionSetupData);
LAC_CHECK_NULL_PARAM(pSessionCtx);
@ -580,7 +583,7 @@ LacSym_InitSession(const CpaInstanceHandle instanceHandle,
if (0 == physAddress) {
LAC_LOG_ERROR(
"Unable to get the physical address of the session");
"Unable to get the physical address of the session\n");
return CPA_STATUS_FAIL;
}
@ -631,8 +634,8 @@ LacSym_InitSession(const CpaInstanceHandle instanceHandle,
/* For asynchronous - use the user supplied callback
* for synchronous - use the internal synchronous callback */
pSessionDesc->pSymCb = ((void *)NULL != (void *)pSymCb) ?
pSymCb :
LacSync_GenBufListVerifyCb;
pSymCb :
LacSync_GenBufListVerifyCb;
}
pSessionDesc->isDPSession = isDPSession;
@ -649,10 +652,8 @@ LacSym_InitSession(const CpaInstanceHandle instanceHandle,
if (CPA_STATUS_SUCCESS == status) {
/* Session set up via API call (not internal one) */
/* Services such as DRBG call the crypto api as part of their
* service
* hence the need to for the flag, it is needed to distinguish
* between
* an internal and external session.
* service hence the need to for the flag, it is needed to
* distinguish between an internal and external session.
*/
pSessionDesc->internalSession = CPA_FALSE;
@ -697,14 +698,11 @@ cpaCySymRemoveSession(const CpaInstanceHandle instanceHandle_in,
/*
* Based on one instance, we can initialize multiple sessions.
* For example, we can initialize the session "X" and session
* "Y" with
* the same instance "A". If there is no operation pending for
* session
* "X", we can remove the session "X".
* "Y" with the same instance "A". If there is no operation
* pending for session "X", we can remove the session "X".
*
* Now we only check the @pSessionDesc->pendingDpCbCount, if it
* becomes
* zero, we can remove the session.
* becomes zero, we can remove the session.
*
* Why?
* (1) We increase it in the cpaCySymDpEnqueueOp/
@ -713,12 +711,10 @@ cpaCySymRemoveSession(const CpaInstanceHandle instanceHandle_in,
*
* If the @pSessionDesc->pendingDpCbCount becomes zero, it means
* there is no operation pending for the session "X" anymore, so
* we can
* remove this session. Maybe there is still some requests left
* in the
* instance's ring (icp_adf_queueDataToSend() returns true), but
* the
* request does not belong to "X", it belongs to session "Y".
* we can remove this session. Maybe there is still some
* requests left in the instance's ring
* (icp_adf_queueDataToSend() returns true), but the request
* does not belong to "X", it belongs to session "Y".
*/
numPendingRequests =
qatUtilsAtomicGet(&(pSessionDesc->u.pendingDpCbCount));
@ -734,8 +730,7 @@ cpaCySymRemoveSession(const CpaInstanceHandle instanceHandle_in,
status = CPA_STATUS_RETRY;
if (CPA_TRUE == pSessionDesc->isDPSession) {
/* Need to update tail if messages queue on tx hi ring
for
data plane api */
for data plane api */
icp_comms_trans_handle trans_handle =
((sal_crypto_service_t *)instanceHandle)
->trans_handle_sym_tx;
@ -752,10 +747,7 @@ cpaCySymRemoveSession(const CpaInstanceHandle instanceHandle_in,
}
}
if (CPA_STATUS_SUCCESS == status) {
if (CPA_STATUS_SUCCESS !=
LAC_SPINLOCK_DESTROY(&pSessionDesc->requestQueueLock)) {
LAC_LOG_ERROR("Failed to destroy request queue lock");
}
LAC_SPINLOCK_DESTROY(&pSessionDesc->requestQueueLock);
if (CPA_FALSE == pSessionDesc->isDPSession) {
LAC_SYM_STAT_INC(numSessionsRemoved, instanceHandle);
}

View File

@ -69,7 +69,7 @@ LacSymCb_CleanUserData(const lac_session_desc_t *pSessionDesc,
const CpaCySymOpData *pOpData,
CpaBoolean isCCM)
{
Cpa8U authTagLen = 0;
Cpa32U authTagLen = 0;
/* Retrieve authTagLen */
authTagLen = pSessionDesc->hashResultSize;
@ -138,10 +138,10 @@ LacSymCb_ProcessCallbackInternal(lac_sym_bulk_cookie_t *pCookie,
/* For a digest verify operation - for full packet and final partial
* only, perform a comparison with the digest generated and with the one
* supplied in the packet. */
* supplied in the packet. In case of AES_GCM in SPC mode, destination
* buffer needs to be cleared if digest verify operation fails */
if (((pSessionDesc->isSinglePass &&
(CPA_CY_SYM_CIPHER_AES_GCM == pSessionDesc->cipherAlgorithm)) ||
if (((SPC == pSessionDesc->singlePassState) ||
(CPA_CY_SYM_OP_CIPHER != operationType)) &&
(CPA_TRUE == pSessionDesc->digestVerify) &&
((CPA_CY_SYM_PACKET_TYPE_FULL == pOpData->packetType) ||
@ -151,14 +151,10 @@ LacSymCb_ProcessCallbackInternal(lac_sym_bulk_cookie_t *pCookie,
instanceHandle);
/* The comparison has failed at this point (status is
* fail),
* need to clean any sensitive calculated data up to
* this point.
* The data calculated is no longer useful to the end
* result and
* does not need to be returned to the user so setting
* buffers to
* zero.
* fail), need to clean any sensitive calculated data up
* to this point. The data calculated is no longer
* useful to the end result and does not need to be
* returned to the user so setting buffers to zero.
*/
if (pSessionDesc->cipherAlgorithm ==
CPA_CY_SYM_CIPHER_AES_CCM) {
@ -200,8 +196,7 @@ LacSymCb_ProcessCallbackInternal(lac_sym_bulk_cookie_t *pCookie,
/* Update the user's IV buffer
* Very important to do this BEFORE dequeuing
* subsequent partial requests, as the state
* buffer
* may get overwritten
* buffer may get overwritten
*/
memcpy(pCookie->pOpData->pIv,
pSessionDesc->cipherPartialOpState,
@ -218,8 +213,9 @@ LacSymCb_ProcessCallbackInternal(lac_sym_bulk_cookie_t *pCookie,
} else if (CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL == pOpData->packetType) {
if ((CPA_CY_SYM_OP_CIPHER == operationType) ||
(CPA_CY_SYM_OP_ALGORITHM_CHAINING == operationType)) {
if (CPA_TRUE == LAC_CIPHER_IS_XTS_MODE(
pSessionDesc->cipherAlgorithm)) {
if (CPA_TRUE ==
LAC_CIPHER_IS_XTS_MODE(
pSessionDesc->cipherAlgorithm)) {
/*
* For XTS mode, we replace the updated key with
* the original key - for subsequent partial
@ -294,51 +290,46 @@ LacSymCb_ProcessCallbackInternal(lac_sym_bulk_cookie_t *pCookie,
static void
LacSymCb_ProcessDpCallback(CpaCySymDpOpData *pResponse,
CpaBoolean qatRespStatusOkFlag,
CpaStatus status,
lac_session_desc_t *pSessionDesc)
{
CpaStatus status = CPA_STATUS_SUCCESS;
CpaCySymDpCbFunc pSymDpCb = NULL;
/* For CCM and GCM, if qatRespStatusOkFlag is false, the data has to be
* cleaned as stated in RFC 3610; in DP mode, it is the user
* responsability
* to do so */
* responsability to do so */
if (CPA_FALSE == pSessionDesc->isSinglePass) {
if ((CPA_CY_SYM_OP_CIPHER == pSessionDesc->symOperation) ||
(CPA_FALSE == pSessionDesc->digestVerify)) {
/* If not doing digest compare and qatRespStatusOkFlag
!=
CPA_TRUE
then there is something very wrong */
if (CPA_FALSE == qatRespStatusOkFlag) {
LAC_LOG_ERROR(
"Response status value not as expected");
status = CPA_STATUS_FAIL;
}
if (((CPA_CY_SYM_OP_CIPHER == pSessionDesc->symOperation) &&
SPC != pSessionDesc->singlePassState) ||
(CPA_FALSE == pSessionDesc->digestVerify)) {
/* If not doing digest compare and qatRespStatusOkFlag !=
CPA_TRUE then there is something very wrong */
if ((CPA_FALSE == qatRespStatusOkFlag) &&
(status != CPA_STATUS_UNSUPPORTED)) {
LAC_LOG_ERROR("Response status value not as expected");
status = CPA_STATUS_FAIL;
}
}
((sal_crypto_service_t *)pResponse->instanceHandle)
->pSymDpCb(pResponse, status, qatRespStatusOkFlag);
pSymDpCb =
((sal_crypto_service_t *)pResponse->instanceHandle)->pSymDpCb;
pSymDpCb(pResponse, status, qatRespStatusOkFlag);
/*
* Decrement the number of pending CB.
*
* If the @pendingDpCbCount becomes zero, we may remove the session,
* please
* read more information in the cpaCySymRemoveSession().
* please read more information in the cpaCySymRemoveSession().
*
* But there is a field in the @pResponse to store the session,
* the "sessionCtx". In another word, in the above @->pSymDpCb()
* callback,
* it may use the session again. If we decrease the @pendingDpCbCount
* before
* the @->pSymDpCb(), there is a _risk_ the @->pSymDpCb() may reference
* to
* a deleted session.
* callback, it may use the session again. If we decrease the
* @pendingDpCbCount before the @->pSymDpCb(), there is a _risk_ the
* @->pSymDpCb() may reference to a deleted session.
*
* So in order to avoid the risk, we decrease the @pendingDpCbCount
* after
* the @->pSymDpCb() callback.
* after the @->pSymDpCb() callback.
*/
qatUtilsAtomicDec(&pSessionDesc->u.pendingDpCbCount);
}
@ -367,6 +358,7 @@ LacSymCb_ProcessCallback(icp_qat_fw_la_cmd_id_t lacCmdId,
void *pOpaqueData,
icp_qat_fw_comn_flags cmnRespFlags)
{
CpaStatus status = CPA_STATUS_SUCCESS;
CpaCySymDpOpData *pDpOpData = (CpaCySymDpOpData *)pOpaqueData;
lac_session_desc_t *pSessionDesc =
LAC_SYM_SESSION_DESC_FROM_CTX_GET(pDpOpData->sessionCtx);
@ -376,8 +368,13 @@ LacSymCb_ProcessCallback(icp_qat_fw_la_cmd_id_t lacCmdId,
if (CPA_TRUE == pSessionDesc->isDPSession) {
/* DP session */
if (ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
cmnRespFlags)) {
status = CPA_STATUS_UNSUPPORTED;
}
LacSymCb_ProcessDpCallback(pDpOpData,
qatRespStatusOkFlag,
status,
pSessionDesc);
} else {
/* Trad session */
@ -414,11 +411,7 @@ LacSymCb_PendingReqsDequeue(lac_session_desc_t *pSessionDesc)
* be accessed by multiple contexts simultaneously for enqueue and
* dequeue operations
*/
if (CPA_STATUS_SUCCESS !=
LAC_SPINLOCK(&pSessionDesc->requestQueueLock)) {
LAC_LOG_ERROR("Failed to lock request queue");
return CPA_STATUS_RESOURCE;
}
LAC_SPINLOCK(&pSessionDesc->requestQueueLock);
/* Clear the blocking flag in the session descriptor */
pSessionDesc->nonBlockingOpsInProgress = CPA_TRUE;
@ -427,10 +420,9 @@ LacSymCb_PendingReqsDequeue(lac_session_desc_t *pSessionDesc)
(CPA_TRUE == pSessionDesc->nonBlockingOpsInProgress)) {
/* If we send a partial packet request, set the
* blockingOpsInProgress
* flag for the session to indicate that subsequent requests
* must be
* queued up until this request completes
* blockingOpsInProgress flag for the session to indicate that
* subsequent requests must be queued up until this request
* completes
*/
if (CPA_CY_SYM_PACKET_TYPE_FULL !=
pSessionDesc->pRequestQueueHead->pOpData->packetType) {
@ -438,14 +430,10 @@ LacSymCb_PendingReqsDequeue(lac_session_desc_t *pSessionDesc)
}
/* At this point, we're clear to send the request. For cipher
* requests,
* we need to check if the session IV needs to be updated. This
* can
* only be done when no other partials are in flight for this
* session,
* to ensure the cipherPartialOpState buffer in the session
* descriptor
* is not currently in use
* requests, we need to check if the session IV needs to be
* updated. This can only be done when no other partials are in
* flight for this session, to ensure the cipherPartialOpState
* buffer in the session descriptor is not currently in use
*/
if (CPA_TRUE ==
pSessionDesc->pRequestQueueHead->updateSessionIvOnSend) {
@ -464,13 +452,11 @@ LacSymCb_PendingReqsDequeue(lac_session_desc_t *pSessionDesc)
/*
* Now we'll attempt to send the message directly to QAT. We'll
* keep
* looing until it succeeds (or at least a very high number of
* retries),
* as the failure only happens when the ring is full, and this
* is only
* a temporary situation. After a few retries, space will become
* availble, allowing the putMsg to succeed.
* keep looing until it succeeds (or at least a very high number
* of retries), as the failure only happens when the ring is
* full, and this is only a temporary situation. After a few
* retries, space will become availble, allowing the putMsg to
* succeed.
*/
retries = 0;
do {
@ -483,8 +469,7 @@ LacSymCb_PendingReqsDequeue(lac_session_desc_t *pSessionDesc)
retries++;
/*
* Yield to allow other threads that may be on this
* session to poll
* and make some space on the ring
* session to poll and make some space on the ring
*/
if (CPA_STATUS_SUCCESS != status) {
qatUtilsYield();
@ -509,10 +494,7 @@ LacSymCb_PendingReqsDequeue(lac_session_desc_t *pSessionDesc)
}
cleanup:
if (CPA_STATUS_SUCCESS !=
LAC_SPINUNLOCK(&pSessionDesc->requestQueueLock)) {
LAC_LOG_ERROR("Failed to unlock request queue");
}
LAC_SPINUNLOCK(&pSessionDesc->requestQueueLock);
return status;
}

View File

@ -49,6 +49,7 @@
#include "lac_sym_qat_cipher.h"
#include "lac_log.h"
#include "lac_buffer_desc.h"
#include "sal_hw_gen.h"
/*
*******************************************************************************
@ -66,14 +67,23 @@ LacCipher_PerformIvCheck(sal_service_t *pService,
lac_session_desc_t *pSessionDesc =
LAC_SYM_SESSION_DESC_FROM_CTX_GET(pOpData->sessionCtx);
CpaCySymCipherAlgorithm algorithm = pSessionDesc->cipherAlgorithm;
unsigned ivLenInBytes = 0;
/* Perform IV check. */
if (LAC_CIPHER_IS_CTR_MODE(algorithm) ||
LAC_CIPHER_IS_CBC_MODE(algorithm) ||
LAC_CIPHER_IS_AES_F8(algorithm) ||
LAC_CIPHER_IS_XTS_MODE(algorithm)) {
unsigned ivLenInBytes =
LacSymQat_CipherIvSizeBytesGet(algorithm);
switch (algorithm) {
/* Perform IV check for CTR, CBC, XTS, F8 MODE. */
case CPA_CY_SYM_CIPHER_AES_CTR:
case CPA_CY_SYM_CIPHER_3DES_CTR:
case CPA_CY_SYM_CIPHER_SM4_CTR:
case CPA_CY_SYM_CIPHER_AES_CCM:
case CPA_CY_SYM_CIPHER_AES_GCM:
case CPA_CY_SYM_CIPHER_CHACHA:
case CPA_CY_SYM_CIPHER_AES_CBC:
case CPA_CY_SYM_CIPHER_DES_CBC:
case CPA_CY_SYM_CIPHER_3DES_CBC:
case CPA_CY_SYM_CIPHER_SM4_CBC:
case CPA_CY_SYM_CIPHER_AES_F8:
case CPA_CY_SYM_CIPHER_AES_XTS: {
ivLenInBytes = LacSymQat_CipherIvSizeBytesGet(algorithm);
LAC_CHECK_NULL_PARAM(pOpData->pIv);
if (pOpData->ivLenInBytes != ivLenInBytes) {
if (!(/* GCM with 12 byte IV is OK */
@ -96,26 +106,21 @@ LacCipher_PerformIvCheck(sal_service_t *pService,
/* Set the value of the ppIvBuffer to that supplied
* by the user.
* NOTE: There is no guarantee that this address is
* aligned on
* an 8 or 64 Byte address. */
* aligned on an 8 or 64 Byte address. */
*ppIvBuffer = pOpData->pIv;
} else {
/* For partial packets, we use a per-session buffer to
* maintain
* the IV. This allows us to easily pass the updated IV
* forward
* to the next partial in the sequence. This makes
* internal
* buffering of partials easier to implement.
* maintain the IV. This allows us to easily pass the
* updated IV forward to the next partial in the
* sequence. This makes internal buffering of partials
* easier to implement.
*/
*ppIvBuffer = pSessionDesc->cipherPartialOpState;
/* Ensure that the user's IV buffer gets updated between
* partial
* requests so that they may also see the residue from
* the
* previous partial. Not needed for final partials
* though.
* partial requests so that they may also see the
* residue from the previous partial. Not needed for
* final partials though.
*/
if ((ICP_QAT_FW_LA_PARTIAL_START == qatPacketType) ||
(ICP_QAT_FW_LA_PARTIAL_MID == qatPacketType)) {
@ -124,97 +129,87 @@ LacCipher_PerformIvCheck(sal_service_t *pService,
if (ICP_QAT_FW_LA_PARTIAL_START ==
qatPacketType) {
/* if the previous partial state was
* full, then this is
* the first partial in the sequence so
* we need to copy
* in the user's IV. But, we have to be
* very careful
* here not to overwrite the
* cipherPartialOpState just
* yet in case there's a previous
* partial sequence in
* flight, so we defer the copy for now.
* This will be
* completed in the
* LacSymQueue_RequestSend() function.
* full, then this is the first partial
* in the sequence so we need to copy in
* the user's IV. But, we have to be
* very careful here not to overwrite
* the cipherPartialOpState just yet in
* case there's a previous partial
* sequence in flight, so we defer the
* copy for now. This will be completed
* in the LacSymQueue_RequestSend()
* function.
*/
pCbCookie->updateSessionIvOnSend =
CPA_TRUE;
}
/* For subsequent partials in a sequence, we'll
* re-use the
* IV that was written back by the QAT, using
* internal
* request queueing if necessary to ensure that
* the next
* partial request isn't issued to the QAT until
* the
* re-use the IV that was written back by the
* QAT, using internal request queueing if
* necessary to ensure that the next partial
* request isn't issued to the QAT until the
* previous one completes
*/
}
}
} else if (LAC_CIPHER_IS_KASUMI(algorithm)) {
} break;
case CPA_CY_SYM_CIPHER_KASUMI_F8: {
LAC_CHECK_NULL_PARAM(pOpData->pIv);
if (LAC_CIPHER_IS_KASUMI(algorithm) &&
(pOpData->ivLenInBytes != LAC_CIPHER_KASUMI_F8_IV_LENGTH)) {
if (pOpData->ivLenInBytes != LAC_CIPHER_KASUMI_F8_IV_LENGTH) {
LAC_INVALID_PARAM_LOG("invalid cipher IV size");
return CPA_STATUS_INVALID_PARAM;
}
*ppIvBuffer = pOpData->pIv;
} else if (LAC_CIPHER_IS_SNOW3G_UEA2(algorithm)) {
} break;
case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: {
LAC_CHECK_NULL_PARAM(pOpData->pIv);
if (LAC_CIPHER_IS_SNOW3G_UEA2(algorithm) &&
(pOpData->ivLenInBytes != ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ)) {
if (pOpData->ivLenInBytes != ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) {
LAC_INVALID_PARAM_LOG("invalid cipher IV size");
return CPA_STATUS_INVALID_PARAM;
}
*ppIvBuffer = pOpData->pIv;
} else if (LAC_CIPHER_IS_ARC4(algorithm)) {
} break;
case CPA_CY_SYM_CIPHER_ARC4: {
if (ICP_QAT_FW_LA_PARTIAL_NONE == qatPacketType) {
/* For full packets, the initial ARC4 state is stored in
* the
* session descriptor. Use it directly.
* the session descriptor. Use it directly.
*/
*ppIvBuffer = pSessionDesc->cipherARC4InitialState;
} else {
/* For partial packets, we maintain the running ARC4
* state in
* dedicated buffer in the session descriptor
* state in dedicated buffer in the session descriptor
*/
*ppIvBuffer = pSessionDesc->cipherPartialOpState;
if (ICP_QAT_FW_LA_PARTIAL_START == qatPacketType) {
/* if the previous partial state was full, then
* this is the
* first partial in the sequence so we need to
* (re-)initialise
* the contents of the state buffer using the
* initial state
* that is stored in the session descriptor.
* But, we have to be
* very careful here not to overwrite the
* cipherPartialOpState
* just yet in case there's a previous partial
* sequence in
* this is the first partial in the sequence so
* we need to (re-)initialise the contents of
* the state buffer using the initial state that
* is stored in the session descriptor. But, we
* have to be very careful here not to overwrite
* the cipherPartialOpState just yet in case
* there's a previous partial sequence in
* flight, so we defer the copy for now. This
* will be completed
* in the LacSymQueue_RequestSend() function
* when clear to send.
* will be completed in the
* LacSymQueue_RequestSend() function when clear
* to send.
*/
pCbCookie->updateSessionIvOnSend = CPA_TRUE;
}
}
} else if (LAC_CIPHER_IS_ZUC_EEA3(algorithm)) {
} break;
case CPA_CY_SYM_CIPHER_ZUC_EEA3: {
LAC_CHECK_NULL_PARAM(pOpData->pIv);
if (pOpData->ivLenInBytes != ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ) {
LAC_INVALID_PARAM_LOG("invalid cipher IV size");
return CPA_STATUS_INVALID_PARAM;
}
*ppIvBuffer = pOpData->pIv;
} else {
} break;
default:
*ppIvBuffer = NULL;
}
@ -223,40 +218,50 @@ LacCipher_PerformIvCheck(sal_service_t *pService,
CpaStatus
LacCipher_SessionSetupDataCheck(const CpaCySymCipherSetupData *pCipherSetupData)
LacCipher_SessionSetupDataCheck(const CpaCySymCipherSetupData *pCipherSetupData,
Cpa32U capabilitiesMask)
{
/* No key required for NULL algorithm */
if (!LAC_CIPHER_IS_NULL(pCipherSetupData->cipherAlgorithm)) {
LAC_CHECK_NULL_PARAM(pCipherSetupData->pCipherKey);
/* Check that algorithm and keys passed in are correct size */
if (LAC_CIPHER_IS_ARC4(pCipherSetupData->cipherAlgorithm)) {
switch (pCipherSetupData->cipherAlgorithm) {
case CPA_CY_SYM_CIPHER_ARC4:
if (pCipherSetupData->cipherKeyLenInBytes >
ICP_QAT_HW_ARC4_KEY_SZ) {
LAC_INVALID_PARAM_LOG(
"Invalid ARC4 cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_CCM(
pCipherSetupData->cipherAlgorithm)) {
if (pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_AES_128_KEY_SZ) {
break;
case CPA_CY_SYM_CIPHER_AES_CCM:
if (!LAC_CIPHER_AES_V2(capabilitiesMask) &&
pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_AES_128_KEY_SZ) {
LAC_INVALID_PARAM_LOG(
"Invalid AES CCM cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_XTS_MODE(
pCipherSetupData->cipherAlgorithm)) {
break;
case CPA_CY_SYM_CIPHER_AES_XTS:
if ((pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_AES_128_XTS_KEY_SZ) &&
(pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_AES_256_XTS_KEY_SZ)) {
ICP_QAT_HW_AES_256_XTS_KEY_SZ) &&
(pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_UCS_AES_128_XTS_KEY_SZ) &&
(pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_UCS_AES_256_XTS_KEY_SZ)) {
LAC_INVALID_PARAM_LOG(
"Invalid AES XTS cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_AES(
pCipherSetupData->cipherAlgorithm)) {
break;
case CPA_CY_SYM_CIPHER_AES_ECB:
case CPA_CY_SYM_CIPHER_AES_CBC:
case CPA_CY_SYM_CIPHER_AES_CTR:
case CPA_CY_SYM_CIPHER_AES_GCM:
if ((pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_AES_128_KEY_SZ) &&
(pCipherSetupData->cipherKeyLenInBytes !=
@ -267,8 +272,8 @@ LacCipher_SessionSetupDataCheck(const CpaCySymCipherSetupData *pCipherSetupData)
"Invalid AES cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_AES_F8(
pCipherSetupData->cipherAlgorithm)) {
break;
case CPA_CY_SYM_CIPHER_AES_F8:
if ((pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_AES_128_F8_KEY_SZ) &&
(pCipherSetupData->cipherKeyLenInBytes !=
@ -279,35 +284,37 @@ LacCipher_SessionSetupDataCheck(const CpaCySymCipherSetupData *pCipherSetupData)
"Invalid AES cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_DES(
pCipherSetupData->cipherAlgorithm)) {
break;
case CPA_CY_SYM_CIPHER_DES_ECB:
case CPA_CY_SYM_CIPHER_DES_CBC:
if (pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_DES_KEY_SZ) {
LAC_INVALID_PARAM_LOG(
"Invalid DES cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_TRIPLE_DES(
pCipherSetupData->cipherAlgorithm)) {
break;
case CPA_CY_SYM_CIPHER_3DES_ECB:
case CPA_CY_SYM_CIPHER_3DES_CBC:
case CPA_CY_SYM_CIPHER_3DES_CTR:
if (pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_3DES_KEY_SZ) {
LAC_INVALID_PARAM_LOG(
"Invalid Triple-DES cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_KASUMI(
pCipherSetupData->cipherAlgorithm)) {
break;
case CPA_CY_SYM_CIPHER_KASUMI_F8:
/* QAT-FW only supports 128 bits Cipher Key size for
* Kasumi F8
* Ref: 3GPP TS 55.216 V6.2.0 */
* Kasumi F8 Ref: 3GPP TS 55.216 V6.2.0 */
if (pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_KASUMI_KEY_SZ) {
LAC_INVALID_PARAM_LOG(
"Invalid Kasumi cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_SNOW3G_UEA2(
pCipherSetupData->cipherAlgorithm)) {
break;
case CPA_CY_SYM_CIPHER_SNOW3G_UEA2:
/* QAT-FW only supports 256 bits Cipher Key size for
* Snow_3G */
if (pCipherSetupData->cipherKeyLenInBytes !=
@ -316,8 +323,8 @@ LacCipher_SessionSetupDataCheck(const CpaCySymCipherSetupData *pCipherSetupData)
"Invalid Snow_3G cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_ZUC_EEA3(
pCipherSetupData->cipherAlgorithm)) {
break;
case CPA_CY_SYM_CIPHER_ZUC_EEA3:
/* ZUC EEA3 */
if (pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ) {
@ -325,23 +332,26 @@ LacCipher_SessionSetupDataCheck(const CpaCySymCipherSetupData *pCipherSetupData)
"Invalid ZUC cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_CHACHA(
pCipherSetupData->cipherAlgorithm)) {
break;
case CPA_CY_SYM_CIPHER_CHACHA:
if (pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_CHACHAPOLY_KEY_SZ) {
LAC_INVALID_PARAM_LOG(
"Invalid CHACHAPOLY cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_SM4(
pCipherSetupData->cipherAlgorithm)) {
break;
case CPA_CY_SYM_CIPHER_SM4_ECB:
case CPA_CY_SYM_CIPHER_SM4_CBC:
case CPA_CY_SYM_CIPHER_SM4_CTR:
if (pCipherSetupData->cipherKeyLenInBytes !=
ICP_QAT_HW_SM4_KEY_SZ) {
LAC_INVALID_PARAM_LOG(
"Invalid SM4 cipher key length");
return CPA_STATUS_INVALID_PARAM;
}
} else {
break;
default:
LAC_INVALID_PARAM_LOG("Invalid cipher algorithm");
return CPA_STATUS_INVALID_PARAM;
}
@ -365,52 +375,86 @@ LacCipher_PerformParamCheck(CpaCySymCipherAlgorithm algorithm,
LAC_INVALID_PARAM_LOG("cipher len + offset greater than "
"srcBuffer packet len");
status = CPA_STATUS_INVALID_PARAM;
}
if (CPA_STATUS_SUCCESS == status) {
} else {
/* Perform algorithm-specific checks */
switch (algorithm) {
case CPA_CY_SYM_CIPHER_ARC4:
case CPA_CY_SYM_CIPHER_AES_CTR:
case CPA_CY_SYM_CIPHER_3DES_CTR:
case CPA_CY_SYM_CIPHER_SM4_CTR:
case CPA_CY_SYM_CIPHER_AES_CCM:
case CPA_CY_SYM_CIPHER_AES_GCM:
case CPA_CY_SYM_CIPHER_CHACHA:
case CPA_CY_SYM_CIPHER_KASUMI_F8:
case CPA_CY_SYM_CIPHER_AES_F8:
case CPA_CY_SYM_CIPHER_SNOW3G_UEA2:
case CPA_CY_SYM_CIPHER_ZUC_EEA3:
/* No action needed */
break;
/*
* XTS Mode allow for ciphers which are not multiples of
* the block size.
*/
/* Perform algorithm-specific checks */
if (LAC_CIPHER_IS_XTS_MODE(algorithm) &&
((pOpData->packetType == CPA_CY_SYM_PACKET_TYPE_FULL) ||
(pOpData->packetType ==
CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL))) {
/*
* If this is the last of a partial request
*/
if (pOpData->messageLenToCipherInBytes <
ICP_QAT_HW_AES_BLK_SZ) {
LAC_INVALID_PARAM_LOG(
"data size must be greater than block "
"size for last XTS partial or XTS "
"full packet");
status = CPA_STATUS_INVALID_PARAM;
case CPA_CY_SYM_CIPHER_AES_XTS:
if ((pOpData->packetType ==
CPA_CY_SYM_PACKET_TYPE_FULL) ||
(pOpData->packetType ==
CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL)) {
/*
* If this is the last of a partial request
*/
if (pOpData->messageLenToCipherInBytes <
ICP_QAT_HW_AES_BLK_SZ) {
LAC_INVALID_PARAM_LOG(
"data size must be greater than block"
" size for last XTS partial or XTS "
"full packet");
status = CPA_STATUS_INVALID_PARAM;
}
}
} else if (!(LAC_CIPHER_IS_ARC4(algorithm) ||
LAC_CIPHER_IS_CTR_MODE(algorithm) ||
LAC_CIPHER_IS_F8_MODE(algorithm) ||
LAC_CIPHER_IS_SNOW3G_UEA2(algorithm) ||
LAC_CIPHER_IS_XTS_MODE(algorithm) ||
LAC_CIPHER_IS_CHACHA(algorithm) ||
LAC_CIPHER_IS_ZUC_EEA3(algorithm))) {
break;
default:
/* Mask & check below is based on assumption that block
* size is
* a power of 2. If data size is not a multiple of the
* block size,
* the "remainder" bits selected by the mask be non-zero
* size is a power of 2. If data size is not a multiple
* of the block size, the "remainder" bits selected by
* the mask be non-zero
*/
if (pOpData->messageLenToCipherInBytes &
(LacSymQat_CipherBlockSizeBytesGet(algorithm) -
1)) {
LAC_INVALID_PARAM_LOG(
"data size must be block size multiple");
"data size must be block size"
" multiple");
status = CPA_STATUS_INVALID_PARAM;
}
}
}
return status;
}
/*
 * Select which QAT cipher slice (legacy vs. UCS) a request should use
 * for the given cipher algorithm on this service instance.
 *
 * Returns ICP_QAT_FW_LA_USE_LEGACY_SLICE_TYPE by default; the UCS slice
 * is only ever selected on Gen4 devices (isCyGen4x()).
 *
 * NOTE(review): the hashAlgorithm parameter is not read in this body --
 * presumably kept for interface symmetry with related helpers; confirm
 * against callers before removing.
 */
Cpa32U
LacCipher_GetCipherSliceType(sal_crypto_service_t *pService,
			     CpaCySymCipherAlgorithm cipherAlgorithm,
			     CpaCySymHashAlgorithm hashAlgorithm)
{
	Cpa32U sliceType = ICP_QAT_FW_LA_USE_LEGACY_SLICE_TYPE;
	Cpa32U capabilitiesMask =
	    pService->generic_service_info.capabilitiesMask;
	/* UCS Slice is supported only in Gen4 */
	if (isCyGen4x(pService)) {
		if (LAC_CIPHER_IS_XTS_MODE(cipherAlgorithm) ||
		    LAC_CIPHER_IS_CHACHA(cipherAlgorithm) ||
		    LAC_CIPHER_IS_GCM(cipherAlgorithm)) {
			/* XTS/CHACHA/GCM always go to the UCS slice on Gen4. */
			sliceType = ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE;
		} else if (LAC_CIPHER_IS_CCM(cipherAlgorithm) &&
			   LAC_CIPHER_AES_V2(capabilitiesMask)) {
			/*
			 * Assigning the legacy slice here is redundant with
			 * the initializer UNLESS this branch exists to shadow
			 * the AES+CTR case below (CCM may also satisfy those
			 * predicates, in which case branch order is load-
			 * bearing).  NOTE(review): confirm against the
			 * LAC_CIPHER_IS_* macro definitions before touching
			 * the ordering of this if/else chain.
			 */
			sliceType = ICP_QAT_FW_LA_USE_LEGACY_SLICE_TYPE;
		} else if (LAC_CIPHER_IS_AES(cipherAlgorithm) &&
			   LAC_CIPHER_IS_CTR_MODE(cipherAlgorithm)) {
			sliceType = ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE;
		}
	}
	return sliceType;
}

View File

@ -35,14 +35,16 @@
#include "qat_utils.h"
#include "lac_mem.h"
#include "lac_log.h"
#include "lac_sym.h"
#include "lac_sym_qat_cipher.h"
#include "lac_list.h"
#include "lac_log.h"
#include "lac_mem.h"
#include "lac_sal_types_crypto.h"
#include "sal_service_state.h"
#include "lac_sym.h"
#include "lac_sym_cipher.h"
#include "lac_sym_auth_enc.h"
#include "lac_sym_qat_cipher.h"
#include "sal_service_state.h"
#include "sal_hw_gen.h"
typedef void (*write_ringMsgFunc_t)(CpaCySymDpOpData *pRequest,
icp_qat_fw_la_bulk_req_t *pCurrentQatMsg);
@ -133,7 +135,7 @@ LacDp_EnqueueParamCheck(const CpaCySymDpOpData *pRequest)
/* digestVerify and digestIsAppended on Hash-Only operation not
* supported */
if (pSessionDesc->digestIsAppended && pSessionDesc->digestVerify &&
(CPA_CY_SYM_OP_HASH == pSessionDesc->symOperation)) {
(pSessionDesc->symOperation == CPA_CY_SYM_OP_HASH)) {
LAC_INVALID_PARAM_LOG(
"digestVerify and digestIsAppended set "
"on Hash-Only operation is not supported");
@ -143,10 +145,17 @@ LacDp_EnqueueParamCheck(const CpaCySymDpOpData *pRequest)
/* Cipher specific tests */
if (CPA_CY_SYM_OP_HASH != pSessionDesc->symOperation) {
/* Perform IV check */
if ((LAC_CIPHER_IS_CTR_MODE(pSessionDesc->cipherAlgorithm) ||
LAC_CIPHER_IS_CBC_MODE(pSessionDesc->cipherAlgorithm) ||
LAC_CIPHER_IS_AES_F8(pSessionDesc->cipherAlgorithm)) &&
(!(LAC_CIPHER_IS_CCM(pSessionDesc->cipherAlgorithm)))) {
switch (pSessionDesc->cipherAlgorithm) {
case CPA_CY_SYM_CIPHER_AES_CTR:
case CPA_CY_SYM_CIPHER_3DES_CTR:
case CPA_CY_SYM_CIPHER_SM4_CTR:
case CPA_CY_SYM_CIPHER_AES_GCM:
case CPA_CY_SYM_CIPHER_CHACHA:
case CPA_CY_SYM_CIPHER_AES_CBC:
case CPA_CY_SYM_CIPHER_DES_CBC:
case CPA_CY_SYM_CIPHER_3DES_CBC:
case CPA_CY_SYM_CIPHER_SM4_CBC:
case CPA_CY_SYM_CIPHER_AES_F8: {
Cpa32U ivLenInBytes = LacSymQat_CipherIvSizeBytesGet(
pSessionDesc->cipherAlgorithm);
if (pRequest->ivLenInBytes != ivLenInBytes) {
@ -164,11 +173,10 @@ LacDp_EnqueueParamCheck(const CpaCySymDpOpData *pRequest)
LAC_INVALID_PARAM_LOG("invalid iv of 0");
return CPA_STATUS_INVALID_PARAM;
}
/* pRequest->pIv is only used for CCM so is not checked
* here */
} else if (LAC_CIPHER_IS_KASUMI(
pSessionDesc->cipherAlgorithm)) {
} break;
case CPA_CY_SYM_CIPHER_KASUMI_F8: {
if (LAC_CIPHER_KASUMI_F8_IV_LENGTH !=
pRequest->ivLenInBytes) {
LAC_INVALID_PARAM_LOG("invalid cipher IV size");
@ -178,8 +186,8 @@ LacDp_EnqueueParamCheck(const CpaCySymDpOpData *pRequest)
LAC_INVALID_PARAM_LOG("invalid iv of 0");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_SNOW3G_UEA2(
pSessionDesc->cipherAlgorithm)) {
} break;
case CPA_CY_SYM_CIPHER_SNOW3G_UEA2: {
if (ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ !=
pRequest->ivLenInBytes) {
LAC_INVALID_PARAM_LOG("invalid cipher IV size");
@ -189,8 +197,8 @@ LacDp_EnqueueParamCheck(const CpaCySymDpOpData *pRequest)
LAC_INVALID_PARAM_LOG("invalid iv of 0");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_ZUC_EEA3(
pSessionDesc->cipherAlgorithm)) {
} break;
case CPA_CY_SYM_CIPHER_ZUC_EEA3: {
if (ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ !=
pRequest->ivLenInBytes) {
LAC_INVALID_PARAM_LOG("invalid cipher IV size");
@ -200,7 +208,8 @@ LacDp_EnqueueParamCheck(const CpaCySymDpOpData *pRequest)
LAC_INVALID_PARAM_LOG("invalid iv of 0");
return CPA_STATUS_INVALID_PARAM;
}
} else if (LAC_CIPHER_IS_CCM(pSessionDesc->cipherAlgorithm)) {
} break;
case CPA_CY_SYM_CIPHER_AES_CCM: {
if (CPA_STATUS_SUCCESS !=
LacSymAlgChain_CheckCCMData(
pRequest->pAdditionalAuthData,
@ -209,30 +218,42 @@ LacDp_EnqueueParamCheck(const CpaCySymDpOpData *pRequest)
pRequest->ivLenInBytes)) {
return CPA_STATUS_INVALID_PARAM;
}
} break;
default:
break;
}
/* Perform algorithm-specific checks */
if (!(LAC_CIPHER_IS_ARC4(pSessionDesc->cipherAlgorithm) ||
LAC_CIPHER_IS_CTR_MODE(pSessionDesc->cipherAlgorithm) ||
LAC_CIPHER_IS_F8_MODE(pSessionDesc->cipherAlgorithm) ||
LAC_CIPHER_IS_SNOW3G_UEA2(
pSessionDesc->cipherAlgorithm) ||
LAC_CIPHER_IS_ZUC_EEA3(pSessionDesc->cipherAlgorithm))) {
switch (pSessionDesc->cipherAlgorithm) {
case CPA_CY_SYM_CIPHER_ARC4:
case CPA_CY_SYM_CIPHER_AES_CTR:
case CPA_CY_SYM_CIPHER_3DES_CTR:
case CPA_CY_SYM_CIPHER_SM4_CTR:
case CPA_CY_SYM_CIPHER_AES_CCM:
case CPA_CY_SYM_CIPHER_AES_GCM:
case CPA_CY_SYM_CIPHER_CHACHA:
case CPA_CY_SYM_CIPHER_KASUMI_F8:
case CPA_CY_SYM_CIPHER_AES_F8:
case CPA_CY_SYM_CIPHER_SNOW3G_UEA2:
case CPA_CY_SYM_CIPHER_ZUC_EEA3:
/* No action needed */
break;
default: {
/* Mask & check below is based on assumption that block
* size is
* a power of 2. If data size is not a multiple of the
* block size,
* the "remainder" bits selected by the mask be non-zero
* size is a power of 2. If data size is not a multiple
* of the block size, the "remainder" bits selected by
* the mask be non-zero
*/
if (pRequest->messageLenToCipherInBytes &
(LacSymQat_CipherBlockSizeBytesGet(
pSessionDesc->cipherAlgorithm) -
1)) {
LAC_INVALID_PARAM_LOG(
"Data size must be block size multiple");
"Data size must be block size"
" multiple");
return CPA_STATUS_INVALID_PARAM;
}
}
}
cipher = pSessionDesc->cipherAlgorithm;
hash = pSessionDesc->hashAlgorithm;
@ -242,11 +263,9 @@ LacDp_EnqueueParamCheck(const CpaCySymDpOpData *pRequest)
if (LAC_CIPHER_IS_SPC(cipher, hash, capabilitiesMask) &&
(LAC_CIPHER_SPC_IV_SIZE == pRequest->ivLenInBytes)) {
/* For CHACHA and AES_GCM single pass there is an AAD
* buffer
* if aadLenInBytes is nonzero. AES_GMAC AAD is stored
* in
* source buffer, therefore there is no separate AAD
* buffer. */
* buffer if aadLenInBytes is nonzero. AES_GMAC AAD is
* stored in source buffer, therefore there is no
* separate AAD buffer. */
if ((0 != pSessionDesc->aadLenInBytes) &&
(CPA_CY_SYM_HASH_AES_GMAC !=
pSessionDesc->hashAlgorithm)) {
@ -397,7 +416,7 @@ LacDp_EnqueueParamCheck(const CpaCySymDpOpData *pRequest)
* @ingroup cpaCySymDp
* Write Message on the ring and write request params
* This is the optimized version, which should not be used for
* algorithm of CCM, GCM and RC4
* algorithm of CCM, GCM, CHACHA and RC4
*
* @description
* Write Message on the ring and write request params
@ -425,12 +444,15 @@ LacDp_WriteRingMsgOpt(CpaCySymDpOpData *pRequest,
/* Write Request */
/*
* Fill in the header and footer bytes of the ET ring message - cached
* from
* the session descriptor.
* from the session descriptor.
*/
pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->reqCacheHdr);
pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->reqCacheFtr);
if (!pSessionDesc->useSymConstantsTable) {
pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->reqCacheHdr);
pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->reqCacheFtr);
} else {
pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->shramReqCacheHdr);
pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->shramReqCacheFtr);
}
memcpy(pMsgDummy,
pCacheDummyHdr,
(LAC_LONG_WORD_IN_BYTES * LAC_SIZE_OF_CACHE_HDR_IN_LW));
@ -457,6 +479,7 @@ LacDp_WriteRingMsgOpt(CpaCySymDpOpData *pRequest,
if (pSessionDesc->isCipher) {
LacSymQat_CipherRequestParamsPopulate(
pSessionDesc,
pCurrentQatMsg,
pRequest->cryptoStartSrcOffsetInBytes,
pRequest->messageLenToCipherInBytes,
@ -493,14 +516,27 @@ LacDp_WriteRingMsgOpt(CpaCySymDpOpData *pRequest,
* copied directly from the op request data because they share a
* corresponding layout. The remaining 4 bytes are taken
* from the session message template and use values
* preconfigured at
* sessionInit (updated per request for some specific cases
* below)
* preconfigured at sessionInit (updated per request for some
* specific cases below)
*/
memcpy(pAuthReqPars,
(Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes),
((unsigned long)&(pAuthReqPars->u2.inner_prefix_sz) -
(unsigned long)pAuthReqPars));
/* We force a specific compiler optimisation here. The length
* to be copied turns out to be always 16, and by coding a
* memcpy with a literal value the compiler will compile inline
* code (in fact, only two vector instructions) to effect the
* copy. This gives us a huge performance increase.
*/
unsigned long cplen =
(unsigned long)&(pAuthReqPars->u2.inner_prefix_sz) -
(unsigned long)pAuthReqPars;
if (cplen == 16)
memcpy(pAuthReqPars,
(Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes),
16);
else
memcpy(pAuthReqPars,
(Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes),
cplen);
if (CPA_TRUE == pSessionDesc->isAuthEncryptOp) {
pAuthReqPars->hash_state_sz =
@ -548,25 +584,29 @@ LacDp_WriteRingMsgFull(CpaCySymDpOpData *pRequest,
Cpa32U sizeInBytes = 0;
CpaCySymCipherAlgorithm cipher = pSessionDesc->cipherAlgorithm;
CpaCySymHashAlgorithm hash = pSessionDesc->hashAlgorithm;
sal_crypto_service_t *pService =
(sal_crypto_service_t *)pRequest->instanceHandle;
Cpa32U capabilitiesMask =
((sal_crypto_service_t *)pRequest->instanceHandle)
->generic_service_info.capabilitiesMask;
CpaBoolean isSpCcm = LAC_CIPHER_IS_CCM(cipher) &&
LAC_CIPHER_IS_SPC(cipher, hash, capabilitiesMask);
Cpa8U paddingLen = 0;
Cpa8U blockLen = 0;
Cpa32U aadDataLen = 0;
pMsgDummy = (Cpa8U *)pCurrentQatMsg;
/* Write Request */
/*
* Fill in the header and footer bytes of the ET ring message - cached
* from
* the session descriptor.
* from the session descriptor.
*/
if (!pSessionDesc->isSinglePass &&
LAC_CIPHER_IS_SPC(cipher, hash, capabilitiesMask) &&
(LAC_CIPHER_SPC_IV_SIZE == pRequest->ivLenInBytes)) {
pSessionDesc->isSinglePass = CPA_TRUE;
if ((NON_SPC != pSessionDesc->singlePassState) &&
(isSpCcm || (LAC_CIPHER_SPC_IV_SIZE == pRequest->ivLenInBytes))) {
pSessionDesc->singlePassState = SPC;
pSessionDesc->isCipher = CPA_TRUE;
pSessionDesc->isAuthEncryptOp = CPA_FALSE;
pSessionDesc->isAuth = CPA_FALSE;
@ -581,7 +621,8 @@ LacDp_WriteRingMsgFull(CpaCySymDpOpData *pRequest,
*/
ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
pSessionDesc->laCmdFlags, ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
ICP_QAT_FW_LA_PROTO_SET(pSessionDesc->laCmdFlags, 0);
if (isCyGen2x(pService))
ICP_QAT_FW_LA_PROTO_SET(pSessionDesc->laCmdFlags, 0);
pCdInfo = &(pSessionDesc->contentDescInfo);
pHwBlockBaseInDRAM = (Cpa8U *)pCdInfo->pData;
@ -589,11 +630,22 @@ LacDp_WriteRingMsgFull(CpaCySymDpOpData *pRequest,
pSessionDesc->cipherDirection) {
if (LAC_CIPHER_IS_GCM(cipher))
hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES(
LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_GCM_SPC);
else
LAC_SYM_QAT_CIPHER_GCM_SPC_OFFSET_IN_DRAM);
else if (LAC_CIPHER_IS_CHACHA(cipher))
hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES(
LAC_SYM_QAT_CIPHER_OFFSET_IN_DRAM_CHACHA_SPC);
LAC_SYM_QAT_CIPHER_CHACHA_SPC_OFFSET_IN_DRAM);
} else if (isSpCcm) {
hwBlockOffsetInDRAM = LAC_QUADWORDS_TO_BYTES(
LAC_SYM_QAT_CIPHER_CCM_SPC_OFFSET_IN_DRAM);
}
/* Update slice type, as used algos changed */
pSessionDesc->cipherSliceType =
LacCipher_GetCipherSliceType(pService, cipher, hash);
ICP_QAT_FW_LA_SLICE_TYPE_SET(pSessionDesc->laCmdFlags,
pSessionDesc->cipherSliceType);
/* construct cipherConfig in CD in DRAM */
LacSymQat_CipherHwBlockPopulateCfgData(pSessionDesc,
pHwBlockBaseInDRAM +
@ -605,10 +657,42 @@ LacDp_WriteRingMsgFull(CpaCySymDpOpData *pRequest,
pSessionDesc->laCmdId,
pSessionDesc->cmnRequestFlags,
pSessionDesc->laCmdFlags);
} else if ((SPC == pSessionDesc->singlePassState) &&
(LAC_CIPHER_SPC_IV_SIZE != pRequest->ivLenInBytes)) {
pSessionDesc->symOperation = CPA_CY_SYM_OP_ALGORITHM_CHAINING;
pSessionDesc->singlePassState = LIKELY_SPC;
pSessionDesc->isCipher = CPA_TRUE;
pSessionDesc->isAuthEncryptOp = CPA_TRUE;
pSessionDesc->isAuth = CPA_TRUE;
pCdInfo = &(pSessionDesc->contentDescInfo);
pHwBlockBaseInDRAM = (Cpa8U *)pCdInfo->pData;
if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT ==
pSessionDesc->cipherDirection) {
pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
} else {
pSessionDesc->laCmdId = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
}
ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
pSessionDesc->laCmdFlags, 0);
ICP_QAT_FW_LA_PROTO_SET(pSessionDesc->laCmdFlags,
ICP_QAT_FW_LA_GCM_PROTO);
LacSymQat_CipherHwBlockPopulateCfgData(pSessionDesc,
pHwBlockBaseInDRAM +
hwBlockOffsetInDRAM,
&sizeInBytes);
SalQatMsg_CmnHdrWrite((icp_qat_fw_comn_req_t *)&(
pSessionDesc->reqCacheHdr),
ICP_QAT_FW_COMN_REQ_CPM_FW_LA,
pSessionDesc->laCmdId,
pSessionDesc->cmnRequestFlags,
pSessionDesc->laCmdFlags);
} else if (CPA_CY_SYM_HASH_AES_GMAC == pSessionDesc->hashAlgorithm) {
pSessionDesc->aadLenInBytes = pRequest->messageLenToHashInBytes;
}
if (pSessionDesc->isSinglePass) {
if (SPC == pSessionDesc->singlePassState) {
pCacheDummyHdr = (Cpa8U *)&(pSessionDesc->reqSpcCacheHdr);
pCacheDummyFtr = (Cpa8U *)&(pSessionDesc->reqSpcCacheFtr);
} else {
@ -644,8 +728,9 @@ LacDp_WriteRingMsgFull(CpaCySymDpOpData *pRequest,
pRequest->srcBufferLen,
pRequest->dstBufferLen);
if (CPA_CY_SYM_HASH_AES_CCM == pSessionDesc->hashAlgorithm &&
pSessionDesc->isAuth == CPA_TRUE) {
if ((CPA_CY_SYM_HASH_AES_CCM == pSessionDesc->hashAlgorithm &&
pSessionDesc->isAuth == CPA_TRUE) ||
isSpCcm) {
/* prepare IV and AAD for CCM */
LacSymAlgChain_PrepareCCMData(
pSessionDesc,
@ -655,15 +740,14 @@ LacDp_WriteRingMsgFull(CpaCySymDpOpData *pRequest,
pRequest->ivLenInBytes);
/* According to the API, for CCM and GCM,
* messageLenToHashInBytes
* and hashStartSrcOffsetInBytes are not initialized by the
* user and must be set by the driver
* messageLenToHashInBytes and hashStartSrcOffsetInBytes are not
* initialized by the user and must be set by the driver
*/
pRequest->hashStartSrcOffsetInBytes =
pRequest->cryptoStartSrcOffsetInBytes;
pRequest->messageLenToHashInBytes =
pRequest->messageLenToCipherInBytes;
} else if (!pSessionDesc->isSinglePass &&
} else if ((SPC != pSessionDesc->singlePassState) &&
(CPA_CY_SYM_HASH_AES_GCM == pSessionDesc->hashAlgorithm ||
CPA_CY_SYM_HASH_AES_GMAC == pSessionDesc->hashAlgorithm)) {
/* GCM case */
@ -693,56 +777,87 @@ LacDp_WriteRingMsgFull(CpaCySymDpOpData *pRequest,
if (pSessionDesc->isCipher) {
if (CPA_CY_SYM_CIPHER_ARC4 == pSessionDesc->cipherAlgorithm) {
/* ARC4 does not have an IV but the field is used to
* store the
* initial state */
* store the initial state */
pRequest->iv =
pSessionDesc->cipherARC4InitialStatePhysAddr;
}
ICP_QAT_FW_LA_SLICE_TYPE_SET(
pCurrentQatMsg->comn_hdr.serv_specif_flags,
pSessionDesc->cipherSliceType);
LacSymQat_CipherRequestParamsPopulate(
pSessionDesc,
pCurrentQatMsg,
pRequest->cryptoStartSrcOffsetInBytes,
pRequest->messageLenToCipherInBytes,
pRequest->iv,
pRequest->pIv);
if (pSessionDesc->isSinglePass) {
if (SPC == pSessionDesc->singlePassState) {
icp_qat_fw_la_cipher_req_params_t *pCipherReqParams =
(icp_qat_fw_la_cipher_req_params_t
*)((Cpa8U *)&(
pCurrentQatMsg->serv_specif_rqpars) +
ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET);
pCipherReqParams->spc_aad_addr =
(uint64_t)pRequest->additionalAuthData;
pCipherReqParams->spc_aad_sz =
pSessionDesc->aadLenInBytes;
icp_qat_fw_la_cipher_20_req_params_t *pCipher20ReqParams =
(void
*)((Cpa8U *)&(
pCurrentQatMsg->serv_specif_rqpars) +
ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET);
pCipherReqParams->spc_auth_res_addr =
(uint64_t)pRequest->digestResult;
pCipherReqParams->spc_auth_res_sz =
pSessionDesc->hashResultSize;
if (isCyGen4x(pService)) {
pCipher20ReqParams->spc_aad_addr =
(uint64_t)pRequest->additionalAuthData;
pCipher20ReqParams->spc_aad_sz =
pSessionDesc->aadLenInBytes;
pCipher20ReqParams->spc_aad_offset = 0;
if (isSpCcm)
pCipher20ReqParams->spc_aad_sz +=
LAC_CIPHER_CCM_AAD_OFFSET;
pCipher20ReqParams->spc_auth_res_addr =
(uint64_t)pRequest->digestResult;
pCipher20ReqParams->spc_auth_res_sz =
(Cpa8U)pSessionDesc->hashResultSize;
} else {
pCipherReqParams->spc_aad_addr =
(uint64_t)pRequest->additionalAuthData;
pCipherReqParams->spc_aad_sz =
(Cpa16U)pSessionDesc->aadLenInBytes;
pCipherReqParams->spc_auth_res_addr =
(uint64_t)pRequest->digestResult;
pCipherReqParams->spc_auth_res_sz =
(Cpa8U)pSessionDesc->hashResultSize;
}
/* For CHACHA and AES_GCM single pass AAD buffer needs
* alignment
* if aadLenInBytes is nonzero.
* In case of AES-GMAC, AAD buffer passed in the src
* buffer.
* alignment if aadLenInBytes is nonzero. In case of
* AES-GMAC, AAD buffer passed in the src buffer.
*/
if (0 != pSessionDesc->aadLenInBytes &&
CPA_CY_SYM_HASH_AES_GMAC !=
pSessionDesc->hashAlgorithm) {
blockLen = LacSymQat_CipherBlockSizeBytesGet(
pSessionDesc->cipherAlgorithm);
if ((pSessionDesc->aadLenInBytes % blockLen) !=
0) {
paddingLen = blockLen -
(pSessionDesc->aadLenInBytes %
blockLen);
memset(
&pRequest->pAdditionalAuthData
[pSessionDesc->aadLenInBytes],
0,
paddingLen);
aadDataLen = pSessionDesc->aadLenInBytes;
/* In case of AES_CCM, B0 block size and 2 bytes
* of AAD len
* encoding need to be added to total AAD data
* len */
if (isSpCcm)
aadDataLen += LAC_CIPHER_CCM_AAD_OFFSET;
if ((aadDataLen % blockLen) != 0) {
paddingLen =
blockLen - (aadDataLen % blockLen);
memset(&pRequest->pAdditionalAuthData
[aadDataLen],
0,
paddingLen);
}
}
}
@ -777,32 +892,14 @@ LacDp_WriteRingMsgFull(CpaCySymDpOpData *pRequest,
* copied directly from the op request data because they share a
* corresponding layout. The remaining 4 bytes are taken
* from the session message template and use values
* preconfigured at
* sessionInit (updated per request for some specific cases
* below)
* preconfigured at sessionInit (updated per request for some
* specific cases below)
*/
/* We force a specific compiler optimisation here. The length
* to
* be copied turns out to be always 16, and by coding a memcpy
* with
* a literal value the compiler will compile inline code (in
* fact,
* only two vector instructions) to effect the copy. This gives
* us
* a huge performance increase.
*/
unsigned long cplen =
(unsigned long)&(pAuthReqPars->u2.inner_prefix_sz) -
(unsigned long)pAuthReqPars;
if (cplen == 16)
memcpy(pAuthReqPars,
(Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes),
16);
else
memcpy(pAuthReqPars,
(Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes),
cplen);
memcpy(pAuthReqPars,
(Cpa32U *)&(pRequest->hashStartSrcOffsetInBytes),
((uintptr_t) &
(pAuthReqPars->u2.inner_prefix_sz) -
(uintptr_t)pAuthReqPars));
if (CPA_TRUE == pSessionDesc->isAuthEncryptOp) {
pAuthReqPars->hash_state_sz =
@ -892,7 +989,7 @@ cpaCySymDpRemoveSession(const CpaInstanceHandle instanceHandle,
/* CPA_INSTANCE_HANDLE_SINGLE is not supported on DP apis */
LAC_CHECK_INSTANCE_HANDLE(instanceHandle);
/* All other param checks are common with trad api */
/* All other param checks are common with trad api */
return cpaCySymRemoveSession(instanceHandle, sessionCtx);
}
@ -932,6 +1029,10 @@ cpaCySymDpEnqueueOp(CpaCySymDpOpData *pRequest, const CpaBoolean performOpNow)
return status;
}
/* Check if SAL is running in crypto data plane otherwise return an
* error */
SAL_RUNNING_CHECK(pRequest->instanceHandle);
trans_handle = ((sal_crypto_service_t *)pRequest->instanceHandle)
->trans_handle_sym_tx;
@ -1029,6 +1130,10 @@ cpaCySymDpEnqueueOpBatch(const Cpa32U numberRequests,
}
}
/* Check if SAL is running in crypto data plane otherwise return an
* error */
SAL_RUNNING_CHECK(pRequests[0]->instanceHandle);
trans_handle = ((sal_crypto_service_t *)pRequests[0]->instanceHandle)
->trans_handle_sym_tx;
pSessionDesc =

View File

@ -50,17 +50,16 @@
(CPA_CY_SYM_HASH_AES_CMAC == (alg)) || \
(CPA_CY_SYM_HASH_ZUC_EIA3 == (alg))) && \
(CPA_CY_SYM_HASH_MODE_AUTH != (mode))) || \
(((CPA_CY_SYM_HASH_SHA3_224 == (alg)) || \
(CPA_CY_SYM_HASH_SHA3_256 == (alg)) || \
(CPA_CY_SYM_HASH_SHA3_384 == (alg)) || \
(CPA_CY_SYM_HASH_SHA3_512 == (alg))) && \
(CPA_CY_SYM_HASH_MODE_NESTED == (mode))) || \
(((CPA_CY_SYM_HASH_SHAKE_128 == (alg)) || \
(CPA_CY_SYM_HASH_SHAKE_256 == (alg))) && \
(CPA_CY_SYM_HASH_MODE_AUTH == (mode))))
((LAC_HASH_IS_SHA3(alg)) && (CPA_CY_SYM_HASH_MODE_NESTED == (mode))))
/**< Macro to check for valid algorithm-mode combination */
void LacSync_GenBufListVerifyCb(void *pCallbackTag,
CpaStatus status,
CpaCySymOp operationType,
void *pOpData,
CpaBufferList *pDstBuffer,
CpaBoolean opResult);
/**
* @ingroup LacHash
* This callback function will be invoked whenever a synchronous
@ -98,7 +97,7 @@ LacHash_StatePrefixAadBufferInit(
if (pHashStateBufferInfo->pDataPhys == 0) {
LAC_LOG_ERROR("Unable to get the physical address of "
"the hash state buffer");
"the hash state buffer\n");
return CPA_STATUS_FAIL;
}
@ -110,9 +109,11 @@ LacHash_StatePrefixAadBufferInit(
pHashStateBufferInfo,
pReq,
pHashSetupData->nestedModeSetupData.pInnerPrefixData,
pHashSetupData->nestedModeSetupData.innerPrefixLenInBytes,
(Cpa8U)pHashSetupData->nestedModeSetupData
.innerPrefixLenInBytes,
pHashSetupData->nestedModeSetupData.pOuterPrefixData,
pHashSetupData->nestedModeSetupData.outerPrefixLenInBytes);
(Cpa8U)pHashSetupData->nestedModeSetupData
.outerPrefixLenInBytes);
}
/* For mode2 HMAC the key gets copied into both the inner and
* outer prefix fields */
@ -121,9 +122,9 @@ LacHash_StatePrefixAadBufferInit(
pHashStateBufferInfo,
pReq,
pHashSetupData->authModeSetupData.authKey,
pHashSetupData->authModeSetupData.authKeyLenInBytes,
(Cpa8U)pHashSetupData->authModeSetupData.authKeyLenInBytes,
pHashSetupData->authModeSetupData.authKey,
pHashSetupData->authModeSetupData.authKeyLenInBytes);
(Cpa8U)pHashSetupData->authModeSetupData.authKeyLenInBytes);
}
/* else do nothing for the other cases */
return CPA_STATUS_SUCCESS;
@ -226,22 +227,32 @@ LacHash_PrecomputeDataCreate(const CpaInstanceHandle instanceHandle,
pCallbackTag);
} else if (CPA_CY_SYM_HASH_AES_CCM == hashAlgorithm) {
/*
* The Inner Hash Initial State2 block must contain K
* (the cipher key) and 16 zeroes which will be replaced with
* EK(Ctr0) by the QAT-ME.
* The Inner Hash Initial State2 block is 32 bytes long.
* Therefore, for keys bigger than 128 bits (16 bytes),
* there is no space for 16 zeroes.
*/
if (pSessionSetup->cipherSetupData.cipherKeyLenInBytes ==
ICP_QAT_HW_AES_128_KEY_SZ) {
/*
* The Inner Hash Initial State2 block must contain K
* (the cipher key) and 16 zeroes which will be replaced
* with EK(Ctr0) by the QAT-ME.
*/
/* write the auth key which for CCM is equivalent to cipher key
*/
memcpy(pState2,
pSessionSetup->cipherSetupData.pCipherKey,
pSessionSetup->cipherSetupData.cipherKeyLenInBytes);
/* write the auth key which for CCM is equivalent to
* cipher key
*/
memcpy(
pState2,
pSessionSetup->cipherSetupData.pCipherKey,
pSessionSetup->cipherSetupData.cipherKeyLenInBytes);
/* initialize remaining buffer space to all zeroes */
LAC_OS_BZERO(
pState2 +
pSessionSetup->cipherSetupData.cipherKeyLenInBytes,
ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ);
/* initialize remaining buffer space to all zeroes */
LAC_OS_BZERO(pState2 +
pSessionSetup->cipherSetupData
.cipherKeyLenInBytes,
ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ);
}
/* There is no request sent to the QAT for this operation,
* so just invoke the user's callback directly to signal
@ -279,8 +290,8 @@ LacHash_PrecomputeDataCreate(const CpaInstanceHandle instanceHandle,
if (CPA_STATUS_SUCCESS == status) {
/* write len(A) (the length of A) into bytes 16-19 of
* pState2
* in big-endian format. This field is 8 bytes */
* pState2 in big-endian format. This field is 8 bytes
*/
*(Cpa32U *)&pState2[ICP_QAT_HW_GALOIS_H_SZ] =
LAC_MEM_WR_32(pAuthModeSetupData->aadLenInBytes);
}
@ -367,26 +378,15 @@ LacHash_HashContextCheck(CpaInstanceHandle instanceHandle,
CpaCySymCapabilitiesInfo capInfo;
/*Protect against value of hash outside the bitmap*/
if ((pHashSetupData->hashAlgorithm) >=
CPA_CY_SYM_HASH_CAP_BITMAP_SIZE) {
if (pHashSetupData->hashAlgorithm >= CPA_CY_SYM_HASH_CAP_BITMAP_SIZE) {
LAC_INVALID_PARAM_LOG("hashAlgorithm");
return CPA_STATUS_INVALID_PARAM;
}
cpaCySymQueryCapabilities(instanceHandle, &capInfo);
if (!CPA_BITMAP_BIT_TEST(capInfo.hashes,
pHashSetupData->hashAlgorithm) &&
pHashSetupData->hashAlgorithm != CPA_CY_SYM_HASH_AES_CBC_MAC) {
/* Ensure SHAKE algorithms are not supported */
if ((CPA_CY_SYM_HASH_SHAKE_128 ==
pHashSetupData->hashAlgorithm) ||
(CPA_CY_SYM_HASH_SHAKE_256 ==
pHashSetupData->hashAlgorithm)) {
LAC_INVALID_PARAM_LOG(
"Hash algorithms SHAKE-128 and SHAKE-256 "
"are not supported.");
return CPA_STATUS_UNSUPPORTED;
}
LAC_INVALID_PARAM_LOG("hashAlgorithm");
return CPA_STATUS_INVALID_PARAM;
}
@ -405,8 +405,9 @@ LacHash_HashContextCheck(CpaInstanceHandle instanceHandle,
if (LAC_HASH_ALG_MODE_NOT_SUPPORTED(pHashSetupData->hashAlgorithm,
pHashSetupData->hashMode)) {
LAC_INVALID_PARAM_LOG("hashAlgorithm and hashMode combination");
return CPA_STATUS_INVALID_PARAM;
LAC_UNSUPPORTED_PARAM_LOG(
"hashAlgorithm and hashMode combination");
return CPA_STATUS_UNSUPPORTED;
}
LacSymQat_HashAlgLookupGet(instanceHandle,
@ -432,10 +433,8 @@ LacHash_HashContextCheck(CpaInstanceHandle instanceHandle,
Cpa32U aadDataSize = 0;
/* RFC 4106: Implementations MUST support a full-length
* 16-octet
* ICV, and MAY support 8 or 12 octet ICVs, and MUST NOT
* support
* other ICV lengths. */
* 16-octet ICV, and MAY support 8 or 12 octet ICVs, and
* MUST NOT support other ICV lengths. */
if ((pHashSetupData->digestResultLenInBytes !=
LAC_HASH_AES_GCM_ICV_SIZE_8) &&
(pHashSetupData->digestResultLenInBytes !=
@ -490,15 +489,13 @@ LacHash_HashContextCheck(CpaInstanceHandle instanceHandle,
aadDataSize = LAC_HASH_AES_CCM_BLOCK_SIZE;
/* then, if there is some 'a' data, the buffer will
* store encoded
* length of 'a' and 'a' itself */
* store encoded length of 'a' and 'a' itself */
if (pHashSetupData->authModeSetupData.aadLenInBytes >
0) {
/* as the QAT API puts the requirement on the
* pAdditionalAuthData not to be bigger than 240
* bytes then we
* just need 2 bytes to store encoded length of
* 'a' */
* bytes then we just need 2 bytes to store
* encoded length of 'a' */
aadDataSize += sizeof(Cpa16U);
aadDataSize += pHashSetupData->authModeSetupData
.aadLenInBytes;
@ -536,8 +533,7 @@ LacHash_HashContextCheck(CpaInstanceHandle instanceHandle,
return CPA_STATUS_INVALID_PARAM;
}
/* For Snow3g hash aad field contains IV - it needs to
* be 16
* bytes long
* be 16 bytes long
*/
if (pHashSetupData->authModeSetupData.aadLenInBytes !=
ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) {
@ -570,8 +566,7 @@ LacHash_HashContextCheck(CpaInstanceHandle instanceHandle,
return CPA_STATUS_INVALID_PARAM;
}
/* For ZUC EIA3 hash aad field contains IV - it needs to
* be 16
* bytes long
* be 16 bytes long
*/
if (pHashSetupData->authModeSetupData.aadLenInBytes !=
ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ) {
@ -613,19 +608,6 @@ LacHash_HashContextCheck(CpaInstanceHandle instanceHandle,
if (!CPA_BITMAP_BIT_TEST(capInfo.hashes,
pHashSetupData->nestedModeSetupData
.outerHashAlgorithm)) {
/* Ensure SHAKE algorithms are not supported */
if ((CPA_CY_SYM_HASH_SHAKE_128 ==
pHashSetupData->nestedModeSetupData
.outerHashAlgorithm) ||
(CPA_CY_SYM_HASH_SHAKE_256 ==
pHashSetupData->nestedModeSetupData
.outerHashAlgorithm)) {
LAC_INVALID_PARAM_LOG(
"Hash algorithms SHAKE-128 and SHAKE-256 "
"are not supported.");
return CPA_STATUS_UNSUPPORTED;
}
LAC_INVALID_PARAM_LOG("outerHashAlgorithm");
return CPA_STATUS_INVALID_PARAM;
}
@ -689,11 +671,15 @@ LacHash_PerformParamCheck(CpaInstanceHandle instanceHandle,
{
CpaStatus status = CPA_STATUS_SUCCESS;
lac_sym_qat_hash_alg_info_t *pHashAlgInfo = NULL;
CpaBoolean digestIsAppended = pSessionDesc->digestIsAppended;
CpaBoolean digestVerify = pSessionDesc->digestVerify;
CpaCySymOp symOperation = pSessionDesc->symOperation;
CpaCySymHashAlgorithm hashAlgorithm = pSessionDesc->hashAlgorithm;
/* digestVerify and digestIsAppended on Hash-Only operation not
* supported */
if (pSessionDesc->digestIsAppended && pSessionDesc->digestVerify &&
(CPA_CY_SYM_OP_HASH == pSessionDesc->symOperation)) {
if (digestIsAppended && digestVerify &&
(CPA_CY_SYM_OP_HASH == symOperation)) {
LAC_INVALID_PARAM_LOG(
"digestVerify and digestIsAppended set "
"on Hash-Only operation is not supported");
@ -702,21 +688,19 @@ LacHash_PerformParamCheck(CpaInstanceHandle instanceHandle,
/* check the digest result pointer */
if ((CPA_CY_SYM_PACKET_TYPE_PARTIAL != pOpData->packetType) &&
!pSessionDesc->digestIsAppended &&
(NULL == pOpData->pDigestResult)) {
!digestIsAppended && (NULL == pOpData->pDigestResult)) {
LAC_INVALID_PARAM_LOG("pDigestResult is NULL");
return CPA_STATUS_INVALID_PARAM;
}
/*
* Check if the pVerifyResult pointer is not null for hash operation
* when
* the packet is the last one and user has set verifyDigest flag
* when the packet is the last one and user has set verifyDigest flag
Also, this is only needed for synchronous operation, so check if the
* callback pointer is the internal synchronous one rather than a user-
* supplied one.
*/
if ((CPA_TRUE == pSessionDesc->digestVerify) &&
if ((CPA_TRUE == digestVerify) &&
(CPA_CY_SYM_PACKET_TYPE_PARTIAL != pOpData->packetType) &&
(LacSync_GenBufListVerifyCb == pSessionDesc->pSymCb)) {
if (NULL == pVerifyResult) {
@ -732,8 +716,8 @@ LacHash_PerformParamCheck(CpaInstanceHandle instanceHandle,
* written anywhere so we cannot check for this been inside a buffer
* CCM/GCM specify the auth region using just the cipher params as this
* region is the same for auth and cipher. It is not checked here */
if ((CPA_CY_SYM_HASH_AES_CCM == pSessionDesc->hashAlgorithm) ||
(CPA_CY_SYM_HASH_AES_GCM == pSessionDesc->hashAlgorithm)) {
if ((CPA_CY_SYM_HASH_AES_CCM == hashAlgorithm) ||
(CPA_CY_SYM_HASH_AES_GCM == hashAlgorithm)) {
/* ensure AAD data pointer is non-NULL if AAD len > 0 */
if ((pSessionDesc->aadLenInBytes > 0) &&
(NULL == pOpData->pAdditionalAuthData)) {
@ -752,8 +736,8 @@ LacHash_PerformParamCheck(CpaInstanceHandle instanceHandle,
/* For Snow3g & ZUC hash pAdditionalAuthData field
* of OpData should contain IV */
if ((CPA_CY_SYM_HASH_SNOW3G_UIA2 == pSessionDesc->hashAlgorithm) ||
(CPA_CY_SYM_HASH_ZUC_EIA3 == pSessionDesc->hashAlgorithm)) {
if ((CPA_CY_SYM_HASH_SNOW3G_UIA2 == hashAlgorithm) ||
(CPA_CY_SYM_HASH_ZUC_EIA3 == hashAlgorithm)) {
if (NULL == pOpData->pAdditionalAuthData) {
LAC_INVALID_PARAM_LOG("pAdditionalAuthData is NULL");
return CPA_STATUS_INVALID_PARAM;
@ -761,12 +745,11 @@ LacHash_PerformParamCheck(CpaInstanceHandle instanceHandle,
}
/* partial packets need to be multiples of the algorithm block size in
* hash
* only mode (except for final partial packet) */
* hash only mode (except for final partial packet) */
if ((CPA_CY_SYM_PACKET_TYPE_PARTIAL == pOpData->packetType) &&
(CPA_CY_SYM_OP_HASH == pSessionDesc->symOperation)) {
(CPA_CY_SYM_OP_HASH == symOperation)) {
LacSymQat_HashAlgLookupGet(instanceHandle,
pSessionDesc->hashAlgorithm,
hashAlgorithm,
&pHashAlgInfo);
/* check if the message is a multiple of the block size. */

View File

@ -57,11 +57,7 @@ LacSymQueue_RequestSend(const CpaInstanceHandle instanceHandle,
*/
if ((CPA_FALSE == pSessionDesc->nonBlockingOpsInProgress) ||
(NULL != pSessionDesc->pRequestQueueTail)) {
if (CPA_STATUS_SUCCESS !=
LAC_SPINLOCK(&pSessionDesc->requestQueueLock)) {
LAC_LOG_ERROR("Failed to lock request queue");
return CPA_STATUS_RESOURCE;
}
LAC_SPINLOCK(&pSessionDesc->requestQueueLock);
/* Re-check blockingOpsInProgress and pRequestQueueTail in case
* either
@ -95,10 +91,7 @@ LacSymQueue_RequestSend(const CpaInstanceHandle instanceHandle,
/* request is queued, don't send to QAT here */
enqueued = CPA_TRUE;
}
if (CPA_STATUS_SUCCESS !=
LAC_SPINUNLOCK(&pSessionDesc->requestQueueLock)) {
LAC_LOG_ERROR("Failed to unlock request queue");
}
LAC_SPINUNLOCK(&pSessionDesc->requestQueueLock);
}
if (CPA_FALSE == enqueued) {

View File

@ -31,6 +31,7 @@
#include "sal_string_parse.h"
#include "lac_sym_key.h"
#include "lac_sym_qat_hash_defs_lookup.h"
#include "lac_sym_qat_constants_table.h"
#include "lac_sym_qat_cipher.h"
#include "lac_sym_qat_hash.h"
@ -104,6 +105,9 @@ LacSymQat_Init(CpaInstanceHandle instanceHandle)
{
CpaStatus status = CPA_STATUS_SUCCESS;
/* Initialize the SHRAM constants table */
LacSymQat_ConstantsInitLookupTables(instanceHandle);
/* Initialise the Hash lookup table */
status = LacSymQat_HashLookupInit(instanceHandle);
@ -130,10 +134,10 @@ LacSymQat_LaPacketCommandFlagSet(Cpa32U qatPacketType,
Cpa16U *pLaCommandFlags,
Cpa32U ivLenInBytes)
{
/* For Chacha ciphers set command flag as partial none to proceed
/* For SM4/Chacha ciphers set command flag as partial none to proceed
* with stateless processing */
if (LAC_CIPHER_IS_CHACHA(cipherAlgorithm) ||
LAC_CIPHER_IS_SM4(cipherAlgorithm)) {
if (LAC_CIPHER_IS_SM4(cipherAlgorithm) ||
LAC_CIPHER_IS_CHACHA(cipherAlgorithm)) {
ICP_QAT_FW_LA_PARTIAL_SET(*pLaCommandFlags,
ICP_QAT_FW_LA_PARTIAL_NONE);
return;
@ -144,10 +148,10 @@ LacSymQat_LaPacketCommandFlagSet(Cpa32U qatPacketType,
* must be disabled always.
* For all other ciphers and auth
* update state is disabled for full packets and final partials */
if (((laCmdId != ICP_QAT_FW_LA_CMD_AUTH) &&
LAC_CIPHER_IS_ECB_MODE(cipherAlgorithm)) ||
(ICP_QAT_FW_LA_PARTIAL_NONE == qatPacketType) ||
(ICP_QAT_FW_LA_PARTIAL_END == qatPacketType)) {
if ((ICP_QAT_FW_LA_PARTIAL_NONE == qatPacketType) ||
(ICP_QAT_FW_LA_PARTIAL_END == qatPacketType) ||
((laCmdId != ICP_QAT_FW_LA_CMD_AUTH) &&
LAC_CIPHER_IS_ECB_MODE(cipherAlgorithm))) {
ICP_QAT_FW_LA_UPDATE_STATE_SET(*pLaCommandFlags,
ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
@ -182,8 +186,9 @@ LacSymQat_packetTypeGet(CpaCySymPacketType packetType,
CpaCySymPacketType packetState,
Cpa32U *pQatPacketType)
{
switch (packetType) {
/* partial */
if (CPA_CY_SYM_PACKET_TYPE_PARTIAL == packetType) {
case CPA_CY_SYM_PACKET_TYPE_PARTIAL:
/* if the previous state was full, then this is the first packet
*/
if (CPA_CY_SYM_PACKET_TYPE_FULL == packetState) {
@ -191,13 +196,15 @@ LacSymQat_packetTypeGet(CpaCySymPacketType packetType,
} else {
*pQatPacketType = ICP_QAT_FW_LA_PARTIAL_MID;
}
}
break;
/* final partial */
else if (CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL == packetType) {
case CPA_CY_SYM_PACKET_TYPE_LAST_PARTIAL:
*pQatPacketType = ICP_QAT_FW_LA_PARTIAL_END;
}
break;
/* full packet - CPA_CY_SYM_PACKET_TYPE_FULL */
else {
default:
*pQatPacketType = ICP_QAT_FW_LA_PARTIAL_NONE;
}
}
@ -225,3 +232,101 @@ LacSymQat_LaSetDefaultFlags(icp_qat_fw_serv_specif_flags *laCmdFlags,
ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
*laCmdFlags, ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS);
}
/**
 * Determine whether this session's configuration can be served from the
 * SHRAM constants table rather than a content descriptor built in DRAM.
 *
 * On return, *pCipherOffset and *pHashOffset hold the offsets into the
 * SHRAM table for the cipher and hash configs respectively; an offset of
 * 0 means "not present in the table".
 *
 * Returns CPA_TRUE only when every config the command needs was found.
 */
CpaBoolean
LacSymQat_UseSymConstantsTable(lac_session_desc_t *pSession,
			       Cpa8U *pCipherOffset,
			       Cpa8U *pHashOffset)
{
	CpaBoolean optimisedCd = CPA_FALSE;
	CpaBoolean inShramTable = CPA_FALSE;

	*pCipherOffset = 0;
	*pHashOffset = 0;

	/* For chaining commands, first see whether the optimised content
	 * descriptor may be used at all. */
	if (ICP_QAT_FW_LA_CMD_CIPHER_HASH == pSession->laCmdId ||
	    ICP_QAT_FW_LA_CMD_HASH_CIPHER == pSession->laCmdId) {
		optimisedCd = LacSymQat_UseOptimisedContentDesc(pSession);
	}

	/* Cipher-only command, or chaining with the optimised CD */
	if (ICP_QAT_FW_LA_CMD_CIPHER == pSession->laCmdId || optimisedCd) {
		icp_qat_hw_cipher_algo_t algorithm;
		icp_qat_hw_cipher_mode_t mode;
		icp_qat_hw_cipher_dir_t dir;
		icp_qat_hw_cipher_convert_t key_convert;

		/* Key must fit in the request header's cd_pars area. */
		if (pSession->cipherKeyLenInBytes >
		    sizeof(icp_qat_fw_comn_req_hdr_cd_pars_t)) {
			return CPA_FALSE;
		}

		LacSymQat_CipherGetCfgData(
		    pSession, &algorithm, &mode, &dir, &key_convert);

		/* Is this cipher config present in the SHRAM table? */
		LacSymQat_ConstantsGetCipherOffset(pSession->pInstance,
						   algorithm,
						   mode,
						   dir,
						   key_convert,
						   pCipherOffset);
		inShramTable = (*pCipherOffset > 0) ? CPA_TRUE : CPA_FALSE;
	}

	/* Hash-only command, or chaining where the cipher config was found
	 * in the SHRAM table (a prerequisite for the optimised CD case). */
	if (ICP_QAT_FW_LA_CMD_AUTH == pSession->laCmdId ||
	    (optimisedCd && inShramTable)) {
		icp_qat_hw_auth_algo_t algorithm;
		CpaBoolean nested;

		if (pSession->digestVerify) {
			return CPA_FALSE;
		}

		if ((!(optimisedCd && inShramTable)) &&
		    (ICP_QAT_HW_AUTH_MODE1 == pSession->qatHashMode)) {
			/* SHA1-mode1 in the SHRAM constants table is only
			 * usable together with the optimised content
			 * descriptor. */
			return CPA_FALSE;
		}

		LacSymQat_HashGetCfgData(pSession->pInstance,
					 pSession->qatHashMode,
					 pSession->hashMode,
					 pSession->hashAlgorithm,
					 &algorithm,
					 &nested);

		/* Is this auth config present in the SHRAM table? */
		LacSymQat_ConstantsGetAuthOffset(pSession->pInstance,
						 algorithm,
						 pSession->qatHashMode,
						 nested,
						 pHashOffset);
		inShramTable = (*pHashOffset > 0) ? CPA_TRUE : CPA_FALSE;
	}

	return inShramTable;
}
/**
 * Report whether the optimised (compact) content descriptor may be used
 * for this session.  Currently hard-disabled: this always returns
 * CPA_FALSE, so callers (e.g. LacSymQat_UseSymConstantsTable) always
 * fall back to the full content descriptor layout for chaining.
 */
CpaBoolean
LacSymQat_UseOptimisedContentDesc(lac_session_desc_t *pSession)
{
	return CPA_FALSE;
}

View File

@ -0,0 +1,257 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
/**
***************************************************************************
* @file lac_sym_qat_constants_table.c
*
* @ingroup LacSymQat
***************************************************************************/
/*
*******************************************************************************
* Include public/global header files
*******************************************************************************
*/
#include "cpa.h"
/*
*******************************************************************************
* Include private header files
*******************************************************************************
*/
#include "lac_common.h"
#include "icp_qat_fw_la.h"
#include "lac_log.h"
#include "lac_mem.h"
#include "sal_string_parse.h"
#include "lac_sal_types_crypto.h"
#include "sal_types_compression.h"
/* Global scratch lookup tables, indexed
 * [algorithm][mode][direction][key-convert] (cipher) and
 * [algorithm][mode][nested] (auth).  A non-zero entry is the offset of
 * that configuration in the SHRAM constants table; 0 means the
 * configuration is not available there.  These are filled once by
 * ICP_QAT_HW_FILL_LOOKUP_TBLS and then copied per service instance. */
static uint8_t icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_DELIMITER]
					   [ICP_QAT_HW_CIPHER_MODE_DELIMITER][2]
					   [2]; /* IA version */
static uint8_t icp_qat_hw_auth_lookup_tbl[ICP_QAT_HW_AUTH_ALGO_DELIMITER]
					 [ICP_QAT_HW_AUTH_MODE_DELIMITER]
					 [2]; /* IA version */

/* FW-supplied macro that marks every algorithm/mode combination present
 * in the SHRAM constants table with its offset.  Kept as a macro (rather
 * than unrolled into code) so it can be taken from the firmware package
 * without modification. */
#define ICP_QAT_HW_FILL_LOOKUP_TBLS                                            \
	{                                                                      \
                                                                               \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_DES]       \
					    [ICP_QAT_HW_CIPHER_ECB_MODE]       \
					    [ICP_QAT_HW_CIPHER_ENCRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						9;                             \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_DES]       \
					    [ICP_QAT_HW_CIPHER_ECB_MODE]       \
					    [ICP_QAT_HW_CIPHER_DECRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						10;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_DES]       \
					    [ICP_QAT_HW_CIPHER_CBC_MODE]       \
					    [ICP_QAT_HW_CIPHER_ENCRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						11;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_DES]       \
					    [ICP_QAT_HW_CIPHER_CBC_MODE]       \
					    [ICP_QAT_HW_CIPHER_DECRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						12;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_DES]       \
					    [ICP_QAT_HW_CIPHER_CTR_MODE]       \
					    [ICP_QAT_HW_CIPHER_ENCRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						13;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128]    \
					    [ICP_QAT_HW_CIPHER_ECB_MODE]       \
					    [ICP_QAT_HW_CIPHER_ENCRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						14;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128]    \
					    [ICP_QAT_HW_CIPHER_ECB_MODE]       \
					    [ICP_QAT_HW_CIPHER_ENCRYPT]        \
					    [ICP_QAT_HW_CIPHER_KEY_CONVERT] =  \
						15;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128]    \
					    [ICP_QAT_HW_CIPHER_ECB_MODE]       \
					    [ICP_QAT_HW_CIPHER_DECRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						16;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128]    \
					    [ICP_QAT_HW_CIPHER_ECB_MODE]       \
					    [ICP_QAT_HW_CIPHER_DECRYPT]        \
					    [ICP_QAT_HW_CIPHER_KEY_CONVERT] =  \
						17;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128]    \
					    [ICP_QAT_HW_CIPHER_CBC_MODE]       \
					    [ICP_QAT_HW_CIPHER_ENCRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						18;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128]    \
					    [ICP_QAT_HW_CIPHER_CBC_MODE]       \
					    [ICP_QAT_HW_CIPHER_ENCRYPT]        \
					    [ICP_QAT_HW_CIPHER_KEY_CONVERT] =  \
						19;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128]    \
					    [ICP_QAT_HW_CIPHER_CBC_MODE]       \
					    [ICP_QAT_HW_CIPHER_DECRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						20;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128]    \
					    [ICP_QAT_HW_CIPHER_CBC_MODE]       \
					    [ICP_QAT_HW_CIPHER_DECRYPT]        \
					    [ICP_QAT_HW_CIPHER_KEY_CONVERT] =  \
						21;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128]    \
					    [ICP_QAT_HW_CIPHER_CTR_MODE]       \
					    [ICP_QAT_HW_CIPHER_ENCRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						22;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_AES128]    \
					    [ICP_QAT_HW_CIPHER_F8_MODE]        \
					    [ICP_QAT_HW_CIPHER_ENCRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						23;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_ARC4]      \
					    [ICP_QAT_HW_CIPHER_ECB_MODE]       \
					    [ICP_QAT_HW_CIPHER_ENCRYPT]        \
					    [ICP_QAT_HW_CIPHER_NO_CONVERT] =   \
						24;                            \
		icp_qat_hw_cipher_lookup_tbl[ICP_QAT_HW_CIPHER_ALGO_ARC4]      \
					    [ICP_QAT_HW_CIPHER_ECB_MODE]       \
					    [ICP_QAT_HW_CIPHER_ENCRYPT]        \
					    [ICP_QAT_HW_CIPHER_KEY_CONVERT] =  \
						25;                            \
                                                                               \
		icp_qat_hw_auth_lookup_tbl                                     \
		    [ICP_QAT_HW_AUTH_ALGO_MD5][ICP_QAT_HW_AUTH_MODE0]          \
		    [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 37;                 \
		icp_qat_hw_auth_lookup_tbl                                     \
		    [ICP_QAT_HW_AUTH_ALGO_SHA1][ICP_QAT_HW_AUTH_MODE0]         \
		    [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 41;                 \
		icp_qat_hw_auth_lookup_tbl                                     \
		    [ICP_QAT_HW_AUTH_ALGO_SHA1][ICP_QAT_HW_AUTH_MODE1]         \
		    [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 46;                 \
		icp_qat_hw_auth_lookup_tbl                                     \
		    [ICP_QAT_HW_AUTH_ALGO_SHA224][ICP_QAT_HW_AUTH_MODE0]       \
		    [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 48;                 \
		icp_qat_hw_auth_lookup_tbl                                     \
		    [ICP_QAT_HW_AUTH_ALGO_SHA256][ICP_QAT_HW_AUTH_MODE0]       \
		    [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 54;                 \
		icp_qat_hw_auth_lookup_tbl                                     \
		    [ICP_QAT_HW_AUTH_ALGO_SHA384][ICP_QAT_HW_AUTH_MODE0]       \
		    [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 60;                 \
		icp_qat_hw_auth_lookup_tbl                                     \
		    [ICP_QAT_HW_AUTH_ALGO_SHA512][ICP_QAT_HW_AUTH_MODE0]       \
		    [ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED] = 70;                 \
	}
/**
*****************************************************************************
* @ingroup LacSymQat
* LacSymQat_ConstantsInitLookupTables
*
*
*****************************************************************************/
/**
 * Populate the per-instance SHRAM constants lookup tables.
 *
 * The global scratch tables are zeroed, filled via the FW-supplied
 * ICP_QAT_HW_FILL_LOOKUP_TBLS macro, and then copied into the service
 * instance.  Initialising the globals first and copying them over costs
 * some memory and cycles, but allows the FW macro to be used without
 * modification.
 *
 * No-op for compression service instances (DC chaining not supported).
 */
void
LacSymQat_ConstantsInitLookupTables(CpaInstanceHandle instanceHandle)
{
	sal_service_t *pGenericService = (sal_service_t *)instanceHandle;
	lac_sym_qat_constants_t *pTables;

	if (SAL_SERVICE_TYPE_COMPRESSION == pGenericService->type) {
		/* DC chaining not supported yet */
		return;
	}

	pTables = &(
	    ((sal_crypto_service_t *)pGenericService)->constantsLookupTables);

	/* Start from all-zero global tables (zero == "not in SHRAM"). */
	memset(icp_qat_hw_cipher_lookup_tbl,
	       0,
	       sizeof(icp_qat_hw_cipher_lookup_tbl));
	memset(icp_qat_hw_auth_lookup_tbl,
	       0,
	       sizeof(icp_qat_hw_auth_lookup_tbl));

	/* Mark the supported algorithm/mode combinations with their
	 * offsets into the SHRAM constants table. */
	ICP_QAT_HW_FILL_LOOKUP_TBLS;

	/* Snapshot the populated global tables into this instance. */
	memcpy(pTables->cipher_offset,
	       icp_qat_hw_cipher_lookup_tbl,
	       sizeof(pTables->cipher_offset));
	memcpy(pTables->auth_offset,
	       icp_qat_hw_auth_lookup_tbl,
	       sizeof(pTables->auth_offset));
}
/**
*****************************************************************************
* @ingroup LacSymQat
* LacSymQat_ConstantsGetCipherOffset
*
*
*****************************************************************************/
/**
 * Look up the SHRAM constants-table offset for a cipher configuration.
 *
 * @param[in]  instanceHandle  crypto service instance
 * @param[in]  algo            icp_qat_hw_cipher_algo_t value
 * @param[in]  mode            icp_qat_hw_cipher_mode_t value
 * @param[in]  direction       icp_qat_hw_cipher_dir_t value
 * @param[in]  convert         icp_qat_hw_cipher_convert_t value
 * @param[out] poffset         offset into the SHRAM table; 0 when the
 *                             combination is not in the table, or when
 *                             the instance is a compression service
 */
void
LacSymQat_ConstantsGetCipherOffset(CpaInstanceHandle instanceHandle,
				   uint8_t algo,
				   uint8_t mode,
				   uint8_t direction,
				   uint8_t convert,
				   uint8_t *poffset)
{
	sal_service_t *pService = (sal_service_t *)instanceHandle;
	lac_sym_qat_constants_t *pConstantsLookupTables;

	if (SAL_SERVICE_TYPE_COMPRESSION == pService->type) {
		/* DC chaining not supported yet.  Report "not in table"
		 * rather than leaving *poffset uninitialised for the
		 * caller to read. */
		*poffset = 0;
		return;
	}

	pConstantsLookupTables =
	    &(((sal_crypto_service_t *)pService)->constantsLookupTables);

	*poffset = pConstantsLookupTables
		       ->cipher_offset[algo][mode][direction][convert];
}
/**
*****************************************************************************
* @ingroup LacSymQat
* LacSymQat_ConstantsGetAuthOffset
*
*
*****************************************************************************/
/**
 * Look up the SHRAM constants-table offset for an auth configuration.
 *
 * @param[in]  instanceHandle  crypto service instance
 * @param[in]  algo            icp_qat_hw_auth_algo_t value
 * @param[in]  mode            icp_qat_hw_auth_mode_t value
 * @param[in]  nested          nested-hash flag (hdr flag value)
 * @param[out] poffset         offset into the SHRAM table; 0 when the
 *                             combination is not in the table, or when
 *                             the instance is a compression service
 */
void
LacSymQat_ConstantsGetAuthOffset(CpaInstanceHandle instanceHandle,
				 uint8_t algo,
				 uint8_t mode,
				 uint8_t nested,
				 uint8_t *poffset)
{
	sal_service_t *pService = (sal_service_t *)instanceHandle;
	lac_sym_qat_constants_t *pConstantsLookupTables;

	if (SAL_SERVICE_TYPE_COMPRESSION == pService->type) {
		/* DC chaining not supported yet.  Report "not in table"
		 * rather than leaving *poffset uninitialised for the
		 * caller to read. */
		*poffset = 0;
		return;
	}

	pConstantsLookupTables =
	    &(((sal_crypto_service_t *)pService)->constantsLookupTables);

	*poffset = pConstantsLookupTables->auth_offset[algo][mode][nested];
}

View File

@ -28,8 +28,10 @@
#include "lac_sym_qat.h"
#include "lac_list.h"
#include "lac_sal_types.h"
#include "lac_sal_types_crypto.h"
#include "lac_sym_qat_hash.h"
#include "lac_sym_qat_hash_defs_lookup.h"
#include "sal_hw_gen.h"
/**
* This structure contains pointers into the hash setup block of the
@ -148,10 +150,10 @@ LacSymQat_HashContentDescInit(icp_qat_la_bulk_req_ftr_t *pMsg,
icp_qat_hw_auth_mode_t qatHashMode,
CpaBoolean useSymConstantsTable,
CpaBoolean useOptimisedContentDesc,
CpaBoolean useStatefulSha3ContentDesc,
lac_sym_qat_hash_precompute_info_t *pPrecompute,
Cpa32U *pHashBlkSizeInBytes)
{
icp_qat_fw_auth_cd_ctrl_hdr_t *cd_ctrl =
(icp_qat_fw_auth_cd_ctrl_hdr_t *)&(pMsg->cd_ctrl);
lac_sym_qat_hash_defs_t *pHashDefs = NULL;
@ -159,7 +161,7 @@ LacSymQat_HashContentDescInit(icp_qat_la_bulk_req_ftr_t *pMsg,
Cpa32U hashSetupBlkSize = 0;
/* setup the offset in QuadWords into the hw blk */
cd_ctrl->hash_cfg_offset = hwBlockOffsetInQuadWords;
cd_ctrl->hash_cfg_offset = (Cpa8U)hwBlockOffsetInQuadWords;
ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, nextSlice);
ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
@ -170,11 +172,19 @@ LacSymQat_HashContentDescInit(icp_qat_la_bulk_req_ftr_t *pMsg,
/* Hmac in mode 2 TLS */
if (IS_HASH_MODE_2(qatHashMode)) {
/* Set bit for nested hashing.
* Make sure not to overwrite other flags in hash_flags byte.
*/
ICP_QAT_FW_HASH_FLAG_AUTH_HDR_NESTED_SET(
cd_ctrl->hash_flags, ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED);
if (isCyGen4x((sal_crypto_service_t *)instanceHandle)) {
/* CPM2.0 has a dedicated bit for HMAC mode2 */
ICP_QAT_FW_HASH_FLAG_MODE2_SET(cd_ctrl->hash_flags,
QAT_FW_LA_MODE2);
} else {
/* Set bit for nested hashing.
* Make sure not to overwrite other flags in hash_flags
* byte.
*/
ICP_QAT_FW_HASH_FLAG_AUTH_HDR_NESTED_SET(
cd_ctrl->hash_flags,
ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED);
}
}
/* Nested hash in mode 0 */
else if (CPA_CY_SYM_HASH_MODE_NESTED == pHashSetupData->hashMode) {
@ -190,16 +200,32 @@ LacSymQat_HashContentDescInit(icp_qat_la_bulk_req_ftr_t *pMsg,
cd_ctrl->hash_flags, ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED);
}
/* Set skip state load flags */
if (useStatefulSha3ContentDesc) {
/* Here both skip state load flags are set. FW reads them based
* on partial packet type. */
ICP_QAT_FW_HASH_FLAG_SKIP_INNER_STATE1_LOAD_SET(
cd_ctrl->hash_flags, QAT_FW_LA_SKIP_INNER_STATE1_LOAD);
ICP_QAT_FW_HASH_FLAG_SKIP_OUTER_STATE1_LOAD_SET(
cd_ctrl->hash_flags, QAT_FW_LA_SKIP_OUTER_STATE1_LOAD);
}
/* set the final digest size */
cd_ctrl->final_sz = pHashSetupData->digestResultLenInBytes;
cd_ctrl->final_sz = (Cpa8U)pHashSetupData->digestResultLenInBytes;
/* set the state1 size */
cd_ctrl->inner_state1_sz =
LAC_ALIGN_POW2_ROUNDUP(pHashDefs->qatInfo->state1Length,
LAC_QUAD_WORD_IN_BYTES);
if (useStatefulSha3ContentDesc) {
cd_ctrl->inner_state1_sz =
LAC_ALIGN_POW2_ROUNDUP(LAC_HASH_SHA3_STATEFUL_STATE_SIZE,
LAC_QUAD_WORD_IN_BYTES);
} else {
cd_ctrl->inner_state1_sz =
LAC_ALIGN_POW2_ROUNDUP(pHashDefs->qatInfo->state1Length,
LAC_QUAD_WORD_IN_BYTES);
}
/* set the inner result size to the digest length */
cd_ctrl->inner_res_sz = pHashDefs->algInfo->digestLength;
cd_ctrl->inner_res_sz = (Cpa8U)pHashDefs->algInfo->digestLength;
/* set the state2 size - only for mode 1 Auth algos and AES CBC MAC */
if (IS_HASH_MODE_1(qatHashMode) ||
@ -212,13 +238,22 @@ LacSymQat_HashContentDescInit(icp_qat_la_bulk_req_ftr_t *pMsg,
cd_ctrl->inner_state2_sz = 0;
}
cd_ctrl->inner_state2_offset = cd_ctrl->hash_cfg_offset +
LAC_BYTES_TO_QUADWORDS(sizeof(icp_qat_hw_auth_setup_t) +
cd_ctrl->inner_state1_sz);
if (useSymConstantsTable) {
cd_ctrl->inner_state2_offset =
LAC_BYTES_TO_QUADWORDS(cd_ctrl->inner_state1_sz);
/* size of inner part of hash setup block */
hashSetupBlkSize = sizeof(icp_qat_hw_auth_setup_t) +
cd_ctrl->inner_state1_sz + cd_ctrl->inner_state2_sz;
/* size of inner part of hash setup block */
hashSetupBlkSize =
cd_ctrl->inner_state1_sz + cd_ctrl->inner_state2_sz;
} else {
cd_ctrl->inner_state2_offset = cd_ctrl->hash_cfg_offset +
LAC_BYTES_TO_QUADWORDS(sizeof(icp_qat_hw_auth_setup_t) +
cd_ctrl->inner_state1_sz);
/* size of inner part of hash setup block */
hashSetupBlkSize = sizeof(icp_qat_hw_auth_setup_t) +
cd_ctrl->inner_state1_sz + cd_ctrl->inner_state2_sz;
}
/* For nested hashing - Fill in the outer fields */
if (CPA_CY_SYM_HASH_MODE_NESTED == pHashSetupData->hashMode ||
@ -238,18 +273,24 @@ LacSymQat_HashContentDescInit(icp_qat_la_bulk_req_ftr_t *pMsg,
cd_ctrl->outer_config_offset = cd_ctrl->inner_state2_offset +
LAC_BYTES_TO_QUADWORDS(cd_ctrl->inner_state2_sz);
cd_ctrl->outer_state1_sz =
LAC_ALIGN_POW2_ROUNDUP(pOuterHashDefs->algInfo->stateSize,
LAC_QUAD_WORD_IN_BYTES);
if (useStatefulSha3ContentDesc) {
cd_ctrl->outer_state1_sz = LAC_ALIGN_POW2_ROUNDUP(
LAC_HASH_SHA3_STATEFUL_STATE_SIZE,
LAC_QUAD_WORD_IN_BYTES);
} else {
cd_ctrl->outer_state1_sz = LAC_ALIGN_POW2_ROUNDUP(
pOuterHashDefs->algInfo->stateSize,
LAC_QUAD_WORD_IN_BYTES);
}
/* outer result size */
cd_ctrl->outer_res_sz = pOuterHashDefs->algInfo->digestLength;
cd_ctrl->outer_res_sz =
(Cpa8U)pOuterHashDefs->algInfo->digestLength;
/* outer_prefix_offset will be the size of the inner prefix data
* plus the hash state storage size. */
/* The prefix buffer is part of the ReqParams, so this param
* will be
* setup where ReqParams are set up */
* will be setup where ReqParams are set up */
/* add on size of outer part of hash block */
hashSetupBlkSize +=
@ -325,8 +366,9 @@ LacSymQat_HashSetupReqParamsMetaData(
if (IS_HASH_MODE_2(qatHashMode)) {
/* Inner and outer prefixes are the block length */
pHashReqParams->u2.inner_prefix_sz =
pHashDefs->algInfo->blockLength;
cd_ctrl->outer_prefix_sz = pHashDefs->algInfo->blockLength;
(Cpa8U)pHashDefs->algInfo->blockLength;
cd_ctrl->outer_prefix_sz =
(Cpa8U)pHashDefs->algInfo->blockLength;
cd_ctrl->outer_prefix_offset = LAC_BYTES_TO_QUADWORDS(
LAC_ALIGN_POW2_ROUNDUP((pHashReqParams->u2.inner_prefix_sz),
LAC_QUAD_WORD_IN_BYTES));
@ -336,9 +378,11 @@ LacSymQat_HashSetupReqParamsMetaData(
/* set inner and outer prefixes */
pHashReqParams->u2.inner_prefix_sz =
pHashSetupData->nestedModeSetupData.innerPrefixLenInBytes;
(Cpa8U)pHashSetupData->nestedModeSetupData
.innerPrefixLenInBytes;
cd_ctrl->outer_prefix_sz =
pHashSetupData->nestedModeSetupData.outerPrefixLenInBytes;
(Cpa8U)pHashSetupData->nestedModeSetupData
.outerPrefixLenInBytes;
cd_ctrl->outer_prefix_offset = LAC_BYTES_TO_QUADWORDS(
LAC_ALIGN_POW2_ROUNDUP((pHashReqParams->u2.inner_prefix_sz),
LAC_QUAD_WORD_IN_BYTES));
@ -363,8 +407,9 @@ LacSymQat_HashSetupReqParamsMetaData(
* just need 2 bytes to store encoded length of
* 'a' */
aadDataSize += sizeof(Cpa16U);
aadDataSize += pHashSetupData->authModeSetupData
.aadLenInBytes;
aadDataSize +=
(Cpa16U)pHashSetupData->authModeSetupData
.aadLenInBytes;
}
/* round the aad size to the multiple of CCM block
@ -375,7 +420,8 @@ LacSymQat_HashSetupReqParamsMetaData(
} else if (CPA_CY_SYM_HASH_AES_GCM ==
pHashSetupData->hashAlgorithm) {
aadDataSize =
pHashSetupData->authModeSetupData.aadLenInBytes;
(Cpa16U)
pHashSetupData->authModeSetupData.aadLenInBytes;
/* round the aad size to the multiple of GCM hash block
* size. */
@ -406,7 +452,7 @@ LacSymQat_HashSetupReqParamsMetaData(
/* auth result size in bytes to be read in for a verify
* operation */
pHashReqParams->auth_res_sz =
pHashSetupData->digestResultLenInBytes;
(Cpa8U)pHashSetupData->digestResultLenInBytes;
} else {
pHashReqParams->auth_res_sz = 0;
}
@ -453,7 +499,7 @@ LacSymQat_HashSetupBlockInit(const CpaCySymHashSetupData *pHashSetupData,
{
Cpa32U innerConfig = 0;
lac_hash_blk_ptrs_t hashBlkPtrs = { 0 };
Cpa32U aed_hash_cmp_length = 0;
Cpa32U aedHashCmpLength = 0;
LacSymQat_HashHwBlockPtrsInit(pHashControlBlock,
pHwBlockBase,
@ -610,7 +656,7 @@ LacSymQat_HashSetupBlockInit(const CpaCySymHashSetupData *pHashSetupData,
ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
ICP_QAT_HW_CIPHER_KEY_CONVERT,
ICP_QAT_HW_CIPHER_ENCRYPT,
aed_hash_cmp_length);
aedHashCmpLength);
pCipherConfig->reserved = 0;
@ -633,7 +679,7 @@ LacSymQat_HashSetupBlockInit(const CpaCySymHashSetupData *pHashSetupData,
ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3,
ICP_QAT_HW_CIPHER_KEY_CONVERT,
ICP_QAT_HW_CIPHER_ENCRYPT,
aed_hash_cmp_length);
aedHashCmpLength);
pCipherConfig->reserved = 0;
@ -820,7 +866,7 @@ LacSymQat_HashRequestParamsPopulate(
CpaBoolean digestVerify,
Cpa8U *pAuthResult,
CpaCySymHashAlgorithm alg,
void *hkdf_secret)
void *pHKDFSecret)
{
Cpa64U authResultPhys = 0;
icp_qat_fw_la_auth_req_params_t *pHashReqParams;
@ -833,11 +879,11 @@ LacSymQat_HashRequestParamsPopulate(
pHashReqParams->auth_len = authLenInBytes;
/* Set the physical location of secret for HKDF */
if (NULL != hkdf_secret) {
if (NULL != pHKDFSecret) {
LAC_MEM_SHARED_WRITE_VIRT_TO_PHYS_PTR_EXTERNAL(
(*pService), pHashReqParams->u1.aad_adr, hkdf_secret);
(*pService), pHashReqParams->u1.aad_adr, pHKDFSecret);
if (pHashReqParams->u1.aad_adr == 0) {
if (0 == pHashReqParams->u1.aad_adr) {
LAC_LOG_ERROR(
"Unable to get the physical address of the"
" HKDF secret\n");
@ -868,7 +914,7 @@ LacSymQat_HashRequestParamsPopulate(
if (CPA_TRUE == digestVerify) {
/* auth result size in bytes to be read in for a verify
* operation */
pHashReqParams->auth_res_sz = hashResultSize;
pHashReqParams->auth_res_sz = (Cpa8U)hashResultSize;
} else {
pHashReqParams->auth_res_sz = 0;
}

View File

@ -74,62 +74,70 @@ static Cpa8U sha224InitialState[LAC_HASH_SHA224_STATE_SIZE] = {
};
/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS stds 180-2 */
static Cpa8U sha256InitialState[LAC_HASH_SHA256_STATE_SIZE] =
{ 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19 };
static Cpa8U sha256InitialState[LAC_HASH_SHA256_STATE_SIZE] = {
0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3,
0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05,
0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19
};
/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static Cpa8U sha384InitialState[LAC_HASH_SHA384_STATE_SIZE] =
{ 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4 };
static Cpa8U sha384InitialState[LAC_HASH_SHA384_STATE_SIZE] = {
0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70,
0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67,
0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87,
0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f,
0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
};
/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS stds 180-2 */
static Cpa8U sha512InitialState[LAC_HASH_SHA512_STATE_SIZE] =
{ 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79 };
static Cpa8U sha512InitialState[LAC_HASH_SHA512_STATE_SIZE] = {
0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae,
0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51,
0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c,
0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd,
0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
};
/* SHA3 224 - 28 bytes */
static Cpa8U sha3_224InitialState[LAC_HASH_SHA3_224_STATE_SIZE] =
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static Cpa8U sha3_224InitialState[LAC_HASH_SHA3_224_STATE_SIZE] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/* SHA3 256 - 32 bytes */
static Cpa8U sha3_256InitialState[LAC_HASH_SHA3_256_STATE_SIZE] =
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static Cpa8U sha3_256InitialState[LAC_HASH_SHA3_256_STATE_SIZE] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/* SHA3 384 - 48 bytes */
static Cpa8U sha3_384InitialState[LAC_HASH_SHA3_384_STATE_SIZE] =
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static Cpa8U sha3_384InitialState[LAC_HASH_SHA3_384_STATE_SIZE] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/* SHA3 512 - 64 bytes */
static Cpa8U sha3_512InitialState[LAC_HASH_SHA3_512_STATE_SIZE] =
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static Cpa8U sha3_512InitialState[LAC_HASH_SHA3_512_STATE_SIZE] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/* SM3 - 32 bytes */
static Cpa8U sm3InitialState[LAC_HASH_SM3_STATE_SIZE] =
{ 0x73, 0x80, 0x16, 0x6f, 0x49, 0x14, 0xb2, 0xb9, 0x17, 0x24, 0x42,
0xd7, 0xda, 0x8a, 0x06, 0x00, 0xa9, 0x6f, 0x30, 0xbc, 0x16, 0x31,
0x38, 0xaa, 0xe3, 0x8d, 0xee, 0x4d, 0xb0, 0xfb, 0x0e, 0x4e };
static Cpa8U sm3InitialState[LAC_HASH_SM3_STATE_SIZE] = {
0x73, 0x80, 0x16, 0x6f, 0x49, 0x14, 0xb2, 0xb9, 0x17, 0x24, 0x42,
0xd7, 0xda, 0x8a, 0x06, 0x00, 0xa9, 0x6f, 0x30, 0xbc, 0x16, 0x31,
0x38, 0xaa, 0xe3, 0x8d, 0xee, 0x4d, 0xb0, 0xfb, 0x0e, 0x4e
};
/* Constants used in generating K1, K2, K3 from a Key for AES_XCBC_MAC
* State defined in RFC 3566 */
@ -140,12 +148,10 @@ static Cpa8U aesXcbcKeySeed[LAC_SYM_QAT_XCBC_STATE_SIZE] = {
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
};
static Cpa8U aesCmacKeySeed[LAC_HASH_CMAC_BLOCK_SIZE] = { 0x00, 0x00, 0x00,
0x00, 0x00, 0x00,
0x00, 0x00, 0x00,
0x00, 0x00, 0x00,
0x00, 0x00, 0x00,
0x00 };
static Cpa8U aesCmacKeySeed[LAC_HASH_CMAC_BLOCK_SIZE] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
/* Hash Algorithm specific structure */
@ -179,57 +185,57 @@ static lac_sym_qat_hash_alg_info_t sha512Info = { LAC_HASH_SHA512_DIGEST_SIZE,
sha512InitialState,
LAC_HASH_SHA512_STATE_SIZE };
static lac_sym_qat_hash_alg_info_t sha3_224Info =
{ LAC_HASH_SHA3_224_DIGEST_SIZE,
LAC_HASH_SHA3_224_BLOCK_SIZE,
sha3_224InitialState,
LAC_HASH_SHA3_224_STATE_SIZE };
static lac_sym_qat_hash_alg_info_t sha3_224Info = {
LAC_HASH_SHA3_224_DIGEST_SIZE,
LAC_HASH_SHA3_224_BLOCK_SIZE,
sha3_224InitialState,
LAC_HASH_SHA3_224_STATE_SIZE
};
static lac_sym_qat_hash_alg_info_t sha3_256Info =
{ LAC_HASH_SHA3_256_DIGEST_SIZE,
LAC_HASH_SHA3_256_BLOCK_SIZE,
sha3_256InitialState,
LAC_HASH_SHA3_256_STATE_SIZE };
static lac_sym_qat_hash_alg_info_t sha3_256Info = {
LAC_HASH_SHA3_256_DIGEST_SIZE,
LAC_HASH_SHA3_256_BLOCK_SIZE,
sha3_256InitialState,
LAC_HASH_SHA3_256_STATE_SIZE
};
static lac_sym_qat_hash_alg_info_t sha3_384Info =
{ LAC_HASH_SHA3_384_DIGEST_SIZE,
LAC_HASH_SHA3_384_BLOCK_SIZE,
sha3_384InitialState,
LAC_HASH_SHA3_384_STATE_SIZE };
static lac_sym_qat_hash_alg_info_t sha3_384Info = {
LAC_HASH_SHA3_384_DIGEST_SIZE,
LAC_HASH_SHA3_384_BLOCK_SIZE,
sha3_384InitialState,
LAC_HASH_SHA3_384_STATE_SIZE
};
static lac_sym_qat_hash_alg_info_t sha3_512Info =
{ LAC_HASH_SHA3_512_DIGEST_SIZE,
LAC_HASH_SHA3_512_BLOCK_SIZE,
sha3_512InitialState,
LAC_HASH_SHA3_512_STATE_SIZE };
static lac_sym_qat_hash_alg_info_t polyInfo = { LAC_HASH_POLY_DIGEST_SIZE,
LAC_HASH_POLY_BLOCK_SIZE,
NULL, /* intial state */
LAC_HASH_POLY_STATE_SIZE };
static lac_sym_qat_hash_alg_info_t shake_128Info =
{ LAC_HASH_SHAKE_128_DIGEST_SIZE, LAC_HASH_SHAKE_128_BLOCK_SIZE, NULL, 0 };
static lac_sym_qat_hash_alg_info_t shake_256Info =
{ LAC_HASH_SHAKE_256_DIGEST_SIZE, LAC_HASH_SHAKE_256_BLOCK_SIZE, NULL, 0 };
static lac_sym_qat_hash_alg_info_t sha3_512Info = {
LAC_HASH_SHA3_512_DIGEST_SIZE,
LAC_HASH_SHA3_512_BLOCK_SIZE,
sha3_512InitialState,
LAC_HASH_SHA3_512_STATE_SIZE
};
static lac_sym_qat_hash_alg_info_t sm3Info = { LAC_HASH_SM3_DIGEST_SIZE,
LAC_HASH_SM3_BLOCK_SIZE,
sm3InitialState,
LAC_HASH_SM3_STATE_SIZE };
static lac_sym_qat_hash_alg_info_t xcbcMacInfo =
{ LAC_HASH_XCBC_MAC_128_DIGEST_SIZE,
LAC_HASH_XCBC_MAC_BLOCK_SIZE,
aesXcbcKeySeed,
LAC_SYM_QAT_XCBC_STATE_SIZE };
static lac_sym_qat_hash_alg_info_t polyInfo = { LAC_HASH_POLY_DIGEST_SIZE,
LAC_HASH_POLY_BLOCK_SIZE,
NULL, /* intial state */
LAC_HASH_POLY_STATE_SIZE };
static lac_sym_qat_hash_alg_info_t aesCmacInfo =
{ LAC_HASH_CMAC_128_DIGEST_SIZE,
LAC_HASH_CMAC_BLOCK_SIZE,
aesCmacKeySeed,
LAC_SYM_QAT_CMAC_STATE_SIZE };
static lac_sym_qat_hash_alg_info_t xcbcMacInfo = {
LAC_HASH_XCBC_MAC_128_DIGEST_SIZE,
LAC_HASH_XCBC_MAC_BLOCK_SIZE,
aesXcbcKeySeed,
LAC_SYM_QAT_XCBC_STATE_SIZE
};
static lac_sym_qat_hash_alg_info_t aesCmacInfo = {
LAC_HASH_CMAC_128_DIGEST_SIZE,
LAC_HASH_CMAC_BLOCK_SIZE,
aesCmacKeySeed,
LAC_SYM_QAT_CMAC_STATE_SIZE
};
static lac_sym_qat_hash_alg_info_t aesCcmInfo = {
LAC_HASH_AES_CCM_DIGEST_SIZE,
@ -259,11 +265,12 @@ static lac_sym_qat_hash_alg_info_t snow3gUia2Info = {
0 /* state size */
};
static lac_sym_qat_hash_alg_info_t aesCbcMacInfo =
{ LAC_HASH_AES_CBC_MAC_DIGEST_SIZE,
LAC_HASH_AES_CBC_MAC_BLOCK_SIZE,
NULL,
0 };
static lac_sym_qat_hash_alg_info_t aesCbcMacInfo = {
LAC_HASH_AES_CBC_MAC_DIGEST_SIZE,
LAC_HASH_AES_CBC_MAC_BLOCK_SIZE,
NULL,
0
};
static lac_sym_qat_hash_alg_info_t zucEia3Info = {
LAC_HASH_ZUC_EIA3_DIGEST_SIZE,
@ -283,145 +290,154 @@ static lac_sym_qat_hash_qat_info_t sha1Config = { ICP_QAT_HW_AUTH_ALGO_SHA1,
ICP_QAT_HW_SHA1_STATE1_SZ,
ICP_QAT_HW_SHA1_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t sha224Config =
{ ICP_QAT_HW_AUTH_ALGO_SHA224,
LAC_HASH_SHA224_BLOCK_SIZE,
ICP_QAT_HW_SHA224_STATE1_SZ,
ICP_QAT_HW_SHA224_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t sha224Config = {
ICP_QAT_HW_AUTH_ALGO_SHA224,
LAC_HASH_SHA224_BLOCK_SIZE,
ICP_QAT_HW_SHA224_STATE1_SZ,
ICP_QAT_HW_SHA224_STATE2_SZ
};
static lac_sym_qat_hash_qat_info_t sha256Config =
{ ICP_QAT_HW_AUTH_ALGO_SHA256,
LAC_HASH_SHA256_BLOCK_SIZE,
ICP_QAT_HW_SHA256_STATE1_SZ,
ICP_QAT_HW_SHA256_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t sha256Config = {
ICP_QAT_HW_AUTH_ALGO_SHA256,
LAC_HASH_SHA256_BLOCK_SIZE,
ICP_QAT_HW_SHA256_STATE1_SZ,
ICP_QAT_HW_SHA256_STATE2_SZ
};
static lac_sym_qat_hash_qat_info_t sha384Config =
{ ICP_QAT_HW_AUTH_ALGO_SHA384,
LAC_HASH_SHA384_BLOCK_SIZE,
ICP_QAT_HW_SHA384_STATE1_SZ,
ICP_QAT_HW_SHA384_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t sha384Config = {
ICP_QAT_HW_AUTH_ALGO_SHA384,
LAC_HASH_SHA384_BLOCK_SIZE,
ICP_QAT_HW_SHA384_STATE1_SZ,
ICP_QAT_HW_SHA384_STATE2_SZ
};
static lac_sym_qat_hash_qat_info_t sha512Config =
{ ICP_QAT_HW_AUTH_ALGO_SHA512,
LAC_HASH_SHA512_BLOCK_SIZE,
ICP_QAT_HW_SHA512_STATE1_SZ,
ICP_QAT_HW_SHA512_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t sha512Config = {
ICP_QAT_HW_AUTH_ALGO_SHA512,
LAC_HASH_SHA512_BLOCK_SIZE,
ICP_QAT_HW_SHA512_STATE1_SZ,
ICP_QAT_HW_SHA512_STATE2_SZ
};
static lac_sym_qat_hash_qat_info_t sha3_224Config =
{ ICP_QAT_HW_AUTH_ALGO_SHA3_224,
LAC_HASH_SHA3_224_BLOCK_SIZE,
ICP_QAT_HW_SHA3_224_STATE1_SZ,
ICP_QAT_HW_SHA3_224_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t sha3_224Config = {
ICP_QAT_HW_AUTH_ALGO_SHA3_224,
LAC_HASH_SHA3_224_BLOCK_SIZE,
ICP_QAT_HW_SHA3_224_STATE1_SZ,
ICP_QAT_HW_SHA3_224_STATE2_SZ
};
static lac_sym_qat_hash_qat_info_t sha3_256Config =
{ ICP_QAT_HW_AUTH_ALGO_SHA3_256,
LAC_HASH_SHA3_256_BLOCK_SIZE,
ICP_QAT_HW_SHA3_256_STATE1_SZ,
ICP_QAT_HW_SHA3_256_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t sha3_256Config = {
ICP_QAT_HW_AUTH_ALGO_SHA3_256,
LAC_HASH_SHA3_256_BLOCK_SIZE,
ICP_QAT_HW_SHA3_256_STATE1_SZ,
ICP_QAT_HW_SHA3_256_STATE2_SZ
};
static lac_sym_qat_hash_qat_info_t sha3_384Config =
{ ICP_QAT_HW_AUTH_ALGO_SHA3_384,
LAC_HASH_SHA3_384_BLOCK_SIZE,
ICP_QAT_HW_SHA3_384_STATE1_SZ,
ICP_QAT_HW_SHA3_384_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t sha3_384Config = {
ICP_QAT_HW_AUTH_ALGO_SHA3_384,
LAC_HASH_SHA3_384_BLOCK_SIZE,
ICP_QAT_HW_SHA3_384_STATE1_SZ,
ICP_QAT_HW_SHA3_384_STATE2_SZ
};
static lac_sym_qat_hash_qat_info_t sha3_512Config =
{ ICP_QAT_HW_AUTH_ALGO_SHA3_512,
LAC_HASH_SHA3_512_BLOCK_SIZE,
ICP_QAT_HW_SHA3_512_STATE1_SZ,
ICP_QAT_HW_SHA3_512_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t shake_128Config =
{ ICP_QAT_HW_AUTH_ALGO_SHAKE_128, LAC_HASH_SHAKE_128_BLOCK_SIZE, 0, 0 };
static lac_sym_qat_hash_qat_info_t shake_256Config =
{ ICP_QAT_HW_AUTH_ALGO_SHAKE_256, LAC_HASH_SHAKE_256_BLOCK_SIZE, 0, 0 };
static lac_sym_qat_hash_qat_info_t polyConfig = { ICP_QAT_HW_AUTH_ALGO_POLY,
LAC_HASH_POLY_BLOCK_SIZE,
0,
0 };
static lac_sym_qat_hash_qat_info_t sha3_512Config = {
ICP_QAT_HW_AUTH_ALGO_SHA3_512,
LAC_HASH_SHA3_512_BLOCK_SIZE,
ICP_QAT_HW_SHA3_512_STATE1_SZ,
ICP_QAT_HW_SHA3_512_STATE2_SZ
};
static lac_sym_qat_hash_qat_info_t sm3Config = { ICP_QAT_HW_AUTH_ALGO_SM3,
LAC_HASH_SM3_BLOCK_SIZE,
ICP_QAT_HW_SM3_STATE1_SZ,
ICP_QAT_HW_SM3_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t xcbcMacConfig =
{ ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
0,
ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
LAC_SYM_QAT_XCBC_STATE_SIZE };
static lac_sym_qat_hash_qat_info_t polyConfig = { ICP_QAT_HW_AUTH_ALGO_POLY,
LAC_HASH_POLY_BLOCK_SIZE,
0,
0 };
static lac_sym_qat_hash_qat_info_t aesCmacConfig =
{ ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
0,
ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
LAC_SYM_QAT_CMAC_STATE_SIZE };
static lac_sym_qat_hash_qat_info_t xcbcMacConfig = {
ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
0,
ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
LAC_SYM_QAT_XCBC_STATE_SIZE
};
static lac_sym_qat_hash_qat_info_t aesCcmConfig =
{ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC,
0,
ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
ICP_QAT_HW_AES_CBC_MAC_KEY_SZ + ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ };
static lac_sym_qat_hash_qat_info_t aesCmacConfig = {
ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
0,
ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
LAC_SYM_QAT_CMAC_STATE_SIZE
};
static lac_sym_qat_hash_qat_info_t aesGcmConfig =
{ ICP_QAT_HW_AUTH_ALGO_GALOIS_128,
0,
ICP_QAT_HW_GALOIS_128_STATE1_SZ,
ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
ICP_QAT_HW_GALOIS_E_CTR0_SZ };
static lac_sym_qat_hash_qat_info_t aesCcmConfig = {
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC,
0,
ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
ICP_QAT_HW_AES_CBC_MAC_KEY_SZ + ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ
};
static lac_sym_qat_hash_qat_info_t kasumiF9Config =
{ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9,
0,
ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
ICP_QAT_HW_KASUMI_F9_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t aesGcmConfig = {
ICP_QAT_HW_AUTH_ALGO_GALOIS_128,
0,
ICP_QAT_HW_GALOIS_128_STATE1_SZ,
ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
ICP_QAT_HW_GALOIS_E_CTR0_SZ
};
static lac_sym_qat_hash_qat_info_t snow3gUia2Config =
{ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2,
0,
ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t kasumiF9Config = {
ICP_QAT_HW_AUTH_ALGO_KASUMI_F9,
0,
ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
ICP_QAT_HW_KASUMI_F9_STATE2_SZ
};
static lac_sym_qat_hash_qat_info_t aesCbcMacConfig =
{ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC,
0,
ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ + ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ };
static lac_sym_qat_hash_qat_info_t snow3gUia2Config = {
ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2,
0,
ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ
};
static lac_sym_qat_hash_qat_info_t zucEia3Config =
{ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3,
0,
ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ };
static lac_sym_qat_hash_qat_info_t aesCbcMacConfig = {
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC,
0,
ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ + ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ
};
static lac_sym_qat_hash_qat_info_t zucEia3Config = {
ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3,
0,
ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ
};
/* Array of mappings between algorithm and info structure
* This array is used to populate the lookup table */
static lac_sym_qat_hash_def_map_t lacHashDefsMapping[] =
{ { CPA_CY_SYM_HASH_MD5, { &md5Info, &md5Config } },
{ CPA_CY_SYM_HASH_SHA1, { &sha1Info, &sha1Config } },
{ CPA_CY_SYM_HASH_SHA224, { &sha224Info, &sha224Config } },
{ CPA_CY_SYM_HASH_SHA256, { &sha256Info, &sha256Config } },
{ CPA_CY_SYM_HASH_SHA384, { &sha384Info, &sha384Config } },
{ CPA_CY_SYM_HASH_SHA512, { &sha512Info, &sha512Config } },
{ CPA_CY_SYM_HASH_SHA3_224, { &sha3_224Info, &sha3_224Config } },
{ CPA_CY_SYM_HASH_SHA3_256, { &sha3_256Info, &sha3_256Config } },
{ CPA_CY_SYM_HASH_SHA3_384, { &sha3_384Info, &sha3_384Config } },
{ CPA_CY_SYM_HASH_SHA3_512, { &sha3_512Info, &sha3_512Config } },
{ CPA_CY_SYM_HASH_SHAKE_128, { &shake_128Info, &shake_128Config } },
{ CPA_CY_SYM_HASH_SHAKE_256, { &shake_256Info, &shake_256Config } },
{ CPA_CY_SYM_HASH_POLY, { &polyInfo, &polyConfig } },
{ CPA_CY_SYM_HASH_SM3, { &sm3Info, &sm3Config } },
{ CPA_CY_SYM_HASH_AES_XCBC, { &xcbcMacInfo, &xcbcMacConfig } },
{ CPA_CY_SYM_HASH_AES_CMAC, { &aesCmacInfo, &aesCmacConfig } },
{ CPA_CY_SYM_HASH_AES_CCM, { &aesCcmInfo, &aesCcmConfig } },
{ CPA_CY_SYM_HASH_AES_GCM, { &aesGcmInfo, &aesGcmConfig } },
{ CPA_CY_SYM_HASH_KASUMI_F9, { &kasumiF9Info, &kasumiF9Config } },
{ CPA_CY_SYM_HASH_SNOW3G_UIA2, { &snow3gUia2Info, &snow3gUia2Config } },
{ CPA_CY_SYM_HASH_AES_GMAC, { &aesGcmInfo, &aesGcmConfig } },
{ CPA_CY_SYM_HASH_ZUC_EIA3, { &zucEia3Info, &zucEia3Config } },
{ CPA_CY_SYM_HASH_AES_CBC_MAC, { &aesCbcMacInfo, &aesCbcMacConfig } } };
static lac_sym_qat_hash_def_map_t lacHashDefsMapping[] = {
{ CPA_CY_SYM_HASH_MD5, { &md5Info, &md5Config } },
{ CPA_CY_SYM_HASH_SHA1, { &sha1Info, &sha1Config } },
{ CPA_CY_SYM_HASH_SHA224, { &sha224Info, &sha224Config } },
{ CPA_CY_SYM_HASH_SHA256, { &sha256Info, &sha256Config } },
{ CPA_CY_SYM_HASH_SHA384, { &sha384Info, &sha384Config } },
{ CPA_CY_SYM_HASH_SHA512, { &sha512Info, &sha512Config } },
{ CPA_CY_SYM_HASH_SHA3_224, { &sha3_224Info, &sha3_224Config } },
{ CPA_CY_SYM_HASH_SHA3_256, { &sha3_256Info, &sha3_256Config } },
{ CPA_CY_SYM_HASH_SHA3_384, { &sha3_384Info, &sha3_384Config } },
{ CPA_CY_SYM_HASH_SHA3_512, { &sha3_512Info, &sha3_512Config } },
{ CPA_CY_SYM_HASH_SM3, { &sm3Info, &sm3Config } },
{ CPA_CY_SYM_HASH_POLY, { &polyInfo, &polyConfig } },
{ CPA_CY_SYM_HASH_AES_XCBC, { &xcbcMacInfo, &xcbcMacConfig } },
{ CPA_CY_SYM_HASH_AES_CMAC, { &aesCmacInfo, &aesCmacConfig } },
{ CPA_CY_SYM_HASH_AES_CCM, { &aesCcmInfo, &aesCcmConfig } },
{ CPA_CY_SYM_HASH_AES_GCM, { &aesGcmInfo, &aesGcmConfig } },
{ CPA_CY_SYM_HASH_KASUMI_F9, { &kasumiF9Info, &kasumiF9Config } },
{ CPA_CY_SYM_HASH_SNOW3G_UIA2, { &snow3gUia2Info, &snow3gUia2Config } },
{ CPA_CY_SYM_HASH_AES_GMAC, { &aesGcmInfo, &aesGcmConfig } },
{ CPA_CY_SYM_HASH_ZUC_EIA3, { &zucEia3Info, &zucEia3Config } },
{ CPA_CY_SYM_HASH_AES_CBC_MAC, { &aesCbcMacInfo, &aesCbcMacConfig } }
};
/*
* LacSymQat_HashLookupInit
@ -434,33 +450,35 @@ LacSymQat_HashLookupInit(CpaInstanceHandle instanceHandle)
Cpa32U arraySize = 0;
CpaStatus status = CPA_STATUS_SUCCESS;
CpaCySymHashAlgorithm hashAlg = CPA_CY_SYM_HASH_NONE;
sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle;
sal_service_t *pService = (sal_service_t *)instanceHandle;
lac_sym_qat_hash_defs_t **pLacHashLookupDefs;
arraySize =
(CPA_CY_HASH_ALG_END + 1) * sizeof(lac_sym_qat_hash_defs_t *);
/* Size round up for performance */
arraySize = LAC_ALIGN_POW2_ROUNDUP(arraySize, LAC_64BYTE_ALIGNMENT);
pService->pLacHashLookupDefs = LAC_OS_MALLOC(arraySize);
if (NULL != pService->pLacHashLookupDefs) {
LAC_OS_BZERO(pService->pLacHashLookupDefs, arraySize);
numEntries = sizeof(lacHashDefsMapping) /
sizeof(lac_sym_qat_hash_def_map_t);
/* initialise the hash lookup definitions table so that the
* algorithm
* can be used to index into the table */
for (entry = 0; entry < numEntries; entry++) {
hashAlg = lacHashDefsMapping[entry].hashAlgorithm;
pService->pLacHashLookupDefs[hashAlg] =
&(lacHashDefsMapping[entry].hashDefs);
}
} else {
status = CPA_STATUS_RESOURCE;
pLacHashLookupDefs = LAC_OS_MALLOC(arraySize);
if (NULL == pLacHashLookupDefs) {
return CPA_STATUS_RESOURCE;
}
LAC_OS_BZERO(pLacHashLookupDefs, arraySize);
numEntries =
sizeof(lacHashDefsMapping) / sizeof(lac_sym_qat_hash_def_map_t);
/* initialise the hash lookup definitions table so that the algorithm
* can be used to index into the table */
for (entry = 0; entry < numEntries; entry++) {
hashAlg = lacHashDefsMapping[entry].hashAlgorithm;
pLacHashLookupDefs[hashAlg] =
&(lacHashDefsMapping[entry].hashDefs);
}
((sal_crypto_service_t *)pService)->pLacHashLookupDefs =
pLacHashLookupDefs;
return status;
}
@ -472,9 +490,11 @@ LacSymQat_HashAlgLookupGet(CpaInstanceHandle instanceHandle,
CpaCySymHashAlgorithm hashAlgorithm,
lac_sym_qat_hash_alg_info_t **ppHashAlgInfo)
{
sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle;
sal_service_t *pService = (sal_service_t *)instanceHandle;
lac_sym_qat_hash_defs_t **pLacHashLookupDefs =
((sal_crypto_service_t *)pService)->pLacHashLookupDefs;
*ppHashAlgInfo = pService->pLacHashLookupDefs[hashAlgorithm]->algInfo;
*ppHashAlgInfo = pLacHashLookupDefs[hashAlgorithm]->algInfo;
}
/*
@ -485,7 +505,9 @@ LacSymQat_HashDefsLookupGet(CpaInstanceHandle instanceHandle,
CpaCySymHashAlgorithm hashAlgorithm,
lac_sym_qat_hash_defs_t **ppHashDefsInfo)
{
sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle;
sal_service_t *pService = (sal_service_t *)instanceHandle;
lac_sym_qat_hash_defs_t **pLacHashLookupDefs =
((sal_crypto_service_t *)pService)->pLacHashLookupDefs;
*ppHashDefsInfo = pService->pLacHashLookupDefs[hashAlgorithm];
*ppHashDefsInfo = pLacHashLookupDefs[hashAlgorithm];
}

View File

@ -47,6 +47,7 @@
#include "sal_service_state.h"
#include "lac_buffer_desc.h"
#include "icp_qat_fw_comp.h"
#include "icp_qat_hw_20_comp_defs.h"
#include "icp_sal_versions.h"
/* C string null terminator size */
@ -124,6 +125,10 @@ static CpaStatus
SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device,
sal_compression_service_t *pCompService)
{
int level = 0;
pCompService->comp_device_data.uniqueCompressionLevels[0] = CPA_FALSE;
switch (device->deviceType) {
case DEVICE_DH895XCC:
case DEVICE_DH895XCCVF:
@ -191,13 +196,36 @@ SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device,
ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF;
pCompService->comp_device_data.inflateContextSize =
DC_INFLATE_EH_CONTEXT_SIZE;
pCompService->comp_device_data.highestHwCompressionDepth =
ICP_QAT_HW_COMPRESSION_DEPTH_16;
pCompService->comp_device_data.windowSizeMask =
(1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE);
(1 << DC_4K_WINDOW_SIZE | 1 << DC_8K_WINDOW_SIZE |
1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE);
pCompService->comp_device_data.minOutputBuffSize =
DC_DEST_BUFFER_STA_MIN_SIZE;
pCompService->comp_device_data.minOutputBuffSizeDynamic =
pCompService->comp_device_data.minOutputBuffSize;
pCompService->comp_device_data.enableDmm =
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED;
pCompService->comp_device_data.cnvnrSupported = CPA_TRUE;
for (level = CPA_DC_L1; level <= CPA_DC_L9; level++) {
switch (level) {
case CPA_DC_L1:
case CPA_DC_L2:
case CPA_DC_L3:
case CPA_DC_L4:
pCompService->comp_device_data
.uniqueCompressionLevels[level] = CPA_TRUE;
break;
default:
pCompService->comp_device_data
.uniqueCompressionLevels[level] = CPA_FALSE;
break;
}
}
pCompService->comp_device_data.numCompressionLevels =
DC_NUM_COMPRESSION_LEVELS;
break;
case DEVICE_C4XXX:
case DEVICE_C4XXXVF:
@ -227,6 +255,45 @@ SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device,
(1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE);
pCompService->comp_device_data.cnvnrSupported = CPA_TRUE;
break;
case DEVICE_GEN4:
pCompService->generic_service_info.integrityCrcCheck = CPA_TRUE;
pCompService->numInterBuffs = 0;
pCompService->comp_device_data.minOutputBuffSize =
DC_DEST_BUFFER_STA_MIN_SIZE_GEN4;
pCompService->comp_device_data.minOutputBuffSizeDynamic =
DC_DEST_BUFFER_DYN_MIN_SIZE_GEN4;
pCompService->comp_device_data.oddByteDecompNobFinal = CPA_TRUE;
pCompService->comp_device_data.oddByteDecompInterim = CPA_FALSE;
pCompService->comp_device_data.translatorOverflow = CPA_TRUE;
pCompService->comp_device_data.useDevRam =
ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF;
pCompService->comp_device_data.enableDmm =
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED;
pCompService->comp_device_data.inflateContextSize =
DC_INFLATE_CONTEXT_SIZE;
pCompService->comp_device_data.highestHwCompressionDepth =
ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9;
pCompService->comp_device_data.windowSizeMask =
(1 << DC_4K_WINDOW_SIZE | 1 << DC_8K_WINDOW_SIZE |
1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE);
for (level = CPA_DC_L1; level <= CPA_DC_L9; level++) {
switch (level) {
case CPA_DC_L1:
case CPA_DC_L6:
case CPA_DC_L9:
pCompService->comp_device_data
.uniqueCompressionLevels[level] = CPA_TRUE;
break;
default:
pCompService->comp_device_data
.uniqueCompressionLevels[level] = CPA_FALSE;
break;
}
}
pCompService->comp_device_data.numCompressionLevels =
DC_NUM_COMPRESSION_LEVELS;
break;
default:
QAT_UTILS_LOG("Unknown device type! - %d.\n",
device->deviceType);

View File

@ -64,6 +64,9 @@ SalCtrl_ServiceCreate(sal_service_type_t serviceType,
pCrypto_service->generic_service_info.shutdown =
SalCtrl_CryptoShutdown;
/* Force HW MAC validation for GCM and CCM */
pCrypto_service->forceAEADMacVerify = CPA_TRUE;
*(ppInst) = &(pCrypto_service->generic_service_info);
return CPA_STATUS_SUCCESS;

View File

@ -59,7 +59,10 @@
#include "lac_sym_qat.h"
#include "icp_sal_versions.h"
#include "icp_sal_user.h"
#include "sal_hw_gen.h"
#define HMAC_MODE_1 1
#define HMAC_MODE_2 2
#define TH_CY_RX_0 0
#define TH_CY_RX_1 1
#define MAX_CY_RX_RINGS 2
@ -211,7 +214,7 @@ SalCtrl_SymCreateTransHandle(icp_accel_dev_t *device,
ICP_TRANS_TYPE_ETR,
section,
pCryptoService->acceleratorNum,
pCryptoService->bankNum,
pCryptoService->bankNumSym,
temp_string,
lac_getRingType(SAL_RING_TYPE_A_SYM_HI),
NULL,
@ -235,7 +238,7 @@ SalCtrl_SymCreateTransHandle(icp_accel_dev_t *device,
ICP_TRANS_TYPE_ETR,
section,
pCryptoService->acceleratorNum,
pCryptoService->bankNum,
pCryptoService->bankNumSym,
temp_string,
lac_getRingType(SAL_RING_TYPE_NONE),
(icp_trans_callback)LacSymQat_SymRespHandler,
@ -328,6 +331,7 @@ static CpaStatus
SalCtrl_SymInit(icp_accel_dev_t *device, sal_service_t *service)
{
CpaStatus status = CPA_STATUS_SUCCESS;
Cpa32U qatHmacMode = 0;
Cpa32U numSymConcurrentReq = 0;
char adfGetParam[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
char temp_string[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
@ -344,6 +348,19 @@ SalCtrl_SymInit(icp_accel_dev_t *device, sal_service_t *service)
* (Hash, Cipher, Algorithm-Chaining) (returns void)*/
LacSymCb_CallbacksRegister();
qatHmacMode = (Cpa32U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC);
switch (qatHmacMode) {
case HMAC_MODE_1:
pCryptoService->qatHmacMode = ICP_QAT_HW_AUTH_MODE1;
break;
case HMAC_MODE_2:
pCryptoService->qatHmacMode = ICP_QAT_HW_AUTH_MODE2;
break;
default:
pCryptoService->qatHmacMode = ICP_QAT_HW_AUTH_MODE1;
break;
}
/* Get num concurrent requests from config file */
status =
Sal_StringParsing("Cy",
@ -524,6 +541,32 @@ SalCtrl_DebugInit(icp_accel_dev_t *device, sal_service_t *service)
return status;
}
static CpaStatus
SalCtrl_GetBankNum(icp_accel_dev_t *device,
Cpa32U inst,
char *section,
char *bank_name,
Cpa16U *bank)
{
char adfParamValue[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
char adfParamName[SAL_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
CpaStatus status = CPA_STATUS_SUCCESS;
status = Sal_StringParsing("Cy", inst, bank_name, adfParamName);
LAC_CHECK_STATUS(status);
status = icp_adf_cfgGetParamValue(device,
section,
adfParamName,
adfParamValue);
if (CPA_STATUS_SUCCESS != status) {
QAT_UTILS_LOG("Failed to get %s from configuration file\n",
adfParamName);
return status;
}
*bank = (Cpa16U)Sal_Strtoul(adfParamValue, NULL, SAL_CFG_BASE_DEC);
return status;
}
static CpaStatus
SalCtr_InstInit(icp_accel_dev_t *device, sal_service_t *service)
{
@ -545,21 +588,62 @@ SalCtr_InstInit(icp_accel_dev_t *device, sal_service_t *service)
pCryptoService->acceleratorNum = 0;
status =
Sal_StringParsing("Cy",
pCryptoService->generic_service_info.instance,
"BankNumber",
temp_string);
LAC_CHECK_STATUS(status);
status =
icp_adf_cfgGetParamValue(device, section, temp_string, adfGetParam);
if (CPA_STATUS_SUCCESS != status) {
QAT_UTILS_LOG("Failed to get %s from configuration file\n",
temp_string);
return status;
/* Gen4, a bank only has 2 rings (1 ring pair), only one type of service
can be assigned one time. asym and sym will be in different bank*/
if (isCyGen4x(pCryptoService)) {
switch (service->type) {
case SAL_SERVICE_TYPE_CRYPTO_ASYM:
status = SalCtrl_GetBankNum(
device,
pCryptoService->generic_service_info.instance,
section,
"BankNumberAsym",
&pCryptoService->bankNumAsym);
if (CPA_STATUS_SUCCESS != status)
return status;
break;
case SAL_SERVICE_TYPE_CRYPTO_SYM:
status = SalCtrl_GetBankNum(
device,
pCryptoService->generic_service_info.instance,
section,
"BankNumberSym",
&pCryptoService->bankNumSym);
if (CPA_STATUS_SUCCESS != status)
return status;
break;
case SAL_SERVICE_TYPE_CRYPTO:
status = SalCtrl_GetBankNum(
device,
pCryptoService->generic_service_info.instance,
section,
"BankNumberAsym",
&pCryptoService->bankNumAsym);
if (CPA_STATUS_SUCCESS != status)
return status;
status = SalCtrl_GetBankNum(
device,
pCryptoService->generic_service_info.instance,
section,
"BankNumberSym",
&pCryptoService->bankNumSym);
if (CPA_STATUS_SUCCESS != status)
return status;
break;
default:
return CPA_STATUS_FAIL;
}
} else {
status = SalCtrl_GetBankNum(
device,
pCryptoService->generic_service_info.instance,
section,
"BankNumber",
&pCryptoService->bankNumSym);
if (CPA_STATUS_SUCCESS != status)
return status;
pCryptoService->bankNumAsym = pCryptoService->bankNumSym;
}
pCryptoService->bankNum =
(Cpa16U)Sal_Strtoul(adfGetParam, NULL, SAL_CFG_BASE_DEC);
status =
Sal_StringParsing("Cy",
@ -619,10 +703,18 @@ SalCtr_InstInit(icp_accel_dev_t *device, sal_service_t *service)
"",
temp_string2);
LAC_CHECK_STATUS(status);
status = Sal_StringParsing("Bank",
pCryptoService->bankNum,
"CoreAffinity",
temp_string);
if (service->type == SAL_SERVICE_TYPE_CRYPTO_ASYM)
status = Sal_StringParsing("Bank",
pCryptoService->bankNumAsym,
"CoreAffinity",
temp_string);
else
			/* For the cy service, the asym and sym banks are set
			 * to the same core affinity, so reading one of them
			 * is sufficient. */
status = Sal_StringParsing("Bank",
pCryptoService->bankNumSym,
"CoreAffinity",
temp_string);
LAC_CHECK_STATUS(status);
} else {
strncpy(temp_string2, section, (strlen(section) + 1));
@ -817,6 +909,9 @@ cpaCyGetStatusText(const CpaInstanceHandle instanceHandle,
case CPA_STATUS_FATAL:
LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_FATAL);
break;
case CPA_STATUS_UNSUPPORTED:
LAC_COPY_STRING(pStatusText, CPA_STATUS_STR_UNSUPPORTED);
break;
default:
status = CPA_STATUS_INVALID_PARAM;
break;
@ -886,6 +981,10 @@ cpaCyStartInstance(CpaInstanceHandle instanceHandle_in)
instanceHandle = instanceHandle_in;
}
LAC_CHECK_NULL_PARAM(instanceHandle);
SAL_CHECK_INSTANCE_TYPE(instanceHandle,
(SAL_SERVICE_TYPE_CRYPTO |
SAL_SERVICE_TYPE_CRYPTO_ASYM |
SAL_SERVICE_TYPE_CRYPTO_SYM));
pService = (sal_crypto_service_t *)instanceHandle;
@ -930,6 +1029,10 @@ cpaCyStopInstance(CpaInstanceHandle instanceHandle_in)
instanceHandle = instanceHandle_in;
}
LAC_CHECK_NULL_PARAM(instanceHandle);
SAL_CHECK_INSTANCE_TYPE(instanceHandle,
(SAL_SERVICE_TYPE_CRYPTO |
SAL_SERVICE_TYPE_CRYPTO_ASYM |
SAL_SERVICE_TYPE_CRYPTO_SYM));
status = cpaCyInstanceGetInfo2(instanceHandle, &info);
if (CPA_STATUS_SUCCESS != status) {
@ -1431,23 +1534,31 @@ cpaCySymQueryCapabilities(const CpaInstanceHandle instanceHandle_in,
}
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_NULL);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_ARC4);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_ECB);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_CBC);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_CTR);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_CCM);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_GCM);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_DES_ECB);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_DES_CBC);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_3DES_ECB);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_3DES_CBC);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_3DES_CTR);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_KASUMI_F8);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_SNOW3G_UEA2);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_F8);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_XTS);
if (isCyGen2x(pCryptoService)) {
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_ARC4);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers,
CPA_CY_SYM_CIPHER_DES_ECB);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers,
CPA_CY_SYM_CIPHER_DES_CBC);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers,
CPA_CY_SYM_CIPHER_3DES_ECB);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers,
CPA_CY_SYM_CIPHER_3DES_CBC);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers,
CPA_CY_SYM_CIPHER_3DES_CTR);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers,
CPA_CY_SYM_CIPHER_KASUMI_F8);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers,
CPA_CY_SYM_CIPHER_SNOW3G_UEA2);
CPA_BITMAP_BIT_SET(pCapInfo->ciphers, CPA_CY_SYM_CIPHER_AES_F8);
}
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_MD5);
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA1);
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA224);
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SHA256);
@ -1456,11 +1567,15 @@ cpaCySymQueryCapabilities(const CpaInstanceHandle instanceHandle_in,
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_XCBC);
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_CCM);
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_GCM);
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_KASUMI_F9);
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_SNOW3G_UIA2);
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_CMAC);
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_GMAC);
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_AES_CBC_MAC);
if (isCyGen2x(pCryptoService)) {
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_MD5);
CPA_BITMAP_BIT_SET(pCapInfo->hashes, CPA_CY_SYM_HASH_KASUMI_F9);
CPA_BITMAP_BIT_SET(pCapInfo->hashes,
CPA_CY_SYM_HASH_SNOW3G_UIA2);
}
if (pGenericService->capabilitiesMask &
ICP_ACCEL_CAPABILITIES_CRYPTO_ZUC) {
@ -1660,18 +1775,33 @@ static CpaInstanceHandle
Lac_GetFirstAsymHandle(icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES],
Cpa16U num_dev)
{
CpaStatus status = CPA_STATUS_SUCCESS;
icp_accel_dev_t *dev_addr = NULL;
sal_t *base_addr = NULL;
sal_list_t *list_temp = NULL;
CpaInstanceHandle cyInst = NULL;
CpaInstanceInfo2 info;
Cpa16U i = 0;
for (i = 0; i < num_dev; i++) {
dev_addr = (icp_accel_dev_t *)adfInsts[i];
base_addr = dev_addr->pSalHandle;
if ((NULL != base_addr) && (NULL != base_addr->asym_services)) {
list_temp = base_addr->asym_services;
if (NULL == base_addr) {
continue;
}
list_temp = base_addr->asym_services;
while (NULL != list_temp) {
cyInst = SalList_getObject(list_temp);
status = cpaCyInstanceGetInfo2(cyInst, &info);
list_temp = SalList_next(list_temp);
if (CPA_STATUS_SUCCESS != status ||
CPA_TRUE != info.isPolled) {
cyInst = NULL;
continue;
}
break;
}
if (cyInst) {
break;
}
}
@ -1684,18 +1814,33 @@ static CpaInstanceHandle
Lac_GetFirstSymHandle(icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES],
Cpa16U num_dev)
{
CpaStatus status = CPA_STATUS_SUCCESS;
icp_accel_dev_t *dev_addr = NULL;
sal_t *base_addr = NULL;
sal_list_t *list_temp = NULL;
CpaInstanceHandle cyInst = NULL;
CpaInstanceInfo2 info;
Cpa16U i = 0;
for (i = 0; i < num_dev; i++) {
dev_addr = (icp_accel_dev_t *)adfInsts[i];
base_addr = dev_addr->pSalHandle;
if ((NULL != base_addr) && (NULL != base_addr->sym_services)) {
list_temp = base_addr->sym_services;
if (NULL == base_addr) {
continue;
}
list_temp = base_addr->sym_services;
while (NULL != list_temp) {
cyInst = SalList_getObject(list_temp);
status = cpaCyInstanceGetInfo2(cyInst, &info);
list_temp = SalList_next(list_temp);
if (CPA_STATUS_SUCCESS != status ||
CPA_TRUE != info.isPolled) {
cyInst = NULL;
continue;
}
break;
}
if (cyInst) {
break;
}
}
@ -1709,22 +1854,37 @@ Lac_GetFirstSymHandle(icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES],
static CpaInstanceHandle
Lac_GetFirstCyHandle(icp_accel_dev_t *adfInsts[ADF_MAX_DEVICES], Cpa16U num_dev)
{
CpaStatus status = CPA_STATUS_SUCCESS;
icp_accel_dev_t *dev_addr = NULL;
sal_t *base_addr = NULL;
sal_list_t *list_temp = NULL;
CpaInstanceHandle cyInst = NULL;
CpaInstanceInfo2 info;
Cpa16U i = 0;
for (i = 0; i < num_dev; i++) {
dev_addr = (icp_accel_dev_t *)adfInsts[i];
base_addr = dev_addr->pSalHandle;
if ((NULL != base_addr) &&
(NULL != base_addr->crypto_services)) {
list_temp = base_addr->crypto_services;
if (NULL == base_addr) {
continue;
}
list_temp = base_addr->crypto_services;
while (NULL != list_temp) {
cyInst = SalList_getObject(list_temp);
status = cpaCyInstanceGetInfo2(cyInst, &info);
list_temp = SalList_next(list_temp);
if (CPA_STATUS_SUCCESS != status ||
CPA_TRUE != info.isPolled) {
cyInst = NULL;
continue;
}
break;
}
if (cyInst) {
break;
}
}
return cyInst;
}
@ -1835,3 +1995,16 @@ icp_sal_dp_SymGetInflightRequests(CpaInstanceHandle instanceHandle,
numInflightRequests);
}
/**
 * Enable or disable forcing of HW digest verification for AEAD (GCM/CCM)
 * operations on the given crypto instance (sets the instance's
 * forceAEADMacVerify flag).
 *
 * @param[in] instanceHandle      handle to a sal_crypto_service_t instance
 * @param[in] forceAEADMacVerify  new value for the forceAEADMacVerify flag
 *
 * @return CPA_STATUS_SUCCESS on success, or the LAC_CHECK_NULL_PARAM
 *         failure status when instanceHandle is NULL.
 */
CpaStatus
icp_sal_setForceAEADMACVerify(CpaInstanceHandle instanceHandle,
			      CpaBoolean forceAEADMacVerify)
{
	sal_crypto_service_t *pCryptoService =
	    (sal_crypto_service_t *)instanceHandle;

	LAC_CHECK_NULL_PARAM(pCryptoService);
	pCryptoService->forceAEADMacVerify = forceAEADMacVerify;
	return CPA_STATUS_SUCCESS;
}

View File

@ -458,6 +458,37 @@ SalCtrl_ServiceShutdown(icp_accel_dev_t *device,
return status;
}
/**
 * Map a QAT device type onto its hardware generation and record the
 * result in the service instance's 'gen' field.
 *
 * @param[in]  deviceType  device type reported for the accelerator
 * @param[out] pInst       service instance whose generation is set
 *
 * @return CPA_STATUS_SUCCESS when the device type is recognised,
 *         CPA_STATUS_FAIL (after logging) for an unknown type.
 */
static CpaStatus
selectGeneration(device_type_t deviceType, sal_service_t *pInst)
{
	/* Generation 2 platforms. */
	if (deviceType == DEVICE_C62X || deviceType == DEVICE_C62XVF ||
	    deviceType == DEVICE_DH895XCC || deviceType == DEVICE_DH895XCCVF ||
	    deviceType == DEVICE_C3XXX || deviceType == DEVICE_C3XXXVF ||
	    deviceType == DEVICE_200XX || deviceType == DEVICE_200XXVF) {
		pInst->gen = GEN2;
		return CPA_STATUS_SUCCESS;
	}

	/* Generation 3 platforms. */
	if (deviceType == DEVICE_C4XXX || deviceType == DEVICE_C4XXXVF) {
		pInst->gen = GEN3;
		return CPA_STATUS_SUCCESS;
	}

	/* Generation 4 platforms. */
	if (deviceType == DEVICE_GEN4) {
		pInst->gen = GEN4;
		return CPA_STATUS_SUCCESS;
	}

	QAT_UTILS_LOG("deviceType not initialised\n");
	return CPA_STATUS_FAIL;
}
/*************************************************************************
* @ingroup SalCtrl
* @description
@ -523,7 +554,12 @@ SalCtrl_ServiceInit(icp_accel_dev_t *device,
}
pInst->debug_parent_dir = debug_dir;
pInst->capabilitiesMask = device->accelCapabilitiesMask;
status = SalList_add(services, &tail_list, pInst);
status = selectGeneration(device->deviceType, pInst);
if (CPA_STATUS_SUCCESS == status) {
status =
SalList_add(services, &tail_list, pInst);
}
if (CPA_STATUS_SUCCESS != status) {
free(pInst, M_QAT);
}

View File

@ -756,17 +756,14 @@ typedef struct mtx *lac_lock_t;
#define LAC_SPINLOCK(lock) \
({ \
(void)qatUtilsLock(lock); \
CPA_STATUS_SUCCESS; \
})
#define LAC_SPINUNLOCK(lock) \
({ \
(void)qatUtilsUnlock(lock); \
CPA_STATUS_SUCCESS; \
})
#define LAC_SPINLOCK_DESTROY(lock) \
({ \
(void)qatUtilsLockDestroy(lock); \
CPA_STATUS_SUCCESS; \
})
#define LAC_CONST_PTR_CAST(castee) ((void *)(LAC_ARCH_UINT)(castee))

View File

@ -54,8 +54,7 @@ CpaStatus SalCtrl_ServiceCreate(sal_service_type_t service,
Cpa32U instance_num,
sal_service_t **pObj);
/**
*******************************************************************************
/******************************************************************************
* @ingroup SalCtl
* @description
* This macro goes through the 'list' passed in as a parameter. For each

View File

@ -67,6 +67,17 @@ typedef enum {
SAL_SERVICE_TYPE_QAT = 32
} sal_service_type_t;
/**
*****************************************************************************
* @ingroup SalCtrl
* Device generations
*
* @description
* List in an enum all the QAT device generations.
*
*****************************************************************************/
typedef enum { GEN2, GEN3, GEN4 } sal_generation_t;
/**
*****************************************************************************
* @ingroup SalCtrl
@ -128,6 +139,9 @@ typedef struct sal_service_s {
CpaBoolean integrityCrcCheck;
/** < True if the device supports end to end data integrity checks */
sal_generation_t gen;
/** Generation of devices */
} sal_service_t;
/**

View File

@ -16,6 +16,7 @@
#define LAC_SAL_TYPES_CRYPTO_H_
#include "lac_sym_qat_hash_defs_lookup.h"
#include "lac_sym_qat_constants_table.h"
#include "lac_sym_key.h"
#include "cpa_cy_sym_dp.h"
@ -84,6 +85,8 @@ typedef struct sal_crypto_service_s {
QatUtilsAtomic *pLacDrbgStatsArr;
/**< pointer to an array of atomic stats for DRBG */
icp_qat_hw_auth_mode_t qatHmacMode;
/**< Hmac Mode */
Cpa32U pkeFlowId;
/**< Flow ID for all pke requests from this instance - identifies
@ -105,6 +108,8 @@ typedef struct sal_crypto_service_s {
Cpa16U acceleratorNum;
Cpa16U bankNum;
Cpa16U bankNumAsym;
Cpa16U bankNumSym;
Cpa16U pkgID;
Cpa8U isPolled;
Cpa8U executionEngine;
@ -119,6 +124,9 @@ typedef struct sal_crypto_service_s {
/**< table of pointers to standard defined information for all hash
algorithms. We support an extra hash algo that is not exported by
cy api which is why we need the extra +1 */
lac_sym_qat_constants_t constantsLookupTables;
Cpa8U **ppHmacContentDesc;
/**< table of pointers to CD for Hmac precomputes - used at session init
*/
@ -137,6 +145,10 @@ typedef struct sal_crypto_service_s {
debug_file_info_t *debug_file;
/**< Statistics handler */
CpaBoolean forceAEADMacVerify;
/**< internal flag to enable/disable forcing HW digest verification for
GCM and CCM algorithms */
} sal_crypto_service_t;
/*************************************************************************

View File

@ -61,7 +61,7 @@ typedef struct lac_sync_op_data_s {
* Timeout for wait for init messages response in msecs
*/
#define DC_SYNC_CALLBACK_TIMEOUT (1000)
#define DC_SYNC_CALLBACK_TIMEOUT (2000)
/**< @ingroup LacSyn
* Timeout for wait for compression response in msecs */

View File

@ -0,0 +1,88 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
/**
***************************************************************************
* @file sal_hw_gen.h
*
* @ingroup SalHwGen
*
* @description
* Functions which return a value corresponding to qat device generation
*
***************************************************************************/
#ifndef SAL_HW_GEN_H
#define SAL_HW_GEN_H
#include "cpa.h"
#include "sal_types_compression.h"
#include "lac_sal_types_crypto.h"
/**
***************************************************************************
* @ingroup SalHwGen
*
* @description This function returns whether qat device is gen 4 or not
*
* @param[in] pService pointer to compression service
*
***************************************************************************/
static inline CpaBoolean
isDcGen4x(const sal_compression_service_t *pService)
{
return (pService->generic_service_info.gen == GEN4);
}
/**
***************************************************************************
* @ingroup SalHwGen
*
* @description This function returns whether qat device is gen 2/3 or not
*
* @param[in] pService pointer to compression service
*
***************************************************************************/
static inline CpaBoolean
isDcGen2x(const sal_compression_service_t *pService)
{
return ((pService->generic_service_info.gen == GEN2) ||
(pService->generic_service_info.gen == GEN3));
}
/**
***************************************************************************
* @ingroup SalHwGen
*
* @description This function returns whether qat device is gen 4 or not
*
* @param[in] pService pointer to crypto service
*
***************************************************************************/
static inline CpaBoolean
isCyGen4x(const sal_crypto_service_t *pService)
{
return (pService->generic_service_info.gen == GEN4);
}
/**
***************************************************************************
* @ingroup SalHwGen
*
* @description This function returns whether qat device is gen 2/3 or not
*
* @param[in] pService pointer to crypto service
*
***************************************************************************/
static inline CpaBoolean
isCyGen2x(const sal_crypto_service_t *pService)
{
return ((pService->generic_service_info.gen == GEN2) ||
(pService->generic_service_info.gen == GEN3));
}
#endif /* SAL_HW_GEN_H */

View File

@ -23,6 +23,7 @@
#include "icp_adf_transport.h"
#define DC_NUM_RX_RINGS (1)
#define DC_NUM_COMPRESSION_LEVELS (CPA_DC_L9)
/**
*****************************************************************************
@ -37,6 +38,9 @@ typedef struct sal_compression_device_data {
/* Device specific minimum output buffer size for static compression */
Cpa32U minOutputBuffSize;
/* Device specific minimum output buffer size for dynamic compression */
Cpa32U minOutputBuffSizeDynamic;
/* Enable/disable secureRam/acceleratorRam for intermediate buffers*/
Cpa8U useDevRam;
@ -62,6 +66,11 @@ typedef struct sal_compression_device_data {
/* Mask that reports supported window sizes for comp/decomp */
Cpa8U windowSizeMask;
/* List representing compression levels that are the first to have
a unique search depth. */
CpaBoolean uniqueCompressionLevels[DC_NUM_COMPRESSION_LEVELS + 1];
Cpa8U numCompressionLevels;
/* Flag to indicate CompressAndVerifyAndRecover feature support */
CpaBoolean cnvnrSupported;
} sal_compression_device_data_t;

View File

@ -486,12 +486,12 @@ typedef struct icp_qat_fw_comn_resp_s {
/* ========================================================================= */
/* Common QAT FW request header - structure of LW0
* + ===== + ---- + ----------- + ----------- + ----------- + ----------- +
* | Bit | 31 | 30 - 24 | 21 - 16 | 15 - 8 | 7 - 0 |
* + ===== + ---- + ----------- + ----------- + ----------- + ----------- +
* | Flags | V | Reserved | Serv Type | Serv Cmd Id | Reserved |
* + ===== + ---- + ----------- + ----------- + ----------- + ----------- +
*/
* + ===== + ------- + ----------- + ----------- + ----------- + -------- +
* | Bit | 31/30 | 29 - 24 | 21 - 16 | 15 - 8 | 7 - 0 |
* + ===== + ------- + ----------- + ----------- + ----------- + -------- +
* | Flags | V/Gen | Reserved | Serv Type | Serv Cmd Id | Rsv |
* + ===== + ------- + ----------- + ----------- + ----------- + -------- +
*/
/**< @ingroup icp_qat_fw_comn
* Definition of the setting of the header's valid flag */
@ -505,6 +505,20 @@ typedef struct icp_qat_fw_comn_resp_s {
* hdr_flags field of LW0 (service request and response) */
#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
/**< @ingroup icp_qat_fw_comn
* Macros defining the bit position and mask of the 'generation' flag, within
* the hdr_flags field of LW0 (service request and response) */
#define ICP_QAT_FW_COMN_GEN_FLAG_BITPOS 6
#define ICP_QAT_FW_COMN_GEN_FLAG_MASK 0x1
/**< @ingroup icp_qat_fw_comn
* The request is targeted for QAT2.0 */
#define ICP_QAT_FW_COMN_GEN_2 1
/**< @ingroup icp_qat_fw_comn
* The request is targeted for QAT1.x. QAT2.0 FW will return
'unsupported request' if GEN1 request type is sent to QAT2.0 FW */
#define ICP_QAT_FW_COMN_GEN_1 0
#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
/* Common QAT FW response header - structure of LW0
@ -525,6 +539,13 @@ typedef struct icp_qat_fw_comn_resp_s {
#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
/**< @ingroup icp_qat_fw_comn
* Macros defining the bit position and mask of Stored Blocks flag
* within the hdr_flags field of LW0 (service response only)
*/
#define ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS 4
#define ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK 0x1
/**
******************************************************************************
* @ingroup icp_qat_fw_comn
@ -660,6 +681,89 @@ typedef struct icp_qat_fw_comn_resp_s {
ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
ICP_QAT_FW_COMN_VALID_FLAG_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Extract the Stored Block flag from the header flags in the
* response only.
*
* @param hdr_flags Response 'hdr' structure to extract the
* Stored Block bit from the 'hdr_flags' field.
*
*****************************************************************************/
#define ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(hdr_flags) \
QAT_FIELD_GET(hdr_flags, \
ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS, \
ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Set the Stored Block bit in the response's header flags.
*
* @param hdr_t Response 'hdr_t' structure to set the ST_BLK bit
* @param val Value of the ST_BLK bit flag.
*
*****************************************************************************/
#define ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_SET(hdr_t, val) \
QAT_FIELD_SET((hdr_t.hdr_flags), \
(val), \
ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS, \
ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Set the generation bit in the request's header flags.
*
* @param hdr_t Request or Response 'hdr_t' structure to set the gen bit
* @param val Value of the generation bit flag.
*
*****************************************************************************/
#define ICP_QAT_FW_COMN_HDR_GENERATION_FLAG_SET(hdr_t, val) \
ICP_QAT_FW_COMN_GENERATION_FLAG_SET(hdr_t, val)
/**
******************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Common macro to set the generation bit in the common header
*
* @param hdr_t Structure (request or response) containing the header
* flags field, to allow the generation bit to be set.
* @param val Value of the generation bit flag.
*
*****************************************************************************/
#define ICP_QAT_FW_COMN_GENERATION_FLAG_SET(hdr_t, val) \
QAT_FIELD_SET((hdr_t.hdr_flags), \
(val), \
ICP_QAT_FW_COMN_GEN_FLAG_BITPOS, \
ICP_QAT_FW_COMN_GEN_FLAG_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Common macro to extract the generation flag from the header flags field
* within the header structure (request or response).
*
* @param hdr_t Structure (request or response) to extract the
* generation bit from the 'hdr_flags' field.
*
*****************************************************************************/
#define ICP_QAT_FW_COMN_HDR_GENERATION_FLAG_GET(hdr_flags) \
QAT_FIELD_GET(hdr_flags, \
ICP_QAT_FW_COMN_GEN_FLAG_BITPOS, \
ICP_QAT_FW_COMN_GEN_FLAG_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_comn

View File

@ -61,6 +61,8 @@ typedef enum {
* | | |as intmd| | | | | | | |
* | | | buf | | | | | | | |
* + ===== + ------ + ----- + --- + ------ + ----- + ----- + -- + ---- + --- +
* Note: For QAT 2.0 Disable Secure Ram, DisType0 Header and Enhanced ASB bits
* are don't care. i.e., these features are removed from QAT 2.0.
*/
/** Flag usage */
@ -183,6 +185,28 @@ typedef enum {
((secure_ram & ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) \
<< ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
/**
******************************************************************************
* @ingroup icp_qat_fw_comp
*
* @description
* Macro used for the generation of the command flags for Compression Request.
* This should always be used for the generation of the flags. No direct sets or
* masks should be performed on the flags data
*
* @param sesstype Session Type
* @param autoselect AutoSelectBest
* Selects between compressed and uncompressed output.
* No distinction made between static and dynamic
* compressed data.
*
*********************************************************************************/
#define ICP_QAT_FW_COMP_20_FLAGS_BUILD(sesstype, autoselect) \
(((sesstype & ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \
<< ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
((autoselect & ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \
<< ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS))
/**
******************************************************************************
* @ingroup icp_qat_fw_comp
@ -375,14 +399,16 @@ typedef struct icp_qat_fw_comp_req_params_s {
*
*****************************************************************************/
#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD( \
sop, eop, bfinal, cnv, cnvnr, crc) \
sop, eop, bfinal, cnv, cnvnr, cnvdfx, crc) \
(((sop & ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) | \
((eop & ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) | \
((bfinal & ICP_QAT_FW_COMP_BFINAL_MASK) \
<< ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
((cnv & ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) | \
((cnvnr & ICP_QAT_FW_COMP_CNV_RECOVERY_MASK) \
<< ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS) | \
((cnvnr & ICP_QAT_FW_COMP_CNVNR_MASK) \
<< ICP_QAT_FW_COMP_CNVNR_BITPOS) | \
((cnvdfx & ICP_QAT_FW_COMP_CNV_DFX_MASK) \
<< ICP_QAT_FW_COMP_CNV_DFX_BITPOS) | \
((crc & ICP_QAT_FW_COMP_CRC_MODE_MASK) \
<< ICP_QAT_FW_COMP_CRC_MODE_BITPOS))
@ -443,6 +469,14 @@ typedef struct icp_qat_fw_comp_req_params_s {
/**< @ingroup icp_qat_fw_comp
* Flag indicating that a cnv recovery is to be performed on the request */
#define ICP_QAT_FW_COMP_NO_CNV_DFX 0
/**< @ingroup icp_qat_fw_comp
* Flag indicating that NO CNV inject error is to be performed on the request */
#define ICP_QAT_FW_COMP_CNV_DFX 1
/**< @ingroup icp_qat_fw_comp
* Flag indicating that CNV inject error is to be performed on the request */
#define ICP_QAT_FW_COMP_CRC_MODE_LEGACY 0
/**< @ingroup icp_qat_fw_comp
* Flag representing to use the legacy CRC mode */
@ -491,6 +525,22 @@ typedef struct icp_qat_fw_comp_req_params_s {
/**< @ingroup icp_qat_fw_comp
* Starting bit position for the CNV Recovery bit */
#define ICP_QAT_FW_COMP_CNVNR_MASK 0x1
/**< @ingroup icp_qat_fw_comp
* One bit mask for the CNV Recovery bit */
#define ICP_QAT_FW_COMP_CNVNR_BITPOS 17
/**< @ingroup icp_qat_fw_comp
* Starting bit position for the CNV Recovery bit */
#define ICP_QAT_FW_COMP_CNV_DFX_BITPOS 18
/**< @ingroup icp_qat_fw_comp
* Starting bit position for the CNV DFX bit */
#define ICP_QAT_FW_COMP_CNV_DFX_MASK 0x1
/**< @ingroup icp_qat_fw_comp
* One bit mask for the CNV DFX bit */
#define ICP_QAT_FW_COMP_CRC_MODE_BITPOS 19
/**< @ingroup icp_qat_fw_comp
* Starting bit position for CRC mode */
@ -499,6 +549,14 @@ typedef struct icp_qat_fw_comp_req_params_s {
/**< @ingroup icp_qat_fw_comp
* One bit mask used to determine CRC mode */
#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS 20
/**< @ingroup icp_qat_fw_comp
* Starting bit position for xxHash accumulate mode */
#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK 0x1
/**< @ingroup icp_qat_fw_comp
* One bit mask used to determine xxHash accumulate mode */
/**
******************************************************************************
* @ingroup icp_qat_fw_comp
@ -574,6 +632,38 @@ typedef struct icp_qat_fw_comp_req_params_s {
ICP_QAT_FW_COMP_CRC_MODE_BITPOS, \
ICP_QAT_FW_COMP_CRC_MODE_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_comp
*
* @description
* Macro for extraction of the xxHash accumulate mode bit
*
* @param flags Flags to extract the xxHash accumulate mode bit from
*
*****************************************************************************/
#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_GET(flags) \
QAT_FIELD_GET(flags, \
ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_comp
*
* @description
* Macro for setting of the xxHash accumulate mode bit
*
* @param flags Flags to set the xxHash accumulate mode bit to
* @param val xxHash accumulate mode to set
*
*****************************************************************************/
#define ICP_QAT_FW_COMP_XXHASH_ACC_MODE_SET(flags, val) \
QAT_FIELD_SET(flags, \
val, \
ICP_QAT_FW_COMP_XXHASH_ACC_MODE_BITPOS, \
ICP_QAT_FW_COMP_XXHASH_ACC_MODE_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_comp
@ -589,10 +679,11 @@ typedef struct icp_qat_fw_xlt_req_params_s {
/**< LWs 20-21 */
uint64_t inter_buff_ptr;
/**< This field specifies the physical address of an intermediate
* buffer SGL array. The array contains a pair of 64-bit
* intermediate buffer pointers to SGL buffer descriptors, one pair
* per CPM. Please refer to the CPM1.6 Firmware Interface HLD
* specification for more details. */
* buffer SGL array. The array contains a pair of 64-bit
* intermediate buffer pointers to SGL buffer descriptors, one pair
* per CPM. Please refer to the CPM1.6 Firmware Interface HLD
* specification for more details.
* Placeholder for QAT2.0. */
} icp_qat_fw_xlt_req_params_t;
/**
@ -1026,4 +1117,31 @@ typedef enum {
(((bank_a_enable)&QAT_FW_COMP_BANK_FLAG_MASK) \
<< QAT_FW_COMP_BANK_A_BITPOS))
/**
*****************************************************************************
* @ingroup icp_qat_fw_comp
* Definition of the xxhash32 acc state buffer
* @description
* This is data structure used in stateful lite for xxhash32
*
*****************************************************************************/
typedef struct xxhash_acc_state_buff_s {
	/**< LW 0 */
	uint32_t in_counter;
	/**< Accumulated (total) consumed bytes, as opposed to the
	 * per-request IBC in the response. */
	/**< LW 1 */
	uint32_t out_counter;
	/**< OBC as in the response. */
	/**< LW 2-5 */
	uint32_t xxhash_state[4];
	/**< Running xxhash32 state words; initial value is set by IA to the
	 * values stated in HAS. */
	/**< LW 6-9 */
	uint32_t clear_txt[4];
	/**< Set to 0 for the first request. */
} xxhash_acc_state_buff_t;
#endif /* _ICP_QAT_FW_COMP_H_ */

View File

@ -92,6 +92,44 @@ typedef enum {
/**< Delimiter type */
} icp_qat_fw_la_cmd_id_t;
/* Lookaside cipher request parameters, LWs 14-26. The "20" suffix
 * presumably denotes the QAT2.0 (gen 4) request layout — confirm against
 * the QAT2.0 FW interface HLD. Do NOT reorder fields: this struct mirrors
 * the firmware request long-word layout. */
typedef struct icp_qat_fw_la_cipher_20_req_params_s {
	/**< LW 14 */
	uint32_t cipher_offset;
	/**< Cipher offset long word. */

	/**< LW 15 */
	uint32_t cipher_length;
	/**< Cipher length long word. */

	/**< LWs 16-19 */
	union {
		uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
		/**< Cipher IV array */

		struct {
			uint64_t cipher_IV_ptr;
			/**< Cipher IV pointer or Partial State Pointer */
			uint64_t resrvd1;
			/**< reserved */
		} s;

	} u;

	/**< LW 20 */
	uint32_t spc_aad_offset;
	/**< NOTE(review): "spc" fields look like single-pass (SPC) AEAD
	 * parameters (AAD offset here) — confirm against the FW HLD. */
	/**< LW 21 */
	uint32_t spc_aad_sz;
	/**< Presumably single-pass AAD size in bytes — TODO confirm. */
	/**< LW 22 - 23 */
	uint64_t spc_aad_addr;
	/**< Presumably physical address of the single-pass AAD buffer. */
	/**< LW 24 - 25 */
	uint64_t spc_auth_res_addr;
	/**< Presumably physical address of the single-pass auth result
	 * (digest) buffer. */
	/**< LW 26 */
	uint8_t reserved[3];
	uint8_t spc_auth_res_sz;
	/**< Presumably single-pass auth result (digest) size in bytes. */
} icp_qat_fw_la_cipher_20_req_params_t;
/* For the definitions of the bits in the status field of the common
* response, refer to icp_qat_fw.h.
* The return values specific to Lookaside service are given below.
@ -165,6 +203,34 @@ typedef struct icp_qat_fw_la_bulk_req_s {
/* Private defines */
/* bits 15:14 */
#define ICP_QAT_FW_LA_USE_WIRELESS_SLICE_TYPE 2
/**< @ingroup icp_qat_fw_la
* FW Selects Wireless Cipher Slice
* Cipher Algorithms: AES-{F8}, Snow3G, ZUC
* Auth Algorithms : Snow3G, ZUC */
#define ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE 1
/**< @ingroup icp_qat_fw_la
* FW Selects UCS Cipher Slice
* Cipher Algorithms: AES-{CTR/XTS}, Single Pass AES-GCM
* Auth Algorithms : SHA1/ SHA{2/3}-{224/256/384/512} */
#define ICP_QAT_FW_LA_USE_LEGACY_SLICE_TYPE 0
/**< @ingroup icp_qat_fw_la
* FW Selects Legacy Cipher/Auth Slice
* Cipher Algorithms: AES-{CBC/ECB}, SM4, Single Pass AES-CCM
* Auth Algorithms : SHA1/ SHA{2/3}-{224/256/384/512} */
#define QAT_LA_SLICE_TYPE_BITPOS 14
/**< @ingroup icp_qat_fw_la
* Starting bit position for the slice type selection.
* Refer to HAS for Slice type assignment details on QAT2.0 */
#define QAT_LA_SLICE_TYPE_MASK 0x3
/**< @ingroup icp_qat_fw_la
* Two bit mask used to determine the Slice type */
/* bit 11 */
#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
/**< @ingroup icp_qat_fw_la
@ -482,7 +548,7 @@ typedef struct icp_qat_fw_la_bulk_req_s {
*
* @description
* Macro for extraction of the Cipher IV field contents (bit 2)
*
*
* @param flags Flags to extract the Cipher IV field contents
*
*****************************************************************************/
@ -496,7 +562,7 @@ typedef struct icp_qat_fw_la_bulk_req_s {
* @description
* Macro for extraction of the Cipher/Auth Config
* offset type (bit 3)
*
*
* @param flags Flags to extract the Cipher/Auth Config
* offset type
*
@ -640,6 +706,19 @@ typedef struct icp_qat_fw_la_bulk_req_s {
QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_BITPOS, \
QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_la
*
* @description
* Macro for extraction of the slice type information from the flags.
*
* @param flags Flags to extract the protocol state
*
*****************************************************************************/
#define ICP_QAT_FW_LA_SLICE_TYPE_GET(flags) \
QAT_FIELD_GET(flags, QAT_LA_SLICE_TYPE_BITPOS, QAT_LA_SLICE_TYPE_MASK)
/* Macros for setting field bits */
/**
******************************************************************************
@ -647,7 +726,7 @@ typedef struct icp_qat_fw_la_bulk_req_s {
*
* @description
* Macro for setting the Cipher IV field contents
*
*
* @param flags Flags to set with the Cipher IV field contents
* @param val Field contents indicator value
*
@ -665,7 +744,7 @@ typedef struct icp_qat_fw_la_bulk_req_s {
* @description
* Macro for setting the Cipher/Auth Config
* offset type
*
*
* @param flags Flags to set the Cipher/Auth Config offset type
* @param val Offset type value
*
@ -840,6 +919,23 @@ typedef struct icp_qat_fw_la_bulk_req_s {
QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_BITPOS, \
QAT_LA_USE_EXTENDED_PROTOCOL_FLAGS_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_la
*
* @description
* Macro for setting the "slice type" field in la flags
*
* @param flags Flags to set the slice type
* @param val Value of the slice type to be set.
*
*****************************************************************************/
#define ICP_QAT_FW_LA_SLICE_TYPE_SET(flags, val) \
QAT_FIELD_SET(flags, \
val, \
QAT_LA_SLICE_TYPE_BITPOS, \
QAT_LA_SLICE_TYPE_MASK)
/**
*****************************************************************************
* @ingroup icp_qat_fw_la
@ -860,14 +956,10 @@ typedef union icp_qat_fw_cipher_req_hdr_cd_pars_s {
uint8_t content_desc_params_sz;
/**< Size of the content descriptor parameters in quad words.
* These
* parameters describe the session setup configuration info for
* the
* slices that this request relies upon i.e. the configuration
* word and
* cipher key needed by the cipher slice if there is a request
* for
* cipher processing. */
* These parameters describe the session setup configuration
* info for the slices that this request relies upon i.e. the
* configuration word and cipher key needed by the cipher slice
* if there is a request for cipher processing. */
uint8_t content_desc_hdr_resrvd2;
/**< Content descriptor reserved field */
@ -916,14 +1008,10 @@ typedef union icp_qat_fw_cipher_auth_req_hdr_cd_pars_s {
uint8_t content_desc_params_sz;
/**< Size of the content descriptor parameters in quad words.
* These
* parameters describe the session setup configuration info for
* the
* slices that this request relies upon i.e. the configuration
* word and
* cipher key needed by the cipher slice if there is a request
* for
* cipher processing. */
* These parameters describe the session setup configuration
* info for the slices that this request relies upon i.e. the
* configuration word and cipher key needed by the cipher slice
* if there is a request for cipher processing. */
uint8_t content_desc_hdr_resrvd2;
/**< Content descriptor reserved field */
@ -955,8 +1043,7 @@ typedef struct icp_qat_fw_cipher_cd_ctrl_hdr_s {
/**< LW 27 */
uint8_t cipher_state_sz;
/**< State size in quad words of the cipher algorithm used in this
* session.
* Set to zero if the algorithm doesnt provide any state */
* session. Set to zero if the algorithm doesn't provide any state */
uint8_t cipher_key_sz;
/**< Key size in quad words of the cipher algorithm used in this session
@ -964,17 +1051,16 @@ typedef struct icp_qat_fw_cipher_cd_ctrl_hdr_s {
uint8_t cipher_cfg_offset;
/**< Quad word offset from the content descriptor parameters address
* i.e.
* (content_address + (cd_hdr_sz << 3)) to the parameters for the cipher
* processing */
* i.e. (content_address + (cd_hdr_sz << 3)) to the parameters for the
* cipher processing */
uint8_t next_curr_id;
/**< This field combines the next and current id (each four bits) -
* the next id is the most significant nibble.
* Next Id: Set to the next slice to pass the ciphered data through.
* Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
* any more slices after cipher.
* Current Id: Initialised with the cipher slice type */
* the next id is the most significant nibble.
* Next Id: Set to the next slice to pass the ciphered data through.
* Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
* any more slices after cipher.
* Current Id: Initialised with the cipher slice type */
/**< LW 28 */
uint8_t cipher_padding_sz;
@ -1021,17 +1107,15 @@ typedef struct icp_qat_fw_auth_cd_ctrl_hdr_s {
uint8_t hash_cfg_offset;
/**< Quad word offset from the content descriptor parameters address to
* the
* parameters for the auth processing */
* the parameters for the auth processing */
uint8_t next_curr_id;
/**< This field combines the next and current id (each four bits) -
* the next id is the most significant nibble.
* Next Id: Set to the next slice to pass the authentication data
* through.
* Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
* any more slices after authentication.
* Current Id: Initialised with the authentication slice type */
* the next id is the most significant nibble.
* Next Id: Set to the next slice to pass the authentication data
* through. Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go
* through any more slices after authentication.
* Current Id: Initialised with the authentication slice type */
/**< LW 29 */
uint8_t resrvd3;
@ -1057,8 +1141,7 @@ typedef struct icp_qat_fw_auth_cd_ctrl_hdr_s {
uint8_t inner_state2_offset;
/**< Quad word offset from the content descriptor parameters pointer to
* the
* inner state2 value */
* the inner state2 value */
uint8_t inner_state2_sz;
/**< Size in bytes of inner hash state2 data. Must be a qword multiple
@ -1067,8 +1150,7 @@ typedef struct icp_qat_fw_auth_cd_ctrl_hdr_s {
/**< LW 31 */
uint8_t outer_config_offset;
/**< Quad word offset from the content descriptor parameters pointer to
* the
* outer configuration information */
* the outer configuration information */
uint8_t outer_state1_sz;
/**< Size in bytes of the outer state1 value */
@ -1078,10 +1160,8 @@ typedef struct icp_qat_fw_auth_cd_ctrl_hdr_s {
uint8_t outer_prefix_offset;
/**< Quad word offset from the start of the inner prefix data to the
* outer
* prefix information. Should equal the rounded inner prefix size,
* converted
* to qwords */
* outer prefix information. Should equal the rounded inner prefix size,
* converted to qwords */
} icp_qat_fw_auth_cd_ctrl_hdr_t;
@ -1100,8 +1180,7 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
/**< LW 27 */
uint8_t cipher_state_sz;
/**< State size in quad words of the cipher algorithm used in this
* session.
* Set to zero if the algorithm doesnt provide any state */
* session. Set to zero if the algorithm doesn't provide any state */
uint8_t cipher_key_sz;
/**< Key size in quad words of the cipher algorithm used in this session
@ -1109,17 +1188,16 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
uint8_t cipher_cfg_offset;
/**< Quad word offset from the content descriptor parameters address
* i.e.
* (content_address + (cd_hdr_sz << 3)) to the parameters for the cipher
* processing */
* i.e. (content_address + (cd_hdr_sz << 3)) to the parameters for the
* cipher processing */
uint8_t next_curr_id_cipher;
/**< This field combines the next and current id (each four bits) -
* the next id is the most significant nibble.
* Next Id: Set to the next slice to pass the ciphered data through.
* Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
* any more slices after cipher.
* Current Id: Initialised with the cipher slice type */
* the next id is the most significant nibble.
* Next Id: Set to the next slice to pass the ciphered data through.
* Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
* any more slices after cipher.
* Current Id: Initialised with the cipher slice type */
/**< LW 28 */
uint8_t cipher_padding_sz;
@ -1134,16 +1212,14 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
uint8_t hash_cfg_offset;
/**< Quad word offset from the content descriptor parameters address to
* the
* parameters for the auth processing */
* the parameters for the auth processing */
uint8_t next_curr_id_auth;
/**< This field combines the next and current id (each four bits) -
* the next id is the most significant nibble.
* Next Id: Set to the next slice to pass the authentication data
* through.
* Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
* any more slices after authentication.
* through. Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go
* through any more slices after authentication.
* Current Id: Initialised with the authentication slice type */
/**< LW 29 */
@ -1170,8 +1246,7 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
uint8_t inner_state2_offset;
/**< Quad word offset from the content descriptor parameters pointer to
* the
* inner state2 value */
* the inner state2 value */
uint8_t inner_state2_sz;
/**< Size in bytes of inner hash state2 data. Must be a qword multiple
@ -1180,8 +1255,7 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
/**< LW 31 */
uint8_t outer_config_offset;
/**< Quad word offset from the content descriptor parameters pointer to
* the
* outer configuration information */
* the outer configuration information */
uint8_t outer_state1_sz;
/**< Size in bytes of the outer state1 value */
@ -1191,10 +1265,8 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
uint8_t outer_prefix_offset;
/**< Quad word offset from the start of the inner prefix data to the
* outer
* prefix information. Should equal the rounded inner prefix size,
* converted
* to qwords */
* outer prefix information. Should equal the rounded inner prefix size,
* converted to qwords */
} icp_qat_fw_cipher_auth_cd_ctrl_hdr_t;
@ -1204,9 +1276,9 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
* + ===== + --- + --- + --- + --- + --- + --- + --- + ---- +
* | Bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* + ===== + --- + --- + --- + --- + --- + --- + --- + ---- +
* | Flags | Rsv | Rsv | Rsv | ZUC |SNOW | Rsv | Rsv |NESTED|
* | | | | |EIA3 | 3G | | | |
* | | | | | |UIA2 | | | |
* | Flags | Rsv | Rsv | Rsv | ZUC |SNOW |SKIP |SKIP |NESTED|
* | | | | |EIA3 | 3G |LOAD |LOAD | |
* | | | | | |UIA2 |OUTER|INNER| |
* + ===== + --- + --- + --- + --- + --- + --- + --- + ---- +
*/
@ -1233,6 +1305,42 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
* requires nested hashing
*/
/* Bit 1 */
#define QAT_FW_LA_SKIP_INNER_STATE1_LOAD_BITPOS 1
/**< @ingroup icp_qat_fw_comn
* Bit position of the Skipping Inner State1 Load bit */
#define QAT_FW_LA_SKIP_INNER_STATE1_LOAD 1
/**< @ingroup icp_qat_fw_comn
* Value indicating the skipping of inner hash state load */
#define QAT_FW_LA_NO_SKIP_INNER_STATE1_LOAD 0
/**< @ingroup icp_qat_fw_comn
* Value indicating the no skipping of inner hash state load */
#define QAT_FW_LA_SKIP_INNER_STATE1_LOAD_MASK 0x1
/**< @ingroup icp_qat_fw_comn
* Bit mask of Skipping Inner State1 Load bit */
/* Bit 2 */
#define QAT_FW_LA_SKIP_OUTER_STATE1_LOAD_BITPOS 2
/**< @ingroup icp_qat_fw_comn
* Bit position of the Skipping Outer State1 Load bit */
#define QAT_FW_LA_SKIP_OUTER_STATE1_LOAD 1
/**< @ingroup icp_qat_fw_comn
* Value indicating the skipping of outer hash state load */
#define QAT_FW_LA_NO_SKIP_OUTER_STATE1_LOAD 0
/**< @ingroup icp_qat_fw_comn
* Value indicating the no skipping of outer hash state load */
#define QAT_FW_LA_SKIP_OUTER_STATE1_LOAD_MASK 0x1
/**< @ingroup icp_qat_fw_comn
* Bit mask of Skipping Outer State1 Load bit */
/* Bit 3 */
#define QAT_FW_LA_SNOW3G_UIA2_BITPOS 3
@ -1261,6 +1369,24 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
/**< @ingroup icp_qat_fw_la
* One bit mask used to determine the use of hash algorithm ZUC-EIA3 */
/* Bit 5 */
#define QAT_FW_LA_MODE2_BITPOS 5
/**< @ingroup icp_qat_fw_comn
* Bit position of the Mode 2 bit */
#define QAT_FW_LA_MODE2 1
/**< @ingroup icp_qat_fw_comn
* Value indicating the Mode 2*/
#define QAT_FW_LA_NO_MODE2 0
/**< @ingroup icp_qat_fw_comn
* Value indicating the no Mode 2*/
#define QAT_FW_LA_MODE2_MASK 0x1
/**< @ingroup icp_qat_fw_comn
* Bit mask of Mode 2 */
/* Macros for extracting hash flags */
/**
@ -1279,6 +1405,67 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
QAT_FW_LA_AUTH_HDR_NESTED_BITPOS, \
QAT_FW_LA_AUTH_HDR_NESTED_MASK)
/**
 ******************************************************************************
 * @ingroup icp_qat_fw_la
 *
 * @description
 *      Macro for extraction of the "Skipping Inner State1 Load" hash flag
 *
 * @param flags        Hash Flags
 *
 *****************************************************************************/
/* Fixed: mask macro was misspelled as QAT_FW_LA_INNER_STATE1_LOAD_MASK,
 * which is not defined anywhere; the flag's mask (bit 1) is declared as
 * QAT_FW_LA_SKIP_INNER_STATE1_LOAD_MASK, matching the OUTER variant below. */
#define ICP_QAT_FW_HASH_FLAG_SKIP_INNER_STATE1_LOAD_GET(flags) \
	QAT_FIELD_GET(flags, \
		      QAT_FW_LA_SKIP_INNER_STATE1_LOAD_BITPOS, \
		      QAT_FW_LA_SKIP_INNER_STATE1_LOAD_MASK)
/**
 ******************************************************************************
 * @ingroup icp_qat_fw_la
 *
 * @description
 *      Macro for setting the "Skipping Inner State1 Load" hash flag
 *
 * @param flags        Hash Flags
 * @param val          Value of the flag
 *
 *****************************************************************************/
#define ICP_QAT_FW_HASH_FLAG_SKIP_INNER_STATE1_LOAD_SET(flags, val) \
	QAT_FIELD_SET(flags, \
		      val, \
		      QAT_FW_LA_SKIP_INNER_STATE1_LOAD_BITPOS, \
		      QAT_FW_LA_SKIP_INNER_STATE1_LOAD_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_la
*
* @description
* Macro for extraction of the "Skipping Outer State1 Load state" hash flag
*
* @param flags Hash Flags
*
*****************************************************************************/
#define ICP_QAT_FW_HASH_FLAG_SKIP_OUTER_STATE1_LOAD_GET(flags) \
QAT_FIELD_GET(flags, \
QAT_FW_LA_SKIP_OUTER_STATE1_LOAD_BITPOS, \
QAT_FW_LA_SKIP_OUTER_STATE1_LOAD_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_la
*
* @description
* Macro for setting the "Skipping Outer State1 Load" hash flag
*
* @param flags Hash Flags
* @param val Value of the flag
*
*****************************************************************************/
#define ICP_QAT_FW_HASH_FLAG_SKIP_OUTER_STATE1_LOAD_SET(flags, val) \
QAT_FIELD_SET(flags, \
val, \
QAT_FW_LA_SKIP_OUTER_STATE1_LOAD_BITPOS, \
QAT_FW_LA_SKIP_OUTER_STATE1_LOAD_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_la
@ -1328,6 +1515,40 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
QAT_FW_LA_AUTH_HDR_NESTED_BITPOS, \
QAT_FW_LA_AUTH_HDR_NESTED_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_la
*
* @description
* Macro for setting the "Skipping Inner State1 Load" hash flag
*
* @param flags Hash Flags
* @param val Value of the flag
*
*****************************************************************************/
#define ICP_QAT_FW_HASH_FLAG_SKIP_INNER_STATE1_LOAD_SET(flags, val) \
QAT_FIELD_SET(flags, \
val, \
QAT_FW_LA_SKIP_INNER_STATE1_LOAD_BITPOS, \
QAT_FW_LA_SKIP_INNER_STATE1_LOAD_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_la
*
* @description
* Macro for setting the "Skipping Outer State1 Load" hash flag
*
* @param flags Hash Flags
* @param val Value of the flag
*
*****************************************************************************/
#define ICP_QAT_FW_HASH_FLAG_SKIP_OUTER_STATE1_LOAD_SET(flags, val) \
QAT_FIELD_SET(flags, \
val, \
QAT_FW_LA_SKIP_OUTER_STATE1_LOAD_BITPOS, \
QAT_FW_LA_SKIP_OUTER_STATE1_LOAD_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_la
@ -1362,6 +1583,20 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
QAT_FW_LA_ZUC_EIA3_BITPOS, \
QAT_FW_LA_ZUC_EIA3_MASK)
/**
******************************************************************************
* @ingroup icp_qat_fw_la
*
* @description
* Macro for setting the "Mode 2" hash flag
*
* @param flags Hash Flags
* @param val Value of the flag
*
*****************************************************************************/
#define ICP_QAT_FW_HASH_FLAG_MODE2_SET(flags, val) \
QAT_FIELD_SET(flags, val, QAT_FW_LA_MODE2_BITPOS, QAT_FW_LA_MODE2_MASK)
#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240
#define ICP_QAT_FW_SPC_AAD_SZ_MAX 0x3FFF
@ -1494,8 +1729,7 @@ typedef struct icp_qat_fw_la_auth_req_params_s {
uint8_t aad_sz;
/**< Size in bytes of padded AAD data to prefix to the packet
* for CCM
* or GCM processing */
* for CCM or GCM processing */
} u2;
uint8_t resrvd1;
@ -1535,8 +1769,7 @@ typedef struct icp_qat_fw_la_auth_req_params_resrvd_flds_s {
uint8_t aad_sz;
/**< Size in bytes of padded AAD data to prefix to the packet
* for CCM
* or GCM processing */
* for CCM or GCM processing */
} u2;
uint8_t resrvd1;
@ -1565,8 +1798,7 @@ typedef struct icp_qat_fw_la_key_gen_common_s {
/**< SSL3 */
uint16_t secret_lgth_ssl;
/**< Length of Secret information for SSL. In the case of TLS
* the
* secret is supplied in the content descriptor */
* the secret is supplied in the content descriptor */
/**< MGF */
uint16_t mask_length;
@ -1692,8 +1924,7 @@ typedef struct icp_qat_fw_la_ssl3_req_params_s {
uint8_t aad_sz;
/**< Size in bytes of padded AAD data to prefix to the packet
* for CCM
* or GCM processing */
* for CCM or GCM processing */
} u2;
uint8_t resrvd1;
@ -1731,8 +1962,7 @@ typedef struct icp_qat_fw_la_mgf_req_params_s {
uint8_t aad_sz;
/**< Size in bytes of padded AAD data to prefix to the packet
* for CCM
* or GCM processing */
* for CCM or GCM processing */
} u2;
uint8_t resrvd1;
@ -1779,16 +2009,14 @@ typedef struct icp_qat_fw_la_trng_req_mid_s {
/**< LWs 6-13 */
uint64_t opaque_data;
/**< Opaque data passed unmodified from the request to response messages
* by
* firmware (fw) */
* by firmware (fw) */
uint64_t resrvd1;
/**< Reserved, unused for TRNG */
uint64_t dest_data_addr;
/**< Generic definition of the destination data supplied to the QAT AE.
* The
* common flags are used to further describe the attributes of this
* The common flags are used to further describe the attributes of this
* field */
uint32_t resrvd2;
@ -1796,7 +2024,7 @@ typedef struct icp_qat_fw_la_trng_req_mid_s {
uint32_t entropy_length;
/**< Size of the data in bytes to process. Used by the get_random
* command. Set to 0 for commands that dont need a length parameter */
* command. Set to 0 for commands that don't need a length parameter */
} icp_qat_fw_la_trng_req_mid_t;
@ -2235,10 +2463,8 @@ struct icp_qat_fw_hkdf_label {
union {
uint8_t label_flags;
/**< For first-level labels: each bit in [0..3] will trigger a
* child
* Expand-Label operation on the corresponding sublabel. Bits
* [4..7]
* are reserved.
* child Expand-Label operation on the corresponding sublabel.
* Bits [4..7] are reserved.
*/
uint8_t sublabel_flags;

View File

@ -25,22 +25,22 @@
/* ========================================================================= */
typedef enum {
ICP_QAT_HW_AE_0 = 0, /*!< ID of AE0 */
ICP_QAT_HW_AE_1 = 1, /*!< ID of AE1 */
ICP_QAT_HW_AE_2 = 2, /*!< ID of AE2 */
ICP_QAT_HW_AE_3 = 3, /*!< ID of AE3 */
ICP_QAT_HW_AE_4 = 4, /*!< ID of AE4 */
ICP_QAT_HW_AE_5 = 5, /*!< ID of AE5 */
ICP_QAT_HW_AE_6 = 6, /*!< ID of AE6 */
ICP_QAT_HW_AE_7 = 7, /*!< ID of AE7 */
ICP_QAT_HW_AE_8 = 8, /*!< ID of AE8 */
ICP_QAT_HW_AE_9 = 9, /*!< ID of AE9 */
ICP_QAT_HW_AE_10 = 10, /*!< ID of AE10 */
ICP_QAT_HW_AE_11 = 11, /*!< ID of AE11 */
ICP_QAT_HW_AE_12 = 12, /*!< ID of AE12 */
ICP_QAT_HW_AE_13 = 13, /*!< ID of AE13 */
ICP_QAT_HW_AE_14 = 14, /*!< ID of AE14 */
ICP_QAT_HW_AE_15 = 15, /*!< ID of AE15 */
ICP_QAT_HW_AE_0 = 0, /*!< ID of AE0 */
ICP_QAT_HW_AE_1 = 1, /*!< ID of AE1 */
ICP_QAT_HW_AE_2 = 2, /*!< ID of AE2 */
ICP_QAT_HW_AE_3 = 3, /*!< ID of AE3 */
ICP_QAT_HW_AE_4 = 4, /*!< ID of AE4 */
ICP_QAT_HW_AE_5 = 5, /*!< ID of AE5 */
ICP_QAT_HW_AE_6 = 6, /*!< ID of AE6 */
ICP_QAT_HW_AE_7 = 7, /*!< ID of AE7 */
ICP_QAT_HW_AE_8 = 8, /*!< ID of AE8 */
ICP_QAT_HW_AE_9 = 9, /*!< ID of AE9 */
ICP_QAT_HW_AE_10 = 10, /*!< ID of AE10 */
ICP_QAT_HW_AE_11 = 11, /*!< ID of AE11 */
ICP_QAT_HW_AE_12 = 12, /*!< ID of AE12 */
ICP_QAT_HW_AE_13 = 13, /*!< ID of AE13 */
ICP_QAT_HW_AE_14 = 14, /*!< ID of AE14 */
ICP_QAT_HW_AE_15 = 15, /*!< ID of AE15 */
ICP_QAT_HW_AE_DELIMITER = 16 /**< Delimiter type */
} icp_qat_hw_ae_id_t;
@ -49,12 +49,12 @@ typedef enum {
/* ========================================================================= */
typedef enum {
ICP_QAT_HW_QAT_0 = 0, /*!< ID of QAT0 */
ICP_QAT_HW_QAT_1 = 1, /*!< ID of QAT1 */
ICP_QAT_HW_QAT_2 = 2, /*!< ID of QAT2 */
ICP_QAT_HW_QAT_3 = 3, /*!< ID of QAT3 */
ICP_QAT_HW_QAT_4 = 4, /*!< ID of QAT4 */
ICP_QAT_HW_QAT_5 = 5, /*!< ID of QAT5 */
ICP_QAT_HW_QAT_0 = 0, /*!< ID of QAT0 */
ICP_QAT_HW_QAT_1 = 1, /*!< ID of QAT1 */
ICP_QAT_HW_QAT_2 = 2, /*!< ID of QAT2 */
ICP_QAT_HW_QAT_3 = 3, /*!< ID of QAT3 */
ICP_QAT_HW_QAT_4 = 4, /*!< ID of QAT4 */
ICP_QAT_HW_QAT_5 = 5, /*!< ID of QAT5 */
ICP_QAT_HW_QAT_DELIMITER = 6 /**< Delimiter type */
} icp_qat_hw_qat_id_t;
@ -79,24 +79,24 @@ typedef enum {
ICP_QAT_HW_AUTH_ALGO_SHA256 = 4, /*!< SHA-256 hashing */
ICP_QAT_HW_AUTH_ALGO_SHA384 = 5, /*!< SHA-384 hashing */
ICP_QAT_HW_AUTH_ALGO_SHA512 = 6, /*!< SHA-512 hashing */
ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, /*!< AES-XCBC-MAC hashing */
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, /*!< AES-CBC-MAC hashing */
ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, /*!< AES-XCBC-MAC hashing */
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, /*!< AES-CBC-MAC hashing */
ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9, /*!< AES F9 hashing */
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, /*!< Galois 128 bit hashing */
ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, /*!< Galois 64 hashing */
ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, /*!< Kasumi F9 hashing */
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, /*!< Galois 128 bit hashing */
ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, /*!< Galois 64 hashing */
ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, /*!< Kasumi F9 hashing */
ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, /*!< UIA2/SNOW_3G F9 hashing */
ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 =
14, /*!< 128_EIA3/ZUC_3G hashing */
ICP_QAT_HW_AUTH_ALGO_SM3 = 15, /*!< SM3 hashing */
ICP_QAT_HW_AUTH_ALGO_SHA3_224 = 16, /*!< SHA3-224 hashing */
ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, /*!< SHA3-256 hashing */
ICP_QAT_HW_AUTH_ALGO_SHA3_384 = 18, /*!< SHA3-384 hashing */
ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, /*!< SHA3-512 hashing */
ICP_QAT_HW_AUTH_ALGO_SHAKE_128 = 20, /*!< SHAKE-128 hashing */
ICP_QAT_HW_AUTH_ALGO_SHAKE_256 = 21, /*!< SHAKE-256 hashing */
ICP_QAT_HW_AUTH_ALGO_POLY = 22, /*!< POLY hashing */
ICP_QAT_HW_AUTH_ALGO_DELIMITER = 23 /**< Delimiter type */
14, /*!< 128_EIA3/ZUC_3G hashing */
ICP_QAT_HW_AUTH_ALGO_SM3 = 15, /*!< SM3 hashing */
ICP_QAT_HW_AUTH_ALGO_SHA3_224 = 16, /*!< SHA3-224 hashing */
ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, /*!< SHA3-256 hashing */
ICP_QAT_HW_AUTH_ALGO_SHA3_384 = 18, /*!< SHA3-384 hashing */
ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, /*!< SHA3-512 hashing */
ICP_QAT_HW_AUTH_RESERVED_4 = 20, /*!< Reserved */
ICP_QAT_HW_AUTH_RESERVED_5 = 21, /*!< Reserved */
ICP_QAT_HW_AUTH_ALGO_POLY = 22, /*!< POLY hashing */
ICP_QAT_HW_AUTH_ALGO_DELIMITER = 23 /**< Delimiter type */
} icp_qat_hw_auth_algo_t;
/**
@ -118,9 +118,9 @@ typedef enum {
*****************************************************************************/
typedef enum {
ICP_QAT_HW_AUTH_MODE0 = 0, /*!< QAT Auth Mode0 configuration */
ICP_QAT_HW_AUTH_MODE1 = 1, /*!< QAT Auth Mode1 configuration */
ICP_QAT_HW_AUTH_MODE2 = 2, /*!< QAT AuthMode2 configuration */
ICP_QAT_HW_AUTH_MODE0 = 0, /*!< QAT Auth Mode0 configuration */
ICP_QAT_HW_AUTH_MODE1 = 1, /*!< QAT Auth Mode1 configuration */
ICP_QAT_HW_AUTH_MODE2 = 2, /*!< QAT AuthMode2 configuration */
ICP_QAT_HW_AUTH_MODE_DELIMITER = 3 /**< Delimiter type */
} icp_qat_hw_auth_mode_t;
@ -269,7 +269,7 @@ typedef struct icp_qat_hw_auth_config_s {
/**< Flag usage - see additional notes @description for
* ICP_QAT_HW_AUTH_CONFIG_BUILD and
* ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER macros.
*/
*/
#define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0
/**< @ingroup icp_qat_hw_defs
@ -461,7 +461,7 @@ typedef struct icp_qat_hw_auth_setup_s {
#define QAT_HW_ROUND_UP(val, n) (((val) + ((n)-1)) & (~(n - 1)))
/* State1 */
#define ICP_QAT_HW_NULL_STATE1_SZ 64
#define ICP_QAT_HW_NULL_STATE1_SZ 32
/**< @ingroup icp_qat_hw_defs
* State1 block size for NULL hashing */
#define ICP_QAT_HW_MD5_STATE1_SZ 16
@ -474,7 +474,7 @@ typedef struct icp_qat_hw_auth_setup_s {
#define ICP_QAT_HW_SHA224_STATE1_SZ 32
/**< @ingroup icp_qat_hw_defs
* State1 block size for SHA24 */
#define ICP_QAT_HW_SHA3_224_STATE1_SZ 32
#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
/**< @ingroup icp_qat_hw_defs
* State1 block size for SHA3_224 */
#define ICP_QAT_HW_SHA256_STATE1_SZ 32
@ -486,7 +486,7 @@ typedef struct icp_qat_hw_auth_setup_s {
#define ICP_QAT_HW_SHA384_STATE1_SZ 64
/**< @ingroup icp_qat_hw_defs
* State1 block size for SHA384 */
#define ICP_QAT_HW_SHA3_384_STATE1_SZ 64
#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
/**< @ingroup icp_qat_hw_defs
* State1 block size for SHA3_384 */
#define ICP_QAT_HW_SHA512_STATE1_SZ 64
@ -516,15 +516,15 @@ typedef struct icp_qat_hw_auth_setup_s {
#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
/**< @ingroup icp_cpm_hw_defs
* State1 block size for EIA3 */
#define ICP_QAT_HW_SM3_STATE1_SZ 32
/**< @ingroup icp_qat_hw_defs
* State1 block size for SM3 */
#define ICP_QAT_HW_SHA3_STATEFUL_STATE1_SZ 200
/** <@ingroup icp_cpm_hw_defs
* State1 block size for stateful SHA3 processing*/
#define ICP_QAT_HW_SM3_STATE1_SZ 32
/**< @ingroup icp_cpm_hw_defs
* State1 block size for SM3 */
/* State2 */
#define ICP_QAT_HW_NULL_STATE2_SZ 64
#define ICP_QAT_HW_NULL_STATE2_SZ 32
/**< @ingroup icp_qat_hw_defs
* State2 block size for NULL hashing */
#define ICP_QAT_HW_MD5_STATE2_SZ 16
@ -537,25 +537,25 @@ typedef struct icp_qat_hw_auth_setup_s {
#define ICP_QAT_HW_SHA224_STATE2_SZ 32
/**< @ingroup icp_qat_hw_defs
* State2 block size for SHA224 */
#define ICP_QAT_HW_SHA3_224_STATE2_SZ 32
#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
/**< @ingroup icp_qat_hw_defs
* State2 block size for SHA3_224 */
#define ICP_QAT_HW_SHA256_STATE2_SZ 32
/**< @ingroup icp_qat_hw_defs
* State2 block size for SHA256 */
#define ICP_QAT_HW_SHA3_256_STATE2_SZ 32
#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
/**< @ingroup icp_qat_hw_defs
* State2 block size for SHA3_256 */
#define ICP_QAT_HW_SHA384_STATE2_SZ 64
/**< @ingroup icp_qat_hw_defs
* State2 block size for SHA384 */
#define ICP_QAT_HW_SHA3_384_STATE2_SZ 64
#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
/**< @ingroup icp_qat_hw_defs
* State2 block size for SHA3_384 */
#define ICP_QAT_HW_SHA512_STATE2_SZ 64
/**< @ingroup icp_qat_hw_defs
* State2 block size for SHA512 */
#define ICP_QAT_HW_SHA3_512_STATE2_SZ 64
#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
/**< @ingroup icp_qat_hw_defs
* State2 block size for SHA3_512 */
#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
@ -598,6 +598,9 @@ typedef struct icp_qat_hw_auth_setup_s {
#define ICP_QAT_HW_SM3_STATE2_SZ 32
/**< @ingroup icp_qat_hw_defs
* State2 block size for SM3 */
#define ICP_QAT_HW_SHA3_STATEFUL_STATE2_SZ 208
/** <@ingroup icp_cpm_hw_defs
* State2 block size for stateful SHA3 processing*/
/* ************************************************************************* */
/* ************************************************************************* */
@ -647,10 +650,33 @@ typedef struct icp_qat_hw_auth_sha3_512_s {
icp_qat_hw_auth_setup_t outer_setup;
/**< Outer configuration word for the slice */
/* State2 size is zero - this may change for future implementations */
uint8_t state2[ICP_QAT_HW_SHA3_512_STATE2_SZ];
} icp_qat_hw_auth_sha3_512_t;
/**
 *****************************************************************************
 * @ingroup icp_qat_hw_defs
 *      Definition of stateful SHA3 auth algorithm processing struct
 * @description
 *      This struct describes the parameters to pass to the slice for
 *      configuring it for stateful SHA3 processing. This is the largest
 *      possible setup block for authentication
 *
 *****************************************************************************/
typedef struct icp_qat_hw_auth_sha3_stateful_s {
	icp_qat_hw_auth_setup_t inner_setup;
	/**< Inner loop configuration word for the slice */

	uint8_t inner_state1[ICP_QAT_HW_SHA3_STATEFUL_STATE1_SZ];
	/**< Inner hash block */

	icp_qat_hw_auth_setup_t outer_setup;
	/**< Outer configuration word for the slice */

	uint8_t outer_state1[ICP_QAT_HW_SHA3_STATEFUL_STATE1_SZ];
	/**< Outer hash block */

} icp_qat_hw_auth_sha3_stateful_t;
/**
*****************************************************************************
* @ingroup icp_qat_hw_defs
@ -662,6 +688,8 @@ typedef struct icp_qat_hw_auth_sha3_512_s {
typedef union icp_qat_hw_auth_algo_blk_u {
icp_qat_hw_auth_sha512_t sha512;
/**< SHA512 Hashing */
icp_qat_hw_auth_sha3_stateful_t sha3_stateful;
/**< Stateful SHA3 Hashing */
} icp_qat_hw_auth_algo_blk_t;
@ -691,11 +719,11 @@ typedef enum {
ICP_QAT_HW_CIPHER_ALGO_NULL = 0, /*!< Null ciphering */
ICP_QAT_HW_CIPHER_ALGO_DES = 1, /*!< DES ciphering */
ICP_QAT_HW_CIPHER_ALGO_3DES = 2, /*!< 3DES ciphering */
ICP_QAT_HW_CIPHER_ALGO_AES128 = 3, /*!< AES-128 ciphering */
ICP_QAT_HW_CIPHER_ALGO_AES192 = 4, /*!< AES-192 ciphering */
ICP_QAT_HW_CIPHER_ALGO_AES256 = 5, /*!< AES-256 ciphering */
ICP_QAT_HW_CIPHER_ALGO_AES128 = 3, /*!< AES-128 ciphering */
ICP_QAT_HW_CIPHER_ALGO_AES192 = 4, /*!< AES-192 ciphering */
ICP_QAT_HW_CIPHER_ALGO_AES256 = 5, /*!< AES-256 ciphering */
ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6, /*!< ARC4 ciphering */
ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7, /*!< Kasumi */
ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7, /*!< Kasumi */
ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8, /*!< Snow_3G */
ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9, /*!< ZUC_3G */
ICP_QAT_HW_CIPHER_ALGO_SM4 = 10, /*!< SM4 ciphering */
@ -723,7 +751,7 @@ typedef enum {
ICP_QAT_HW_CIPHER_CTR_MODE = 2, /*!< CTR mode */
ICP_QAT_HW_CIPHER_F8_MODE = 3, /*!< F8 mode */
ICP_QAT_HW_CIPHER_AEAD_MODE = 4, /*!< AES-GCM SPC AEAD mode */
ICP_QAT_HW_CIPHER_RESERVED_MODE = 5, /*!< Reserved */
ICP_QAT_HW_CIPHER_CCM_MODE = 5, /*!< AES-CCM SPC AEAD mode */
ICP_QAT_HW_CIPHER_XTS_MODE = 6, /*!< XTS mode */
ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7 /**< Delimiter type */
} icp_qat_hw_cipher_mode_t;
@ -746,6 +774,23 @@ typedef struct icp_qat_hw_cipher_config_s {
/**< Reserved */
} icp_qat_hw_cipher_config_t;
/**
*****************************************************************************
* @ingroup icp_qat_hw_defs
* Cipher Configuration Struct
*
* @description
* Configuration data used for setting up the QAT UCS Cipher Slice
*
*****************************************************************************/
typedef struct icp_qat_hw_ucs_cipher_config_s {
uint32_t val;
/**< Cipher slice configuration */
uint32_t reserved[3];
/**< Reserved */
} icp_qat_hw_ucs_cipher_config_t;
/**
*****************************************************************************
* @ingroup icp_qat_hw_defs
@ -851,6 +896,10 @@ typedef enum {
/**< @ingroup icp_qat_hw_defs
* Define for the cipher XTS mode key size */
#define QAT_CIPHER_MODE_UCS_XTS_KEY_SZ_MULT 1
/**< @ingroup icp_qat_hw_defs
* Define for the UCS cipher XTS mode key size */
/**
******************************************************************************
* @ingroup icp_qat_hw_defs
@ -931,6 +980,16 @@ typedef enum {
#define ICP_QAT_HW_AES_256_KEY_SZ 32
/**< @ingroup icp_qat_hw_defs
* Define the key size for AES256 */
/* AES UCS */
#define ICP_QAT_HW_UCS_AES_128_KEY_SZ ICP_QAT_HW_AES_128_KEY_SZ
/**< @ingroup icp_qat_hw_defs
* Define the key size for AES128 for UCS slice*/
#define ICP_QAT_HW_UCS_AES_192_KEY_SZ 32
/**< @ingroup icp_qat_hw_defs
* Define the key size for AES192 for UCS slice*/
#define ICP_QAT_HW_UCS_AES_256_KEY_SZ ICP_QAT_HW_AES_256_KEY_SZ
/**< @ingroup icp_qat_hw_defs
* Define the key size for AES256 for UCS slice*/
#define ICP_QAT_HW_AES_128_F8_KEY_SZ \
(ICP_QAT_HW_AES_128_KEY_SZ * QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
@ -951,6 +1010,14 @@ typedef enum {
(ICP_QAT_HW_AES_256_KEY_SZ * QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
* Define the key size for AES256 XTS */
#define ICP_QAT_HW_UCS_AES_128_XTS_KEY_SZ \
(ICP_QAT_HW_UCS_AES_128_KEY_SZ * QAT_CIPHER_MODE_UCS_XTS_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
* Define the key size for AES128 XTS for the UCS Slice*/
#define ICP_QAT_HW_UCS_AES_256_XTS_KEY_SZ \
(ICP_QAT_HW_UCS_AES_256_KEY_SZ * QAT_CIPHER_MODE_UCS_XTS_KEY_SZ_MULT)
/**< @ingroup icp_qat_hw_defs
* Define the key size for AES256 XTS for the UCS Slice*/
#define ICP_QAT_HW_KASUMI_KEY_SZ 16
/**< @ingroup icp_qat_hw_defs
* Define the key size for Kasumi */
@ -1090,10 +1157,10 @@ typedef enum {
*****************************************************************************/
typedef enum {
ICP_QAT_HW_TRNG_NEG_0 = 0, /*!< TRNG Neg Zero Test */
ICP_QAT_HW_TRNG_NEG_1 = 1, /*!< TRNG Neg One Test */
ICP_QAT_HW_TRNG_NEG_0 = 0, /*!< TRNG Neg Zero Test */
ICP_QAT_HW_TRNG_NEG_1 = 1, /*!< TRNG Neg One Test */
ICP_QAT_HW_TRNG_POS = 2, /*!< TRNG POS Test */
ICP_QAT_HW_TRNG_POS_VNC = 3, /*!< TRNG POS VNC Test */
ICP_QAT_HW_TRNG_POS_VNC = 3, /*!< TRNG POS VNC Test */
ICP_QAT_HW_TRNG_KAT_DELIMITER = 4 /**< Delimiter type */
} icp_qat_hw_trng_kat_mode_t;
@ -1388,7 +1455,7 @@ typedef enum {
typedef enum {
ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0, /*!< Deflate compression */
ICP_QAT_HW_COMPRESSION_DEPRECATED = 1, /*!< Deprecated */
ICP_QAT_HW_COMPRESSION_DEPRECATED = 1, /*!< Deprecated */
ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2 /**< Delimiter type */
} icp_qat_hw_compression_algo_t;
@ -1470,11 +1537,11 @@ typedef enum {
*****************************************************************************/
typedef struct icp_qat_hw_compression_config_s {
uint32_t val;
/**< Compression slice configuration */
uint32_t lower_val;
/**< Compression slice configuration lower LW */
uint32_t reserved;
/**< Reserved */
uint32_t upper_val;
/**< Compression slice configuration upper LW */
} icp_qat_hw_compression_config_t;
/* Private defines */

View File

@ -0,0 +1,292 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
/**
*****************************************************************************
* @file icp_qat_hw_2x_comp.h
 * @defgroup icp_qat_hw_2x_comp
 * ICP QAT HW accessor definitions for the 2.x Compression Slice
* @ingroup icp_qat_hw_2x_comp
* @description
* This file documents definitions for the QAT HW COMP SLICE
*
*****************************************************************************/
#ifndef _ICP_QAT_HW_20_COMP_H_
#define _ICP_QAT_HW_20_COMP_H_
#include "icp_qat_hw_20_comp_defs.h" // For HW definitions
#include "icp_qat_fw.h" //For Set Field Macros.
#ifdef WIN32
#include <stdlib.h> // built in support for _byteswap_ulong
#define BYTE_SWAP_32 _byteswap_ulong
#else
#define BYTE_SWAP_32 __builtin_bswap32
#endif
/**
*****************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Definition of the hw config csr. This representation has to be further
* processed by the corresponding config build function.
*
*****************************************************************************/
/* Unpacked representation of the lower 32-bit compression configuration
 * CSR.  Fields are merged into a single longword by
 * ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(). */
typedef struct icp_qat_hw_comp_20_config_csr_lower_s {
	// Fields programmable directly by the SW.
	icp_qat_hw_comp_20_extended_delay_match_mode_t edmm;
	// Extended delay match mode enable
	icp_qat_hw_comp_20_hw_comp_format_t algo;
	// Compression format (iLZ77 / static DEFLATE)
	icp_qat_hw_comp_20_search_depth_t sd;
	// Search depth (compression level)
	icp_qat_hw_comp_20_hbs_control_t hbs;
	// History buffer size
	// Fields programmable directly by the FW.
	// Block Drop enable. (Set by FW)
	icp_qat_hw_comp_20_abd_t abd;
	// Adaptive block drop
	icp_qat_hw_comp_20_lllbd_ctrl_t lllbd;
	// Literal+length limit block drop
	// Advanced HW control (Set to default vals)
	icp_qat_hw_comp_20_skip_hash_collision_t hash_col;
	// Skip hash collision handling
	icp_qat_hw_comp_20_skip_hash_update_t hash_update;
	// Skip hash table update
	icp_qat_hw_comp_20_byte_skip_t skip_ctrl;
	// 3-byte match token/literal selection
} icp_qat_hw_comp_20_config_csr_lower_t;
/**
*****************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Build the longword as expected by the HW
*
*****************************************************************************/
static inline uint32_t
ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(icp_qat_hw_comp_20_config_csr_lower_t csr)
{
	/* Pack the unpacked lower-CSR fields into one longword, then
	 * byte-swap it into the layout the HW expects.  All fields occupy
	 * disjoint bit ranges (see icp_qat_hw_20_comp_defs.h). */
	uint32_t val32 = 0;

	/* Fields programmable by the driver/application. */
	QAT_FIELD_SET(val32,
		      csr.algo,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK);
	QAT_FIELD_SET(val32,
		      csr.sd,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK);
	QAT_FIELD_SET(
	    val32,
	    csr.edmm,
	    ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS,
	    ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK);
	QAT_FIELD_SET(val32,
		      csr.hbs,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
	/* Fields normally set by the FW. */
	QAT_FIELD_SET(val32,
		      csr.lllbd,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
	/* Advanced HW control fields (left at their default values). */
	QAT_FIELD_SET(val32,
		      csr.hash_col,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK);
	QAT_FIELD_SET(val32,
		      csr.hash_update,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK);
	QAT_FIELD_SET(val32,
		      csr.skip_ctrl,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK);
	QAT_FIELD_SET(val32,
		      csr.abd,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
	/*
	 * NOTE(review): the original applied csr.lllbd a second time here.
	 * Assuming the usual mask-and-or QAT_FIELD_SET definition
	 * (icp_qat_fw.h), re-applying the same field with identical
	 * arguments is idempotent, so the redundant call was removed; the
	 * resulting value is unchanged.
	 */
	/* HW consumes the CSR byte-swapped relative to host order. */
	return BYTE_SWAP_32(val32);
}
/**
*****************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Definition of the hw config csr. This representation has to be further
* processed by the corresponding config build function.
*
*****************************************************************************/
/* Unpacked representation of the upper 32-bit compression configuration
 * CSR.  Fields are merged into a single longword by
 * ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(). */
typedef struct icp_qat_hw_comp_20_config_csr_upper_s {
	icp_qat_hw_comp_20_scb_control_t scb_ctrl;
	// String-compare block (SCB) enable/disable
	icp_qat_hw_comp_20_rmb_control_t rmb_ctrl;
	// Reset bit-mask control
	icp_qat_hw_comp_20_som_control_t som_ctrl;
	// Slice operation mode
	icp_qat_hw_comp_20_skip_hash_rd_control_t skip_hash_ctrl;
	// Skip hash-read control
	icp_qat_hw_comp_20_scb_unload_control_t scb_unload_ctrl;
	// SCB unload behavior
	icp_qat_hw_comp_20_disable_token_fusion_control_t
	    disable_token_fusion_ctrl;
	// Token fusion disable
	icp_qat_hw_comp_20_scb_mode_reset_mask_t scb_mode_reset;
	// SCB mode reset mask
	uint16_t lazy;
	// Lazy match parameter (9-bit field in the CSR)
	uint16_t nice;
	// Nice match parameter (9-bit field in the CSR)
} icp_qat_hw_comp_20_config_csr_upper_t;
/**
*****************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Build the longword as expected by the HW
*
*****************************************************************************/
static inline uint32_t
ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(icp_qat_hw_comp_20_config_csr_upper_t csr)
{
	/* Pack the unpacked upper-CSR fields into one longword and
	 * byte-swap it into the layout the HW expects.  Each field occupies
	 * a disjoint bit range (see icp_qat_hw_20_comp_defs.h). */
	uint32_t val32 = 0;
	QAT_FIELD_SET(val32,
		      csr.scb_ctrl,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK);
	QAT_FIELD_SET(val32,
		      csr.rmb_ctrl,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK);
	QAT_FIELD_SET(val32,
		      csr.som_ctrl,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK);
	QAT_FIELD_SET(val32,
		      csr.skip_hash_ctrl,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK);
	QAT_FIELD_SET(val32,
		      csr.scb_unload_ctrl,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK);
	QAT_FIELD_SET(
	    val32,
	    csr.disable_token_fusion_ctrl,
	    ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS,
	    ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK);
	QAT_FIELD_SET(val32,
		      csr.scb_mode_reset,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
	/* lazy and nice are 9-bit parameters (masks 0x1ff) at bit
	 * positions 9 and 0 respectively. */
	QAT_FIELD_SET(val32,
		      csr.lazy,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK);
	QAT_FIELD_SET(val32,
		      csr.nice,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
		      ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
	/* HW consumes the CSR byte-swapped relative to host order. */
	return BYTE_SWAP_32(val32);
}
/**
*****************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Definition of the hw config csr. This representation has to be further
* processed by the corresponding config build function.
*
*****************************************************************************/
/* Unpacked representation of the lower 32-bit decompression configuration
 * CSR; packed by ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(). */
typedef struct icp_qat_hw_decomp_20_config_csr_lower_s {
	/* Fields programmable directly by the SW. */
	icp_qat_hw_decomp_20_hbs_control_t hbs;
	/* History buffer size */
	/* Advanced HW control (Set to default vals) */
	icp_qat_hw_decomp_20_hw_comp_format_t algo;
	/* Decompression format (static DEFLATE) */
} icp_qat_hw_decomp_20_config_csr_lower_t;
/**
*****************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Build the longword as expected by the HW
*
*****************************************************************************/
static inline uint32_t
ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(
    icp_qat_hw_decomp_20_config_csr_lower_t csr)
{
	/* Pack the two lower-CSR decompression fields into one longword
	 * and byte-swap it into the layout the HW expects. */
	uint32_t reg = 0;

	/* The decompression format (bits 5..7) and the history-buffer
	 * size (bits 14..16) occupy disjoint bit ranges, so the merge
	 * order is irrelevant. */
	QAT_FIELD_SET(reg,
		      csr.algo,
		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS,
		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK);
	QAT_FIELD_SET(reg,
		      csr.hbs,
		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK);

	return BYTE_SWAP_32(reg);
}
/**
*****************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Definition of the hw config csr. This representation has to be further
* processed by the corresponding config build function.
*
*****************************************************************************/
/* Unpacked representation of the upper 32-bit decompression configuration
 * CSR; packed by ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(). */
typedef struct icp_qat_hw_decomp_20_config_csr_upper_s {
	/* Advanced HW control (Set to default vals) */
	icp_qat_hw_decomp_20_speculative_decoder_control_t sdc;
	/* Speculative decoder enable/disable */
	icp_qat_hw_decomp_20_mini_cam_control_t mcc;
	/* Mini CAM enable/disable */
} icp_qat_hw_decomp_20_config_csr_upper_t;
/**
*****************************************************************************
* @ingroup icp_qat_fw_comn
*
* @description
* Build the longword as expected by the HW
*
*****************************************************************************/
static inline uint32_t
ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(
    icp_qat_hw_decomp_20_config_csr_upper_t csr)
{
	/* Pack the two upper-CSR decompression fields into one longword
	 * and byte-swap it into the layout the HW expects. */
	uint32_t reg = 0;

	/* Mini-CAM control (bit 30) and speculative-decoder control
	 * (bit 31) are disjoint single-bit fields, so the merge order is
	 * irrelevant. */
	QAT_FIELD_SET(reg,
		      csr.mcc,
		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
		      ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
	QAT_FIELD_SET(
	    reg,
	    csr.sdc,
	    ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS,
	    ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK);

	return BYTE_SWAP_32(reg);
}
#endif /* _ICP_QAT_HW_20_COMP_H_ */

View File

@ -0,0 +1,443 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
/*
****************************************************************************
* @file icp_qat_hw_20_comp_defs.h, (autogenerated at 04-19-18 16:06)
* @defgroup icp_qat_hw_comp_20
* @ingroup icp_qat_hw_comp_20
* @description
* This file represents the HW configuration CSR definitions
****************************************************************************
*/
#ifndef _ICP_QAT_HW_20_COMP_DEFS_H
#define _ICP_QAT_HW_20_COMP_DEFS_H
/*****************************************************************************/
/* SCB Disabled - Set by FW, located in upper 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS 31
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible SCB_CONTROL field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_SCB_CONTROL_ENABLE = 0x0,
/* Normal Mode using SCB (Default) */
ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE = 0x1,
/* Legacy CPM1.x Mode with SCB disabled. */
} icp_qat_hw_comp_20_scb_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE
/*****************************************************************************/
/* Reset Bit Mask Disabled - Set by FW , located in upper 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS 30
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible RMB_CONTROL field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL = 0x0,
/* Reset all data structures with a set_config command. (Set by FW) */
ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_FC_ONLY = 0x1,
/* Reset only the Frequency Counters (LFCT) with a set_config command.
*/
} icp_qat_hw_comp_20_rmb_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL
/*****************************************************************************/
/* Slice Operation Mode (SOM) - Set By FW, located in upper 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS 28
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK 0x3
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible SOM_CONTROL field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE = 0x0,
/* Normal mode. */
ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE = 0x1,
/* Replay mode */
ICP_QAT_HW_COMP_20_SOM_CONTROL_INPUT_CRC = 0x2,
/* Input CRC Mode */
ICP_QAT_HW_COMP_20_SOM_CONTROL_RESERVED_MODE = 0x3,
/* Reserved. */
} icp_qat_hw_comp_20_som_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE
/*****************************************************************************/
/* Skip Hash Read (Set By FW) , located in upper 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible SKIP_HASH_RD_CONTROL field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
/* When set to 0, hash reads are not skipped. */
ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
/* Hash reads are skipped. */
} icp_qat_hw_comp_20_skip_hash_rd_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP
/*****************************************************************************/
/* SCB Unload Disable, located in upper 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS 26
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible SCB_UNLOAD_CONTROL field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD = 0x0,
/* Unloads the LFCT and flushes the State Registers. */
ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_NO_UNLOAD = 0x1,
/* Does not unload the LFCT, but flushes the State Registers. */
} icp_qat_hw_comp_20_scb_unload_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD
/*****************************************************************************/
/* Disable token fusion, located in upper 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS 21
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible DISABLE_TOKEN_FUSION_CONTROL field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE = 0x0,
/* Enables token fusion. */
ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_DISABLE = 0x1,
/* Disables token fusion. */
} icp_qat_hw_comp_20_disable_token_fusion_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE
/*****************************************************************************/
/* SCB Mode Reset Mask (Set By FW) , located in upper 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible SCB_MODE_RESET_MASK field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS = 0x0,
/* iLZ77 mode: Reset LFCT, OBC */
ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS_AND_HISTORY = 0x1,
/* iLZ77 mode: Reset LFCT, OBC, HB, HT */
} icp_qat_hw_comp_20_scb_mode_reset_mask_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS
/*****************************************************************************/
/* Lazy - For iLZ77 and Static DEFLATE, Lazy = 102h , located in upper
* 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS 9
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK 0x1ff
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL 258
/*****************************************************************************/
/* Nice - For iLZ77 and Static DEFLATE, Nice = 103h , located in upper
* 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS 0
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK 0x1ff
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL 259
/*****************************************************************************/
/* History Buffer Size (Set By the Driver/ Application), located in lower 32bit
*/
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible HBS_CONTROL field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
/* 000b - 32KB */
ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_64KB = 0x1,
/* 001b - 64KB */
} icp_qat_hw_comp_20_hbs_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB
/*****************************************************************************/
/* Adaptive Block Drop (Set By FW if Dynamic), located in lower 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS 13
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible ABD field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED = 0x0,
/* 0b - Feature enabled. */
ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED = 0x1,
/* 1b - Feature disabled. */
} icp_qat_hw_comp_20_abd_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED
/*****************************************************************************/
/* Literal+Length Limit Block Drop Block Drop, (Set By FW if Dynamic) , located
* in lower 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible LLLBD_CTRL field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
/* 0b - Feature enabled. */
ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
/* 1b - Feature disabled. */
} icp_qat_hw_comp_20_lllbd_ctrl_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED
/*****************************************************************************/
/* Search Depth (SD) (Set By Driver/Application), located in lower 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK 0xf
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible SEARCH_DEPTH field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 = 0x1,
/* 0001b - Level 1 (search depth = 2^1 = 2) */
ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6 = 0x3,
/* 0011b - Level 6 (search depth = 2^3 = 8) */
ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9 = 0x4,
/* 0100b - Level 9 (search depth = 2^4 = 16) */
} icp_qat_hw_comp_20_search_depth_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1
/*****************************************************************************/
/* Compression Format (Set By Driver/Application. Also See CMD ID), located in
* lower 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS 5
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK 0x7
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible HW_COMP_FORMAT field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77 = 0x0,
/* 000 - iLZ77. (Must set Min_Match = 3 bytes and HB size = 32KB.) */
ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE = 0x1,
/* 001 - Static DEFLATE. (Must set Min_Match = 3 bytes and HB size =
32KB.) */
} icp_qat_hw_comp_20_hw_comp_format_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE
/*****************************************************************************/
/* Skip Hash Collision (Set By FW to default value), located in lower 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible SKIP_HASH_COLLISION field values
*****************************************************************************/
typedef enum {
	ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW = 0x0,
	/* When set to 0, hash collisions are allowed. */
	ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
	/* When set to 1, hash collisions are not allowed (skipped). */
} icp_qat_hw_comp_20_skip_hash_collision_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW
/*****************************************************************************/
/* Skip Hash Update (Set By FW to default value) , located in lower 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible SKIP_HASH_UPDATE field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW = 0x0,
/* 0 - hash updates are not skipped. */
ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
/* 1 - hash updates are skipped. */
} icp_qat_hw_comp_20_skip_hash_update_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW
/*****************************************************************************/
/* 3-Byte Match Skip (Set By FW to default value), located in lower 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS 1
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible BYTE_SKIP field values
*****************************************************************************/
typedef enum {
	ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN = 0x0,
	/* 0 - Use 3-byte token */
	ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL = 0x1,
	/* 1 - Use 3-byte literal */
} icp_qat_hw_comp_20_byte_skip_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN
/*****************************************************************************/
/* Extended Delayed Match Mode enabled (Set By the Driver), located in lower
* 32bit */
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS 0
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible EXTENDED_DELAY_MATCH_MODE field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED = 0x0,
/* 0 - EXTENDED_DELAY_MATCH_MODE disabled */
ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED = 0x1,
/* 1 - EXTENDED_DELAY_MATCH_MODE enabled */
} icp_qat_hw_comp_20_extended_delay_match_mode_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED
/*****************************************************************************/
/* Speculative Decoder Disable (Set By the Driver/ Application), located in
* upper 32bit */
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS 31
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible SPECULATIVE_DECODER_CONTROL field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE = 0x0,
/* 0b - Enabled */
ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_DISABLE = 0x1,
/* 1b - Disabled */
} icp_qat_hw_decomp_20_speculative_decoder_control_t;
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE
/*****************************************************************************/
/* Mini CAM Disable (Set By the Driver/ Application), located in upper 32bit */
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS 30
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK 0x1
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible MINI_CAM_CONTROL field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE = 0x0,
/* 0b - Enabled */
ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_DISABLE = 0x1,
/* 1b - Disabled */
} icp_qat_hw_decomp_20_mini_cam_control_t;
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE
/*****************************************************************************/
/* History Buffer Size (Set By the Driver/ Application), located in lower 32bit
*/
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible HBS_CONTROL field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
/* 000b - 32KB */
} icp_qat_hw_decomp_20_hbs_control_t;
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB
/*****************************************************************************/
/* Decompression Format (Set By Driver/Application. Also See CMD ID), located in
* lower 32bit */
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS 5
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK 0x7
/*
****************************************************************************
* @ingroup icp_qat_hw_defs
* @description
* Enumeration of possible HW_DECOMP_FORMAT field values
*****************************************************************************/
typedef enum {
ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE = 0x1,
/* 001 - Static DEFLATE. (Must set Min_Match = 3 bytes and HB size =
32KB.) */
} icp_qat_hw_decomp_20_hw_comp_format_t;
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_DEFAULT_VAL \
ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE
#endif //_ICP_QAT_HW_20_COMP_DEFS_H

View File

@ -868,4 +868,41 @@ CpaStatus icp_sal_dp_SymGetInflightRequests(CpaInstanceHandle instanceHandle,
*
*****************************************************************************/
CpaStatus icp_sal_AsymPerformOpNow(CpaInstanceHandle instanceHandle);
/**
*****************************************************************************
* @ingroup icp_sal_setForceAEADMACVerify
* Sets forceAEADMacVerify for particular instance to force HW MAC
* validation.
*
* @description
 *      By default HW MAC verification is set to CPA_TRUE - this utility
 *      function allows changing the default behavior.
*
* @assumptions
* None
* @sideEffects
* None
* @blocking
* None
* @reentrant
* No
* @threadSafe
* No
*
* @param[in] instanceHandle Crypto API instance handle.
* @param[in] forceAEADMacVerify new value
*
* @retval CPA_STATUS_SUCCESS Function executed successfully.
* @retval CPA_STATUS_FAIL Function failed.
* @pre
* None
* @post
* None
* @see
* None
*
*****************************************************************************/
CpaStatus icp_sal_setForceAEADMACVerify(CpaInstanceHandle instanceHandle,
CpaBoolean forceAEADMacVerify);
#endif

View File

@ -26,8 +26,8 @@
/**< Max length of hardware version string */
/* Part name and number of the accelerator device */
#define SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER 3
#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 11
#define SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER 3
#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 12
#define SAL_INFO2_DRIVER_SW_VERSION_PATCH_NUMBER 0
/**

View File

@ -97,7 +97,8 @@ typedef enum device_type_e {
DEVICE_200XX,
DEVICE_200XXVF,
DEVICE_C4XXX,
DEVICE_C4XXXVF
DEVICE_C4XXXVF,
DEVICE_GEN4
} device_type_t;
/*

View File

@ -372,13 +372,20 @@ void
icp_adf_updateQueueTail(icp_comms_trans_handle trans_handle)
{
struct adf_etr_ring_data *ring = trans_handle;
struct adf_hw_csr_ops *csr_ops;
ICP_CHECK_FOR_NULL_PARAM_VOID(ring);
ICP_CHECK_FOR_NULL_PARAM_VOID(ring->bank);
ICP_CHECK_FOR_NULL_PARAM_VOID(ring->bank->accel_dev);
WRITE_CSR_RING_TAIL(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring->tail);
csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
ICP_CHECK_FOR_NULL_PARAM_VOID(csr_ops);
csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring->tail);
ring->csr_tail_offset = ring->tail;
}

View File

@ -848,4 +848,25 @@ CpaStatus qatUtilsAESEncrypt(uint8_t *key,
uint32_t keyLenInBytes,
uint8_t *in,
uint8_t *out);
/**
* @ingroup QatUtils
*
* @brief Converts AES forward key to reverse key
*
* @param key - pointer to symmetric key.
* keyLenInBytes - key length
* out - pointer to output buffer for reversed key
* The in and out buffers need to be at least AES block size long
* as defined in rfc3686 (16 bytes)
*
* @li Reentrant: yes
* @li IRQ safe: yes
*
* @return - CPA_STATUS_SUCCESS/CPA_STATUS_FAIL
*
*/
CpaStatus qatUtilsAESKeyExpansionForward(uint8_t *key,
uint32_t keyLenInBytes,
uint32_t *out);
#endif

View File

@ -3,6 +3,10 @@
/* $FreeBSD$ */
#include "qat_utils.h"
#define AES_128_KEY_LEN_BYTES 16
#define AES_192_KEY_LEN_BYTES 24
#define AES_256_KEY_LEN_BYTES 32
CpaStatus
qatUtilsHashMD5(uint8_t *in, uint8_t *out)
{
@ -150,3 +154,38 @@ qatUtilsAESEncrypt(uint8_t *key,
return CPA_STATUS_SUCCESS;
}
CpaStatus
qatUtilsAESKeyExpansionForward(uint8_t *key,
uint32_t keyLenInBytes,
uint32_t *out)
{
rijndael_ctx ctx;
uint32_t i = 0, j = 0;
uint32_t lw_per_round = 4;
int32_t lw_left_to_copy = keyLenInBytes / lw_per_round;
uint32_t *key_pointer = NULL;
/* Error check for wrong input key len */
if (AES_128_KEY_LEN_BYTES != keyLenInBytes &&
AES_192_KEY_LEN_BYTES != keyLenInBytes &&
AES_256_KEY_LEN_BYTES != keyLenInBytes) {
return CPA_STATUS_INVALID_PARAM;
}
rijndael_set_key(&ctx, key, keyLenInBytes << BYTE_TO_BITS_SHIFT);
/* Pointer to the last round of expanded key. */
key_pointer = &ctx.ek[lw_per_round * ctx.Nr];
while (lw_left_to_copy > 0) {
for (i = 0; i < MIN(lw_left_to_copy, lw_per_round); i++, j++) {
out[j] = __builtin_bswap32(key_pointer[i]);
}
lw_left_to_copy -= lw_per_round;
key_pointer -= lw_left_to_copy;
}
return CPA_STATUS_SUCCESS;
}

View File

@ -99,11 +99,24 @@ adf_ae_fw_load(struct adf_accel_dev *accel_dev)
*/
if (hw_device->get_obj_name && hw_device->get_obj_cfg_ae_mask) {
unsigned long service_mask = hw_device->service_mask;
enum adf_accel_unit_services service_type =
ADF_ACCEL_SERVICE_NULL;
if (hw_device->service_mask &&
!(test_bit(i, &service_mask)))
if (hw_device->get_service_type)
service_type =
hw_device->get_service_type(accel_dev, i);
else
service_type = BIT(i);
if (service_mask && !(service_mask & service_type))
continue;
obj_name = hw_device->get_obj_name(accel_dev, BIT(i));
obj_name =
hw_device->get_obj_name(accel_dev, service_type);
cfg_ae_mask =
hw_device->get_obj_cfg_ae_mask(accel_dev,
service_type);
if (!obj_name) {
device_printf(
GET_DEV(accel_dev),
@ -111,10 +124,8 @@ adf_ae_fw_load(struct adf_accel_dev *accel_dev)
BIT(i));
goto out_err;
}
if (!hw_device->get_obj_cfg_ae_mask(accel_dev, BIT(i)))
if (!cfg_ae_mask)
continue;
cfg_ae_mask =
hw_device->get_obj_cfg_ae_mask(accel_dev, BIT(i));
if (qat_uclo_set_cfg_ae_mask(loader_data->fw_loader,
cfg_ae_mask)) {
device_printf(GET_DEV(accel_dev),
@ -172,17 +183,12 @@ adf_ae_start(struct adf_accel_dev *accel_dev)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
uint32_t ae_ctr;
if (!hw_data->fw_name)
return 0;
for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
if (hw_data->ae_mask & (1 << ae)) {
qat_hal_start(loader_data->fw_loader, ae, 0xFF);
ae_ctr++;
}
}
ae_ctr = qat_hal_start(loader_data->fw_loader);
device_printf(GET_DEV(accel_dev),
"qat_dev%d started %d acceleration engines\n",
accel_dev->accel_id,

View File

@ -157,16 +157,23 @@ adf_cfg_init_and_insert_inst(struct adf_cfg_bundle *bundle,
{
struct adf_cfg_instance *cfg_instance = NULL;
int ring_pair_index = 0;
int ring_index = 0;
int i = 0;
u8 serv_type;
int num_req_rings = bundle->num_of_rings / 2;
int num_rings_per_srv = num_req_rings / ADF_CFG_NUM_SERVICES;
int num_rings_per_srv = 0;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u16 ring_to_svc_map = GET_HW_DATA(accel_dev)->ring_to_svc_map;
/* init the bundle with instance information */
for (ring_pair_index = 0; ring_pair_index < ADF_CFG_NUM_SERVICES;
for (ring_pair_index = 0; ring_pair_index < bundle->max_cfg_svc_num;
ring_pair_index++) {
serv_type = GET_SRV_TYPE(ring_to_svc_map, ring_pair_index);
adf_get_ring_svc_map_data(hw_data,
bundle->number,
ring_pair_index,
&serv_type,
&ring_index,
&num_rings_per_srv);
for (i = 0; i < num_rings_per_srv; i++) {
cfg_instance = malloc(sizeof(*cfg_instance),
M_QAT,
@ -219,8 +226,9 @@ adf_cfg_bundle_init(struct adf_cfg_bundle *bundle,
{
int i = 0;
bundle->number = bank_num;
/* init ring to service mapping for this bundle */
adf_cfg_init_ring2serv_mapping(accel_dev, bundle);
adf_cfg_init_ring2serv_mapping(accel_dev, bundle, device);
/* init the bundle with instance information */
adf_cfg_init_and_insert_inst(bundle, device, bank_num, accel_dev);
@ -229,7 +237,6 @@ adf_cfg_bundle_init(struct adf_cfg_bundle *bundle,
bundle->type = FREE;
bundle->polling_mode = -1;
bundle->section_index = 0;
bundle->number = bank_num;
bundle->sections = malloc(sizeof(char *) * bundle->max_section,
M_QAT,
@ -262,18 +269,25 @@ adf_cfg_bundle_clear(struct adf_cfg_bundle *bundle,
}
static void
adf_cfg_assign_serv_to_rings(struct adf_cfg_bundle *bundle, u16 ring_to_svc_map)
adf_cfg_assign_serv_to_rings(struct adf_hw_device_data *hw_data,
struct adf_cfg_bundle *bundle,
struct adf_cfg_device *device)
{
int ring_pair_index = 0;
int ring_index = 0;
u8 serv_type = 0;
int num_req_rings = bundle->num_of_rings / 2;
int num_rings_per_srv = num_req_rings / ADF_CFG_NUM_SERVICES;
int num_rings_per_srv = 0;
for (ring_pair_index = 0; ring_pair_index < ADF_CFG_NUM_SERVICES;
for (ring_pair_index = 0; ring_pair_index < bundle->max_cfg_svc_num;
ring_pair_index++) {
serv_type = GET_SRV_TYPE(ring_to_svc_map, ring_pair_index);
ring_index = num_rings_per_srv * ring_pair_index;
adf_get_ring_svc_map_data(hw_data,
bundle->number,
ring_pair_index,
&serv_type,
&ring_index,
&num_rings_per_srv);
switch (serv_type) {
case CRYPTO:
ASSIGN_SERV_TO_RINGS(bundle,
@ -283,7 +297,7 @@ adf_cfg_assign_serv_to_rings(struct adf_cfg_bundle *bundle, u16 ring_to_svc_map)
num_rings_per_srv);
ring_pair_index++;
ring_index = num_rings_per_srv * ring_pair_index;
if (ring_pair_index == ADF_CFG_NUM_SERVICES)
if (ring_pair_index == bundle->max_cfg_svc_num)
break;
ASSIGN_SERV_TO_RINGS(bundle,
ring_index,
@ -324,7 +338,7 @@ adf_cfg_assign_serv_to_rings(struct adf_cfg_bundle *bundle, u16 ring_to_svc_map)
/* unknown service type */
pr_err("Unknown service type %d, mask 0x%x.\n",
serv_type,
ring_to_svc_map);
hw_data->ring_to_svc_map);
}
}
@ -333,13 +347,18 @@ adf_cfg_assign_serv_to_rings(struct adf_cfg_bundle *bundle, u16 ring_to_svc_map)
void
adf_cfg_init_ring2serv_mapping(struct adf_accel_dev *accel_dev,
struct adf_cfg_bundle *bundle)
struct adf_cfg_bundle *bundle,
struct adf_cfg_device *device)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_cfg_ring *ring_in_bundle;
int ring_num = 0;
bundle->num_of_rings = hw_data->num_rings_per_bank;
if (hw_data->num_rings_per_bank >= (2 * ADF_CFG_NUM_SERVICES))
bundle->max_cfg_svc_num = ADF_CFG_NUM_SERVICES;
else
bundle->max_cfg_svc_num = 1;
bundle->rings =
malloc(bundle->num_of_rings * sizeof(struct adf_cfg_ring *),
@ -356,7 +375,7 @@ adf_cfg_init_ring2serv_mapping(struct adf_accel_dev *accel_dev,
bundle->rings[ring_num] = ring_in_bundle;
}
adf_cfg_assign_serv_to_rings(bundle, hw_data->ring_to_svc_map);
adf_cfg_assign_serv_to_rings(hw_data, bundle, device);
return;
}

View File

@ -49,7 +49,29 @@ void adf_cfg_bundle_clear(struct adf_cfg_bundle *bundle,
struct adf_accel_dev *accel_dev);
void adf_cfg_init_ring2serv_mapping(struct adf_accel_dev *accel_dev,
struct adf_cfg_bundle *bundle);
struct adf_cfg_bundle *bundle,
struct adf_cfg_device *device);
int adf_cfg_rel_ring2serv_mapping(struct adf_cfg_bundle *bundle);
static inline void
adf_get_ring_svc_map_data(struct adf_hw_device_data *hw_data,
int bundle_num,
int ring_pair_index,
u8 *serv_type,
int *ring_index,
int *num_rings_per_srv)
{
if (hw_data->get_ring_svc_map_data)
return hw_data->get_ring_svc_map_data(ring_pair_index,
hw_data->ring_to_svc_map,
serv_type,
ring_index,
num_rings_per_srv,
bundle_num);
*serv_type = GET_SRV_TYPE(hw_data->ring_to_svc_map, ring_pair_index);
*num_rings_per_srv =
hw_data->num_rings_per_bank / (2 * ADF_CFG_NUM_SERVICES);
*ring_index = (*num_rings_per_srv) * ring_pair_index;
}
#endif

View File

@ -249,7 +249,6 @@ adf_cfg_get_ring_pairs(struct adf_cfg_device *device,
int i = 0;
int ret = EFAULT;
struct adf_cfg_instance *free_inst = NULL;
struct adf_cfg_bundle *first_free_bundle = NULL;
enum adf_cfg_bundle_type free_bundle_type;
int first_user_bundle = 0;
@ -304,29 +303,25 @@ adf_cfg_get_ring_pairs(struct adf_cfg_device *device,
return ret;
} else if (!first_free_bundle &&
adf_cfg_is_free(device->bundles[i])) {
first_free_bundle = device->bundles[i];
}
}
for (i = 0; i < device->bundle_num; i++) {
if (adf_cfg_is_free(device->bundles[i])) {
free_inst = adf_cfg_get_free_instance(
device,
device->bundles[i],
inst,
process_name);
if (!free_inst)
continue;
if (first_free_bundle) {
free_inst = adf_cfg_get_free_instance(device,
first_free_bundle,
inst,
process_name);
if (!free_inst)
ret = adf_cfg_get_ring_pairs_from_bundle(
device->bundles[i],
inst,
process_name,
free_inst);
return ret;
ret = adf_cfg_get_ring_pairs_from_bundle(
first_free_bundle, inst, process_name, free_inst);
if (free_bundle_type == KERNEL) {
device->max_kernel_bundle_nr =
first_free_bundle->number;
}
return ret;
}
}
pr_err("Don't have enough rings for instance %s in process %s\n",

View File

@ -286,10 +286,16 @@ adf_cfg_add_asym_inst_info(struct adf_accel_dev *accel_dev,
key = malloc(ADF_CFG_MAX_KEY_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO);
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY_BANK_NUM_FORMAT,
inst_index);
if (adf_cy_inst_cross_banks(accel_dev))
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY_ASYM_BANK_NUM_FORMAT,
inst_index);
else
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY_BANK_NUM_FORMAT,
inst_index);
bank_number = asym_inst->bundle;
adf_cfg_add_key_value_param(
accel_dev, derived_sec, key, (void *)&bank_number, ADF_DEC);
@ -337,10 +343,17 @@ adf_cfg_add_sym_inst_info(struct adf_accel_dev *accel_dev,
key = malloc(ADF_CFG_MAX_KEY_LEN_IN_BYTES, M_QAT, M_WAITOK | M_ZERO);
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY_BANK_NUM_FORMAT,
inst_index);
if (adf_cy_inst_cross_banks(accel_dev))
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY_SYM_BANK_NUM_FORMAT,
inst_index);
else
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY_BANK_NUM_FORMAT,
inst_index);
bank_number = sym_inst->bundle;
adf_cfg_add_key_value_param(
accel_dev, derived_sec, key, (void *)&bank_number, ADF_DEC);

View File

@ -27,10 +27,7 @@
#define ADF_CONST_TABLE_VERSION (1)
/* Admin Messages Registers */
#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
#define ADF_MAILBOX_STRIDE 0x1000
#define ADF_ADMINMSG_LEN 32
#define FREEBSD_ALLIGNMENT_SIZE 64
#define ADF_INIT_CONFIG_SIZE 1024
@ -146,7 +143,7 @@ adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
hw_data->get_admin_info(&admin_csrs_info);
int offset = ae * ADF_ADMINMSG_LEN * 2;
int mb_offset =
ae * ADF_DH895XCC_MAILBOX_STRIDE + admin_csrs_info.mailbox_offset;
ae * ADF_MAILBOX_STRIDE + admin_csrs_info.mailbox_offset;
int times, received;
struct icp_qat_fw_init_admin_req *request = in;
@ -294,7 +291,7 @@ adf_set_fw_constants(struct adf_accel_dev *accel_dev)
struct icp_qat_fw_init_admin_req req;
struct icp_qat_fw_init_admin_resp resp;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
u32 ae_mask = hw_device->ae_mask;
u32 ae_mask = hw_device->admin_ae_mask;
explicit_bzero(&req, sizeof(req));
req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;

View File

@ -20,6 +20,7 @@ static int adf_ring_show(SYSCTL_HANDLER_ARGS)
{
struct adf_etr_ring_data *ring = arg1;
struct adf_etr_bank_data *bank = ring->bank;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
struct resource *csr = ring->bank->csr_addr;
struct sbuf sb;
int error, word;
@ -29,13 +30,13 @@ static int adf_ring_show(SYSCTL_HANDLER_ARGS)
{
int head, tail, empty;
head = READ_CSR_RING_HEAD(csr,
bank->bank_number,
ring->ring_number);
tail = READ_CSR_RING_TAIL(csr,
bank->bank_number,
ring->ring_number);
empty = READ_CSR_E_STAT(csr, bank->bank_number);
head = csr_ops->read_csr_ring_head(csr,
bank->bank_number,
ring->ring_number);
tail = csr_ops->read_csr_ring_tail(csr,
bank->bank_number,
ring->ring_number);
empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
sbuf_cat(&sb, "\n------- Ring configuration -------\n");
sbuf_printf(&sb,
@ -119,6 +120,7 @@ static int adf_bank_show(SYSCTL_HANDLER_ARGS)
{
struct adf_etr_bank_data *bank;
struct adf_accel_dev *accel_dev = NULL;
struct adf_hw_csr_ops *csr_ops = NULL;
struct adf_hw_device_data *hw_data = NULL;
u8 num_rings_per_bank = 0;
struct sbuf sb;
@ -127,6 +129,7 @@ static int adf_bank_show(SYSCTL_HANDLER_ARGS)
sbuf_new_for_sysctl(&sb, NULL, 128, req);
bank = arg1;
accel_dev = bank->accel_dev;
csr_ops = GET_CSR_OPS(bank->accel_dev);
hw_data = accel_dev->hw_device;
num_rings_per_bank = hw_data->num_rings_per_bank;
sbuf_printf(&sb,
@ -140,13 +143,13 @@ static int adf_bank_show(SYSCTL_HANDLER_ARGS)
if (!(bank->ring_mask & 1 << ring_id))
continue;
head = READ_CSR_RING_HEAD(csr,
bank->bank_number,
ring->ring_number);
tail = READ_CSR_RING_TAIL(csr,
bank->bank_number,
ring->ring_number);
empty = READ_CSR_E_STAT(csr, bank->bank_number);
head = csr_ops->read_csr_ring_head(csr,
bank->bank_number,
ring->ring_number);
tail = csr_ops->read_csr_ring_tail(csr,
bank->bank_number,
ring->ring_number);
empty = csr_ops->read_csr_e_stat(csr, bank->bank_number);
sbuf_printf(&sb,
"ring num %02d, head %04x, tail %04x, empty: %d\n",

View File

@ -0,0 +1,132 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2021 Intel Corporation */
/* $FreeBSD$ */
#include "adf_gen2_hw_data.h"
#include "icp_qat_hw.h"
/* Delegate to the gen2 BUILD_RING_BASE_ADDR macro. */
static u64
build_csr_ring_base_addr(bus_addr_t addr, u32 size)
{
    u64 base;

    base = BUILD_RING_BASE_ADDR(addr, size);
    return base;
}
/* Read one ring's head CSR via the gen2 register macro. */
static u32
read_csr_ring_head(struct resource *csr, u32 bank, u32 ring)
{
    u32 head;

    head = READ_CSR_RING_HEAD(csr, bank, ring);
    return head;
}
/* Write one ring's head CSR via the gen2 register macro. */
static void
write_csr_ring_head(struct resource *csr, u32 bank, u32 ring, u32 value)
{
    WRITE_CSR_RING_HEAD(csr, bank, ring, value);
}
/* Read one ring's tail CSR via the gen2 register macro. */
static u32
read_csr_ring_tail(struct resource *csr, u32 bank, u32 ring)
{
    u32 tail;

    tail = READ_CSR_RING_TAIL(csr, bank, ring);
    return tail;
}
/* Write one ring's tail CSR via the gen2 register macro. */
static void
write_csr_ring_tail(struct resource *csr, u32 bank, u32 ring, u32 value)
{
    WRITE_CSR_RING_TAIL(csr, bank, ring, value);
}
/* Read a bank's empty-status CSR via the gen2 register macro. */
static u32
read_csr_e_stat(struct resource *csr, u32 bank)
{
    u32 stat;

    stat = READ_CSR_E_STAT(csr, bank);
    return stat;
}
/* Write one ring's config CSR via the gen2 register macro. */
static void
write_csr_ring_config(struct resource *csr, u32 bank, u32 ring, u32 value)
{
    WRITE_CSR_RING_CONFIG(csr, bank, ring, value);
}
/* Write one ring's base-address CSR via the gen2 register macro. */
static void
write_csr_ring_base(struct resource *csr, u32 bank, u32 ring, bus_addr_t addr)
{
    WRITE_CSR_RING_BASE(csr, bank, ring, addr);
}
/* Write a bank's interrupt-flag CSR via the gen2 register macro. */
static void
write_csr_int_flag(struct resource *csr, u32 bank, u32 value)
{
    WRITE_CSR_INT_FLAG(csr, bank, value);
}
/* Program a bank's interrupt source-select CSR (gen2 macro). */
static void
write_csr_int_srcsel(struct resource *csr, u32 bank)
{
    WRITE_CSR_INT_SRCSEL(csr, bank);
}
/* Write a bank's interrupt-coalescing enable CSR (gen2 macro). */
static void
write_csr_int_col_en(struct resource *csr, u32 bank, u32 value)
{
    WRITE_CSR_INT_COL_EN(csr, bank, value);
}
/* Write a bank's interrupt-coalescing control CSR (gen2 macro). */
static void
write_csr_int_col_ctl(struct resource *csr, u32 bank, u32 value)
{
    WRITE_CSR_INT_COL_CTL(csr, bank, value);
}
/* Write a bank's combined flag-and-coalesce CSR (gen2 macro). */
static void
write_csr_int_flag_and_col(struct resource *csr, u32 bank, u32 value)
{
    WRITE_CSR_INT_FLAG_AND_COL(csr, bank, value);
}
/* Read a bank's ring-arbiter enable CSR via the gen2 macro. */
static u32
read_csr_ring_srv_arb_en(struct resource *csr, u32 bank)
{
    u32 arb_en;

    arb_en = READ_CSR_RING_SRV_ARB_EN(csr, bank);
    return arb_en;
}
/* Write a bank's ring-arbiter enable CSR via the gen2 macro. */
static void
write_csr_ring_srv_arb_en(struct resource *csr, u32 bank, u32 value)
{
    WRITE_CSR_RING_SRV_ARB_EN(csr, bank, value);
}
/*
 * Populate the hardware CSR info for gen2 devices: the arbiter enable
 * mask and the table of ring-CSR accessor callbacks, each a thin wrapper
 * around this generation's register macros.
 */
void
adf_gen2_init_hw_csr_info(struct adf_hw_csr_info *csr_info)
{
    struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;

    /* Eight valid arb-enable bits on gen2, versus a single bit on gen4. */
    csr_info->arb_enable_mask = 0xFF;

    csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
    csr_ops->read_csr_ring_head = read_csr_ring_head;
    csr_ops->write_csr_ring_head = write_csr_ring_head;
    csr_ops->read_csr_ring_tail = read_csr_ring_tail;
    csr_ops->write_csr_ring_tail = write_csr_ring_tail;
    csr_ops->read_csr_e_stat = read_csr_e_stat;
    csr_ops->write_csr_ring_config = write_csr_ring_config;
    csr_ops->write_csr_ring_base = write_csr_ring_base;
    csr_ops->write_csr_int_flag = write_csr_int_flag;
    csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
    csr_ops->write_csr_int_col_en = write_csr_int_col_en;
    csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
    csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
    csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en;
    csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
}
EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_info);

View File

@ -0,0 +1,176 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2021 Intel Corporation */
/* $FreeBSD$ */
#include "adf_accel_devices.h"
#include "adf_gen4_hw_data.h"
/* Delegate to the gen4 BUILD_RING_BASE_ADDR macro. */
static u64
build_csr_ring_base_addr(bus_addr_t addr, u32 size)
{
    u64 base;

    base = BUILD_RING_BASE_ADDR(addr, size);
    return base;
}
/* Read one ring's head CSR via the gen4 register macro. */
static u32
read_csr_ring_head(struct resource *csr, u32 bank, u32 ring)
{
    u32 head;

    head = READ_CSR_RING_HEAD(csr, bank, ring);
    return head;
}
/* Write one ring's head CSR via the gen4 register macro. */
static void
write_csr_ring_head(struct resource *csr, u32 bank, u32 ring, u32 value)
{
    WRITE_CSR_RING_HEAD(csr, bank, ring, value);
}
/* Read one ring's tail CSR via the gen4 register macro. */
static u32
read_csr_ring_tail(struct resource *csr, u32 bank, u32 ring)
{
    u32 tail;

    tail = READ_CSR_RING_TAIL(csr, bank, ring);
    return tail;
}
/* Write one ring's tail CSR via the gen4 register macro. */
static void
write_csr_ring_tail(struct resource *csr, u32 bank, u32 ring, u32 value)
{
    WRITE_CSR_RING_TAIL(csr, bank, ring, value);
}
/* Read a bank's empty-status CSR via the gen4 register macro. */
static u32
read_csr_e_stat(struct resource *csr, u32 bank)
{
    u32 stat;

    stat = READ_CSR_E_STAT(csr, bank);
    return stat;
}
/* Write one ring's config CSR via the gen4 register macro. */
static void
write_csr_ring_config(struct resource *csr, u32 bank, u32 ring, u32 value)
{
    WRITE_CSR_RING_CONFIG(csr, bank, ring, value);
}
/* Write one ring's base-address CSR via the gen4 register macro. */
static void
write_csr_ring_base(struct resource *csr, u32 bank, u32 ring, bus_addr_t addr)
{
    WRITE_CSR_RING_BASE(csr, bank, ring, addr);
}
/* Write a bank's interrupt-flag CSR via the gen4 register macro. */
static void
write_csr_int_flag(struct resource *csr, u32 bank, u32 value)
{
    WRITE_CSR_INT_FLAG(csr, bank, value);
}
/* Program a bank's interrupt source-select CSR (gen4 macro). */
static void
write_csr_int_srcsel(struct resource *csr, u32 bank)
{
    WRITE_CSR_INT_SRCSEL(csr, bank);
}
/* Write a bank's interrupt-coalescing enable CSR (gen4 macro). */
static void
write_csr_int_col_en(struct resource *csr, u32 bank, u32 value)
{
    WRITE_CSR_INT_COL_EN(csr, bank, value);
}
/* Write a bank's interrupt-coalescing control CSR (gen4 macro). */
static void
write_csr_int_col_ctl(struct resource *csr, u32 bank, u32 value)
{
    WRITE_CSR_INT_COL_CTL(csr, bank, value);
}
/* Write a bank's combined flag-and-coalesce CSR (gen4 macro). */
static void
write_csr_int_flag_and_col(struct resource *csr, u32 bank, u32 value)
{
    WRITE_CSR_INT_FLAG_AND_COL(csr, bank, value);
}
/* Read a bank's ring-arbiter enable CSR via the gen4 macro. */
static u32
read_csr_ring_srv_arb_en(struct resource *csr, u32 bank)
{
    u32 arb_en;

    arb_en = READ_CSR_RING_SRV_ARB_EN(csr, bank);
    return arb_en;
}
/* Write a bank's ring-arbiter enable CSR via the gen4 macro. */
static void
write_csr_ring_srv_arb_en(struct resource *csr, u32 bank, u32 value)
{
    WRITE_CSR_RING_SRV_ARB_EN(csr, bank, value);
}
/*
 * Populate the hardware CSR info for gen4 devices: the arbiter enable
 * mask and the table of ring-CSR accessor callbacks, each a thin wrapper
 * around this generation's register macros.
 */
void
adf_gen4_init_hw_csr_info(struct adf_hw_csr_info *csr_info)
{
    struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;

    /* A single valid arb-enable bit on gen4, versus 0xFF on gen2. */
    csr_info->arb_enable_mask = 0x1;

    csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
    csr_ops->read_csr_ring_head = read_csr_ring_head;
    csr_ops->write_csr_ring_head = write_csr_ring_head;
    csr_ops->read_csr_ring_tail = read_csr_ring_tail;
    csr_ops->write_csr_ring_tail = write_csr_ring_tail;
    csr_ops->read_csr_e_stat = read_csr_e_stat;
    csr_ops->write_csr_ring_config = write_csr_ring_config;
    csr_ops->write_csr_ring_base = write_csr_ring_base;
    csr_ops->write_csr_int_flag = write_csr_int_flag;
    csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
    csr_ops->write_csr_int_col_en = write_csr_int_col_en;
    csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
    csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
    csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en;
    csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_info);
/* Split a 64-bit watchdog value into its two 32-bit CSR halves. */
static inline void
adf_gen4_unpack_ssm_wdtimer(u64 wdt_val, u32 *upper, u32 *lower)
{
    *upper = upper_32_bits(wdt_val);
    *lower = lower_32_bits(wdt_val);
}
/*
 * Program the gen4 SSM watchdog timers through the PMISC BAR: one
 * default value for the sym/dc watchdog, a separate default for PKE.
 * Always returns 0 (no failure paths in the visible code).
 */
int
adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
    struct adf_hw_device_data *hw_data = accel_dev->hw_device;
    u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
    u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
    u32 ssm_wdt_pke_high = 0;
    u32 ssm_wdt_pke_low = 0;
    u32 ssm_wdt_high = 0;
    u32 ssm_wdt_low = 0;
    struct resource *pmisc_addr;
    struct adf_bar *pmisc;
    int pmisc_id;

    /* Locate the misc BAR; its virtual address hosts the WDT CSRs. */
    pmisc_id = hw_data->get_misc_bar_id(hw_data);
    pmisc = &GET_BARS(accel_dev)[pmisc_id];
    pmisc_addr = pmisc->virt_addr;

    /* Convert 64bit WDT timer value into 32bit values for
     * mmio write to 32bit CSRs.
     */
    adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
    adf_gen4_unpack_ssm_wdtimer(timer_val_pke,
                                &ssm_wdt_pke_high,
                                &ssm_wdt_pke_low);

    /* Enable WDT for sym and dc */
    ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
    ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);

    /* Enable WDT for pke */
    ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
    ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);

    return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);

View File

@ -68,10 +68,14 @@ adf_get_hb_timer(struct adf_accel_dev *accel_dev, unsigned int *value)
unsigned int timer_val = ADF_CFG_HB_DEFAULT_VALUE;
u32 clk_per_sec = 0;
if (!hw_data->get_ae_clock)
/* HB clock may be different than AE clock */
if (hw_data->get_hb_clock) {
clk_per_sec = (u32)hw_data->get_hb_clock(hw_data);
} else if (hw_data->get_ae_clock) {
clk_per_sec = (u32)hw_data->get_ae_clock(hw_data);
} else {
return EINVAL;
clk_per_sec = (u32)hw_data->get_ae_clock(hw_data);
}
/* Get Heartbeat Timer value from the configuration */
if (!adf_cfg_get_param_value(accel_dev,
@ -99,24 +103,19 @@ adf_get_hb_timer(struct adf_accel_dev *accel_dev, unsigned int *value)
return 0;
}
struct adf_hb_count {
u16 ae_thread[ADF_NUM_HB_CNT_PER_AE];
};
int
adf_get_heartbeat_status(struct adf_accel_dev *accel_dev)
{
struct icp_qat_fw_init_admin_hb_cnt *live_s, *last_s, *curr_s;
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct icp_qat_fw_init_admin_hb_stats *live_s =
(struct icp_qat_fw_init_admin_hb_stats *)
accel_dev->admin->virt_hb_addr;
const size_t max_aes = hw_device->get_num_aes(hw_device);
const size_t hb_ctrs = hw_device->heartbeat_ctr_num;
const size_t stats_size =
max_aes * sizeof(struct icp_qat_fw_init_admin_hb_stats);
max_aes * hb_ctrs * sizeof(struct icp_qat_fw_init_admin_hb_cnt);
int ret = 0;
size_t ae, thr;
u16 *count_s;
unsigned long ae_mask = 0;
int num_threads_per_ae = ADF_NUM_HB_CNT_PER_AE;
/*
* Memory layout of Heartbeat
@ -127,13 +126,19 @@ adf_get_heartbeat_status(struct adf_accel_dev *accel_dev)
* \_______________/\_______________/\________/
* ^ ^ ^
* | | |
* | | max_aes * sizeof(adf_hb_count)
* | max_aes * sizeof(icp_qat_fw_init_admin_hb_stats)
* max_aes * sizeof(icp_qat_fw_init_admin_hb_stats)
* | | max_aes * hb_ctrs *
* | | sizeof(u16)
* | |
* | max_aes * hb_ctrs *
* | sizeof(icp_qat_fw_init_admin_hb_cnt)
* |
* max_aes * hb_ctrs *
* sizeof(icp_qat_fw_init_admin_hb_cnt)
*/
struct icp_qat_fw_init_admin_hb_stats *curr_s;
struct icp_qat_fw_init_admin_hb_stats *last_s = live_s + max_aes;
struct adf_hb_count *count = (struct adf_hb_count *)(last_s + max_aes);
live_s = (struct icp_qat_fw_init_admin_hb_cnt *)
accel_dev->admin->virt_hb_addr;
last_s = live_s + (max_aes * hb_ctrs);
count_s = (u16 *)(last_s + (max_aes * hb_ctrs));
curr_s = malloc(stats_size, M_QAT, M_WAITOK | M_ZERO);
@ -142,23 +147,25 @@ adf_get_heartbeat_status(struct adf_accel_dev *accel_dev)
for_each_set_bit(ae, &ae_mask, max_aes)
{
for (thr = 0; thr < num_threads_per_ae; ++thr) {
struct icp_qat_fw_init_admin_hb_cnt *curr =
&curr_s[ae].stats[thr];
struct icp_qat_fw_init_admin_hb_cnt *prev =
&last_s[ae].stats[thr];
u16 req = curr->req_heartbeat_cnt;
u16 resp = curr->resp_heartbeat_cnt;
u16 last = prev->resp_heartbeat_cnt;
struct icp_qat_fw_init_admin_hb_cnt *curr =
curr_s + ae * hb_ctrs;
struct icp_qat_fw_init_admin_hb_cnt *prev =
last_s + ae * hb_ctrs;
u16 *count = count_s + ae * hb_ctrs;
for (thr = 0; thr < hb_ctrs; ++thr) {
u16 req = curr[thr].req_heartbeat_cnt;
u16 resp = curr[thr].resp_heartbeat_cnt;
u16 last = prev[thr].resp_heartbeat_cnt;
if ((thr == ADF_AE_ADMIN_THREAD || req != resp) &&
resp == last) {
u16 retry = ++count[ae].ae_thread[thr];
u16 retry = ++count[thr];
if (retry >= ADF_CFG_HB_COUNT_THRESHOLD)
ret = EIO;
} else {
count[ae].ae_thread[thr] = 0;
count[thr] = 0;
}
}
}

View File

@ -97,14 +97,30 @@ adf_init_gen2_arb(struct adf_accel_dev *accel_dev)
void
adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
ring->bank->bank_number,
ring->bank->ring_mask & 0xFF);
int shift;
u32 arben, arben_tx, arben_rx, arb_mask;
struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;
arb_mask = csr_info->arb_enable_mask;
shift = hweight32(arb_mask);
arben_tx = ring->bank->ring_mask & arb_mask;
arben_rx = (ring->bank->ring_mask >> shift) & arb_mask;
arben = arben_tx & arben_rx;
csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
ring->bank->bank_number,
arben);
}
void
adf_enable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask)
adf_enable_ring_arb(struct adf_accel_dev *accel_dev,
void *csr_addr,
unsigned int bank_nr,
unsigned int mask)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
struct resource *csr = csr_addr;
u32 arbenable;
@ -112,16 +128,20 @@ adf_enable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask)
return;
mutex_lock(&csr_arb_lock);
arbenable = READ_CSR_ARB_RINGSRVARBEN(csr, bank_nr);
arbenable = csr_ops->read_csr_ring_srv_arb_en(csr, bank_nr);
arbenable |= mask & 0xFF;
WRITE_CSR_ARB_RINGSRVARBEN(csr, bank_nr, arbenable);
csr_ops->write_csr_ring_srv_arb_en(csr, bank_nr, arbenable);
mutex_unlock(&csr_arb_lock);
}
void
adf_disable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask)
adf_disable_ring_arb(struct adf_accel_dev *accel_dev,
void *csr_addr,
unsigned int bank_nr,
unsigned int mask)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
struct resource *csr = csr_addr;
u32 arbenable;
@ -129,15 +149,16 @@ adf_disable_ring_arb(void *csr_addr, unsigned int bank_nr, unsigned int mask)
return;
mutex_lock(&csr_arb_lock);
arbenable = READ_CSR_ARB_RINGSRVARBEN(csr, bank_nr);
arbenable = csr_ops->read_csr_ring_srv_arb_en(csr, bank_nr);
arbenable &= ~mask & 0xFF;
WRITE_CSR_ARB_RINGSRVARBEN(csr, bank_nr, arbenable);
csr_ops->write_csr_ring_srv_arb_en(csr, bank_nr, arbenable);
mutex_unlock(&csr_arb_lock);
}
void
adf_exit_arb(struct adf_accel_dev *accel_dev)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct arb_info info;
struct resource *csr;
@ -166,12 +187,13 @@ adf_exit_arb(struct adf_accel_dev *accel_dev)
/* Disable arbitration on all rings */
for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}
void
adf_disable_arb(struct adf_accel_dev *accel_dev)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
struct resource *csr;
unsigned int i;
@ -182,5 +204,5 @@ adf_disable_arb(struct adf_accel_dev *accel_dev)
/* Disable arbitration on all rings */
for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}

View File

@ -213,7 +213,7 @@ adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
unsigned int mask;
u32 clk_per_sec = hw_data->get_clock_speed(hw_data);
u32 timer_val = ADF_WDT_TIMER_SYM_COMP_MS * (clk_per_sec / 1000);
u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
u32 timer_val_pke = ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE;
char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
/* Get Watch Dog Timer for CySym+Comp from the configuration */
@ -289,6 +289,12 @@ adf_dev_init(struct adf_accel_dev *accel_dev)
return EFAULT;
}
if (hw_data->init_device && hw_data->init_device(accel_dev)) {
device_printf(GET_DEV(accel_dev),
"Failed to initialize device\n");
return EFAULT;
}
if (hw_data->init_accel_units && hw_data->init_accel_units(accel_dev)) {
device_printf(GET_DEV(accel_dev),
"Failed initialize accel_units\n");
@ -343,7 +349,8 @@ adf_dev_init(struct adf_accel_dev *accel_dev)
hw_data->enable_error_correction(accel_dev);
if (hw_data->enable_vf2pf_comms(accel_dev)) {
if (hw_data->enable_vf2pf_comms &&
hw_data->enable_vf2pf_comms(accel_dev)) {
device_printf(GET_DEV(accel_dev),
"QAT: Failed to enable vf2pf comms\n");
return EFAULT;

View File

@ -38,6 +38,9 @@ adf_enable_msix(struct adf_accel_dev *accel_dev)
int num_vectors = 0;
u_int *vectors;
if (hw_data->set_msix_rttable)
hw_data->set_msix_rttable(accel_dev);
/* If SR-IOV is disabled, add entries for each bank */
if (!accel_dev->u1.pf.vf_info) {
msix_num_entries += hw_data->num_banks;
@ -90,8 +93,11 @@ adf_msix_isr_bundle(void *bank_ptr)
{
struct adf_etr_bank_data *bank = bank_ptr;
struct adf_etr_data *priv_data = bank->accel_dev->transport;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
bank->bank_number,
0);
adf_response_handler((uintptr_t)&priv_data->banks[bank->bank_number]);
return;
}

View File

@ -73,27 +73,36 @@ adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
static void
adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
mtx_lock(&bank->lock);
bank->irq_mask |= (1 << ring);
mtx_unlock(&bank->lock);
WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
WRITE_CSR_INT_COL_CTL(bank->csr_addr,
bank->bank_number,
bank->irq_coalesc_timer);
csr_ops->write_csr_int_col_en(bank->csr_addr,
bank->bank_number,
bank->irq_mask);
csr_ops->write_csr_int_col_ctl(bank->csr_addr,
bank->bank_number,
bank->irq_coalesc_timer);
}
static void
adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
mtx_lock(&bank->lock);
bank->irq_mask &= ~(1 << ring);
mtx_unlock(&bank->lock);
WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
csr_ops->write_csr_int_col_en(bank->csr_addr,
bank->bank_number,
bank->irq_mask);
}
int
adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
u32 msg_size = 0;
if (atomic_add_return(1, ring->inflights) > ring->max_inflights) {
@ -110,10 +119,10 @@ adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
ring->tail = adf_modulo(ring->tail + msg_size,
ADF_RING_SIZE_MODULO(ring->ring_size));
WRITE_CSR_RING_TAIL(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring->tail);
csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring->tail);
ring->csr_tail_offset = ring->tail;
mtx_unlock(&ring->lock);
return 0;
@ -122,6 +131,7 @@ adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
int
adf_handle_response(struct adf_etr_ring_data *ring, u32 quota)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
u32 msg_counter = 0;
u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
@ -139,10 +149,10 @@ adf_handle_response(struct adf_etr_ring_data *ring, u32 quota)
msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
}
if (msg_counter > 0)
WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring->head);
csr_ops->write_csr_ring_head(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring->head);
return msg_counter;
}
@ -154,6 +164,7 @@ adf_poll_bank(u32 accel_id, u32 bank_num, u32 quota)
struct adf_etr_data *trans_data;
struct adf_etr_bank_data *bank;
struct adf_etr_ring_data *ring;
struct adf_hw_csr_ops *csr_ops;
u32 rings_not_empty;
u32 ring_num;
u32 resp_total = 0;
@ -168,12 +179,14 @@ adf_poll_bank(u32 accel_id, u32 bank_num, u32 quota)
return EINVAL;
}
csr_ops = GET_CSR_OPS(accel_dev);
trans_data = accel_dev->transport;
bank = &trans_data->banks[bank_num];
mtx_lock(&bank->lock);
/* Read the ring status CSR to determine which rings are empty. */
rings_not_empty = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
rings_not_empty =
csr_ops->read_csr_e_stat(bank->csr_addr, bank->bank_number);
/* Complement to find which rings have data to be processed. */
rings_not_empty = (~rings_not_empty) & bank->ring_mask;
@ -265,25 +278,27 @@ adf_poll_all_banks(u32 accel_id, u32 quota)
static void
adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);
WRITE_CSR_RING_CONFIG(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring_config);
csr_ops->write_csr_ring_config(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring_config);
}
static void
adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
u32 ring_config = BUILD_RESP_RING_CONFIG(ring->ring_size,
ADF_RING_NEAR_WATERMARK_512,
ADF_RING_NEAR_WATERMARK_0);
WRITE_CSR_RING_CONFIG(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring_config);
csr_ops->write_csr_ring_config(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring_config);
}
static int
@ -292,6 +307,7 @@ adf_init_ring(struct adf_etr_ring_data *ring)
struct adf_etr_bank_data *bank = ring->bank;
struct adf_accel_dev *accel_dev = bank->accel_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
u64 ring_base;
u32 ring_size_bytes = ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
@ -323,11 +339,12 @@ adf_init_ring(struct adf_etr_ring_data *ring)
else
adf_configure_rx_ring(ring);
ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
WRITE_CSR_RING_BASE(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring_base);
ring_base =
csr_ops->build_csr_ring_base_addr(ring->dma_addr, ring->ring_size);
csr_ops->write_csr_ring_base(ring->bank->csr_addr,
ring->bank->bank_number,
ring->ring_number,
ring_base);
mtx_init(&ring->lock, "adf bank", NULL, MTX_DEF);
return 0;
}
@ -443,19 +460,20 @@ void
adf_remove_ring(struct adf_etr_ring_data *ring)
{
struct adf_etr_bank_data *bank = ring->bank;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
/* Disable interrupts for the given ring */
adf_disable_ring_irq(bank, ring->ring_number);
/* Clear PCI config space */
WRITE_CSR_RING_CONFIG(bank->csr_addr,
bank->bank_number,
ring->ring_number,
0);
WRITE_CSR_RING_BASE(bank->csr_addr,
bank->bank_number,
ring->ring_number,
0);
csr_ops->write_csr_ring_config(bank->csr_addr,
bank->bank_number,
ring->ring_number,
0);
csr_ops->write_csr_ring_base(bank->csr_addr,
bank->bank_number,
ring->ring_number,
0);
adf_ring_debugfs_rm(ring);
adf_unreserve_ring(bank, ring->ring_number);
/* Disable HW arbitration for the given ring */
@ -468,10 +486,12 @@ adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
struct adf_accel_dev *accel_dev = bank->accel_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
u8 num_rings_per_bank = hw_data->num_rings_per_bank;
u32 empty_rings, i;
empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
empty_rings =
csr_ops->read_csr_e_stat(bank->csr_addr, bank->bank_number);
empty_rings = ~empty_rings & bank->irq_mask;
for (i = 0; i < num_rings_per_bank; ++i) {
@ -484,12 +504,13 @@ void
adf_response_handler(uintptr_t bank_addr)
{
struct adf_etr_bank_data *bank = (void *)bank_addr;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
/* Handle all the responses and re-enable IRQs */
adf_ring_response_handler(bank);
WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr,
bank->bank_number,
bank->irq_mask);
csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
bank->bank_number,
bank->irq_mask);
}
static inline int
@ -548,10 +569,12 @@ adf_init_bank(struct adf_accel_dev *accel_dev,
struct resource *csr_addr)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops;
struct adf_etr_ring_data *ring;
struct adf_etr_ring_data *tx_ring;
u32 i, coalesc_enabled = 0;
u8 num_rings_per_bank = hw_data->num_rings_per_bank;
u32 irq_mask = BIT(num_rings_per_bank) - 1;
u32 size = 0;
explicit_bzero(bank, sizeof(*bank));
@ -580,8 +603,8 @@ adf_init_bank(struct adf_accel_dev *accel_dev,
bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
for (i = 0; i < num_rings_per_bank; i++) {
WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);
ring = &bank->rings[i];
if (hw_data->tx_rings_mask & (1 << i)) {
ring->inflights =
@ -605,8 +628,8 @@ adf_init_bank(struct adf_accel_dev *accel_dev,
goto err;
}
WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
return 0;
err:
for (i = 0; i < num_rings_per_bank; i++) {

View File

@ -75,6 +75,29 @@ static const uint64_t inst[] = {
0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
};
static const uint64_t inst_CPM2X[] = {
0x0F0000C0000ull, 0x0D802C00011ull, 0x0F0000C0001ull, 0x0FC066C0001ull,
0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F000500300ull,
0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0A0580C0000ull,
0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull, 0x0A0584C0000ull,
0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull, 0x0A0588C0000ull,
0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull, 0x0A058CC0000ull,
0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull, 0x0A05C0C0000ull,
0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull, 0x0A05C4C0000ull,
0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull, 0x0A05C8C0000ull,
0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull, 0x0A05CCC0000ull,
0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull, 0x0A0400C0000ull,
0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull, 0x0A0402C0000ull,
0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull, 0x0A0404C0000ull,
0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull, 0x0A0406C0000ull,
0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull, 0x0A0408C0000ull,
0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull, 0x0A040AC0000ull,
0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull, 0x0A040CC0000ull,
0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull, 0x0A040EC0000ull,
0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull, 0x0D81341C010ull,
0x0E000000001ull, 0x0E000010000ull,
};
void
qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
unsigned char ae,
@ -206,6 +229,11 @@ qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
pr_err("QAT: No next neigh for CPM2X\n");
return EINVAL;
}
qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
csr &= IGNORE_W1C_MASK;
@ -339,6 +367,21 @@ qat_hal_get_reg_addr(unsigned int type, unsigned short reg_num)
return reg_addr;
}
static u32
qat_hal_get_ae_mask_gen4(struct icp_qat_fw_loader_handle *handle)
{
u32 tg = 0, ae;
u32 valid_ae_mask = 0;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
if (handle->hal_handle->ae_mask & (1 << ae)) {
tg = ae / 4;
valid_ae_mask |= (1 << (tg * 2));
}
}
return valid_ae_mask;
}
void
qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
{
@ -353,15 +396,26 @@ qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
ae_reset_csr[1] = ICP_RESET_CPP1;
if (handle->hal_handle->ae_mask > 0xffff)
++cpp_num;
} else if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
ae_reset_csr[0] = ICP_RESET_CPP0;
} else {
ae_reset_csr[0] = ICP_RESET;
}
for (i = 0; i < cpp_num; i++) {
if (i == 0) {
valid_ae_mask = handle->hal_handle->ae_mask & 0xFFFF;
valid_slice_mask =
handle->hal_handle->slice_mask & 0x3F;
if (IS_QAT_GEN4(
pci_get_device(GET_DEV(handle->accel_dev)))) {
valid_ae_mask =
qat_hal_get_ae_mask_gen4(handle);
valid_slice_mask =
handle->hal_handle->slice_mask;
} else {
valid_ae_mask =
handle->hal_handle->ae_mask & 0xFFFF;
valid_slice_mask =
handle->hal_handle->slice_mask & 0x3F;
}
} else {
valid_ae_mask =
(handle->hal_handle->ae_mask >> AES_PER_CPP) &
@ -509,7 +563,7 @@ qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
unsigned long ae_mask = handle->hal_handle->ae_mask;
misc_ctl_csr =
(IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) ?
(IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) ?
MISC_CONTROL_C4XXX :
MISC_CONTROL;
/* stop the timestamp timers */
@ -586,6 +640,9 @@ qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
clk_csr[1] = ICP_GLOBAL_CLK_ENABLE_CPP1;
if (handle->hal_handle->ae_mask > 0xffff)
++cpp_num;
} else if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
ae_reset_csr[0] = ICP_RESET_CPP0;
clk_csr[0] = ICP_GLOBAL_CLK_ENABLE_CPP0;
} else {
ae_reset_csr[0] = ICP_RESET;
clk_csr[0] = ICP_GLOBAL_CLK_ENABLE;
@ -593,9 +650,18 @@ qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
for (i = 0; i < cpp_num; i++) {
if (i == 0) {
valid_ae_mask = handle->hal_handle->ae_mask & 0xFFFF;
valid_slice_mask =
handle->hal_handle->slice_mask & 0x3F;
if (IS_QAT_GEN4(
pci_get_device(GET_DEV(handle->accel_dev)))) {
valid_ae_mask =
qat_hal_get_ae_mask_gen4(handle);
valid_slice_mask =
handle->hal_handle->slice_mask;
} else {
valid_ae_mask =
handle->hal_handle->ae_mask & 0xFFFF;
valid_slice_mask =
handle->hal_handle->slice_mask & 0x3F;
}
} else {
valid_ae_mask =
(handle->hal_handle->ae_mask >> AES_PER_CPP) &
@ -714,7 +780,24 @@ qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
const uint64_t *uword)
{
unsigned int ustore_addr;
unsigned int i;
unsigned int i, ae_in_group;
if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
ae_in_group = ae / 4 * 4;
for (i = 0; i < AE_TG_NUM_CPM2X; i++) {
if (ae_in_group + i == ae)
continue;
if (ae_in_group + i >= handle->hal_handle->ae_max_num)
break;
if (qat_hal_check_ae_active(handle, ae_in_group + i)) {
pr_err(
"ae%d in T_group is active, cannot write to ustore!\n",
ae_in_group + i);
return;
}
}
}
qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
uaddr |= UA_ECS;
@ -826,10 +909,25 @@ qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
csr_val &= IGNORE_W1C_MASK;
csr_val |= CE_NN_MODE;
if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
csr_val |= CE_NN_MODE;
}
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
qat_hal_wr_uwords(
handle, ae, 0, ARRAY_SIZE(inst), (const uint64_t *)inst);
if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
if (ae % 4 == 0)
qat_hal_wr_uwords(handle,
ae,
0,
ARRAY_SIZE(inst_CPM2X),
(const uint64_t *)inst_CPM2X);
} else {
qat_hal_wr_uwords(handle,
ae,
0,
ARRAY_SIZE(inst),
(const uint64_t *)inst);
}
qat_hal_wr_indr_csr(handle,
ae,
ctx_mask,
@ -971,6 +1069,7 @@ qat_hal_init(struct adf_accel_dev *accel_dev)
malloc(sizeof(*handle->hal_handle), M_QAT, M_WAITOK | M_ZERO);
handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
handle->hal_handle->ae_mask = hw_data->ae_mask;
handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask;
handle->hal_handle->slice_mask = hw_data->accel_mask;
handle->cfg_ae_mask = 0xFFFFFFFF;
/* create AE objects */
@ -1038,17 +1137,23 @@ qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
free(handle, M_QAT);
}
void
qat_hal_start(struct icp_qat_fw_loader_handle *handle,
unsigned char ae,
unsigned int ctx_mask)
int
qat_hal_start(struct icp_qat_fw_loader_handle *handle)
{
unsigned char ae = 0;
int retry = 0;
unsigned int fcu_sts = 0;
unsigned int fcu_ctl_csr, fcu_sts_csr;
unsigned long ae_mask = handle->hal_handle->ae_mask;
u32 ae_ctr = 0;
if (handle->fw_auth) {
if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
{
ae_ctr++;
}
if (IS_QAT_GEN3_OR_GEN4(
pci_get_device(GET_DEV(handle->accel_dev)))) {
fcu_ctl_csr = FCU_CONTROL_C4XXX;
fcu_sts_csr = FCU_STATUS_C4XXX;
@ -1061,17 +1166,27 @@ qat_hal_start(struct icp_qat_fw_loader_handle *handle,
pause_ms("adfstop", FW_AUTH_WAIT_PERIOD);
fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr);
if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
return;
return ae_ctr;
} while (retry++ < FW_AUTH_MAX_RETRY);
pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n",
ae,
fcu_sts);
return 0;
} else {
qat_hal_put_wakeup_event(handle,
ae,
(~ctx_mask) & ICP_QAT_UCLO_AE_ALL_CTX,
0x10000);
qat_hal_enable_ctx(handle, ae, ctx_mask);
for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
{
qat_hal_put_wakeup_event(handle,
ae,
0,
IS_QAT_GEN4(
pci_get_device(GET_DEV(
handle->accel_dev))) ?
0x80000000 :
0x10000);
qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX);
ae_ctr++;
}
return ae_ctr;
}
}
@ -1193,7 +1308,7 @@ qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, &ind_lm_addr_byte0);
qat_hal_rd_indr_csr(
handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, &ind_lm_addr_byte1);
if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
qat_hal_rd_indr_csr(
handle, ae, ctx, LM_ADDR_2_INDIRECT, &ind_lm_addr2);
qat_hal_rd_indr_csr(
@ -1286,7 +1401,7 @@ qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
(1 << ctx),
INDIRECT_LM_ADDR_1_BYTE_INDEX,
ind_lm_addr_byte1);
if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
qat_hal_wr_indr_csr(
handle, ae, (1 << ctx), LM_ADDR_2_INDIRECT, ind_lm_addr2);
qat_hal_wr_indr_csr(
@ -1832,6 +1947,11 @@ qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
int stat = 0;
unsigned char ctx;
if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
pr_err("QAT: No next neigh for CPM2X\n");
return EINVAL;
}
if (ctx_mask == 0)
return EINVAL;

View File

@ -46,9 +46,13 @@ qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
encap_image->img_ptr->ctx_assigned;
ae_data->shareable_ustore =
ICP_QAT_SHARED_USTORE_MODE(encap_image->img_ptr->ae_mode);
ae_data->eff_ustore_size = ae_data->shareable_ustore ?
(obj_handle->ustore_phy_size << 1) :
obj_handle->ustore_phy_size;
if (obj_handle->prod_type == ICP_QAT_AC_4XXX_A_DEV_TYPE)
ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
else {
ae_data->eff_ustore_size = ae_data->shareable_ustore ?
(obj_handle->ustore_phy_size << 1) :
obj_handle->ustore_phy_size;
}
} else {
ae_slice->ctx_mask_assigned = 0;
}
@ -324,9 +328,13 @@ qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
{
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned int ae;
unsigned int lmem;
if (qat_uclo_fetch_initmem_ae(
handle, init_mem, ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
lmem = IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))) ?
ICP_QAT_UCLO_MAX_LMEM_REG_2X :
ICP_QAT_UCLO_MAX_LMEM_REG;
if (qat_uclo_fetch_initmem_ae(handle, init_mem, lmem, &ae))
return EINVAL;
if (qat_uclo_create_batch_init_list(
handle, init_mem, ae, &obj_handle->lm_init_tab[ae]))
@ -411,6 +419,8 @@ qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
{
unsigned long cfg_ae_mask = handle->cfg_ae_mask;
unsigned long ae_assigned = uof_image->ae_assigned;
const bool gen4 =
IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)));
if (!test_bit(ae, &cfg_ae_mask))
continue;
@ -418,7 +428,8 @@ qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
if (!test_bit(ae, &ae_assigned))
continue;
if (obj_handle->ae_data[ae].shareable_ustore && (ae & 1)) {
if (obj_handle->ae_data[ae].shareable_ustore && (ae & 1) &&
!gen4) {
qat_hal_get_scs_neigh_ae(ae, &neigh_ae);
if (test_bit(neigh_ae, &ae_assigned))
@ -427,7 +438,7 @@ qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
patt_pos = page->beg_addr_p + page->micro_words_num;
if (obj_handle->ae_data[ae].shareable_ustore) {
if (obj_handle->ae_data[ae].shareable_ustore && !gen4) {
qat_hal_get_scs_neigh_ae(ae, &neigh_ae);
if (init[ae] == 0 && page->beg_addr_p != 0) {
qat_hal_wr_coalesce_uwords(handle,
@ -445,6 +456,9 @@ qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
init[ae] = 1;
init[neigh_ae] = 1;
} else {
if (gen4 && (ae % 4 != 0))
continue;
qat_hal_wr_uwords(handle,
(unsigned char)ae,
0,
@ -727,6 +741,61 @@ qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
return 0;
}
static int
UcLo_checkTGroupList2X(struct icp_qat_fw_loader_handle *handle)
{
int i;
unsigned int swAe = 0;
unsigned int ii, jj;
struct icp_qat_uclo_aedata *ae_data0, *ae_datax;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
for (i = 0; i < obj_handle->uimage_num; i++) {
struct icp_qat_uof_image *image =
obj_handle->ae_uimage[i].img_ptr;
if (image->numpages > 1) {
pr_err(
"Only 1 page is allowed in a UOF for CPM2X; We found %d in %s\n",
image->numpages,
qat_uclo_get_string(&obj_handle->str_table,
image->img_name));
return EINVAL;
}
}
for (swAe = 0;
(swAe < obj_handle->ae_num) && (swAe < ICP_QAT_UCLO_MAX_AE);
swAe += AE_TG_NUM_CPM2X) {
if (!qat_hal_check_ae_active(handle, swAe)) {
continue;
}
for (ii = swAe; ii < (swAe + AE_TG_NUM_CPM2X); ii++) {
ae_data0 = &obj_handle->ae_data[ii];
if (ae_data0->slice_num != 1) // not assigned
continue;
for (jj = ii + 1; jj < (swAe + AE_TG_NUM_CPM2X); jj++) {
ae_datax = &obj_handle->ae_data[jj];
if (ae_datax->slice_num != 1) // not assigned
continue;
if (ae_data0->ae_slices[0]
.encap_image->img_ptr !=
ae_datax->ae_slices[0]
.encap_image->img_ptr) {
pr_err("Only 1 list is allowed in a ");
pr_err("Tgroup for CPM2X;\n");
pr_err("ME%d, %d is assigned", ii, jj);
pr_err(" different list files\n");
return EINVAL;
}
}
}
}
return 0;
}
static int
qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
@ -752,6 +821,11 @@ qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
return EINVAL;
}
}
if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
if (UcLo_checkTGroupList2X(handle)) {
return EINVAL;
}
}
if (!mflag) {
pr_err("QAT: uimage uses AE not set");
return EINVAL;
@ -817,6 +891,9 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
return ICP_QAT_AC_200XX_DEV_TYPE;
case ADF_C4XXX_PCI_DEVICE_ID:
return ICP_QAT_AC_C4XXX_DEV_TYPE;
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
pci_get_device(GET_DEV(handle->accel_dev)));
@ -1001,11 +1078,13 @@ qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
ae_mode = (char)ICP_QAT_SHARED_USTORE_MODE(uof_image->ae_mode);
qat_hal_set_ae_scs_mode(handle, ae, ae_mode);
nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
return EFAULT;
if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
return EFAULT;
}
}
ae_mode = (char)ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, ae_mode)) {
@ -1017,7 +1096,7 @@ qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
return EFAULT;
}
if (obj_handle->prod_type == ICP_QAT_AC_C4XXX_DEV_TYPE) {
if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
ae_mode = (char)ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, ae_mode)) {
pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
@ -1168,12 +1247,14 @@ qat_uclo_map_suof_file_hdr(const struct icp_qat_fw_loader_handle *handle,
}
static void
qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_img_hdr *suof_img_hdr,
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
const struct icp_qat_simg_ae_mode *ae_mode;
struct icp_qat_suof_objhdr *suof_objhdr;
unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev));
suof_img_hdr->simg_buf =
(suof_handle->suof_buf + suof_chunk_hdr->offset +
@ -1187,9 +1268,10 @@ qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
suof_img_hdr->css_key =
(suof_img_hdr->css_header + sizeof(struct icp_qat_css_hdr));
suof_img_hdr->css_signature = suof_img_hdr->css_key +
ICP_QAT_CSS_FWSK_MODULUS_LEN + ICP_QAT_CSS_FWSK_EXPONENT_LEN;
ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) +
ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id);
suof_img_hdr->css_simg =
suof_img_hdr->css_signature + ICP_QAT_CSS_SIGNATURE_LEN;
suof_img_hdr->css_signature + ICP_QAT_CSS_SIGNATURE_LEN(device_id);
ae_mode = (const struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
suof_img_hdr->ae_mask = ae_mode->ae_mask;
@ -1277,7 +1359,8 @@ qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE,
aeMax_img = ICP_QAT_UCLO_MAX_AE;
unsigned int i = 0;
struct icp_qat_suof_img_hdr img_header;
@ -1305,7 +1388,7 @@ qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
}
for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
qat_uclo_map_simg(handle->sobj_handle,
qat_uclo_map_simg(handle,
&suof_img_hdr[i],
&suof_chunk_hdr[1 + i]);
ret = qat_uclo_check_simg_compat(handle, &suof_img_hdr[i]);
@ -1315,9 +1398,29 @@ qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
ae0_img = i;
}
qat_uclo_tail_img(suof_img_hdr,
ae0_img,
suof_handle->img_table.num_simgs);
if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
qat_uclo_tail_img(suof_img_hdr,
ae0_img,
suof_handle->img_table.num_simgs);
} else {
if (suof_handle->img_table.num_simgs == 1)
return 0;
qat_uclo_tail_img(suof_img_hdr,
ae0_img,
suof_handle->img_table.num_simgs - 1);
for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
if ((suof_img_hdr[i].ae_mask &
(0x1 << (handle->hal_handle->ae_max_num - 1))) !=
0) {
aeMax_img = i;
break;
}
}
qat_uclo_tail_img(suof_img_hdr,
aeMax_img,
suof_handle->img_table.num_simgs);
}
return 0;
}
@ -1335,7 +1438,7 @@ qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) -
sizeof(struct icp_qat_auth_chunk);
if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
fcu_ctl_csr = FCU_CONTROL_C4XXX;
fcu_sts_csr = FCU_STATUS_C4XXX;
fcu_dram_hi_csr = FCU_DRAM_ADDR_HI_C4XXX;
@ -1372,6 +1475,103 @@ qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
return EINVAL;
}
static int
qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle, int imgid)
{
struct icp_qat_suof_handle *sobj_handle;
if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))))
return 0;
sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
if (handle->hal_handle->admin_ae_mask &
sobj_handle->img_table.simg_hdr[imgid].ae_mask)
return 0;
return 1;
}
static int
qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
unsigned int i = 0;
unsigned int fcuSts = 0, fcuAeBroadcastMask = 0;
unsigned int retry = 0;
unsigned int fcuStsCsr = 0;
unsigned int fcuCtlCsr = 0;
unsigned int loadedAes = 0;
unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev));
if (IS_QAT_GEN4(device_id)) {
fcuCtlCsr = FCU_CONTROL_4XXX;
fcuStsCsr = FCU_STATUS_4XXX;
} else {
pr_err("Uclo_BroadcastLoadFW only applicable for CPM20\n");
return EINVAL;
}
for (i = 0; i < ICP_QAT_UCLO_MAX_AE; i++) {
if (!test_bit(i, (unsigned long *)&handle->hal_handle->ae_mask))
continue;
if (qat_hal_check_ae_active(handle, (unsigned char)i)) {
pr_err(
"Uclo_BroadcastLoadFW error (invalid AE status)\n");
return EINVAL;
}
if ((desc->ae_mask >> i) & 0x1) {
fcuAeBroadcastMask |= 1 << i;
}
}
if (fcuAeBroadcastMask) {
retry = 0;
SET_FCU_CSR(handle,
FCU_ME_BROADCAST_MASK_TYPE,
fcuAeBroadcastMask);
SET_FCU_CSR(handle, fcuCtlCsr, FCU_CTRL_CMD_LOAD);
do {
msleep(FW_AUTH_WAIT_PERIOD);
fcuSts = GET_FCU_CSR(handle, fcuStsCsr);
if ((fcuSts & FCU_AUTH_STS_MASK) == FCU_STS_LOAD_FAIL) {
pr_err(
"Uclo_BroadcastLoadFW fail (fcu_status = 0x%x)\n",
fcuSts & FCU_AUTH_STS_MASK);
return EINVAL;
} else if ((fcuSts & FCU_AUTH_STS_MASK) ==
FCU_STS_LOAD_DONE) {
if (IS_QAT_GEN4(device_id))
loadedAes =
GET_FCU_CSR(handle,
FCU_AE_LOADED_4XXX);
else
loadedAes =
(fcuSts >> FCU_LOADED_AE_POS);
if ((loadedAes & fcuAeBroadcastMask) ==
fcuAeBroadcastMask)
break;
} else if ((fcuSts & FCU_AUTH_STS_MASK) ==
FCU_STS_VERI_DONE) {
SET_FCU_CSR(handle,
fcuCtlCsr,
FCU_CTRL_CMD_LOAD);
}
} while (retry++ < FW_BROADCAST_MAX_RETRY);
if (retry > FW_BROADCAST_MAX_RETRY) {
pr_err(
"Uclo_BroadcastLoadFW fail(fcu_status = 0x%x),retry = %d\n",
fcuSts & FCU_AUTH_STS_MASK,
retry);
return EINVAL;
}
}
return 0;
}
static int
qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
struct icp_firml_dram_desc *dram_desc,
@ -1417,14 +1617,16 @@ qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_auth_chunk *auth_chunk;
u64 virt_addr, bus_addr, virt_base;
unsigned int length, simg_offset = sizeof(*auth_chunk);
unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev));
if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
if (size >
(ICP_QAT_AE_IMG_OFFSET(device_id) + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
pr_err("QAT: error, input image size overflow %d\n", size);
return EINVAL;
}
length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
ICP_QAT_CSS_AE_SIMG_LEN(device_id) + simg_offset :
size + ICP_QAT_CSS_FWSK_PAD_LEN(device_id) + simg_offset;
if (qat_uclo_simg_alloc(handle, img_desc, length)) {
pr_err("QAT: error, allocate continuous dram fail\n");
return -ENOMEM;
@ -1451,42 +1653,43 @@ qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
memcpy((void *)(uintptr_t)virt_addr,
(const void *)(image + sizeof(*css_hdr)),
ICP_QAT_CSS_FWSK_MODULUS_LEN);
ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id));
/* padding */
explicit_bzero((void *)(uintptr_t)(virt_addr +
ICP_QAT_CSS_FWSK_MODULUS_LEN),
ICP_QAT_CSS_FWSK_PAD_LEN);
explicit_bzero((void *)(uintptr_t)(
virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)),
ICP_QAT_CSS_FWSK_PAD_LEN(device_id));
/* exponent */
memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
ICP_QAT_CSS_FWSK_PAD_LEN),
memcpy((void *)(uintptr_t)(virt_addr +
ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) +
ICP_QAT_CSS_FWSK_PAD_LEN(device_id)),
(const void *)(image + sizeof(*css_hdr) +
ICP_QAT_CSS_FWSK_MODULUS_LEN),
ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)),
sizeof(unsigned int));
/* signature */
bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, auth_desc->fwsk_pub_low) +
ICP_QAT_CSS_FWSK_PUB_LEN;
virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
ICP_QAT_CSS_FWSK_PUB_LEN(device_id);
virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(device_id);
auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->signature_low = (unsigned int)bus_addr;
memcpy((void *)(uintptr_t)virt_addr,
(const void *)(image + sizeof(*css_hdr) +
ICP_QAT_CSS_FWSK_MODULUS_LEN +
ICP_QAT_CSS_FWSK_EXPONENT_LEN),
ICP_QAT_CSS_SIGNATURE_LEN);
ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) +
ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id)),
ICP_QAT_CSS_SIGNATURE_LEN(device_id));
bus_addr =
ADD_ADDR(auth_desc->signature_high, auth_desc->signature_low) +
ICP_QAT_CSS_SIGNATURE_LEN;
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;
ICP_QAT_CSS_SIGNATURE_LEN(device_id);
virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(device_id);
auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->img_low = (unsigned int)bus_addr;
auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(device_id);
memcpy((void *)(uintptr_t)virt_addr,
(const void *)(image + ICP_QAT_AE_IMG_OFFSET),
(const void *)(image + ICP_QAT_AE_IMG_OFFSET(device_id)),
auth_desc->img_len);
virt_addr = virt_base;
/* AE firmware */
@ -1506,7 +1709,8 @@ qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
(unsigned int)(bus_addr >> BITS_IN_DWORD);
auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
virt_addr += sizeof(struct icp_qat_css_hdr) +
ICP_QAT_CSS_FWSK_PUB_LEN + ICP_QAT_CSS_SIGNATURE_LEN;
ICP_QAT_CSS_FWSK_PUB_LEN(device_id) +
ICP_QAT_CSS_SIGNATURE_LEN(device_id);
auth_desc->ae_mask =
((struct icp_qat_simg_ae_mode *)virt_addr)->ae_mask &
handle->cfg_ae_mask;
@ -1528,7 +1732,7 @@ qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
unsigned int loaded_aes = FCU_LOADED_AE_POS;
unsigned long ae_mask = handle->hal_handle->ae_mask;
if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
fcu_ctl_csr = FCU_CONTROL_C4XXX;
fcu_sts_csr = FCU_STATUS_C4XXX;
@ -1549,14 +1753,19 @@ qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
}
SET_FCU_CSR(handle,
fcu_ctl_csr,
(FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));
(FCU_CTRL_CMD_LOAD |
(IS_QAT_GEN4(
pci_get_device(GET_DEV(handle->accel_dev))) ?
(1 << FCU_CTRL_BROADCAST_POS) :
0) |
(i << FCU_CTRL_AE_POS)));
do {
pause_ms("adfstop", FW_AUTH_WAIT_PERIOD);
fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr);
if ((fcu_sts & FCU_AUTH_STS_MASK) ==
FCU_STS_LOAD_DONE) {
loaded_aes = IS_QAT_GEN3(pci_get_device(
loaded_aes = IS_QAT_GEN3_OR_GEN4(pci_get_device(
GET_DEV(handle->accel_dev))) ?
GET_FCU_CSR(handle, FCU_AE_LOADED_C4XXX) :
(fcu_sts >> FCU_LOADED_AE_POS);
@ -1606,6 +1815,16 @@ qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
qat_uclo_simg_free(handle, &img_desc);
} else {
if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
device_printf(
NULL, "QAT: PKE service is not allowed because ");
device_printf(NULL, "MMP fw will not be loaded for ");
device_printf(NULL,
"device 0x%x",
pci_get_device(
GET_DEV(handle->accel_dev)));
return status;
}
if (pci_get_device(GET_DEV(handle->accel_dev)) ==
ADF_C3XXX_PCI_DEVICE_ID) {
pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
@ -2044,7 +2263,8 @@ qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
uw_relative_addr + i,
fill_pat);
if (obj_handle->ae_data[ae].shareable_ustore)
if (obj_handle->ae_data[ae].shareable_ustore &&
!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))))
/* copy the buffer to ustore */
qat_hal_wr_coalesce_uwords(handle,
(unsigned char)ae,
@ -2140,10 +2360,16 @@ qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
goto wr_err;
if (qat_uclo_auth_fw(handle, desc))
goto wr_err;
if (qat_uclo_load_fw(handle, desc))
goto wr_err;
if (qat_uclo_is_broadcast(handle, i)) {
if (qat_uclo_broadcast_load_fw(handle, desc))
goto wr_err;
} else {
if (qat_uclo_load_fw(handle, desc))
goto wr_err;
}
qat_uclo_simg_free(handle, &img_desc);
}
return 0;
wr_err:
qat_uclo_simg_free(handle, &img_desc);

View File

@ -6,6 +6,7 @@
#include <adf_cfg.h>
#include <adf_pf2vf_msg.h>
#include <adf_dev_err.h>
#include <adf_gen2_hw_data.h>
#include "adf_200xx_hw_data.h"
#include "icp_qat_hw.h"
#include "adf_heartbeat.h"
@ -495,6 +496,7 @@ adf_init_hw_data_200xx(struct adf_hw_device_data *hw_data)
hw_data->get_errsou_offset = get_errsou_offset;
hw_data->get_clock_speed = get_clock_speed;
hw_data->get_sku = get_sku;
hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
hw_data->fw_name = ADF_200XX_FW;
hw_data->fw_mmp_name = ADF_200XX_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
@ -532,6 +534,8 @@ adf_init_hw_data_200xx(struct adf_hw_device_data *hw_data)
hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
hw_data->pre_reset = adf_dev_pre_reset;
hw_data->post_reset = adf_dev_post_reset;
adf_gen2_init_hw_csr_info(&hw_data->csr_info);
}
void

View File

@ -145,6 +145,7 @@ adf_attach(device_t dev)
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(accel_dev);
hw_data->ae_mask = hw_data->get_ae_mask(accel_dev);
hw_data->admin_ae_mask = hw_data->ae_mask;
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* If the device has no acceleration engines then ignore it. */

View File

@ -0,0 +1,973 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dev_err.h>
#include <adf_pf2vf_msg.h>
#include <adf_gen4_hw_data.h>
#include "adf_4xxx_hw_data.h"
#include "adf_heartbeat.h"
#include "icp_qat_fw_init_admin.h"
#include "icp_qat_hw.h"
#define ADF_CONST_TABLE_SIZE 1024
/* Pairing of an AE mask with the firmware object that serves those AEs. */
struct adf_fw_config {
	u32 ae_mask;	/* engines the object is loaded on */
	char *obj_name;	/* firmware object file name */
};
/* Accel unit information */
static const struct adf_accel_unit adf_4xxx_au_a_ae[] = {
{ 0x1, 0x1, 0xF, 0x1B, 4, ADF_ACCEL_SERVICE_NULL },
{ 0x2, 0x1, 0xF0, 0x6C0, 4, ADF_ACCEL_SERVICE_NULL },
{ 0x4, 0x1, 0x100, 0xF000, 1, ADF_ACCEL_ADMIN },
};
/* Worker thread to service arbiter mappings */
static u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { 0x5555555, 0x5555555,
0x5555555, 0x5555555,
0xAAAAAAA, 0xAAAAAAA,
0xAAAAAAA, 0xAAAAAAA,
0x0 };
/* Masks representing ME thread-service mappings.
 * Thread 7 carries out Admin work and is thus
 * left out.
 */
static u8 default_active_thd_mask = 0x7F;	/* threads 0-6 active */
static u8 dc_me_active_thd_mask = 0x03;		/* compression uses threads 0-1 only */

/* Scratch map filled in by adf_4xxx_cfg_gen_dispatch_arbiter(). */
static u32 thrd_to_arb_map_gen[ADF_4XXX_MAX_ACCELENGINES] = { 0 };
#define ADF_4XXX_ASYM_SYM \
(ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
#define ADF_4XXX_DC \
(COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
#define ADF_4XXX_SYM \
(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
#define ADF_4XXX_ASYM \
(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
#define ADF_4XXX_ASYM_DC \
(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
#define ADF_4XXX_SYM_DC \
(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
#define ADF_4XXX_NA \
(NA | NA << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
NA << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
#define ADF_4XXX_DEFAULT_RING_TO_SRV_MAP ADF_4XXX_ASYM_SYM
/* Mapping from a "ServicesEnabled" config string to a ring-to-service mask. */
struct adf_enabled_services {
	const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u16 rng_to_svc_msk;
};

/* All service-string combinations accepted for 4xxx devices. */
static struct adf_enabled_services adf_4xxx_svcs[] = {
	{ "dc", ADF_4XXX_DC },
	{ "sym", ADF_4XXX_SYM },
	{ "asym", ADF_4XXX_ASYM },
	{ "dc;asym", ADF_4XXX_ASYM_DC },
	{ "asym;dc", ADF_4XXX_ASYM_DC },
	{ "sym;dc", ADF_4XXX_SYM_DC },
	{ "dc;sym", ADF_4XXX_SYM_DC },
	{ "asym;sym", ADF_4XXX_ASYM_SYM },
	{ "sym;asym", ADF_4XXX_ASYM_SYM },
};
/* Device-class descriptor shared by all 4xxx instances in the system. */
static struct adf_hw_device_class adf_4xxx_class = {
	.name = ADF_4XXX_DEVICE_NAME,
	.type = DEV_4XXX,
	.instances = 0,	/* incremented per attached device */
};
/* Report the fixed accelerator mask for 4xxx devices. */
static u32
get_accel_mask(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXX_ACCELERATORS_MASK;
}
/*
 * Derive the enabled-AE mask.  The cached fusectl4 value marks
 * fused-off engines, so invert it and clamp to the 4xxx engine range.
 */
static u32
get_ae_mask(struct adf_accel_dev *accel_dev)
{
	u32 fuses = accel_dev->hw_device->fuses;

	return ADF_4XXX_ACCELENGINES_MASK & ~fuses;
}
/*
 * Translate the user-configured "ServicesEnabled" string into a ring
 * pair to service bitmap via the adf_4xxx_svcs table.
 *
 * Returns 0 on success, EFAULT when the key is absent or names an
 * unsupported service combination (*ring_to_svc_map is left 0).
 *
 * Fix: the value buffer and the comparison bound were sized with
 * ADF_CFG_MAX_KEY_LEN_IN_BYTES although they hold/compare a config
 * *value* (svcs_enabled is ADF_CFG_MAX_VAL_LEN_IN_BYTES wide) — use
 * the value length, matching get_accel_unit_config().
 */
static int
get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 i = 0;

	*ring_to_svc_map = 0;
	/* Get the services enabled by user */
	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return EFAULT;

	for (i = 0; i < ARRAY_SIZE(adf_4xxx_svcs); i++) {
		if (!strncmp(val,
			     adf_4xxx_svcs[i].svcs_enabled,
			     ADF_CFG_MAX_VAL_LEN_IN_BYTES)) {
			*ring_to_svc_map = adf_4xxx_svcs[i].rng_to_svc_msk;
			return 0;
		}
	}

	device_printf(GET_DEV(accel_dev),
		      "Invalid services enabled: %s\n",
		      val);
	return EFAULT;
}
/* Number of logical accelerators on a 4xxx device (always 1). */
static u32
get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_4XXX_MAX_ACCELERATORS;
}
static u32
get_num_aes(struct adf_hw_device_data *self)
{
if (!self || !self->ae_mask)
return 0;
return hweight32(self->ae_mask);
}
/* PCI BAR index holding the PMISC (CSR) region. */
static u32
get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_PMISC_BAR;
}
/* PCI BAR index holding the ring (ETR) region. */
static u32
get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_ETR_BAR;
}
/* PCI BAR index holding the SRAM region. */
static u32
get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_SRAM_BAR;
}
/*
* The vector routing table is used to select the MSI-X entry to use for each
* interrupt source.
* The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
* The final entry corresponds to VF2PF or error interrupts.
* This vector table could be used to configure one MSI-X entry to be shared
* between multiple interrupt sources.
*
* The default routing is set to have a one to one correspondence between the
* interrupt source and the MSI-X entry used.
*/
/*
 * Program the default one-to-one MSI-X vector routing table.
 * The "<=" bound is intentional: entries 0..ADF_4XXX_ETR_MAX_BANKS-1
 * route ring interrupts and the extra final entry routes the VF2PF /
 * error interrupt (see the block comment above).
 */
static void
set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
	struct resource *csr;
	int i;

	csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
	for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
}
/*
 * Build the accelerator capability mask: start from the full 4xxx
 * feature set and strip capabilities whose slices are fused off per
 * the FUSECTL1 register.
 */
static u32
adf_4xxx_get_hw_cap(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 fusectl1;
	u32 capabilities;

	/* Read accelerator capabilities mask */
	fusectl1 = pci_read_config(pdev, ADF_4XXX_FUSECTL1_OFFSET, 4);
	capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
	    ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
	    ICP_ACCEL_CAPABILITIES_CIPHER |
	    ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
	    ICP_ACCEL_CAPABILITIES_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SHA3_EXT |
	    ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
	    ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
	    ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
	    ICP_ACCEL_CAPABILITIES_AES_V2 | ICP_ACCEL_CAPABILITIES_RL;

	/* A set fuse bit means the corresponding slice is disabled. */
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
	}
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4;
	}
	return capabilities;
}
/* Frequency of the counter used for heartbeat timing. */
static u32
get_hb_clock(struct adf_hw_device_data *self)
{
	/*
	 * 4XXX uses KPT counter for HB
	 */
	return ADF_4XXX_KPT_COUNTER_FREQ;
}
/* AE clock-update rate derived from the measured/default AE frequency. */
static u32
get_ae_clock(struct adf_hw_device_data *self)
{
	/*
	 * Clock update interval is <16> ticks for qat_4xxx.
	 */
	return self->clock_frequency / 16;
}
/*
 * Measure the AE clock within the 4xxx min/max bounds and cache it in
 * hw_device->clock_frequency.  Returns 0 on success or the error from
 * adf_dev_measure_clock().
 */
static int
measure_clock(struct adf_accel_dev *accel_dev)
{
	u32 freq;
	int err;

	err = adf_dev_measure_clock(accel_dev,
				    &freq,
				    ADF_4XXX_MIN_AE_FREQ,
				    ADF_4XXX_MAX_AE_FREQ);
	if (err != 0)
		return err;

	accel_dev->hw_device->clock_frequency = freq;
	return 0;
}
/*
 * Install the default "ServicesEnabled" value (asym;sym) in the GENERAL
 * config section.  Returns 0 on success, EINVAL on any config failure.
 */
static int
adf_4xxx_configure_accel_units(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 };
	char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };

	if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC))
		goto err;

	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	/* Default service set: asymmetric + symmetric crypto. */
	snprintf(val_str,
		 sizeof(val_str),
		 ADF_CFG_ASYM ADF_SERVICES_SEPARATOR ADF_CFG_SYM);

	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR))
		goto err;

	return 0;
err:
	device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n");
	return EINVAL;
}
/* Total accel units on 4xxx (2 service units + 1 admin unit). */
static u32
get_num_accel_units(struct adf_hw_device_data *self)
{
	return ADF_4XXX_MAX_ACCELUNITS;
}
/*
 * Copy the static accel-unit layout into the caller's array, which must
 * hold ADF_4XXX_MAX_ACCELUNITS entries.
 */
static void
get_accel_unit(struct adf_hw_device_data *self,
	       struct adf_accel_unit **accel_unit)
{
	memcpy(*accel_unit, adf_4xxx_au_a_ae, sizeof(adf_4xxx_au_a_ae));
}
/*
 * Free the accel-unit bookkeeping allocated by
 * adf_init_accel_unit_services(); safe to call when nothing was
 * allocated.
 */
static void
adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev)
{
	if (accel_dev->au_info == NULL)
		return;

	kfree(accel_dev->au_info->au);
	accel_dev->au_info->au = NULL;
	kfree(accel_dev->au_info);
	accel_dev->au_info = NULL;
}
/*
 * Parse the "ServicesEnabled" string and work out how many accel units
 * each service (sym/asym/dc) gets, then trim the capability mask to the
 * enabled services.  Outputs go to *num_sym_au / *num_dc_au /
 * *num_asym_au; service_mask and the resulting capability mask are
 * stored in hw_data.  Returns 0 on success, EFAULT on a missing key or
 * over-subscribed service set.
 */
static int
get_accel_unit_config(struct adf_accel_dev *accel_dev,
		      u8 *num_sym_au,
		      u8 *num_dc_au,
		      u8 *num_asym_au)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	/* One AU will be allocated by default if a service enabled */
	u32 alloc_au = 1;
	/* There's always one AU that is used for Admin AE */
	u32 service_mask = ADF_ACCEL_ADMIN;
	char *token, *cur_str;
	u32 disabled_caps = 0;

	/* Get the services enabled by user */
	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return EFAULT;
	cur_str = val;
	/* Tokenize the semicolon-separated service list. */
	token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
	while (token) {
		if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM)))
			service_mask |= ADF_ACCEL_CRYPTO;
		if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM)))
			service_mask |= ADF_ACCEL_ASYM;

		/* cy means both asym & crypto should be enabled
		 * Hardware resources allocation check will be done later
		 */
		if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY)))
			service_mask |= ADF_ACCEL_ASYM | ADF_ACCEL_CRYPTO;
		if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC)))
			service_mask |= ADF_ACCEL_COMPRESSION;

		token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
	}

	/* Ensure the user won't enable more services than it can support */
	if (hweight32(service_mask) > num_au) {
		device_printf(GET_DEV(accel_dev),
			      "Can't enable more services than ");
		device_printf(GET_DEV(accel_dev), "%d!\n", num_au);
		return EFAULT;
	} else if (hweight32(service_mask) == 2) {
		/* Due to limitation, besides AU for Admin AE
		 * only 2 more AUs can be allocated
		 */
		alloc_au = 2;
	}

	/* Hand each enabled service the same number of units. */
	if (service_mask & ADF_ACCEL_CRYPTO)
		*num_sym_au = alloc_au;
	if (service_mask & ADF_ACCEL_ASYM)
		*num_asym_au = alloc_au;
	if (service_mask & ADF_ACCEL_COMPRESSION)
		*num_dc_au = alloc_au;

	/*update capability*/
	if (!*num_sym_au || !(service_mask & ADF_ACCEL_CRYPTO)) {
		disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
		    ICP_ACCEL_CAPABILITIES_CIPHER |
		    ICP_ACCEL_CAPABILITIES_SHA3_EXT |
		    ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
		    ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
		    ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
		    ICP_ACCEL_CAPABILITIES_AES_V2;
	}
	if (!*num_asym_au || !(service_mask & ADF_ACCEL_ASYM)) {
		disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
		    ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
	}
	if (!*num_dc_au || !(service_mask & ADF_ACCEL_COMPRESSION)) {
		disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION |
		    ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
		    ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
		    ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
		accel_dev->hw_device->extended_dc_capabilities = 0;
	}
	accel_dev->hw_device->accel_capabilities_mask =
	    adf_4xxx_get_hw_cap(accel_dev) & ~disabled_caps;
	hw_data->service_mask = service_mask;
	hw_data->service_to_load_mask = service_mask;

	return 0;
}
/*
 * Allocate the per-device accel-unit table and assign services to units
 * according to the parsed configuration.  Assignment order is ASYM,
 * then SYM, then compression, each claiming units still marked
 * ADF_ACCEL_SERVICE_NULL.  Returns 0 on success, EFAULT/ENOMEM on
 * failure (allocations are rolled back).
 */
static int
adf_init_accel_unit_services(struct adf_accel_dev *accel_dev)
{
	u8 num_sym_au = 0, num_dc_au = 0, num_asym_au = 0;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	u32 au_size = num_au * sizeof(struct adf_accel_unit);
	u8 i;

	if (get_accel_unit_config(
		accel_dev, &num_sym_au, &num_dc_au, &num_asym_au))
		return EFAULT;

	accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL);
	if (!accel_dev->au_info)
		return ENOMEM;

	accel_dev->au_info->au = kzalloc(au_size, GFP_KERNEL);
	if (!accel_dev->au_info->au) {
		/* Unwind the outer allocation on nested failure. */
		kfree(accel_dev->au_info);
		accel_dev->au_info = NULL;
		return ENOMEM;
	}

	accel_dev->au_info->num_cy_au = num_sym_au;
	accel_dev->au_info->num_dc_au = num_dc_au;
	accel_dev->au_info->num_asym_au = num_asym_au;

	get_accel_unit(hw_data, &accel_dev->au_info->au);

	/* Enable ASYM accel units */
	for (i = 0; i < num_au && num_asym_au > 0; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL) {
			accel_dev->au_info->au[i].services = ADF_ACCEL_ASYM;
			num_asym_au--;
		}
	}
	/* Enable SYM accel units */
	for (i = 0; i < num_au && num_sym_au > 0; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL) {
			accel_dev->au_info->au[i].services = ADF_ACCEL_CRYPTO;
			num_sym_au--;
		}
	}
	/* Enable compression accel units */
	for (i = 0; i < num_au && num_dc_au > 0; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL) {
			accel_dev->au_info->au[i].services =
			    ADF_ACCEL_COMPRESSION;
			num_dc_au--;
		}
	}
	/* Cache the combined AE mask of all compression units. */
	accel_dev->au_info->dc_ae_msk |=
	    hw_data->get_obj_cfg_ae_mask(accel_dev, ADF_ACCEL_COMPRESSION);

	return 0;
}
/* Entry point for accel-unit setup; currently only service assignment. */
static int
adf_init_accel_units(struct adf_accel_dev *accel_dev)
{
	return adf_init_accel_unit_services(accel_dev);
}
/* Tear down accel-unit state built by adf_init_accel_units(). */
static void
adf_exit_accel_units(struct adf_accel_dev *accel_dev)
{
	/* reset the AU service */
	adf_exit_accel_unit_services(accel_dev);
}
/*
 * Map a service type to the firmware object image implementing it;
 * NULL for unknown services.
 */
static const char *
get_obj_name(struct adf_accel_dev *accel_dev,
	     enum adf_accel_unit_services service)
{
	const char *obj = NULL;

	switch (service) {
	case ADF_ACCEL_ASYM:
		obj = ADF_4XXX_ASYM_OBJ;
		break;
	case ADF_ACCEL_CRYPTO:
		obj = ADF_4XXX_SYM_OBJ;
		break;
	case ADF_ACCEL_COMPRESSION:
		obj = ADF_4XXX_DC_OBJ;
		break;
	case ADF_ACCEL_ADMIN:
		obj = ADF_4XXX_ADMIN_OBJ;
		break;
	default:
		break;
	}
	return obj;
}
/* Maximum number of firmware objects loadable at once (incl. admin). */
static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXX_MAX_OBJ;
}
/*
 * OR together the AE masks of every accel unit assigned to the given
 * service; 0 for ADF_ACCEL_SERVICE_NULL or when no unit matches.
 */
static uint32_t
get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev,
		    enum adf_accel_unit_services service)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_accel_unit *units = accel_dev->au_info->au;
	u32 unit_count = hw_data->get_num_accel_units(hw_data);
	u32 mask = 0;
	u32 idx;

	if (service == ADF_ACCEL_SERVICE_NULL)
		return 0;

	for (idx = 0; idx < unit_count; idx++) {
		if (units[idx].services == service)
			mask |= units[idx].ae_mask;
	}
	return mask;
}
/*
 * Pick the next service whose firmware object should be loaded,
 * clearing it from service_to_load_mask so each service is returned
 * once.  Non-admin units are scanned back-to-front; the admin unit
 * (last entry) is deliberately returned last.  Returns
 * ADF_ACCEL_SERVICE_NULL when nothing remains.
 */
static enum adf_accel_unit_services
adf_4xxx_get_service_type(struct adf_accel_dev *accel_dev, s32 obj_num)
{
	struct adf_accel_unit *accel_unit;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_au = hw_data->get_num_accel_units(hw_data);
	int i;

	if (!hw_data->service_to_load_mask)
		return ADF_ACCEL_SERVICE_NULL;

	if (accel_dev->au_info && accel_dev->au_info->au)
		accel_unit = accel_dev->au_info->au;
	else
		return ADF_ACCEL_SERVICE_NULL;

	for (i = num_au - 2; i >= 0; i--) {
		if (hw_data->service_to_load_mask & accel_unit[i].services) {
			hw_data->service_to_load_mask &=
			    ~accel_unit[i].services;
			return accel_unit[i].services;
		}
	}

	/* admin AE should be loaded last */
	if (hw_data->service_to_load_mask & accel_unit[num_au - 1].services) {
		hw_data->service_to_load_mask &=
		    ~accel_unit[num_au - 1].services;
		return accel_unit[num_au - 1].services;
	}

	return ADF_ACCEL_SERVICE_NULL;
}
/*
 * Report how rings in a bank map to a service: the service type comes
 * from ring_to_svc_map keyed by bundle number, rings always start at
 * index 0, and each service gets half the rings in a bank.
 * ring_pair_index is unused on this generation.
 */
static void
get_ring_svc_map_data(int ring_pair_index,
		      u16 ring_to_svc_map,
		      u8 *serv_type,
		      int *ring_index,
		      int *num_rings_per_srv,
		      int bundle_num)
{
	*serv_type =
	    GET_SRV_TYPE(ring_to_svc_map, bundle_num % ADF_CFG_NUM_SERVICES);
	*ring_index = 0;
	*num_rings_per_srv = ADF_4XXX_NUM_RINGS_PER_BANK / 2;
}
/*
 * Query extended compression capabilities from firmware.  The admin
 * message is sent to the lowest-numbered AE of the last compression
 * unit found (first_dc_ae isolates the lowest set bit).  When no
 * compression unit exists, *capabilities is left untouched and 0 is
 * returned; on admin failure *capabilities is cleared and EFAULT
 * returned.
 */
static int
adf_get_dc_extcapabilities(struct adf_accel_dev *accel_dev, u32 *capabilities)
{
	struct icp_qat_fw_init_admin_req req;
	struct icp_qat_fw_init_admin_resp resp;
	u8 i;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_au = hw_data->get_num_accel_units(hw_data);
	u32 first_dc_ae = 0;

	for (i = 0; i < num_au; i++) {
		if (accel_dev->au_info->au[i].services &
		    ADF_ACCEL_COMPRESSION) {
			first_dc_ae = accel_dev->au_info->au[i].ae_mask;
			/* Keep only the lowest set bit (first DC AE). */
			first_dc_ae &= ~(first_dc_ae - 1);
		}
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;

	if (likely(first_dc_ae)) {
		if (adf_send_admin(accel_dev, &req, &resp, first_dc_ae) ||
		    resp.status) {
			*capabilities = 0;
			return EFAULT;
		}

		*capabilities = resp.extended_features;
	}

	return 0;
}
/*
 * Ask the firmware (admin AE, mask 0x1) for its version and return the
 * major/minor/patch components through the out parameters.
 * Returns 0 on success, EFAULT when the admin message fails.
 */
static int
adf_get_fw_status(struct adf_accel_dev *accel_dev,
		  u8 *major,
		  u8 *minor,
		  u8 *patch)
{
	struct icp_qat_fw_init_admin_req req;
	struct icp_qat_fw_init_admin_resp resp;
	const u32 admin_ae_mask = 1;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd_id = ICP_QAT_FW_STATUS_GET;
	if (adf_send_admin(accel_dev, &req, &resp, admin_ae_mask) != 0)
		return EFAULT;

	*major = resp.version_major_num;
	*minor = resp.version_minor_num;
	*patch = resp.version_patch_num;

	return 0;
}
/*
 * Run the firmware admin init sequence, in order: constants-table
 * config (admin AEs only), INIT_ME (all service AEs), heartbeat timer
 * setup (failure is non-fatal), then query of extended DC capabilities
 * and firmware version.  AE masks are trimmed so unassigned units are
 * excluded and only true admin units receive admin-only messages.
 * Returns 0 on success or EFAULT/EINVAL on a fatal step.
 */
static int
adf_4xxx_send_admin_init(struct adf_accel_dev *accel_dev)
{
	int ret = 0;
	struct icp_qat_fw_init_admin_req req;
	struct icp_qat_fw_init_admin_resp resp;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 ae_mask = hw_data->ae_mask;
	u32 admin_ae_mask = hw_data->admin_ae_mask;
	u8 num_au = hw_data->get_num_accel_units(hw_data);
	u8 i;
	u32 dc_capabilities = 0;

	/* Drop unassigned units from ae_mask and non-admin units from
	 * admin_ae_mask. */
	for (i = 0; i < num_au; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL)
			ae_mask &= ~accel_dev->au_info->au[i].ae_mask;

		if (accel_dev->au_info->au[i].services != ADF_ACCEL_ADMIN)
			admin_ae_mask &= ~accel_dev->au_info->au[i].ae_mask;
	}

	if (!accel_dev->admin) {
		device_printf(GET_DEV(accel_dev), "adf_admin not available\n");
		return EFAULT;
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
	req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
	req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
	if (adf_send_admin(accel_dev, &req, &resp, admin_ae_mask)) {
		device_printf(GET_DEV(accel_dev),
			      "Error sending constants config message\n");
		return EFAULT;
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cmd_id = ICP_QAT_FW_INIT_ME;
	if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) {
		device_printf(GET_DEV(accel_dev),
			      "Error sending init message\n");
		return EFAULT;
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET;
	req.init_cfg_ptr = accel_dev->admin->phy_hb_addr;
	if (adf_get_hb_timer(accel_dev, &req.heartbeat_ticks))
		return EINVAL;

	/* Heartbeat support is optional: log and continue on failure. */
	if (adf_send_admin(accel_dev, &req, &resp, ae_mask))
		device_printf(GET_DEV(accel_dev),
			      "Heartbeat is not supported\n");

	ret = adf_get_dc_extcapabilities(accel_dev, &dc_capabilities);
	if (unlikely(ret)) {
		device_printf(GET_DEV(accel_dev),
			      "Could not get FW ext. capabilities\n");
	}

	accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;

	adf_get_fw_status(accel_dev,
			  &accel_dev->fw_versions.fw_version_major,
			  &accel_dev->fw_versions.fw_version_minor,
			  &accel_dev->fw_versions.fw_version_patch);

	device_printf(GET_DEV(accel_dev),
		      "FW version: %d.%d.%d\n",
		      accel_dev->fw_versions.fw_version_major,
		      accel_dev->fw_versions.fw_version_minor,
		      accel_dev->fw_versions.fw_version_patch);

	return ret;
}
/* All 4xxx parts report a single SKU. */
static enum dev_sku_info
get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_1;
}
/*
 * Find the accel unit whose AE mask contains engine ae_num; NULL when
 * the unit table is absent or no unit claims that engine.
 */
static struct adf_accel_unit *
get_au_by_ae(struct adf_accel_dev *accel_dev, int ae_num)
{
	struct adf_accel_unit *units = accel_dev->au_info->au;
	int idx;

	if (units == NULL)
		return NULL;

	for (idx = 0; idx < ADF_4XXX_MAX_ACCELUNITS; idx++) {
		if (units[idx].ae_mask & BIT(ae_num))
			return &units[idx];
	}

	return NULL;
}
/*
 * Return true when an accel unit offering au_srv can serve ring service
 * type ring_srv.
 * NOTE(review): the first test only matches if ADF_ACCEL_SERVICE_NULL
 * is a non-zero flag (bitwise AND with 0 is never true) — confirm
 * against the enum definition in adf_accel_devices.h.
 */
static bool
check_accel_unit_service(enum adf_accel_unit_services au_srv,
			 enum adf_cfg_service_type ring_srv)
{
	if ((au_srv & ADF_ACCEL_SERVICE_NULL) && ring_srv == NA)
		return true;
	if ((au_srv & ADF_ACCEL_COMPRESSION) && ring_srv == COMP)
		return true;
	if ((au_srv & ADF_ACCEL_ASYM) && ring_srv == ASYM)
		return true;
	if ((au_srv & ADF_ACCEL_CRYPTO) && ring_srv == SYM)
		return true;

	return false;
}
/*
 * Generate the thread-to-arbiter dispatch map.  If the ring-to-service
 * map is the default, the static thrd_to_arb_map table is copied
 * verbatim.  Otherwise each non-admin engine gets a per-service bit
 * mask replicated across its active threads (compression engines use
 * the reduced dc_me_active_thd_mask).
 */
static void
adf_4xxx_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev,
				  u32 *thrd_to_arb_map_gen)
{
	struct adf_accel_unit *au = NULL;
	int engine = 0;
	int thread = 0;
	int service;
	u16 ena_srv_mask;
	u16 service_type;
	u32 service_mask;
	unsigned long thd_srv_mask = default_active_thd_mask;

	ena_srv_mask = accel_dev->hw_device->ring_to_svc_map;
	/* If ring_to_svc_map is not changed, return default arbiter value */
	if (ena_srv_mask == ADF_4XXX_DEFAULT_RING_TO_SRV_MAP) {
		memcpy(thrd_to_arb_map_gen,
		       thrd_to_arb_map,
		       sizeof(thrd_to_arb_map_gen[0]) *
			   ADF_4XXX_MAX_ACCELENGINES);
		return;
	}

	/* Last engine (admin AE) is skipped. */
	for (engine = 0; engine < ADF_4XXX_MAX_ACCELENGINES - 1; engine++) {
		thrd_to_arb_map_gen[engine] = 0;
		service_mask = 0;
		au = get_au_by_ae(accel_dev, engine);
		if (!au)
			continue;

		/* Collect one bit per service this engine's unit offers. */
		for (service = 0; service < ADF_CFG_MAX_SERVICES; service++) {
			service_type = GET_SRV_TYPE(ena_srv_mask, service);
			if (check_accel_unit_service(au->services,
						     service_type))
				service_mask |= BIT(service);
		}

		if (au->services == ADF_ACCEL_COMPRESSION)
			thd_srv_mask = dc_me_active_thd_mask;
		else
			thd_srv_mask = default_active_thd_mask;

		/* Replicate the service mask into each active thread slot. */
		for_each_set_bit(thread, &thd_srv_mask, 8)
		{
			thrd_to_arb_map_gen[engine] |=
			    (service_mask << (ADF_CFG_MAX_SERVICES * thread));
		}
	}
}
/*
 * Publish the arbiter mapping for this device.  Entries for fused-off
 * engines are zeroed first — note this mutates the file-scope
 * thrd_to_arb_map table in place — then the generated map is built and
 * returned through *arb_map_config.
 */
static void
adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
			u32 const **arb_map_config)
{
	int i;
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;

	for (i = 1; i < ADF_4XXX_MAX_ACCELENGINES; i++) {
		if (~hw_device->ae_mask & (1 << i))
			thrd_to_arb_map[i] = 0;
	}
	adf_4xxx_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map_gen);
	*arb_map_config = thrd_to_arb_map_gen;
}
/* Report the arbiter CSR layout for 4xxx devices. */
static void
get_arb_info(struct arb_info *arb_info)
{
	arb_info->wrk_thd_2_srv_arb_map = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
	arb_info->arbiter_offset = ADF_4XXX_ARB_OFFSET;
	arb_info->wrk_cfg_offset = ADF_4XXX_ARB_CONFIG;
}
/* Report the admin-interface CSR layout for 4xxx devices. */
static void
get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
	admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
}
/*
 * Unmask error interrupts in ERRMSK3, leaving only the VFLR
 * notification masked on the host.
 */
static void
adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
	struct resource *csr = misc_bar->virt_addr;

	/* Enable all in errsou3 except VFLR notification on host */
	ADF_CSR_WR(csr, ADF_4XXX_ERRMSK3, ADF_4XXX_VFLNOTIFY);
}
/*
 * Unmask bundle (ring) and miscellaneous interrupts by clearing the
 * SMIA mask registers (writing 0 enables all sources).
 */
static void
adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	struct resource *addr;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Enable bundle interrupts */
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);

	/* Enable misc interrupts */
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
}
/*
 * Power up the device: mask the PM interrupt while initializing, assert
 * DRV_ACTIVE, then poll PM_STATUS until the INIT_STATE bit indicates
 * the part is powered up.  Returns 0 on success or the poll-timeout
 * error (logged) on failure.
 */
static int
adf_init_device(struct adf_accel_dev *accel_dev)
{
	struct resource *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2);
	csr |= ADF_4XXX_PM_SOU;
	ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	status = 0;
	ret = read_poll_timeout(ADF_CSR_RD,
				status,
				status & ADF_4XXX_PM_INIT_STATE,
				ADF_4XXX_PM_POLL_DELAY_US,
				ADF_4XXX_PM_POLL_TIMEOUT_US,
				true,
				addr,
				ADF_4XXX_PM_STATUS);
	if (ret)
		device_printf(GET_DEV(accel_dev),
			      "Failed to power up the device\n");

	return ret;
}
/*
 * Populate the hw_device_data ops table and constants for Gen4 (4xxx)
 * devices.  Callbacks defined in this file provide the device-specific
 * behavior; shared adf_* helpers cover the generation-independent
 * parts.  Paired with adf_clean_hw_data_4xxx().
 */
void
adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &adf_4xxx_class;
	hw_data->instance_id = adf_4xxx_class.instances++;
	hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_arb_info = get_arb_info;
	hw_data->get_admin_info = get_admin_info;
	hw_data->get_accel_cap = adf_4xxx_get_hw_cap;
	hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
	hw_data->get_sku = get_sku;
	hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
	hw_data->fw_name = ADF_4XXX_FW;
	hw_data->fw_mmp_name = ADF_4XXX_MMP;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_4xxx_send_admin_init;
	hw_data->init_arb = adf_init_gen2_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->init_device = adf_init_device;
	hw_data->reset_device = adf_reset_flr;
	hw_data->restore_device = adf_dev_restore;
	hw_data->init_accel_units = adf_init_accel_units;
	hw_data->exit_accel_units = adf_exit_accel_units;
	hw_data->get_num_accel_units = get_num_accel_units;
	hw_data->configure_accel_units = adf_4xxx_configure_accel_units;
	hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
	hw_data->get_ring_svc_map_data = get_ring_svc_map_data;
	hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
	hw_data->get_objs_num = get_objs_num;
	hw_data->get_obj_name = get_obj_name;
	hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
	hw_data->get_service_type = adf_4xxx_get_service_type;
	hw_data->set_msix_rttable = set_msix_default_rttable;
	hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
	hw_data->config_device = adf_config_device;
	hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;
	hw_data->get_hb_clock = get_hb_clock;
	hw_data->get_heartbeat_status = adf_get_heartbeat_status;
	hw_data->get_ae_clock = get_ae_clock;
	hw_data->measure_clock = measure_clock;
	hw_data->query_storage_cap = 1;

	adf_gen4_init_hw_csr_info(&hw_data->csr_info);
}
/* Undo adf_init_hw_data_4xxx(): drop this device's class reference. */
void
adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances -= 1;
}

View File

@ -0,0 +1,111 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007 - 2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_4XXX_HW_DATA_H_
#define ADF_4XXX_HW_DATA_H_
#include <adf_accel_devices.h>
/* PCIe configuration space */
#define ADF_4XXX_SRAM_BAR 0
#define ADF_4XXX_PMISC_BAR 1
#define ADF_4XXX_ETR_BAR 2
#define ADF_4XXX_RX_RINGS_OFFSET 1
#define ADF_4XXX_TX_RINGS_MASK 0x1
#define ADF_4XXX_MAX_ACCELERATORS 1
#define ADF_4XXX_MAX_ACCELENGINES 9
#define ADF_4XXX_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
/* 2 Accel units dedicated to services and */
/* 1 Accel unit dedicated to Admin AE */
#define ADF_4XXX_MAX_ACCELUNITS 3
/* Physical function fuses */
#define ADF_4XXX_FUSECTL0_OFFSET (0x2C8)
#define ADF_4XXX_FUSECTL1_OFFSET (0x2CC)
#define ADF_4XXX_FUSECTL2_OFFSET (0x2D0)
#define ADF_4XXX_FUSECTL3_OFFSET (0x2D4)
#define ADF_4XXX_FUSECTL4_OFFSET (0x2D8)
#define ADF_4XXX_FUSECTL5_OFFSET (0x2DC)
#define ADF_4XXX_ACCELERATORS_MASK (0x1)
#define ADF_4XXX_ACCELENGINES_MASK (0x1FF)
#define ADF_4XXX_ADMIN_AE_MASK (0x100)
#define ADF_4XXX_ETR_MAX_BANKS 64
/* MSIX interrupt */
#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET (0x41A040)
#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET (0x41A044)
#define ADF_4XXX_SMIAPF_MASK_OFFSET (0x41A084)
#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i)*0x04))
/* Bank and ring configuration */
#define ADF_4XXX_NUM_RINGS_PER_BANK 2
/* Error source registers */
#define ADF_4XXX_ERRSOU0 (0x41A200)
#define ADF_4XXX_ERRSOU1 (0x41A204)
#define ADF_4XXX_ERRSOU2 (0x41A208)
#define ADF_4XXX_ERRSOU3 (0x41A20C)
/* Error source mask registers */
#define ADF_4XXX_ERRMSK0 (0x41A210)
#define ADF_4XXX_ERRMSK1 (0x41A214)
#define ADF_4XXX_ERRMSK2 (0x41A218)
#define ADF_4XXX_ERRMSK3 (0x41A21C)
#define ADF_4XXX_VFLNOTIFY BIT(7)
/* Arbiter configuration */
#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
#define ADF_4XXX_ARB_OFFSET (0x0)
#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET (0x400)
/* Admin Interface Reg Offset */
#define ADF_4XXX_ADMINMSGUR_OFFSET (0x500574)
#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578)
#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970)
/* Power management */
#define ADF_4XXX_PM_POLL_DELAY_US 20
#define ADF_4XXX_PM_POLL_TIMEOUT_US USEC_PER_SEC
#define ADF_4XXX_PM_STATUS (0x50A00C)
#define ADF_4XXX_PM_INTERRUPT (0x50A028)
#define ADF_4XXX_PM_DRV_ACTIVE BIT(20)
#define ADF_4XXX_PM_INIT_STATE BIT(21)
/* Power management source in ERRSOU2 and ERRMSK2 */
#define ADF_4XXX_PM_SOU BIT(18)
/* Firmware Binaries */
#define ADF_4XXX_FW "qat_4xxx_fw"
#define ADF_4XXX_MMP "qat_4xxx_mmp_fw"
#define ADF_4XXX_DC_OBJ "qat_4xxx_dc.bin"
#define ADF_4XXX_SYM_OBJ "qat_4xxx_sym.bin"
#define ADF_4XXX_ASYM_OBJ "qat_4xxx_asym.bin"
#define ADF_4XXX_ADMIN_OBJ "qat_4xxx_admin.bin"
/* Only 3 types of images can be loaded including the admin image */
#define ADF_4XXX_MAX_OBJ 3
#define ADF_4XXX_AE_FREQ (1000 * 1000000)
#define ADF_4XXX_KPT_COUNTER_FREQ (100 * 1000000)
#define ADF_4XXX_MIN_AE_FREQ (9 * 1000000)
#define ADF_4XXX_MAX_AE_FREQ (1100 * 1000000)
/* qat_4xxx fuse bits are different from old GENs, redefine them */
/* qat_4xxx fuse bits are different from old GENs, redefine them */
/* A set bit in FUSECTL1 means the corresponding slice is disabled. */
enum icp_qat_4xxx_slice_mask {
	ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
	ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1),
	ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2),
	ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
	ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
	ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
	ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6),
};
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
#endif

View File

@ -0,0 +1,267 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007 - 2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "adf_4xxx_hw_data.h"
#include "adf_gen4_hw_data.h"
#include "adf_fw_counters.h"
#include "adf_cfg_device.h"
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <machine/bus_dma.h>
#include <dev/pci/pcireg.h>
#include "adf_heartbeat_dbg.h"
#include "adf_cnvnr_freq_counters.h"
static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx");
#define ADF_SYSTEM_DEVICE(device_id) \
{ \
PCI_VENDOR_ID_INTEL, device_id \
}
static const struct pci_device_id adf_pci_tbl[] =
{ ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID),
ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID),
{
0,
} };
/*
 * newbus probe: accept devices whose vendor/device IDs appear in
 * adf_pci_tbl, set the description, and return ENXIO otherwise.
 */
static int
adf_probe(device_t dev)
{
	const struct pci_device_id *entry;

	for (entry = adf_pci_tbl; entry->vendor != 0; entry++) {
		if (pci_get_vendor(dev) != entry->vendor ||
		    pci_get_device(dev) != entry->device)
			continue;

		device_set_desc(dev,
				"Intel " ADF_4XXX_DEVICE_NAME " QuickAssist");
		return BUS_PROBE_GENERIC;
	}

	return ENXIO;
}
/*
 * Release everything adf_attach() may have acquired, in reverse order:
 * the busdma tag, any mapped BAR resources, the generation-specific
 * hw_device data, the configuration table, and finally the accel-table
 * registration.  Safe on a partially attached device — every step
 * first checks that its resource exists.
 */
static void
adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
	int bar_nr;

	if (accel_dev->dma_tag)
		bus_dma_tag_destroy(accel_dev->dma_tag);

	for (bar_nr = 0; bar_nr < ADF_PCI_MAX_BARS; bar_nr++) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[bar_nr];

		if (bar->virt_addr == NULL)
			continue;
		bus_free_resource(accel_pci_dev->pci_dev,
				  SYS_RES_MEMORY,
				  bar->virt_addr);
	}

	if (accel_dev->hw_device != NULL) {
		/* Only the device IDs this driver attaches to need the
		 * 4xxx-specific cleanup hook.
		 */
		switch (pci_get_device(accel_pci_dev->pci_dev)) {
		case ADF_4XXX_PCI_DEVICE_ID:
		case ADF_401XX_PCI_DEVICE_ID:
			adf_clean_hw_data_4xxx(accel_dev->hw_device);
			break;
		default:
			break;
		}
		free(accel_dev->hw_device, M_QAT_4XXX);
		accel_dev->hw_device = NULL;
	}
	adf_cfg_dev_remove(accel_dev);
	adf_devmgr_rm_dev(accel_dev, NULL);
}
/*
 * Newbus attach: configure the PCI function, register the device with
 * the accel table, map its BARs, build the device configuration table
 * and bring the accelerator through init + start.  Statement order is
 * load-bearing: adf_devmgr_add_dev() must come first because
 * adf_cleanup_accel() on the error ladder unconditionally de-registers
 * the device.
 */
static int
adf_attach(device_t dev)
{
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
unsigned int i, bar_nr;
int ret, rid;
struct adf_cfg_device *cfg_dev = NULL;
/* Set pci MaxPayLoad to 256. Implemented to avoid the issue of
 * Pci-passthrough causing Maxpayload to be reset to 128 bytes
 * when the device is reset.
 */
if (pci_get_max_payload(dev) != 256)
pci_set_max_payload(dev, 256);
accel_dev = device_get_softc(dev);
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = dev;
/* Record the device's NUMA domain; fall back to 0 if unavailable. */
if (bus_get_domain(dev, &accel_pci_dev->node) != 0)
accel_pci_dev->node = 0;
/* Add accel device to accel table.
 * This should be called before adf_cleanup_accel is called
 */
if (adf_devmgr_add_dev(accel_dev, NULL)) {
device_printf(dev, "Failed to add new accelerator device.\n");
return ENXIO;
}
/* Allocate and configure device configuration structure */
hw_data = malloc(sizeof(*hw_data), M_QAT_4XXX, M_WAITOK | M_ZERO);
accel_dev->hw_device = hw_data;
adf_init_hw_data_4xxx(accel_dev->hw_device);
accel_pci_dev->revid = pci_get_revid(dev);
/* Cache the FUSECTL4 register (NOTE(review): presumably decoded via
 * icp_qat_4xxx_slice_mask by the capability callbacks — confirm).
 */
hw_data->fuses = pci_read_config(dev, ADF_4XXX_FUSECTL4_OFFSET, 4);
/* Revision 0 silicon (A0 stepping) is explicitly rejected. */
if (accel_pci_dev->revid == 0x00) {
device_printf(dev, "A0 stepping is not supported.\n");
ret = ENODEV;
goto out_err;
}
/* Get PPAERUCM values and store */
ret = adf_aer_store_ppaerucm_reg(dev, hw_data);
if (ret)
goto out_err;
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(accel_dev);
hw_data->ae_mask = hw_data->get_ae_mask(accel_dev);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* If the device has no acceleration engines then ignore it. */
/* The (~ae_mask & 0x01) term additionally requires AE 0 to be present
 * (NOTE(review): presumably because the admin engine lives there —
 * confirm).
 */
if (!hw_data->accel_mask || !hw_data->ae_mask ||
(~hw_data->ae_mask & 0x01)) {
device_printf(dev, "No acceleration units found\n");
ret = ENXIO;
goto out_err;
}
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
goto out_err;
ret = adf_clock_debugfs_add(accel_dev);
if (ret)
goto out_err;
pci_set_max_read_req(dev, 1024);
/* Parent busdma tag for the driver: byte alignment, no boundary,
 * full address range, single segment of unrestricted size.
 */
ret = bus_dma_tag_create(bus_get_dma_tag(dev),
1,
0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL,
NULL,
BUS_SPACE_MAXSIZE,
/* BUS_SPACE_UNRESTRICTED */ 1,
BUS_SPACE_MAXSIZE,
0,
NULL,
NULL,
&accel_dev->dma_tag);
if (ret)
goto out_err;
if (hw_data->get_accel_cap) {
hw_data->accel_capabilities_mask =
hw_data->get_accel_cap(accel_dev);
}
/* Find and map all the device's BARS */
/* bar_nr walks the PCI BAR registers; i indexes only BARs that have a
 * memory resource, so pci_bars[] stays densely packed.
 */
i = 0;
for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0;
bar_nr++) {
struct adf_bar *bar;
rid = PCIR_BAR(bar_nr);
if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0)
continue;
bar = &accel_pci_dev->pci_bars[i++];
bar->virt_addr = bus_alloc_resource_any(dev,
SYS_RES_MEMORY,
&rid,
RF_ACTIVE);
if (!bar->virt_addr) {
device_printf(dev, "Failed to map BAR %d\n", bar_nr);
ret = ENXIO;
goto out_err;
}
bar->base_addr = rman_get_start(bar->virt_addr);
bar->size = rman_get_size(bar->virt_addr);
}
pci_enable_busmaster(dev);
/* config_device is mandatory; adf_init_hw_data_4xxx() is expected to
 * have installed it.
 */
if (!accel_dev->hw_device->config_device) {
ret = EFAULT;
goto out_err;
}
ret = accel_dev->hw_device->config_device(accel_dev);
if (ret)
goto out_err;
ret = adf_dev_init(accel_dev);
if (ret)
goto out_dev_shutdown;
ret = adf_dev_start(accel_dev);
if (ret)
goto out_dev_stop;
/* Release the staged config-device structure now that start
 * succeeded; note it is freed with M_QAT, the common code's malloc
 * type, since the cfg layer allocated it.
 */
cfg_dev = accel_dev->cfg->dev;
adf_cfg_device_clear(cfg_dev, accel_dev);
free(cfg_dev, M_QAT);
accel_dev->cfg->dev = NULL;
return ret;
/* Error ladder: unwind from the deepest step that succeeded;
 * adf_cleanup_accel() tears down everything acquired before
 * adf_dev_init().
 */
out_dev_stop:
adf_dev_stop(accel_dev);
out_dev_shutdown:
adf_dev_shutdown(accel_dev);
out_err:
adf_cleanup_accel(accel_dev);
return ret;
}
/*
 * Newbus detach: stop the accelerator, shut it down and release all
 * attach-time resources.  Detach is refused with EBUSY when the
 * device cannot be stopped.
 */
static int
adf_detach(device_t dev)
{
	struct adf_accel_dev *accel_dev = device_get_softc(dev);
	int ret;

	ret = adf_dev_stop(accel_dev);
	if (ret != 0) {
		device_printf(dev, "Failed to stop QAT accel dev\n");
		return EBUSY;
	}

	adf_dev_shutdown(accel_dev);
	adf_cleanup_accel(accel_dev);
	return 0;
}
/* Newbus method table wiring probe/attach/detach to the handlers above. */
static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe),
DEVMETHOD(device_attach, adf_attach),
DEVMETHOD(device_detach, adf_detach),
DEVMETHOD_END };
/* The softc is the whole adf_accel_dev; newbus allocates one per device. */
static driver_t adf_driver = { "qat",
adf_methods,
sizeof(struct adf_accel_dev) };
/* SI_ORDER_THIRD defers registration (NOTE(review): presumably so the
 * depended-on modules below are initialized first — confirm).
 */
DRIVER_MODULE_ORDERED(qat_4xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD);
MODULE_VERSION(qat_4xxx, 1);
MODULE_DEPEND(qat_4xxx, qat_common, 1, 1, 1);
MODULE_DEPEND(qat_4xxx, qat_api, 1, 1, 1);
MODULE_DEPEND(qat_4xxx, linuxkpi, 1, 1, 1);

View File

@ -6,6 +6,7 @@
#include <adf_cfg.h>
#include <adf_pf2vf_msg.h>
#include <adf_dev_err.h>
#include <adf_gen2_hw_data.h>
#include "adf_c3xxx_hw_data.h"
#include "icp_qat_hw.h"
#include "adf_heartbeat.h"
@ -368,6 +369,7 @@ adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->get_errsou_offset = get_errsou_offset;
hw_data->get_clock_speed = get_clock_speed;
hw_data->get_sku = get_sku;
hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
hw_data->fw_name = ADF_C3XXX_FW;
hw_data->fw_mmp_name = ADF_C3XXX_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
@ -406,6 +408,8 @@ adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
hw_data->pre_reset = adf_dev_pre_reset;
hw_data->post_reset = adf_dev_post_reset;
adf_gen2_init_hw_csr_info(&hw_data->csr_info);
}
void

View File

@ -133,6 +133,7 @@ adf_attach(device_t dev)
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(accel_dev);
hw_data->ae_mask = hw_data->get_ae_mask(accel_dev);
hw_data->admin_ae_mask = hw_data->ae_mask;
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* If the device has no acceleration engines then ignore it. */

View File

@ -9,6 +9,7 @@
#include <adf_dev_err.h>
#include <adf_cfg.h>
#include <adf_fw_counters.h>
#include <adf_gen2_hw_data.h>
#include "adf_c4xxx_hw_data.h"
#include "adf_c4xxx_reset.h"
#include "adf_c4xxx_inline.h"
@ -754,7 +755,6 @@ c4xxx_get_hw_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY |
ICP_ACCEL_CAPABILITIES_ECEDMONT;
if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
@ -2128,74 +2128,6 @@ configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
}
}
/*
 * Poll the firmware heartbeat counters of every enabled AE thread and
 * return EIO when any thread has stopped making progress for at least
 * ADF_CFG_HB_COUNT_THRESHOLD consecutive polls.  A thread counts as
 * stuck when its response counter did not advance since the previous
 * poll while a request is outstanding (req != resp); the admin thread
 * is checked unconditionally.
 */
static int
adf_get_heartbeat_status_c4xxx(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_device = accel_dev->hw_device;
struct icp_qat_fw_init_c4xxx_admin_hb_stats *live_s =
(struct icp_qat_fw_init_c4xxx_admin_hb_stats *)
accel_dev->admin->virt_hb_addr;
const size_t max_aes = hw_device->get_num_aes(hw_device);
const size_t stats_size =
max_aes * sizeof(struct icp_qat_fw_init_c4xxx_admin_hb_stats);
int ret = 0;
size_t ae = 0, thr;
unsigned long ae_mask = 0;
int num_threads_per_ae = ADF_NUM_THREADS_PER_AE;
/*
 * Memory layout of Heartbeat
 *
 * +----------------+----------------+---------+
 * | Live value | Last value | Count |
 * +----------------+----------------+---------+
 * \_______________/\_______________/\________/
 * ^ ^ ^
 * | | |
 * | | max_aes * sizeof(adf_hb_count)
 * | max_aes *
 * sizeof(icp_qat_fw_init_c4xxx_admin_hb_stats)
 * max_aes * sizeof(icp_qat_fw_init_c4xxx_admin_hb_stats)
 */
struct icp_qat_fw_init_c4xxx_admin_hb_stats *curr_s;
struct icp_qat_fw_init_c4xxx_admin_hb_stats *last_s = live_s + max_aes;
struct adf_hb_count *count = (struct adf_hb_count *)(last_s + max_aes);
/* Snapshot the live counters so comparison and the copy-back below
 * operate on one consistent view.
 */
curr_s = malloc(stats_size, M_QAT, M_WAITOK | M_ZERO);
memcpy(curr_s, live_s, stats_size);
ae_mask = hw_device->ae_mask;
for_each_set_bit(ae, &ae_mask, max_aes)
{
for (thr = 0; thr < num_threads_per_ae; ++thr) {
struct icp_qat_fw_init_admin_hb_cnt *curr =
&curr_s[ae].stats[thr];
struct icp_qat_fw_init_admin_hb_cnt *prev =
&last_s[ae].stats[thr];
u16 req = curr->req_heartbeat_cnt;
u16 resp = curr->resp_heartbeat_cnt;
u16 last = prev->resp_heartbeat_cnt;
if ((thr == ADF_AE_ADMIN_THREAD || req != resp) &&
resp == last) {
/* No progress since the last poll: bump the
 * per-thread retry counter and fail once the
 * threshold is reached.
 */
u16 retry = ++count[ae].ae_thread[thr];
if (retry >= ADF_CFG_HB_COUNT_THRESHOLD)
ret = EIO;
} else {
count[ae].ae_thread[thr] = 0;
}
}
}
/* Copy current stats for the next iteration */
memcpy(last_s, curr_s, stats_size);
free(curr_s, M_QAT);
return ret;
}
void
adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
{
@ -2230,6 +2162,7 @@ adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
hw_data->get_clock_speed = get_clock_speed;
hw_data->get_eth_doorbell_msg = get_eth_doorbell_msg;
hw_data->get_sku = get_sku;
hw_data->heartbeat_ctr_num = ADF_NUM_THREADS_PER_AE;
hw_data->check_prod_sku = c4xxx_check_prod_sku;
hw_data->fw_name = ADF_C4XXX_FW;
hw_data->fw_mmp_name = ADF_C4XXX_MMP;
@ -2256,7 +2189,7 @@ adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
hw_data->reset_hw_units = adf_c4xxx_reset_hw_units;
hw_data->exit_accel_units = adf_exit_accel_units;
hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
hw_data->get_heartbeat_status = adf_get_heartbeat_status_c4xxx;
hw_data->get_heartbeat_status = adf_get_heartbeat_status;
hw_data->get_ae_clock = get_ae_clock;
hw_data->clock_frequency = ADF_C4XXX_AE_FREQ;
hw_data->measure_clock = measure_clock;
@ -2275,6 +2208,9 @@ adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
hw_data->count_ras_event = adf_fw_count_ras_event;
hw_data->config_device = adf_config_device;
hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;
adf_gen2_init_hw_csr_info(&hw_data->csr_info);
hw_data->csr_info.arb_enable_mask = 0xF;
}
void

View File

@ -130,6 +130,7 @@ adf_attach(device_t dev)
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(accel_dev);
hw_data->ae_mask = hw_data->get_ae_mask(accel_dev);
hw_data->admin_ae_mask = hw_data->ae_mask;
/* If the device has no acceleration engines then ignore it. */
if (!hw_data->accel_mask || !hw_data->ae_mask ||

View File

@ -6,6 +6,7 @@
#include <adf_cfg.h>
#include <adf_pf2vf_msg.h>
#include <adf_dev_err.h>
#include <adf_gen2_hw_data.h>
#include "adf_c62x_hw_data.h"
#include "icp_qat_hw.h"
#include "adf_cfg.h"
@ -373,6 +374,7 @@ adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->get_errsou_offset = get_errsou_offset;
hw_data->get_clock_speed = get_clock_speed;
hw_data->get_sku = get_sku;
hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
hw_data->fw_name = ADF_C62X_FW;
hw_data->fw_mmp_name = ADF_C62X_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
@ -411,6 +413,8 @@ adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
hw_data->pre_reset = adf_dev_pre_reset;
hw_data->post_reset = adf_dev_post_reset;
adf_gen2_init_hw_csr_info(&hw_data->csr_info);
}
void

View File

@ -133,6 +133,7 @@ adf_attach(device_t dev)
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(accel_dev);
hw_data->ae_mask = hw_data->get_ae_mask(accel_dev);
hw_data->admin_ae_mask = hw_data->ae_mask;
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* If the device has no acceleration engines then ignore it. */
if (!hw_data->accel_mask || !hw_data->ae_mask ||

View File

@ -7,6 +7,7 @@
#include <adf_pf2vf_msg.h>
#include <adf_common_drv.h>
#include <adf_dev_err.h>
#include <adf_gen2_hw_data.h>
#include "adf_dh895xcc_hw_data.h"
#include "icp_qat_hw.h"
#include "adf_heartbeat.h"
@ -361,6 +362,7 @@ adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->get_clock_speed = get_clock_speed;
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_sku = get_sku;
hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
hw_data->fw_name = ADF_DH895XCC_FW;
hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
hw_data->init_admin_comms = adf_init_admin_comms;
@ -396,6 +398,8 @@ adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
hw_data->pre_reset = adf_dev_pre_reset;
hw_data->post_reset = adf_dev_post_reset;
adf_gen2_init_hw_csr_info(&hw_data->csr_info);
}
void

View File

@ -126,6 +126,7 @@ adf_attach(device_t dev)
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(accel_dev);
hw_data->ae_mask = hw_data->get_ae_mask(accel_dev);
hw_data->admin_ae_mask = hw_data->ae_mask;
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* If the device has no acceleration engines then ignore it. */
if (!hw_data->accel_mask || !hw_data->ae_mask ||

View File

@ -44,6 +44,7 @@ SRCS+= common/crypto/sym/qat/lac_sym_qat.c
SRCS+= common/crypto/sym/qat/lac_sym_qat_hash.c
SRCS+= common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c
SRCS+= common/crypto/sym/qat/lac_sym_qat_cipher.c
SRCS+= common/crypto/sym/qat/lac_sym_qat_constants_table.c
SRCS+= common/crypto/sym/qat/lac_sym_qat_key.c
SRCS+= common/crypto/sym/key/lac_sym_key.c
SRCS+= common/stubs/lac_stubs.c

View File

@ -9,6 +9,8 @@ SRCS+= adf_accel_engine.c adf_freebsd_admin.c adf_aer.c adf_cfg.c qat_common_mod
SRCS+= adf_heartbeat.c adf_freebsd_heartbeat_dbg.c
SRCS+= adf_dev_mgr.c adf_hw_arbiter.c
SRCS+= adf_init.c adf_transport.c adf_isr.c adf_fw_counters.c adf_dev_err.c
SRCS+= adf_gen2_hw_data.c
SRCS+= adf_gen4_hw_data.c
SRCS+= qat_freebsd.c
SRCS+= adf_freebsd_cfg_dev_dbg.c adf_freebsd_ver_dbg.c
SRCS+= adf_cfg_device.c adf_cfg_section.c adf_cfg_instance.c adf_cfg_bundle.c

View File

@ -6,6 +6,7 @@
KMOD= qat_hw
SRCS+= qat_c62x/adf_c62x_hw_data.c qat_c62x/adf_drv.c
SRCS+= qat_200xx/adf_200xx_hw_data.c qat_200xx/adf_drv.c
SRCS+= qat_4xxx/adf_4xxx_hw_data.c qat_4xxx/adf_drv.c
SRCS+= qat_c3xxx/adf_c3xxx_hw_data.c qat_c3xxx/adf_drv.c
SRCS+= qat_dh895xcc/adf_dh895xcc_hw_data.c qat_dh895xcc/adf_drv.c
SRCS+= qat_c4xxx/adf_c4xxx_hw_data.c qat_c4xxx/adf_drv.c qat_c4xxx/adf_c4xxx_ae_config.c qat_c4xxx/adf_c4xxx_misc_error_stats.c

View File

@ -5,6 +5,7 @@ SUBDIR= qat_c62x \
qat_200xx \
qat_c3xxx \
qat_c4xxx \
qat_dh895xcc
qat_dh895xcc \
qat_4xxx
.include <bsd.subdir.mk>

Some files were not shown because too many files have changed in this diff Show More