common/qat: update firmware headers

Updated to latest firmware headers files for QuickAssist devices.
Includes updates for symmetric crypto, PKE and Compression services.

Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
Authored by Fiona Trahe on 2018-07-13 03:28:10 +01:00; committed by Pablo de Lara.
Parent commit: 711f43ba56
Commit: b769101307
3 changed files with 654 additions and 27 deletions

View File

@ -117,6 +117,10 @@ struct icp_qat_fw_comn_resp {
#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 #define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 #define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F #define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6
#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1
#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ #define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
icp_qat_fw_comn_req_hdr_t.service_type icp_qat_fw_comn_req_hdr_t.service_type
@ -133,6 +137,16 @@ struct icp_qat_fw_comn_resp {
#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
/* Extract the CnVnR (CNV recovery) flag from a response header's
 * hdr_flags field (bit 5; see ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS/MASK).
 */
#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
QAT_FIELD_GET(hdr_flags, \
ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
/* Extract the CNV (compress-and-verify) flag from a response header's
 * hdr_flags field (bit 6; see ICP_QAT_FW_COMN_CNV_FLAG_BITPOS/MASK).
 */
#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
QAT_FIELD_GET(hdr_flags, \
ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
ICP_QAT_FW_COMN_CNV_FLAG_MASK)
#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ #define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
@ -204,29 +218,44 @@ struct icp_qat_fw_comn_resp {
& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); } ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
/* Setters for the combined next/current slice id byte used in content
 * descriptor control blocks: the next id occupies the most significant
 * nibble (ICP_QAT_FW_COMN_NEXT_ID_MASK), the current id the least
 * significant nibble (ICP_QAT_FW_COMN_CURR_ID_MASK). Wrapped in
 * do { } while (0) so each expands to a single statement.
 *
 * Fix: the assignment inside the do/while was missing its terminating
 * semicolon, so any use of these macros failed to compile.
 */
#define ICP_QAT_FW_COMN_NEXT_ID_SET_2(next_curr_id, val) \
do { \
	(next_curr_id) = \
		(((next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
		(((val) << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \
		ICP_QAT_FW_COMN_NEXT_ID_MASK)); \
} while (0)

#define ICP_QAT_FW_COMN_CURR_ID_SET_2(next_curr_id, val) \
do { \
	(next_curr_id) = \
		(((next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
		((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); \
} while (0)
#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 #define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 #define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \ #define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \ #define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \ #define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
(((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
(((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
(((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ #define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
QAT_COMN_RESP_CRYPTO_STATUS_MASK) QAT_COMN_RESP_CRYPTO_STATUS_MASK)
#define ICP_QAT_FW_COMN_RESP_PKE_STAT_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_PKE_STATUS_BITPOS, \
QAT_COMN_RESP_PKE_STATUS_MASK)
#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ #define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
QAT_COMN_RESP_CMP_STATUS_MASK) QAT_COMN_RESP_CMP_STATUS_MASK)
@ -235,10 +264,18 @@ struct icp_qat_fw_comn_resp {
QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
QAT_COMN_RESP_XLAT_STATUS_MASK) QAT_COMN_RESP_XLAT_STATUS_MASK)
#define ICP_QAT_FW_COMN_RESP_XLT_WA_APPLIED_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS, \
QAT_COMN_RESP_XLT_WA_APPLIED_MASK)
#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ #define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
#define ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 #define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 #define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 #define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
@ -257,8 +294,16 @@ struct icp_qat_fw_comn_resp {
#define ERR_CODE_OVERFLOW_ERROR -11 #define ERR_CODE_OVERFLOW_ERROR -11
#define ERR_CODE_SOFT_ERROR -12 #define ERR_CODE_SOFT_ERROR -12
#define ERR_CODE_FATAL_ERROR -13 #define ERR_CODE_FATAL_ERROR -13
#define ERR_CODE_SSM_ERROR -14 #define ERR_CODE_COMP_OUTPUT_CORRUPTION -14
#define ERR_CODE_ENDPOINT_ERROR -15 #define ERR_CODE_HW_INCOMPLETE_FILE -15
#define ERR_CODE_SSM_ERROR -16
#define ERR_CODE_ENDPOINT_ERROR -17
#define ERR_CODE_CNV_ERROR -18
#define ERR_CODE_EMPTY_DYM_BLOCK -19
#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20
#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21
#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22
#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23
enum icp_qat_fw_slice { enum icp_qat_fw_slice {
ICP_QAT_FW_SLICE_NULL = 0, ICP_QAT_FW_SLICE_NULL = 0,

View File

@ -0,0 +1,482 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
* Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_FW_COMP_H_
#define _ICP_QAT_FW_COMP_H_
#include "icp_qat_fw.h"
/* Compression service command ids, carried in the common request header
 * to select which operation the firmware performs on a request.
 */
enum icp_qat_fw_comp_cmd_id {
ICP_QAT_FW_COMP_CMD_STATIC = 0,
/*!< Static Compress Request */
ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
/*!< Dynamic Compress Request */
ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
/*!< Decompress Request */
ICP_QAT_FW_COMP_CMD_DELIMITER
/**< Delimiter type - also the number of valid command ids */
};
/**< Flag usage */
#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
/**< @ingroup icp_qat_fw_comp
* Flag representing that session is stateless
*/
#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
/**< @ingroup icp_qat_fw_comp
* Flag representing that session is stateful
*/
#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
/**< @ingroup icp_qat_fw_comp
* Flag representing that autoselectbest is NOT used
*/
#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
/**< @ingroup icp_qat_fw_comp
* Flag representing that autoselectbest is used
*/
#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
/**< @ingroup icp_qat_fw_comp
* Flag representing that enhanced autoselectbest is NOT used
*/
#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
/**< @ingroup icp_qat_fw_comp
* Flag representing that enhanced autoselectbest is used
*/
#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
/**< @ingroup icp_qat_fw_comp
* Flag representing that enhanced autoselectbest is NOT used
*/
#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
/**< @ingroup icp_qat_fw_comp
* Flag representing that enhanced autoselectbest is used
*/
#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
/**< @ingroup icp_qat_fw_comp
* Flag representing secure RAM from being used as
* an intermediate buffer is DISABLED.
*/
#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
/**< @ingroup icp_qat_fw_comp
* Flag representing secure RAM from being used as
* an intermediate buffer is ENABLED.
*/
/**< Flag mask & bit position */
#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
/**< @ingroup icp_qat_fw_comp
* Starting bit position for the session type
*/
#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
/**< @ingroup icp_qat_fw_comp
* One bit mask used to determine the session type
*/
#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
/**< @ingroup icp_qat_fw_comp
* Starting bit position for auto select best
*/
#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
/**< @ingroup icp_qat_fw_comp
* One bit mask for auto select best
*/
#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
/**< @ingroup icp_qat_fw_comp
* Starting bit position for enhanced auto select best
*/
#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
/**< @ingroup icp_qat_fw_comp
* One bit mask for enhanced auto select best
*/
#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
/**< @ingroup icp_qat_fw_comp
* Starting bit position for disabling type zero header write back
* when Enhanced autoselect best is enabled. If set firmware does
* not return type0 store block header, only copies src to dest.
* (if best output is Type0)
*/
#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
/**< @ingroup icp_qat_fw_comp
* One bit mask for auto select best
*/
#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
/**< @ingroup icp_qat_fw_comp
* Starting bit position for flag used to disable secure ram from
* being used as an intermediate buffer.
*/
#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
/**< @ingroup icp_qat_fw_comp
* One bit mask for disable secure ram for use as an intermediate
* buffer.
*/
/* Build the compression session command flags word from the session
 * flags defined above: each argument is masked to its field width and
 * shifted to its bit position (session type bit 2, auto-select-best
 * bit 3, enhanced ASB bit 4, type0-header-disable bit 5, secure-RAM
 * intermediate-buffer disable bit 7).
 */
#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
ret_uncomp, secure_ram) \
((((sesstype)&ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \
<< ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
(((autoselect)&ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \
<< ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
(((enhanced_asb)&ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) \
<< ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
(((ret_uncomp)&ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) \
<< ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
(((secure_ram)&ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) \
<< ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
/* LWs 2-5 of the compression request: either a pointer to an external
 * content descriptor parameter block (member 's') or the compression
 * slice config word carried inline (member 'sl').
 */
union icp_qat_fw_comp_req_hdr_cd_pars {
/**< LWs 2-5 */
struct {
uint64_t content_desc_addr;
/**< Address of the content descriptor */
uint16_t content_desc_resrvd1;
/**< Content descriptor reserved field */
uint8_t content_desc_params_sz;
/**< Size of the content descriptor parameters in quad words.
* These parameters describe the session setup configuration
* info for the slices that this request relies upon i.e.
* the configuration word and cipher key needed by the cipher
* slice if there is a request for cipher processing.
*/
uint8_t content_desc_hdr_resrvd2;
/**< Content descriptor reserved field */
uint32_t content_desc_resrvd3;
/**< Content descriptor reserved field */
} s;
struct {
uint32_t comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
/* Compression Slice Config Word */
uint32_t content_desc_resrvd4;
/**< Content descriptor reserved field */
} sl;
};
/* Per-request compression parameters, LWs 14-19 of the request
 * descriptor (LW = 32-bit longword).
 */
struct icp_qat_fw_comp_req_params {
/**< LW 14 */
uint32_t comp_len;
/**< Size of input to process in bytes Note: Only EOP requests can be
* odd for decompression. IA must set LSB to zero for odd sized
* intermediate inputs
*/
/**< LW 15 */
uint32_t out_buffer_sz;
/**< Size of output buffer in bytes */
/**< LW 16 */
uint32_t initial_crc32;
/**< CRC of previously processed bytes */
/**< LW 17 */
uint32_t initial_adler;
/**< Adler of previously processed bytes */
/**< LW 18 */
uint32_t req_par_flags;
/**< Request parameter flags; built with
* ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD
*/
/**< LW 19 */
uint32_t rsrvd;
};
/* Build LW 18 (req_par_flags) of the compression request from the
 * per-request flags defined below: SOP bit 0, EOP bit 1, BFINAL bit 6,
 * CNV bit 16, CNV recovery bit 17.
 *
 * Fix: 'cnv' and 'cnvnr' are now parenthesised like the other
 * arguments, so expression arguments (e.g. 'a | b') are masked as a
 * whole instead of having '&' bind tighter than the caller's operator.
 */
#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr) \
((((sop) & ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) | \
(((eop) & ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) | \
(((bfinal) & ICP_QAT_FW_COMP_BFINAL_MASK) \
<< ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
(((cnv) & ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) | \
(((cnvnr) & ICP_QAT_FW_COMP_CNV_RECOVERY_MASK) \
<< ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS))
#define ICP_QAT_FW_COMP_NOT_SOP 0
/**< @ingroup icp_qat_fw_comp
 * Flag representing that a request is NOT Start of Packet
 */
#define ICP_QAT_FW_COMP_SOP 1
/**< @ingroup icp_qat_fw_comp
 * Flag representing that a request IS Start of Packet
 */
#define ICP_QAT_FW_COMP_NOT_EOP 0
/**< @ingroup icp_qat_fw_comp
 * Flag representing that a request is NOT End of Packet
 */
#define ICP_QAT_FW_COMP_EOP 1
/**< @ingroup icp_qat_fw_comp
 * Flag representing that a request IS End of Packet
 */
#define ICP_QAT_FW_COMP_NOT_BFINAL 0
/**< @ingroup icp_qat_fw_comp
 * Flag representing to indicate firmware this is not the last block
 */
#define ICP_QAT_FW_COMP_BFINAL 1
/**< @ingroup icp_qat_fw_comp
 * Flag representing to indicate firmware this is the last block
 */
#define ICP_QAT_FW_COMP_NO_CNV 0
/**< @ingroup icp_qat_fw_comp
 * Flag indicating that NO cnv check is to be performed on the request
 */
#define ICP_QAT_FW_COMP_CNV 1
/**< @ingroup icp_qat_fw_comp
 * Flag indicating that a cnv check IS to be performed on the request
 */
#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
/**< @ingroup icp_qat_fw_comp
 * Flag indicating that NO cnv recovery is to be performed on the request
 */
#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
/**< @ingroup icp_qat_fw_comp
 * Flag indicating that a cnv recovery is to be performed on the request
 */
#define ICP_QAT_FW_COMP_SOP_BITPOS 0
/**< @ingroup icp_qat_fw_comp
 * Starting bit position for SOP
 */
#define ICP_QAT_FW_COMP_SOP_MASK 0x1
/**< @ingroup icp_qat_fw_comp
 * One bit mask used to determine SOP
 */
#define ICP_QAT_FW_COMP_EOP_BITPOS 1
/**< @ingroup icp_qat_fw_comp
 * Starting bit position for EOP
 */
#define ICP_QAT_FW_COMP_EOP_MASK 0x1
/**< @ingroup icp_qat_fw_comp
 * One bit mask used to determine EOP
 */
#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
/**< @ingroup icp_qat_fw_comp
 * One bit mask for the bfinal bit
 */
#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
/**< @ingroup icp_qat_fw_comp
 * Starting bit position for the bfinal bit
 */
#define ICP_QAT_FW_COMP_CNV_MASK 0x1
/**< @ingroup icp_qat_fw_comp
 * One bit mask for the CNV bit
 */
#define ICP_QAT_FW_COMP_CNV_BITPOS 16
/**< @ingroup icp_qat_fw_comp
 * Starting bit position for the CNV bit
 */
#define ICP_QAT_FW_COMP_CNV_RECOVERY_MASK 0x1
/**< @ingroup icp_qat_fw_comp
 * One bit mask for the CNV Recovery bit
 */
#define ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS 17
/**< @ingroup icp_qat_fw_comp
 * Starting bit position for the CNV Recovery bit
 */
/* Translation (xlat) service request parameters, LWs 20-21 of the
 * compression request descriptor.
 */
struct icp_qat_fw_xlt_req_params {
/**< LWs 20-21 */
uint64_t inter_buff_ptr;
/**< This field specifies the physical address of an intermediate
* buffer SGL array. The array contains a pair of 64-bit
* intermediate buffer pointers to SGL buffer descriptors, one pair
* per CPM. Please refer to the CPM1.6 Firmware Interface HLD
* specification for more details.
*/
};
/* Compression content descriptor control block header, LWs 24-29 of
 * the request descriptor.
 */
struct icp_qat_fw_comp_cd_hdr {
/**< LW 24 */
uint16_t ram_bank_flags;
/**< Flags to show which ram banks to access */
uint8_t comp_cfg_offset;
/**< Quad word offset from the content descriptor parameters address
* to the parameters for the compression processing
*/
uint8_t next_curr_id;
/**< This field combines the next and current id (each four bits) -
* the next id is the most significant nibble.
* Next Id: Set to the next slice to pass the compressed data through.
* Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
* anymore slices after compression
* Current Id: Initialised with the compression slice type
*/
/**< LW 25 */
uint32_t resrvd;
/**< LWs 26-27 */
uint64_t comp_state_addr;
/**< Pointer to compression state */
/**< LWs 28-29 */
uint64_t ram_banks_addr;
/**< Pointer to banks */
};
/* Translation content descriptor control block header, LWs 30-31 of
 * the request descriptor; only the combined next/current slice id
 * byte is meaningful, the rest is reserved.
 */
struct icp_qat_fw_xlt_cd_hdr {
/**< LW 30 */
uint16_t resrvd1;
/**< Reserved field and assumed set to 0 */
uint8_t resrvd2;
/**< Reserved field and assumed set to 0 */
uint8_t next_curr_id;
/**< This field combines the next and current id (each four bits) -
* the next id is the most significant nibble.
* Next Id: Set to the next slice to pass the translated data through.
* Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
* any more slices after compression
* Current Id: Initialised with the translation slice type
*/
/**< LW 31 */
uint32_t resrvd3;
/**< Reserved and should be set to zero, needed for quadword
* alignment
*/
};
/* Complete 32-LW compression (and optional translation) request
 * descriptor as submitted to the firmware ring.
 */
struct icp_qat_fw_comp_req {
/**< LWs 0-1 */
struct icp_qat_fw_comn_req_hdr comn_hdr;
/**< Common request header - for Service Command Id,
* use service-specific Compression Command Id.
* Service Specific Flags - use Compression Command Flags
*/
/**< LWs 2-5 */
union icp_qat_fw_comp_req_hdr_cd_pars cd_pars;
/**< Compression service-specific content descriptor field which points
* either to a content descriptor parameter block or contains the
* compression slice config word.
*/
/**< LWs 6-13 */
struct icp_qat_fw_comn_req_mid comn_mid;
/**< Common request middle section */
/**< LWs 14-19 */
struct icp_qat_fw_comp_req_params comp_pars;
/**< Compression request Parameters block */
/**< LWs 20-21 */
union {
struct icp_qat_fw_xlt_req_params xlt_pars;
/**< Translation request Parameters block */
uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
/**< Reserved if not used for translation */
} u1;
/**< LWs 22-23 */
union {
uint32_t resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
/**< Reserved - not used if Batch and Pack is disabled.*/
uint64_t bnp_res_table_addr;
/**< A generic pointer to the unbounded list of
* icp_qat_fw_resp_comp_pars members. This pointer is only
* used when the Batch and Pack is enabled.
*/
} u3;
/**< LWs 24-29 */
struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
/**< Compression request content descriptor control block header */
/**< LWs 30-31 */
union {
struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
/**< Translation request content descriptor
* control block header
*/
uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];
/**< Reserved if not used for translation */
} u2;
};
/* Compression response parameters (LWs 4-7 of the response): byte
 * counters plus the running checksum. The anonymous struct/union
 * (C11) lets curr_crc32/curr_adler_32 alias the two halves of the
 * 64-bit curr_chksum.
 */
struct icp_qat_fw_resp_comp_pars {
/**< LW 4 */
uint32_t input_byte_counter;
/**< Input byte counter */
/**< LW 5 */
uint32_t output_byte_counter;
/**< Output byte counter */
/**< LW 6 & 7*/
union {
uint64_t curr_chksum;
struct {
/**< LW 6 */
uint32_t curr_crc32;
/**< LW 7 */
uint32_t curr_adler_32;
};
};
};
/* Compression service response message read back from the firmware
 * ring (LWs 0-7).
 */
struct icp_qat_fw_comp_resp {
/**< LWs 0-1 */
struct icp_qat_fw_comn_resp_hdr comn_resp;
/**< Common interface response format see icp_qat_fw.h */
/**< LWs 2-3 */
uint64_t opaque_data;
/**< Opaque data passed from the request to the response message */
/**< LWs 4-7 */
struct icp_qat_fw_resp_comp_pars comp_resp_pars;
/**< Common response params (checksums and byte counts) */
};
#endif

View File

@ -72,19 +72,44 @@ struct icp_qat_hw_auth_config {
#define QAT_AUTH_ALGO_MASK 0xF #define QAT_AUTH_ALGO_MASK 0xF
#define QAT_AUTH_CMP_BITPOS 8 #define QAT_AUTH_CMP_BITPOS 8
#define QAT_AUTH_CMP_MASK 0x7F #define QAT_AUTH_CMP_MASK 0x7F
#define QAT_AUTH_SHA3_PADDING_BITPOS 16 #define QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS 16
#define QAT_AUTH_SHA3_PADDING_MASK 0x1 #define QAT_AUTH_SHA3_PADDING_DISABLE_MASK 0x1
#define QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS 17
#define QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK 0x1
#define QAT_AUTH_ALGO_SHA3_BITPOS 22 #define QAT_AUTH_ALGO_SHA3_BITPOS 22
#define QAT_AUTH_ALGO_SHA3_MASK 0x3 #define QAT_AUTH_ALGO_SHA3_MASK 0x3
#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ #define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS 16
(((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ #define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK 0xF
((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ #define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS 24
(((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \ #define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK 0xFF
QAT_AUTH_ALGO_SHA3_BITPOS) | \ #define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0
(((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \ #define QAT_AUTH_SHA3_HW_PADDING_DISABLE 1
(algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \ #define QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT 0
& QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \ #define QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT 0
((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) #define QAT_AUTH_SHA3_PADDING_OVERRIDE_PROGRAMMABLE 1
#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED 0
#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED 0
/* Build the lower 32 bits of the auth config word from the hash mode,
 * algorithm id and comparator (digest compare) length. The SHA3 algo
 * bits are taken from bits 4+ of the algo value; SHA3 padding control
 * is left at its hardware default.
 *
 * Fix: 'algo' is now parenthesised in the '>> 4' term like every other
 * argument use, so expression arguments expand correctly.
 */
#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
(((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
((((algo) >> 4) & QAT_AUTH_ALGO_SHA3_MASK) \
<< QAT_AUTH_ALGO_SHA3_BITPOS) | \
(((QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT) & \
QAT_AUTH_SHA3_PADDING_DISABLE_MASK) \
<< QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS) | \
(((QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT) & \
QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK) \
<< QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS) | \
(((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))

/* Upper 32 bits of the auth config word: SHA3 programmable padding
 * prefix/postfix fields, currently set to their reserved (zero)
 * values.
 */
#define ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER \
((((QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED) & \
QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK) \
<< QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS) | \
(((QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED) & \
QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK) \
<< QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS))
struct icp_qat_hw_auth_counter { struct icp_qat_hw_auth_counter {
uint32_t counter; uint32_t counter;
@ -107,13 +132,13 @@ struct icp_qat_hw_auth_setup {
#define ICP_QAT_HW_MD5_STATE1_SZ 16 #define ICP_QAT_HW_MD5_STATE1_SZ 16
#define ICP_QAT_HW_SHA1_STATE1_SZ 20 #define ICP_QAT_HW_SHA1_STATE1_SZ 20
#define ICP_QAT_HW_SHA224_STATE1_SZ 32 #define ICP_QAT_HW_SHA224_STATE1_SZ 32
#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
#define ICP_QAT_HW_SHA256_STATE1_SZ 32 #define ICP_QAT_HW_SHA256_STATE1_SZ 32
#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32 #define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
#define ICP_QAT_HW_SHA384_STATE1_SZ 64 #define ICP_QAT_HW_SHA384_STATE1_SZ 64
#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
#define ICP_QAT_HW_SHA512_STATE1_SZ 64 #define ICP_QAT_HW_SHA512_STATE1_SZ 64
#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64 #define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16 #define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16 #define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
#define ICP_QAT_HW_AES_F9_STATE1_SZ 32 #define ICP_QAT_HW_AES_F9_STATE1_SZ 32
@ -121,17 +146,18 @@ struct icp_qat_hw_auth_setup {
#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16 #define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8 #define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8 #define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
#define ICP_QAT_HW_NULL_STATE2_SZ 32 #define ICP_QAT_HW_NULL_STATE2_SZ 32
#define ICP_QAT_HW_MD5_STATE2_SZ 16 #define ICP_QAT_HW_MD5_STATE2_SZ 16
#define ICP_QAT_HW_SHA1_STATE2_SZ 20 #define ICP_QAT_HW_SHA1_STATE2_SZ 20
#define ICP_QAT_HW_SHA224_STATE2_SZ 32 #define ICP_QAT_HW_SHA224_STATE2_SZ 32
#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
#define ICP_QAT_HW_SHA256_STATE2_SZ 32 #define ICP_QAT_HW_SHA256_STATE2_SZ 32
#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0 #define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
#define ICP_QAT_HW_SHA384_STATE2_SZ 64 #define ICP_QAT_HW_SHA384_STATE2_SZ 64
#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
#define ICP_QAT_HW_SHA512_STATE2_SZ 64 #define ICP_QAT_HW_SHA512_STATE2_SZ 64
#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0 #define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48 #define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16 #define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16 #define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
@ -154,6 +180,12 @@ struct icp_qat_hw_auth_sha512 {
uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
}; };
/* SHA3-512 auth algorithm block: inner setup followed by the 64-byte
 * inner state1, then the outer setup. There is no state2 array here;
 * ICP_QAT_HW_SHA3_512_STATE2_SZ is defined as 0.
 */
struct icp_qat_hw_auth_sha3_512 {
struct icp_qat_hw_auth_setup inner_setup;
uint8_t state1[ICP_QAT_HW_SHA3_512_STATE1_SZ];
struct icp_qat_hw_auth_setup outer_setup;
};
struct icp_qat_hw_auth_algo_blk { struct icp_qat_hw_auth_algo_blk {
struct icp_qat_hw_auth_sha512 sha; struct icp_qat_hw_auth_sha512 sha;
}; };
@ -283,4 +315,72 @@ struct icp_qat_hw_cipher_algo_blk {
uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ]; uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
} __rte_cache_aligned; } __rte_cache_aligned;
/* ========================================================================= */
/* COMPRESSION SLICE */
/* ========================================================================= */
/* Direction of operation for the compression slice. */
enum icp_qat_hw_compression_direction {
ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
/**< Delimiter - number of valid direction values */
};
/* Delayed-match mode enable for the compression slice. */
enum icp_qat_hw_compression_delayed_match {
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
/**< Delimiter - number of valid values */
};
/* Compression algorithm selector (deflate or LZS). */
enum icp_qat_hw_compression_algo {
ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
/**< Delimiter - number of valid algorithms */
};
/* Compression search depth setting; the enum values encode depths
 * 1, 4, 8 and 16 respectively (per the constant names).
 */
enum icp_qat_hw_compression_depth {
ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 4
/**< Delimiter - number of valid depth settings */
};
/* Compression file type selector (types 0-4; semantics defined by the
 * firmware interface specification).
 */
enum icp_qat_hw_compression_file_type {
ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
/**< Delimiter - number of valid file types */
};
/* Compression slice configuration block: 'val' holds the config word
 * built with ICP_QAT_HW_COMPRESSION_CONFIG_BUILD; 'reserved' pads the
 * block to 64 bits.
 */
struct icp_qat_hw_compression_config {
uint32_t val;
uint32_t reserved;
};
/* Bit layout of the compression slice config word. */
#define QAT_COMPRESSION_DIR_BITPOS 4
#define QAT_COMPRESSION_DIR_MASK 0x7
#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
#define QAT_COMPRESSION_ALGO_BITPOS 31
#define QAT_COMPRESSION_ALGO_MASK 0x1
#define QAT_COMPRESSION_DEPTH_BITPOS 28
#define QAT_COMPRESSION_DEPTH_MASK 0x7
#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF

/* Build the compression slice config word from the direction,
 * delayed-match enable, algorithm, search depth and file type enums
 * defined above.
 *
 * Fix: each field is converted to uint32_t before shifting. The algo
 * field lands in bit 31, and left-shifting a signed int into the sign
 * bit is undefined behaviour in C, so the arithmetic must be unsigned.
 */
#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( \
	dir, delayed, algo, depth, filetype) \
((((uint32_t)(dir) & QAT_COMPRESSION_DIR_MASK) \
	<< QAT_COMPRESSION_DIR_BITPOS) | \
(((uint32_t)(delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) \
	<< QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
(((uint32_t)(algo) & QAT_COMPRESSION_ALGO_MASK) \
	<< QAT_COMPRESSION_ALGO_BITPOS) | \
(((uint32_t)(depth) & QAT_COMPRESSION_DEPTH_MASK) \
	<< QAT_COMPRESSION_DEPTH_BITPOS) | \
(((uint32_t)(filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) \
	<< QAT_COMPRESSION_FILE_TYPE_BITPOS))
#endif #endif