compress/qat: define gen-specific structs and functions

This patch adds the compression data structures and function
prototypes for different QAT generations.

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
Signed-off-by: Arek Kusztal <arkadiuszx.kusztal@intel.com>
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Ciara Power <ciara.power@intel.com>
commit 4c6912d3ac (parent 4c778f1a02)
Author: Fan Zhang
Date: 2021-11-04 10:34:53 +00:00
Committed by: Akhil Goyal

8 changed files with 675 additions and 174 deletions

@@ -0,0 +1,195 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
* Copyright(c) 2021 Intel Corporation
*/
#ifndef _ICP_QAT_HW_GEN4_COMP_H_
#define _ICP_QAT_HW_GEN4_COMP_H_
#include "icp_qat_fw.h"
#include "icp_qat_hw_gen4_comp_defs.h"
struct icp_qat_hw_comp_20_config_csr_lower {
icp_qat_hw_comp_20_extended_delay_match_mode_t edmm;
icp_qat_hw_comp_20_hw_comp_format_t algo;
icp_qat_hw_comp_20_search_depth_t sd;
icp_qat_hw_comp_20_hbs_control_t hbs;
icp_qat_hw_comp_20_abd_t abd;
icp_qat_hw_comp_20_lllbd_ctrl_t lllbd;
icp_qat_hw_comp_20_min_match_control_t mmctrl;
icp_qat_hw_comp_20_skip_hash_collision_t hash_col;
icp_qat_hw_comp_20_skip_hash_update_t hash_update;
icp_qat_hw_comp_20_byte_skip_t skip_ctrl;
};
static inline uint32_t ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(
struct icp_qat_hw_comp_20_config_csr_lower csr)
{
uint32_t val32 = 0;
QAT_FIELD_SET(val32, csr.algo,
ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK);
QAT_FIELD_SET(val32, csr.sd,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK);
QAT_FIELD_SET(val32, csr.edmm,
ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK);
QAT_FIELD_SET(val32, csr.hbs,
ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.lllbd,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
QAT_FIELD_SET(val32, csr.mmctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.hash_col,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK);
QAT_FIELD_SET(val32, csr.hash_update,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK);
QAT_FIELD_SET(val32, csr.skip_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK);
QAT_FIELD_SET(val32, csr.abd,
ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
QAT_FIELD_SET(val32, csr.lllbd,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK);
return rte_bswap32(val32);
}
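Both builders pack fields with QAT_FIELD_SET, which merges a value into the 32-bit CSR word at the field's BITPOS, clipped to its MASK, before rte_bswap32() puts the word into the byte order the firmware expects. The macro itself comes from the shared QAT adf headers; as a rough, illustrative sketch it amounts to:

/* Illustrative only: clear the field's bit range, then OR in the new
 * value masked to the field width. The real macro is provided by the
 * common QAT headers.
 */
#define QAT_FIELD_SET(flags, val, bitpos, mask) \
	((flags) = (((flags) & ~(((uint32_t)(mask)) << (bitpos))) | \
		((((uint32_t)(val)) & ((uint32_t)(mask))) << (bitpos))))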
struct icp_qat_hw_comp_20_config_csr_upper {
icp_qat_hw_comp_20_scb_control_t scb_ctrl;
icp_qat_hw_comp_20_rmb_control_t rmb_ctrl;
icp_qat_hw_comp_20_som_control_t som_ctrl;
icp_qat_hw_comp_20_skip_hash_rd_control_t skip_hash_ctrl;
icp_qat_hw_comp_20_scb_unload_control_t scb_unload_ctrl;
icp_qat_hw_comp_20_disable_token_fusion_control_t
disable_token_fusion_ctrl;
icp_qat_hw_comp_20_lbms_t lbms;
icp_qat_hw_comp_20_scb_mode_reset_mask_t scb_mode_reset;
uint16_t lazy;
uint16_t nice;
};
static inline uint32_t ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(
struct icp_qat_hw_comp_20_config_csr_upper csr)
{
uint32_t val32 = 0;
QAT_FIELD_SET(val32, csr.scb_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.rmb_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.som_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.skip_hash_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.scb_unload_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.disable_token_fusion_ctrl,
ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.lbms,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK);
QAT_FIELD_SET(val32, csr.scb_mode_reset,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK);
QAT_FIELD_SET(val32, csr.lazy,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK);
QAT_FIELD_SET(val32, csr.nice,
ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
return rte_bswap32(val32);
}
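A minimal usage sketch for the two compression builders above, filling every field with the _DEFAULT_VAL constants from icp_qat_hw_gen4_comp_defs.h (illustrative only; a real caller would pick algo, search depth, lazy/nice and so on per session, and would build the upper word from struct icp_qat_hw_comp_20_config_csr_upper the same way):

struct icp_qat_hw_comp_20_config_csr_lower lower = {
	.edmm = ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL,
	.algo = ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL,
	.sd = ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL,
	.hbs = ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL,
	.abd = ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL,
	.lllbd = ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL,
	.mmctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL,
	.hash_col = ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL,
	.hash_update = ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL,
	.skip_ctrl = ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL,
};
/* Lower half of cd_pars.sl.comp_slice_cfg_word, already byte-swapped. */
uint32_t cfg_lower = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(lower);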
struct icp_qat_hw_decomp_20_config_csr_lower {
icp_qat_hw_decomp_20_hbs_control_t hbs;
icp_qat_hw_decomp_20_lbms_t lbms;
icp_qat_hw_decomp_20_hw_comp_format_t algo;
icp_qat_hw_decomp_20_min_match_control_t mmctrl;
icp_qat_hw_decomp_20_lz4_block_checksum_present_t lbc;
};
static inline uint32_t ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(
struct icp_qat_hw_decomp_20_config_csr_lower csr)
{
uint32_t val32 = 0;
QAT_FIELD_SET(val32, csr.hbs,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.lbms,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK);
QAT_FIELD_SET(val32, csr.algo,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK);
QAT_FIELD_SET(val32, csr.mmctrl,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.lbc,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK);
return rte_bswap32(val32);
}
struct icp_qat_hw_decomp_20_config_csr_upper {
icp_qat_hw_decomp_20_speculative_decoder_control_t sdc;
icp_qat_hw_decomp_20_mini_cam_control_t mcc;
};
static inline uint32_t ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(
struct icp_qat_hw_decomp_20_config_csr_upper csr)
{
uint32_t val32 = 0;
QAT_FIELD_SET(val32, csr.sdc,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK);
QAT_FIELD_SET(val32, csr.mcc,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
return rte_bswap32(val32);
}
#endif /* _ICP_QAT_HW_GEN4_COMP_H_ */

@@ -0,0 +1,299 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
* Copyright(c) 2021 Intel Corporation
*/
#ifndef _ICP_QAT_HW_GEN4_COMP_DEFS_H
#define _ICP_QAT_HW_GEN4_COMP_DEFS_H
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_BITPOS 31
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_SCB_CONTROL_ENABLE = 0x0,
ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE = 0x1,
} icp_qat_hw_comp_20_scb_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SCB_CONTROL_DISABLE
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_BITPOS 30
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL = 0x0,
ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_FC_ONLY = 0x1,
} icp_qat_hw_comp_20_rmb_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_RMB_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_RMB_CONTROL_RESET_ALL
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_BITPOS 28
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_MASK 0x3
typedef enum {
ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE = 0x0,
ICP_QAT_HW_COMP_20_SOM_CONTROL_REPLAY_MODE = 0x1,
ICP_QAT_HW_COMP_20_SOM_CONTROL_INPUT_CRC = 0x2,
ICP_QAT_HW_COMP_20_SOM_CONTROL_RESERVED_MODE = 0x3,
} icp_qat_hw_comp_20_som_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SOM_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SOM_CONTROL_NORMAL_MODE
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_BITPOS 27
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP = 0x0,
ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_SKIP_HASH_READS = 0x1,
} icp_qat_hw_comp_20_skip_hash_rd_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_RD_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SKIP_HASH_RD_CONTROL_NO_SKIP
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_BITPOS 26
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD = 0x0,
ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_NO_UNLOAD = 0x1,
} icp_qat_hw_comp_20_scb_unload_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_UNLOAD_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SCB_UNLOAD_CONTROL_UNLOAD
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_BITPOS 21
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE = 0x0,
ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_DISABLE = 0x1,
} icp_qat_hw_comp_20_disable_token_fusion_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_DISABLE_TOKEN_FUSION_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_DISABLE_TOKEN_FUSION_CONTROL_ENABLE
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_BITPOS 19
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_MASK 0x3
typedef enum {
ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB = 0x0,
ICP_QAT_HW_COMP_20_LBMS_LBMS_256KB = 0x1,
ICP_QAT_HW_COMP_20_LBMS_LBMS_1MB = 0x2,
ICP_QAT_HW_COMP_20_LBMS_LBMS_4MB = 0x3,
} icp_qat_hw_comp_20_lbms_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_LBMS_LBMS_64KB
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_BITPOS 18
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS = 0x0,
ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS_AND_HISTORY = 0x1,
} icp_qat_hw_comp_20_scb_mode_reset_mask_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SCB_MODE_RESET_MASK_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SCB_MODE_RESET_MASK_RESET_COUNTERS
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_BITPOS 9
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_MASK 0x1ff
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL 258
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS 0
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK 0x1ff
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL 259
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
typedef enum {
ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
} icp_qat_hw_comp_20_hbs_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_HBS_CONTROL_HBS_IS_32KB
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS 13
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED = 0x0,
ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED = 0x1,
} icp_qat_hw_comp_20_abd_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_ABD_ABD_ENABLED
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_BITPOS 12
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED = 0x0,
ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED = 0x1,
} icp_qat_hw_comp_20_lllbd_ctrl_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_LLLBD_CTRL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_BITPOS 8
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_MASK 0xf
typedef enum {
ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1 = 0x1,
ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_6 = 0x3,
ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_9 = 0x4,
} icp_qat_hw_comp_20_search_depth_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SEARCH_DEPTH_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_BITPOS 5
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_MASK 0x7
typedef enum {
ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77 = 0x0,
ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE = 0x1,
ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4 = 0x2,
ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S = 0x3,
} icp_qat_hw_comp_20_hw_comp_format_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_HW_COMP_FORMAT_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_DEFLATE
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
} icp_qat_hw_comp_20_min_match_control_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_MIN_MATCH_CONTROL_MATCH_3B
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_BITPOS 3
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW = 0x0,
ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_DONT_ALLOW = 0x1,
} icp_qat_hw_comp_20_skip_hash_collision_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_COLLISION_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SKIP_HASH_COLLISION_ALLOW
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_BITPOS 2
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW = 0x0,
ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW = 0x1,
} icp_qat_hw_comp_20_skip_hash_update_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_SKIP_HASH_UPDATE_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_ALLOW
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_BITPOS 1
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN = 0x0,
ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL = 0x1,
} icp_qat_hw_comp_20_byte_skip_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_BYTE_SKIP_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_TOKEN
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_BITPOS 0
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_MASK 0x1
typedef enum {
ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED = 0x0,
ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED = 0x1,
} icp_qat_hw_comp_20_extended_delay_match_mode_t;
#define ICP_QAT_HW_COMP_20_CONFIG_CSR_EXTENDED_DELAY_MATCH_MODE_DEFAULT_VAL \
ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_BITPOS 31
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_MASK 0x1
typedef enum {
ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE = 0x0,
ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_DISABLE = 0x1,
} icp_qat_hw_decomp_20_speculative_decoder_control_t;
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_SPECULATIVE_DECODER_CONTROL_DEFAULT_VAL\
ICP_QAT_HW_DECOMP_20_SPECULATIVE_DECODER_CONTROL_ENABLE
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS 30
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK 0x1
typedef enum {
ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE = 0x0,
ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_DISABLE = 0x1,
} icp_qat_hw_decomp_20_mini_cam_control_t;
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_DECOMP_20_MINI_CAM_CONTROL_ENABLE
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_BITPOS 14
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_MASK 0x7
typedef enum {
ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB = 0x0,
} icp_qat_hw_decomp_20_hbs_control_t;
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_DECOMP_20_HBS_CONTROL_HBS_IS_32KB
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_BITPOS 8
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_MASK 0x3
typedef enum {
ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB = 0x0,
ICP_QAT_HW_DECOMP_20_LBMS_LBMS_256KB = 0x1,
ICP_QAT_HW_DECOMP_20_LBMS_LBMS_1MB = 0x2,
ICP_QAT_HW_DECOMP_20_LBMS_LBMS_4MB = 0x3,
} icp_qat_hw_decomp_20_lbms_t;
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LBMS_DEFAULT_VAL \
ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_BITPOS 5
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_MASK 0x7
typedef enum {
ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE = 0x1,
ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4 = 0x2,
ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S = 0x3,
} icp_qat_hw_decomp_20_hw_comp_format_t;
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HW_DECOMP_FORMAT_DEFAULT_VAL \
ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_BITPOS 4
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_MASK 0x1
typedef enum {
ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B = 0x0,
ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_4B = 0x1,
} icp_qat_hw_decomp_20_min_match_control_t;
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MIN_MATCH_CONTROL_DEFAULT_VAL \
ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS 3
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK 0x1
typedef enum {
ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT = 0x0,
ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_PRESENT = 0x1,
} icp_qat_hw_decomp_20_lz4_block_checksum_present_t;
#define ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_DEFAULT_VAL \
ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_ABSENT
#endif /* _ICP_QAT_HW_GEN4_COMP_DEFS_H */
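A sketch of how these constants feed the builders from icp_qat_hw_gen4_comp.h, here configuring the GEN4 decompressor for LZ4 input carrying per-block checksums (values chosen for illustration):

struct icp_qat_hw_decomp_20_config_csr_lower dec_lower = {
	.hbs = ICP_QAT_HW_DECOMP_20_CONFIG_CSR_HBS_CONTROL_DEFAULT_VAL,
	.lbms = ICP_QAT_HW_DECOMP_20_LBMS_LBMS_64KB,
	.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4,
	.mmctrl = ICP_QAT_HW_DECOMP_20_MIN_MATCH_CONTROL_MATCH_3B,
	.lbc = ICP_QAT_HW_DECOMP_20_LZ4_BLOCK_CHKSUM_PRESENT,
};
uint32_t dec_cfg_lower = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(dec_lower);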

@@ -13,9 +13,9 @@
#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
/* Intel(R) QuickAssist Technology device generation is enumerated
* from one according to the generation of the device
* from one according to the generation of the device.
* QAT_GEN* is used as the index to find all devices
*/
enum qat_device_gen {
QAT_GEN1,
QAT_GEN2,

@@ -49,12 +49,6 @@ struct qat_dev_cmd_param {
uint16_t val;
};
enum qat_comp_num_im_buffers {
QAT_NUM_INTERM_BUFS_GEN1 = 12,
QAT_NUM_INTERM_BUFS_GEN2 = 20,
QAT_NUM_INTERM_BUFS_GEN3 = 64
};
struct qat_device_info {
const struct rte_memzone *mz;
/**< mz to store the qat_pci_device so it can be
@@ -137,7 +131,6 @@ struct qat_pci_device {
struct qat_gen_hw_data {
enum qat_device_gen dev_gen;
const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_ON_ANY_SERVICE];
enum qat_comp_num_im_buffers comp_num_im_bufs_required;
struct qat_pf2vf_dev *pf2vf_dev;
};

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018-2019 Intel Corporation
* Copyright(c) 2018-2021 Intel Corporation
*/
#include <rte_mempool.h>
@@ -332,7 +332,8 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
return 0;
}
static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
static inline uint32_t
adf_modulo(uint32_t data, uint32_t modulo_mask)
{
return data & modulo_mask;
}
@@ -793,8 +794,9 @@ qat_comp_stream_size(void)
return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
}
static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
enum qat_comp_request_type request)
static void
qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
enum qat_comp_request_type request)
{
if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
@@ -811,16 +813,17 @@ static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
}
static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
const struct rte_memzone *interm_buff_mz,
const struct rte_comp_xform *xform,
const struct qat_comp_stream *stream,
enum rte_comp_op_type op_type)
static int
qat_comp_create_templates(struct qat_comp_xform *qat_xform,
const struct rte_memzone *interm_buff_mz,
const struct rte_comp_xform *xform,
const struct qat_comp_stream *stream,
enum rte_comp_op_type op_type,
enum qat_device_gen qat_dev_gen)
{
struct icp_qat_fw_comp_req *comp_req;
int comp_level, algo;
uint32_t req_par_flags;
int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;
int res;
if (unlikely(qat_xform == NULL)) {
QAT_LOG(ERR, "Session was not created for this device");
@@ -839,46 +842,17 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
}
}
if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS)
req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
ICP_QAT_FW_COMP_BFINAL,
ICP_QAT_FW_COMP_CNV,
ICP_QAT_FW_COMP_CNV_RECOVERY);
} else {
if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
else if (xform->compress.level == 1)
comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
else if (xform->compress.level == 2)
comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
else if (xform->compress.level == 3)
comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
else if (xform->compress.level >= 4 &&
xform->compress.level <= 9)
comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
else {
QAT_LOG(ERR, "compression level not supported");
return -EINVAL;
}
else
req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
ICP_QAT_FW_COMP_CNV_RECOVERY);
}
switch (xform->compress.algo) {
case RTE_COMP_ALGO_DEFLATE:
algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
break;
case RTE_COMP_ALGO_LZS:
default:
/* RTE_COMP_NULL */
QAT_LOG(ERR, "compression algorithm not supported");
return -EINVAL;
}
comp_req = &qat_xform->qat_comp_req_tmpl;
@@ -899,18 +873,10 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
comp_req->comp_cd_ctrl.comp_state_addr =
stream->state_registers_decomp_phys;
/* Enable A, B, C, D, and E (CAMs). */
/* RAM bank flags */
comp_req->comp_cd_ctrl.ram_bank_flags =
ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank E */
ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank D */
ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank C */
ICP_QAT_FW_COMP_BANK_ENABLED, /* Bank B */
ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */
qat_comp_gen_dev_ops[qat_dev_gen]
.qat_comp_get_ram_bank_flags();
comp_req->comp_cd_ctrl.ram_banks_addr =
stream->inflate_context_phys;
@@ -924,13 +890,11 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
}
comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
direction,
/* In CPM 1.6 only valid mode ! */
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
/* Translate level to depth */
comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
res = qat_comp_gen_dev_ops[qat_dev_gen].qat_comp_set_slice_cfg_word(
qat_xform, xform, op_type,
comp_req->cd_pars.sl.comp_slice_cfg_word);
if (res)
return res;
comp_req->comp_pars.initial_adler = 1;
comp_req->comp_pars.initial_crc32 = 0;
@@ -958,7 +922,8 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
ICP_QAT_FW_SLICE_XLAT);
comp_req->u1.xlt_pars.inter_buff_ptr =
interm_buff_mz->iova;
(qat_comp_get_num_im_bufs_required(qat_dev_gen)
== 0) ? 0 : interm_buff_mz->iova;
}
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
@@ -991,6 +956,8 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,
void **private_xform)
{
struct qat_comp_dev_private *qat = dev->data->dev_private;
enum qat_device_gen qat_dev_gen = qat->qat_dev->qat_dev_gen;
unsigned int im_bufs = qat_comp_get_num_im_bufs_required(qat_dev_gen);
if (unlikely(private_xform == NULL)) {
QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
@@ -1012,7 +979,8 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,
if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
&& qat->interm_buff_mz == NULL))
&& qat->interm_buff_mz == NULL
&& im_bufs > 0))
qat_xform->qat_comp_request_type =
QAT_COMP_REQUEST_FIXED_COMP_STATELESS;
@@ -1020,7 +988,8 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,
RTE_COMP_HUFFMAN_DYNAMIC ||
xform->compress.deflate.huffman ==
RTE_COMP_HUFFMAN_DEFAULT) &&
qat->interm_buff_mz != NULL)
(qat->interm_buff_mz != NULL ||
im_bufs == 0))
qat_xform->qat_comp_request_type =
QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;
@@ -1039,7 +1008,8 @@ qat_comp_private_xform_create(struct rte_compressdev *dev,
}
if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
NULL, RTE_COMP_OP_STATELESS)) {
NULL, RTE_COMP_OP_STATELESS,
qat_dev_gen)) {
QAT_LOG(ERR, "QAT: Problem with setting compression");
return -EINVAL;
}
@@ -1138,7 +1108,8 @@ qat_comp_stream_create(struct rte_compressdev *dev,
ptr->qat_xform.checksum_type = xform->decompress.chksum;
if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
xform, ptr, RTE_COMP_OP_STATEFUL)) {
xform, ptr, RTE_COMP_OP_STATEFUL,
qat->qat_dev->qat_dev_gen)) {
QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
rte_mempool_put(qat->streampool, *stream);
*stream = NULL;
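The hard-coded RAM bank setup deleted earlier in this file is now fetched through the qat_comp_get_ram_bank_flags() callback. A sketch of what a pre-GEN4 implementation could return, mirroring the bank A-E enablement removed above (the function name is a hypothetical stand-in for the gen-specific source file):

static uint16_t
qat_comp_get_ram_bank_flags_gen1(void)	/* hypothetical name */
{
	/* Enable banks A-E (CAMs), disable F-I, as the removed code did. */
	return ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
		ICP_QAT_FW_COMP_BANK_DISABLED,	/* Bank I */
		ICP_QAT_FW_COMP_BANK_DISABLED,	/* Bank H */
		ICP_QAT_FW_COMP_BANK_DISABLED,	/* Bank G */
		ICP_QAT_FW_COMP_BANK_DISABLED,	/* Bank F */
		ICP_QAT_FW_COMP_BANK_ENABLED,	/* Bank E */
		ICP_QAT_FW_COMP_BANK_ENABLED,	/* Bank D */
		ICP_QAT_FW_COMP_BANK_ENABLED,	/* Bank C */
		ICP_QAT_FW_COMP_BANK_ENABLED,	/* Bank B */
		ICP_QAT_FW_COMP_BANK_ENABLED);	/* Bank A */
}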

@@ -28,14 +28,16 @@
#define QAT_MIN_OUT_BUF_SIZE 46
/* maximum size of the state registers */
#define QAT_STATE_REGISTERS_MAX_SIZE 64
#define QAT_STATE_REGISTERS_MAX_SIZE 256 /* 64 bytes for GEN1-3, 256 for GEN4 */
/* decompressor context size */
#define QAT_INFLATE_CONTEXT_SIZE_GEN1 36864
#define QAT_INFLATE_CONTEXT_SIZE_GEN2 34032
#define QAT_INFLATE_CONTEXT_SIZE_GEN3 34032
#define QAT_INFLATE_CONTEXT_SIZE RTE_MAX(RTE_MAX(QAT_INFLATE_CONTEXT_SIZE_GEN1,\
QAT_INFLATE_CONTEXT_SIZE_GEN2), QAT_INFLATE_CONTEXT_SIZE_GEN3)
#define QAT_INFLATE_CONTEXT_SIZE_GEN4 36864
#define QAT_INFLATE_CONTEXT_SIZE RTE_MAX(RTE_MAX(RTE_MAX(\
QAT_INFLATE_CONTEXT_SIZE_GEN1, QAT_INFLATE_CONTEXT_SIZE_GEN2), \
QAT_INFLATE_CONTEXT_SIZE_GEN3), QAT_INFLATE_CONTEXT_SIZE_GEN4)
enum qat_comp_request_type {
QAT_COMP_REQUEST_FIXED_COMP_STATELESS,

@@ -9,30 +9,29 @@
#define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16
struct qat_comp_gen_dev_ops qat_comp_gen_dev_ops[QAT_N_GENS];
struct stream_create_info {
struct qat_comp_dev_private *comp_dev;
int socket_id;
int error;
};
static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
{/* COMPRESSION - deflate */
.algo = RTE_COMP_ALGO_DEFLATE,
.comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
RTE_COMP_FF_CRC32_CHECKSUM |
RTE_COMP_FF_ADLER32_CHECKSUM |
RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
RTE_COMP_FF_HUFFMAN_FIXED |
RTE_COMP_FF_HUFFMAN_DYNAMIC |
RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
RTE_COMP_FF_STATEFUL_DECOMPRESSION,
.window_size = {.min = 15, .max = 15, .increment = 0} },
{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
static struct
qat_comp_capabilities_info qat_comp_get_capa_info(
enum qat_device_gen qat_dev_gen, struct qat_pci_device *qat_dev)
{
struct qat_comp_capabilities_info ret = { .data = NULL, .size = 0 };
if (qat_dev_gen >= QAT_N_GENS)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(qat_comp_gen_dev_ops[qat_dev_gen]
.qat_comp_get_capabilities, ret);
return qat_comp_gen_dev_ops[qat_dev_gen]
.qat_comp_get_capabilities(qat_dev);
}
static void
void
qat_comp_stats_get(struct rte_compressdev *dev,
struct rte_compressdev_stats *stats)
{
@@ -52,7 +51,7 @@ qat_comp_stats_get(struct rte_compressdev *dev,
stats->dequeue_err_count = qat_stats.dequeue_err_count;
}
static void
void
qat_comp_stats_reset(struct rte_compressdev *dev)
{
struct qat_comp_dev_private *qat_priv;
@@ -67,7 +66,7 @@ qat_comp_stats_reset(struct rte_compressdev *dev)
}
static int
int
qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
{
struct qat_comp_dev_private *qat_private = dev->data->dev_private;
@@ -95,23 +94,18 @@ qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
&(dev->data->queue_pairs[queue_pair_id]));
}
static int
int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
uint32_t max_inflight_ops, int socket_id)
{
struct qat_qp *qp;
int ret = 0;
uint32_t i;
struct qat_qp_config qat_qp_conf;
struct qat_qp_config qat_qp_conf = {0};
struct qat_qp **qp_addr =
(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
struct qat_comp_dev_private *qat_private = dev->data->dev_private;
struct qat_pci_device *qat_dev = qat_private->qat_dev;
const struct qat_qp_hw_data *comp_hw_qps =
qat_gen_config[qat_private->qat_dev->qat_dev_gen]
.qp_hw_data[QAT_SERVICE_COMPRESSION];
const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;
struct qat_qp *qp;
uint32_t i;
int ret;
/* If qp is already in use free ring memory and qp metadata. */
if (*qp_addr != NULL) {
@@ -125,7 +119,13 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
return -EINVAL;
}
qat_qp_conf.hw = qp_hw_data;
qat_qp_conf.hw = qat_qp_get_hw_data(qat_dev, QAT_SERVICE_COMPRESSION,
qp_id);
if (qat_qp_conf.hw == NULL) {
QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
return -EINVAL;
}
qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
qat_qp_conf.nb_descriptors = max_inflight_ops;
qat_qp_conf.socket_id = socket_id;
@@ -134,7 +134,6 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
if (ret != 0)
return ret;
/* store a link to the qp in the qat_pci_device */
qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
= *qp_addr;
@@ -189,7 +188,7 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
#define QAT_IM_BUFFER_DEBUG 0
static const struct rte_memzone *
const struct rte_memzone *
qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
uint32_t buff_size)
{
@@ -202,8 +201,8 @@ qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
uint32_t full_size;
uint32_t offset_of_flat_buffs;
int i;
int num_im_sgls = qat_gen_config[
comp_dev->qat_dev->qat_dev_gen].comp_num_im_bufs_required;
int num_im_sgls = qat_comp_get_num_im_bufs_required(
comp_dev->qat_dev->qat_dev_gen);
QAT_LOG(DEBUG, "QAT COMP device %s needs %d sgls",
comp_dev->qat_dev->name, num_im_sgls);
@@ -480,8 +479,8 @@ _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
/* Free intermediate buffers */
if (comp_dev->interm_buff_mz) {
char mz_name[RTE_MEMZONE_NAMESIZE];
int i = qat_gen_config[
comp_dev->qat_dev->qat_dev_gen].comp_num_im_bufs_required;
int i = qat_comp_get_num_im_bufs_required(
comp_dev->qat_dev->qat_dev_gen);
while (--i >= 0) {
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
@@ -509,28 +508,13 @@ _qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
}
}
static int
int
qat_comp_dev_config(struct rte_compressdev *dev,
struct rte_compressdev_config *config)
{
struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
int ret = 0;
if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) {
QAT_LOG(WARNING,
"RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so"
" QAT device can't be used for Dynamic Deflate. "
"Did you really intend to do this?");
} else {
comp_dev->interm_buff_mz =
qat_comp_setup_inter_buffers(comp_dev,
RTE_PMD_QAT_COMP_IM_BUFFER_SIZE);
if (comp_dev->interm_buff_mz == NULL) {
ret = -ENOMEM;
goto error_out;
}
}
if (config->max_nb_priv_xforms) {
comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
config, config->max_nb_priv_xforms);
@@ -558,19 +542,19 @@ qat_comp_dev_config(struct rte_compressdev *dev,
return ret;
}
static int
int
qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
{
return 0;
}
static void
void
qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
{
}
static int
int
qat_comp_dev_close(struct rte_compressdev *dev)
{
int i;
@@ -588,8 +572,7 @@ qat_comp_dev_close(struct rte_compressdev *dev)
return ret;
}
static void
void
qat_comp_dev_info_get(struct rte_compressdev *dev,
struct rte_compressdev_info *info)
{
@@ -662,27 +645,6 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
return ret;
}
static struct rte_compressdev_ops compress_qat_ops = {
/* Device related operations */
.dev_configure = qat_comp_dev_config,
.dev_start = qat_comp_dev_start,
.dev_stop = qat_comp_dev_stop,
.dev_close = qat_comp_dev_close,
.dev_infos_get = qat_comp_dev_info_get,
.stats_get = qat_comp_stats_get,
.stats_reset = qat_comp_stats_reset,
.queue_pair_setup = qat_comp_qp_setup,
.queue_pair_release = qat_comp_qp_release,
/* Compression related operations */
.private_xform_create = qat_comp_private_xform_create,
.private_xform_free = qat_comp_private_xform_free,
.stream_create = qat_comp_stream_create,
.stream_free = qat_comp_stream_free
};
/* An rte_driver is needed in the registration of the device with compressdev.
* The actual qat pci's rte_driver can't be used as its name represents
* the whole pci device with all services. Think of this as a holder for a name
@@ -693,6 +655,7 @@ static const struct rte_driver compdev_qat_driver = {
.name = qat_comp_drv_name,
.alias = qat_comp_drv_name
};
int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev,
struct qat_dev_cmd_param *qat_dev_cmd_param)
@@ -708,17 +671,21 @@ qat_comp_dev_create(struct qat_pci_device *qat_pci_dev,
char capa_memz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
struct rte_compressdev *compressdev;
struct qat_comp_dev_private *comp_dev;
struct qat_comp_capabilities_info capabilities_info;
const struct rte_compressdev_capabilities *capabilities;
const struct qat_comp_gen_dev_ops *qat_comp_gen_ops =
&qat_comp_gen_dev_ops[qat_pci_dev->qat_dev_gen];
uint64_t capa_size;
if (qat_pci_dev->qat_dev_gen == QAT_GEN4) {
QAT_LOG(ERR, "Compression PMD not supported on QAT 4xxx");
return -EFAULT;
}
snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
qat_pci_dev->name, "comp");
QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);
if (qat_comp_gen_ops->compressdev_ops == NULL) {
QAT_LOG(DEBUG, "Device %s does not support compression", name);
return -ENOTSUP;
}
/* Populate subset device to use in compressdev device creation */
qat_dev_instance->comp_rte_dev.driver = &compdev_qat_driver;
qat_dev_instance->comp_rte_dev.numa_node =
@@ -733,13 +700,13 @@ qat_comp_dev_create(struct qat_pci_device *qat_pci_dev,
if (compressdev == NULL)
return -ENODEV;
compressdev->dev_ops = &compress_qat_ops;
compressdev->dev_ops = qat_comp_gen_ops->compressdev_ops;
compressdev->enqueue_burst = (compressdev_enqueue_pkt_burst_t)
qat_enqueue_comp_op_burst;
compressdev->dequeue_burst = qat_comp_pmd_dequeue_first_op_burst;
compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
compressdev->feature_flags =
qat_comp_gen_ops->qat_comp_get_feature_flags();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
@@ -752,22 +719,20 @@ qat_comp_dev_create(struct qat_pci_device *qat_pci_dev,
comp_dev->qat_dev = qat_pci_dev;
comp_dev->compressdev = compressdev;
switch (qat_pci_dev->qat_dev_gen) {
case QAT_GEN1:
case QAT_GEN2:
case QAT_GEN3:
capabilities = qat_comp_gen_capabilities;
capa_size = sizeof(qat_comp_gen_capabilities);
break;
default:
capabilities = qat_comp_gen_capabilities;
capa_size = sizeof(qat_comp_gen_capabilities);
break;
capabilities_info = qat_comp_get_capa_info(qat_pci_dev->qat_dev_gen,
qat_pci_dev);
if (capabilities_info.data == NULL) {
QAT_LOG(DEBUG,
"QAT gen %d capabilities unknown, default to GEN1",
qat_pci_dev->qat_dev_gen);
capabilities_info = qat_comp_get_capa_info(QAT_GEN1,
qat_pci_dev);
}
capabilities = capabilities_info.data;
capa_size = capabilities_info.size;
comp_dev->capa_mz = rte_memzone_lookup(capa_memz_name);
if (comp_dev->capa_mz == NULL) {
comp_dev->capa_mz = rte_memzone_reserve(capa_memz_name,

@@ -11,10 +11,44 @@
#include <rte_compressdev_pmd.h>
#include "qat_device.h"
#include "qat_comp.h"
/**< Intel(R) QAT Compression PMD driver name */
#define COMPRESSDEV_NAME_QAT_PMD compress_qat
/* Private data structure for a QAT compression device capability. */
struct qat_comp_capabilities_info {
const struct rte_compressdev_capabilities *data;
uint64_t size;
};
/**
* Function prototypes for GENx specific compress device operations.
**/
typedef struct qat_comp_capabilities_info (*get_comp_capabilities_info_t)
(struct qat_pci_device *qat_dev);
typedef uint16_t (*get_comp_ram_bank_flags_t)(void);
typedef int (*set_comp_slice_cfg_word_t)(struct qat_comp_xform *qat_xform,
const struct rte_comp_xform *xform,
enum rte_comp_op_type op_type, uint32_t *comp_slice_cfg_word);
typedef unsigned int (*get_comp_num_im_bufs_required_t)(void);
typedef uint64_t (*get_comp_feature_flags_t)(void);
struct qat_comp_gen_dev_ops {
struct rte_compressdev_ops *compressdev_ops;
get_comp_feature_flags_t qat_comp_get_feature_flags;
get_comp_capabilities_info_t qat_comp_get_capabilities;
get_comp_ram_bank_flags_t qat_comp_get_ram_bank_flags;
set_comp_slice_cfg_word_t qat_comp_set_slice_cfg_word;
get_comp_num_im_bufs_required_t qat_comp_get_num_im_bufs_required;
};
extern struct qat_comp_gen_dev_ops qat_comp_gen_dev_ops[];
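Each generation's source file is expected to fill its slot in this table at startup, for example from an RTE_INIT constructor. A sketch of the registration pattern, where every callback name is a hypothetical stand-in for the corresponding gen-specific implementation:

RTE_INIT(qat_comp_pmd_gen1_init)	/* hypothetical constructor */
{
	qat_comp_gen_dev_ops[QAT_GEN1].compressdev_ops =
		&qat_comp_ops_gen1;			/* hypothetical */
	qat_comp_gen_dev_ops[QAT_GEN1].qat_comp_get_feature_flags =
		qat_comp_get_feature_flags_gen1;	/* hypothetical */
	qat_comp_gen_dev_ops[QAT_GEN1].qat_comp_get_capabilities =
		qat_comp_cap_get_gen1;			/* hypothetical */
	qat_comp_gen_dev_ops[QAT_GEN1].qat_comp_get_ram_bank_flags =
		qat_comp_get_ram_bank_flags_gen1;	/* hypothetical */
	qat_comp_gen_dev_ops[QAT_GEN1].qat_comp_set_slice_cfg_word =
		qat_comp_set_slice_cfg_word_gen1;	/* hypothetical */
	qat_comp_gen_dev_ops[QAT_GEN1].qat_comp_get_num_im_bufs_required =
		qat_comp_get_num_im_bufs_required_gen1;	/* hypothetical */
}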
/** private data structure for a QAT compression device.
* This QAT device is a device offering only a compression service,
* there can be one of these on each qat_pci_device (VF).
@@ -37,6 +71,41 @@ struct qat_comp_dev_private {
uint16_t min_enq_burst_threshold;
};
int
qat_comp_dev_config(struct rte_compressdev *dev,
struct rte_compressdev_config *config);
int
qat_comp_dev_start(struct rte_compressdev *dev __rte_unused);
void
qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused);
int
qat_comp_dev_close(struct rte_compressdev *dev);
void
qat_comp_dev_info_get(struct rte_compressdev *dev,
struct rte_compressdev_info *info);
void
qat_comp_stats_get(struct rte_compressdev *dev,
struct rte_compressdev_stats *stats);
void
qat_comp_stats_reset(struct rte_compressdev *dev);
int
qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id);
int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
uint32_t max_inflight_ops, int socket_id);
const struct rte_memzone *
qat_comp_setup_inter_buffers(struct qat_comp_dev_private *comp_dev,
uint32_t buff_size);
int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev,
struct qat_dev_cmd_param *qat_dev_cmd_param);
@@ -44,5 +113,12 @@ qat_comp_dev_create(struct qat_pci_device *qat_pci_dev,
int
qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev);
static __rte_always_inline unsigned int
qat_comp_get_num_im_bufs_required(enum qat_device_gen gen)
{
return (*qat_comp_gen_dev_ops[gen].qat_comp_get_num_im_bufs_required)();
}
#endif
#endif /* _QAT_COMP_PMD_H_ */