crypto/qat: read HW slice configuration

Read the slice configuration of the QAT device. This makes it
possible to recognize whether a specific HW function is available
on a particular generation of device.

Signed-off-by: Arek Kusztal <arkadiuszx.kusztal@intel.com>
Acked-by: Kai Ji <kai.ji@intel.com>
Author:    Arek Kusztal <arkadiuszx.kusztal@intel.com>
Date:      2022-10-18 15:01:54 +01:00
Committed: Akhil Goyal
Parent:    b3cbbcdffa
Commit:    b6ac58aee5

9 changed files with 254 additions and 87 deletions
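
In outline, the patch replaces the old get_capabilities callback, which returned a pointer to a static
capability table, with one that builds the advertised table itself. The common PMD-create code scans the
device command parameters for QAT_CMD_SLICE_MAP and hands the resulting slice map to the per-generation
callback, which writes only the supported entries into the capability memzone. The fragment below is a
simplified illustration of that call flow, not verbatim driver code; judging by the PKE/SM4/SM3 checks
further down, a set bit in slice_map appears to mean the corresponding accelerator slice is not available.

	uint16_t slice_map = 0;
	int i = 0;

	/* Collect the slice map passed down through the QAT device command parameters. */
	while (qat_dev_cmd_param[i].name != NULL) {
		if (!strcmp(qat_dev_cmd_param[i].name, QAT_CMD_SLICE_MAP))
			slice_map = qat_dev_cmd_param[i].val;
		i++;
	}

	/* The per-generation callback builds the capability memzone, skipping
	 * entries whose HW slice is fused off. */
	if (gen_dev_ops->get_capabilities(internals,
			capa_memz_name, slice_map) < 0) {
		/* Capability table could not be built; PMD creation is aborted. */
	}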


@@ -41,14 +41,42 @@ static struct rte_cryptodev_capabilities qat_asym_crypto_caps_gen1[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
struct qat_capabilities_info
qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev __rte_unused)
int
qat_asym_crypto_cap_get_gen1(struct qat_cryptodev_private *internals,
const char *capa_memz_name,
const uint16_t __rte_unused slice_map)
{
struct qat_capabilities_info capa_info;
capa_info.data = qat_asym_crypto_caps_gen1;
capa_info.size = sizeof(qat_asym_crypto_caps_gen1);
return capa_info;
const uint32_t size = sizeof(qat_asym_crypto_caps_gen1);
uint32_t i;
internals->capa_mz = rte_memzone_lookup(capa_memz_name);
if (internals->capa_mz == NULL) {
internals->capa_mz = rte_memzone_reserve(capa_memz_name,
size, rte_socket_id(), 0);
if (internals->capa_mz == NULL) {
QAT_LOG(DEBUG,
"Error allocating memzone for capabilities");
return -1;
}
}
struct rte_cryptodev_capabilities *addr =
(struct rte_cryptodev_capabilities *)
internals->capa_mz->addr;
const struct rte_cryptodev_capabilities *capabilities =
qat_asym_crypto_caps_gen1;
const uint32_t capa_num =
size / sizeof(struct rte_cryptodev_capabilities);
uint32_t curr_capa = 0;
for (i = 0; i < capa_num; i++) {
memcpy(addr + curr_capa, capabilities + i,
sizeof(struct rte_cryptodev_capabilities));
curr_capa++;
}
internals->qat_dev_capabilities = internals->capa_mz->addr;
return 0;
}
uint64_t
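
The body added above follows a lookup-or-reserve idiom for the capability memzone. A minimal,
self-contained sketch of that idiom is shown here, assuming only the standard rte_memzone and
rte_lcore APIs; the helper name is illustrative and does not exist in the driver.

	#include <rte_memzone.h>
	#include <rte_lcore.h>

	/* Reuse an existing memzone if one with this name was already reserved
	 * (e.g. on a re-probe), otherwise reserve it on the caller's socket.
	 * Returns NULL if neither is possible. */
	static void *
	caps_zone_get(const char *name, size_t len)
	{
		const struct rte_memzone *mz = rte_memzone_lookup(name);

		if (mz == NULL)
			mz = rte_memzone_reserve(name, len, rte_socket_id(), 0);

		return mz != NULL ? mz->addr : NULL;
	}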


@@ -275,13 +275,42 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
};
static struct qat_capabilities_info
qat_sym_crypto_cap_get_gen2(struct qat_pci_device *qat_dev __rte_unused)
static int
qat_sym_crypto_cap_get_gen2(struct qat_cryptodev_private *internals,
const char *capa_memz_name,
const uint16_t __rte_unused slice_map)
{
struct qat_capabilities_info capa_info;
capa_info.data = qat_sym_crypto_caps_gen2;
capa_info.size = sizeof(qat_sym_crypto_caps_gen2);
return capa_info;
const uint32_t size = sizeof(qat_sym_crypto_caps_gen2);
uint32_t i;
internals->capa_mz = rte_memzone_lookup(capa_memz_name);
if (internals->capa_mz == NULL) {
internals->capa_mz = rte_memzone_reserve(capa_memz_name,
size, rte_socket_id(), 0);
if (internals->capa_mz == NULL) {
QAT_LOG(DEBUG,
"Error allocating memzone for capabilities");
return -1;
}
}
struct rte_cryptodev_capabilities *addr =
(struct rte_cryptodev_capabilities *)
internals->capa_mz->addr;
const struct rte_cryptodev_capabilities *capabilities =
qat_sym_crypto_caps_gen2;
const uint32_t capa_num =
size / sizeof(struct rte_cryptodev_capabilities);
uint32_t curr_capa = 0;
for (i = 0; i < capa_num; i++) {
memcpy(addr + curr_capa, capabilities + i,
sizeof(struct rte_cryptodev_capabilities));
curr_capa++;
}
internals->qat_dev_capabilities = internals->capa_mz->addr;
return 0;
}
RTE_INIT(qat_sym_crypto_gen2_init)


@@ -143,13 +143,81 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen3[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
static struct qat_capabilities_info
qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
static int
check_cipher_capa(const struct rte_cryptodev_capabilities *cap,
enum rte_crypto_cipher_algorithm algo)
{
struct qat_capabilities_info capa_info;
capa_info.data = qat_sym_crypto_caps_gen3;
capa_info.size = sizeof(qat_sym_crypto_caps_gen3);
return capa_info;
if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
return 0;
if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
return 0;
if (cap->sym.cipher.algo != algo)
return 0;
return 1;
}
static int
check_auth_capa(const struct rte_cryptodev_capabilities *cap,
enum rte_crypto_auth_algorithm algo)
{
if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
return 0;
if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
return 0;
if (cap->sym.auth.algo != algo)
return 0;
return 1;
}
static int
qat_sym_crypto_cap_get_gen3(struct qat_cryptodev_private *internals,
const char *capa_memz_name, const uint16_t slice_map)
{
const uint32_t size = sizeof(qat_sym_crypto_caps_gen3);
uint32_t i;
internals->capa_mz = rte_memzone_lookup(capa_memz_name);
if (internals->capa_mz == NULL) {
internals->capa_mz = rte_memzone_reserve(capa_memz_name,
size, rte_socket_id(), 0);
if (internals->capa_mz == NULL) {
QAT_LOG(DEBUG,
"Error allocating memzone for capabilities");
return -1;
}
}
struct rte_cryptodev_capabilities *addr =
(struct rte_cryptodev_capabilities *)
internals->capa_mz->addr;
const struct rte_cryptodev_capabilities *capabilities =
qat_sym_crypto_caps_gen3;
const uint32_t capa_num =
size / sizeof(struct rte_cryptodev_capabilities);
uint32_t curr_capa = 0;
for (i = 0; i < capa_num; i++) {
if (slice_map & ICP_ACCEL_MASK_SM4_SLICE && (
check_cipher_capa(&capabilities[i],
RTE_CRYPTO_CIPHER_SM4_ECB) ||
check_cipher_capa(&capabilities[i],
RTE_CRYPTO_CIPHER_SM4_CBC) ||
check_cipher_capa(&capabilities[i],
RTE_CRYPTO_CIPHER_SM4_CTR))) {
continue;
}
if (slice_map & ICP_ACCEL_MASK_SM3_SLICE && (
check_auth_capa(&capabilities[i],
RTE_CRYPTO_AUTH_SM3))) {
continue;
}
memcpy(addr + curr_capa, capabilities + i,
sizeof(struct rte_cryptodev_capabilities));
curr_capa++;
}
internals->qat_dev_capabilities = internals->capa_mz->addr;
return 0;
}
static __rte_always_inline void
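
The gen3 variant is the one that actually consumes slice_map: SM4 cipher entries are dropped when the
SM4 slice bit is set, and the SM3 auth entry when the SM3 bit is set. For illustration, the entry below
is the kind of table element check_cipher_capa() matches; it is not taken from the driver's table, and
only the fields the helper consults are populated, everything else stays zero-initialized.

	/* Illustrative capability entry (not from the driver's caps table). */
	static const struct rte_cryptodev_capabilities sm4_cbc_capa = {
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			.cipher = { .algo = RTE_CRYPTO_CIPHER_SM4_CBC },
		},
	};

	/* check_cipher_capa(&sm4_cbc_capa, RTE_CRYPTO_CIPHER_SM4_CBC) returns 1,
	 * so this entry is skipped whenever ICP_ACCEL_MASK_SM4_SLICE is set in
	 * slice_map, and copied into the memzone otherwise. */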


@@ -103,13 +103,42 @@ static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
static struct qat_capabilities_info
qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
static int
qat_sym_crypto_cap_get_gen4(struct qat_cryptodev_private *internals,
const char *capa_memz_name,
const uint16_t __rte_unused slice_map)
{
struct qat_capabilities_info capa_info;
capa_info.data = qat_sym_crypto_caps_gen4;
capa_info.size = sizeof(qat_sym_crypto_caps_gen4);
return capa_info;
const uint32_t size = sizeof(qat_sym_crypto_caps_gen4);
uint32_t i;
internals->capa_mz = rte_memzone_lookup(capa_memz_name);
if (internals->capa_mz == NULL) {
internals->capa_mz = rte_memzone_reserve(capa_memz_name,
size, rte_socket_id(), 0);
if (internals->capa_mz == NULL) {
QAT_LOG(DEBUG,
"Error allocating memzone for capabilities");
return -1;
}
}
struct rte_cryptodev_capabilities *addr =
(struct rte_cryptodev_capabilities *)
internals->capa_mz->addr;
const struct rte_cryptodev_capabilities *capabilities =
qat_sym_crypto_caps_gen4;
const uint32_t capa_num =
size / sizeof(struct rte_cryptodev_capabilities);
uint32_t curr_capa = 0;
for (i = 0; i < capa_num; i++) {
memcpy(addr + curr_capa, capabilities + i,
sizeof(struct rte_cryptodev_capabilities));
curr_capa++;
}
internals->qat_dev_capabilities = internals->capa_mz->addr;
return 0;
}
static __rte_always_inline void


@@ -928,8 +928,9 @@ void
qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
uint8_t hash_flag);
struct qat_capabilities_info
qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
int
qat_asym_crypto_cap_get_gen1(struct qat_cryptodev_private *internals,
const char *capa_memz_name, const uint16_t slice_map);
uint64_t
qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);


@@ -152,13 +152,42 @@ struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
};
static struct qat_capabilities_info
qat_sym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev __rte_unused)
static int
qat_sym_crypto_cap_get_gen1(struct qat_cryptodev_private *internals,
const char *capa_memz_name,
const uint16_t __rte_unused slice_map)
{
struct qat_capabilities_info capa_info;
capa_info.data = qat_sym_crypto_caps_gen1;
capa_info.size = sizeof(qat_sym_crypto_caps_gen1);
return capa_info;
const uint32_t size = sizeof(qat_sym_crypto_caps_gen1);
uint32_t i;
internals->capa_mz = rte_memzone_lookup(capa_memz_name);
if (internals->capa_mz == NULL) {
internals->capa_mz = rte_memzone_reserve(capa_memz_name,
size, rte_socket_id(), 0);
if (internals->capa_mz == NULL) {
QAT_LOG(DEBUG,
"Error allocating memzone for capabilities");
return -1;
}
}
struct rte_cryptodev_capabilities *addr =
(struct rte_cryptodev_capabilities *)
internals->capa_mz->addr;
const struct rte_cryptodev_capabilities *capabilities =
qat_sym_crypto_caps_gen1;
const uint32_t capa_num =
size / sizeof(struct rte_cryptodev_capabilities);
uint32_t curr_capa = 0;
for (i = 0; i < capa_num; i++) {
memcpy(addr + curr_capa, capabilities + i,
sizeof(struct rte_cryptodev_capabilities));
curr_capa++;
}
internals->qat_dev_capabilities = internals->capa_mz->addr;
return 0;
}
uint64_t


@@ -1276,14 +1276,12 @@ qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
.socket_id = qat_dev_instance->pci_dev->device.numa_node,
.private_data_size = sizeof(struct qat_cryptodev_private)
};
struct qat_capabilities_info capa_info;
const struct rte_cryptodev_capabilities *capabilities;
const struct qat_crypto_gen_dev_ops *gen_dev_ops =
&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
char name[RTE_CRYPTODEV_NAME_MAX_LEN];
char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
uint64_t capa_size;
int i = 0;
uint16_t slice_map = 0;
snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
qat_pci_dev->name, "asym");
@@ -1340,38 +1338,37 @@ qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
internals->qat_dev = qat_pci_dev;
internals->dev_id = cryptodev->data->dev_id;
capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
capabilities = capa_info.data;
capa_size = capa_info.size;
internals->capa_mz = rte_memzone_lookup(capa_memz_name);
if (internals->capa_mz == NULL) {
internals->capa_mz = rte_memzone_reserve(capa_memz_name,
capa_size, rte_socket_id(), 0);
if (internals->capa_mz == NULL) {
QAT_LOG(DEBUG,
"Error allocating memzone for capabilities, "
"destroying PMD for %s",
name);
rte_cryptodev_pmd_destroy(cryptodev);
memset(&qat_dev_instance->asym_rte_dev, 0,
sizeof(qat_dev_instance->asym_rte_dev));
return -EFAULT;
}
}
memcpy(internals->capa_mz->addr, capabilities, capa_size);
internals->qat_dev_capabilities = internals->capa_mz->addr;
while (1) {
if (qat_dev_cmd_param[i].name == NULL)
break;
if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
internals->min_enq_burst_threshold =
qat_dev_cmd_param[i].val;
if (!strcmp(qat_dev_cmd_param[i].name, QAT_CMD_SLICE_MAP))
slice_map = qat_dev_cmd_param[i].val;
i++;
}
if (slice_map & ICP_ACCEL_MASK_PKE_SLICE) {
QAT_LOG(ERR, "Device %s does not support PKE slice",
name);
rte_cryptodev_pmd_destroy(cryptodev);
memset(&qat_dev_instance->asym_rte_dev, 0,
sizeof(qat_dev_instance->asym_rte_dev));
return -1;
}
if (gen_dev_ops->get_capabilities(internals,
capa_memz_name, slice_map) < 0) {
QAT_LOG(ERR,
"Device cannot obtain capabilities, destroying PMD for %s",
name);
rte_cryptodev_pmd_destroy(cryptodev);
memset(&qat_dev_instance->asym_rte_dev, 0,
sizeof(qat_dev_instance->asym_rte_dev));
return -1;
}
qat_pci_dev->asym_dev = internals;
internals->service_type = QAT_SERVICE_ASYMMETRIC;
QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",


@@ -44,8 +44,8 @@ struct qat_capabilities_info {
uint64_t size;
};
typedef struct qat_capabilities_info (*get_capabilities_info_t)
(struct qat_pci_device *qat_dev);
typedef int (*get_capabilities_info_t)(struct qat_cryptodev_private *internals,
const char *capa_memz_name, uint16_t slice_map);
typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
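
The typedef change above means every generation's ops table must supply a callback with the new
signature. The sketch below shows how such a callback might be wired in at constructor time; the
QAT_GEN2 enumerator and the exact layout of qat_sym_gen_dev_ops[] are assumptions here, since only
the gen_dev_ops->get_capabilities usage and the RTE_INIT(qat_sym_crypto_gen2_init) constructor name
appear in this diff.

	/* Sketch only: register the gen2 capability callback in the
	 * per-generation ops table. */
	RTE_INIT(qat_sym_crypto_gen2_init)
	{
		qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
				qat_sym_crypto_cap_get_gen2;
	}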


@@ -182,6 +182,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
struct qat_dev_cmd_param *qat_dev_cmd_param __rte_unused)
{
int i = 0, ret = 0;
uint16_t slice_map = 0;
struct qat_device_info *qat_dev_instance =
&qat_pci_devs[qat_pci_dev->qat_dev_id];
struct rte_cryptodev_pmd_init_params init_params = {
@@ -193,11 +194,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct rte_cryptodev *cryptodev;
struct qat_cryptodev_private *internals;
struct qat_capabilities_info capa_info;
const struct rte_cryptodev_capabilities *capabilities;
const struct qat_crypto_gen_dev_ops *gen_dev_ops =
&qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
uint64_t capa_size;
snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
qat_pci_dev->name, "sym");
@@ -277,37 +275,25 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
internals->dev_id = cryptodev->data->dev_id;
capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
capabilities = capa_info.data;
capa_size = capa_info.size;
internals->capa_mz = rte_memzone_lookup(capa_memz_name);
if (internals->capa_mz == NULL) {
internals->capa_mz = rte_memzone_reserve(capa_memz_name,
capa_size, rte_socket_id(), 0);
if (internals->capa_mz == NULL) {
QAT_LOG(DEBUG,
"Error allocating memzone for capabilities, "
"destroying PMD for %s", name);
ret = -EFAULT;
goto error;
}
}
memcpy(internals->capa_mz->addr, capabilities, capa_size);
internals->qat_dev_capabilities = internals->capa_mz->addr;
while (1) {
if (qat_dev_cmd_param[i].name == NULL)
break;
while (qat_dev_cmd_param[i].name != NULL) {
if (!strcmp(qat_dev_cmd_param[i].name, SYM_ENQ_THRESHOLD_NAME))
internals->min_enq_burst_threshold =
qat_dev_cmd_param[i].val;
if (!strcmp(qat_dev_cmd_param[i].name, QAT_IPSEC_MB_LIB))
qat_ipsec_mb_lib = qat_dev_cmd_param[i].val;
if (!strcmp(qat_dev_cmd_param[i].name, QAT_CMD_SLICE_MAP))
slice_map = qat_dev_cmd_param[i].val;
i++;
}
if (gen_dev_ops->get_capabilities(internals,
capa_memz_name, slice_map) < 0) {
QAT_LOG(ERR,
"Device cannot obtain capabilities, destroying PMD for %s",
name);
ret = -1;
goto error;
}
internals->service_type = QAT_SERVICE_SYMMETRIC;
qat_pci_dev->sym_dev = internals;
QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
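
The practical effect of the patch is that the capability array an application retrieves now reflects
the device's actual slice configuration. A small, self-contained example of reading that array through
the public cryptodev API (the function name is illustrative):

	#include <stdio.h>
	#include <rte_cryptodev.h>

	/* Print the symmetric cipher algorithms a crypto device advertises.
	 * On a device whose SM4 slice is fused off, no SM4 entries appear. */
	static void
	print_cipher_caps(uint8_t dev_id)
	{
		struct rte_cryptodev_info info;
		const struct rte_cryptodev_capabilities *cap;

		rte_cryptodev_info_get(dev_id, &info);

		for (cap = info.capabilities;
				cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
			if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
					cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER)
				printf("cipher algo %d\n", cap->sym.cipher.algo);
		}
	}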