common/qat: define build request and dequeue operations
This patch introduces build request and dequeue op function pointers to the qat queue pair implementation. The function pointers are assigned during qat session generation based on the input crypto operation request. Signed-off-by: Kai Ji <kai.ji@intel.com> Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
This commit is contained in:
parent
521fbc716e
commit
c3352e724d
@ -1,5 +1,5 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2015-2018 Intel Corporation
|
||||
* Copyright(c) 2015-2022 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <rte_common.h>
|
||||
@ -547,7 +547,9 @@ adf_modulo(uint32_t data, uint32_t modulo_mask)
|
||||
}
|
||||
|
||||
uint16_t
|
||||
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
|
||||
qat_enqueue_op_burst(void *qp,
|
||||
__rte_unused qat_op_build_request_t op_build_request,
|
||||
void **ops, uint16_t nb_ops)
|
||||
{
|
||||
register struct qat_queue *queue;
|
||||
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
|
||||
@ -814,7 +816,9 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
|
||||
}
|
||||
|
||||
uint16_t
|
||||
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
|
||||
qat_dequeue_op_burst(void *qp, void **ops,
|
||||
__rte_unused qat_op_dequeue_t qat_dequeue_process_response,
|
||||
uint16_t nb_ops)
|
||||
{
|
||||
struct qat_queue *rx_queue;
|
||||
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018-2022 Intel Corporation
|
||||
*/
|
||||
#ifndef _QAT_QP_H_
|
||||
#define _QAT_QP_H_
|
||||
@ -36,6 +36,51 @@ struct qat_queue {
|
||||
/* number of responses processed since last CSR head write */
|
||||
};
|
||||
|
||||
/**
|
||||
* Type define qat_op_build_request_t function pointer, passed in as argument
|
||||
 * in enqueue op burst, where a build request is assigned based on the type of
|
||||
* crypto op.
|
||||
*
|
||||
* @param in_op
|
||||
* An input op pointer
|
||||
* @param out_msg
|
||||
 *   out_msg pointer
|
||||
* @param op_cookie
|
||||
* op cookie pointer
|
||||
* @param opaque
|
||||
 *   opaque data that may be used to store context, which may be useful between
|
||||
* 2 enqueue operations.
|
||||
* @param dev_gen
|
||||
* qat device gen id
|
||||
* @return
|
||||
 *    - 0 if the crypto request is built successfully,
|
||||
* - EINVAL if error
|
||||
**/
|
||||
typedef int (*qat_op_build_request_t)(void *in_op, uint8_t *out_msg,
|
||||
void *op_cookie, uint64_t *opaque, enum qat_device_gen dev_gen);
|
||||
|
||||
/**
|
||||
* Type define qat_op_dequeue_t function pointer, passed in as argument
|
||||
 * in dequeue op burst, where a dequeue op is assigned based on the type of
|
||||
* crypto op.
|
||||
*
|
||||
* @param op
|
||||
* An input op pointer
|
||||
* @param resp
|
||||
* qat response msg pointer
|
||||
* @param op_cookie
|
||||
* op cookie pointer
|
||||
* @param dequeue_err_count
|
||||
* dequeue error counter
|
||||
* @return
|
||||
* - 0 if dequeue OP is successful
|
||||
* - EINVAL if error
|
||||
**/
|
||||
typedef int (*qat_op_dequeue_t)(void **op, uint8_t *resp, void *op_cookie,
|
||||
uint64_t *dequeue_err_count __rte_unused);
|
||||
|
||||
#define QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE 2
|
||||
|
||||
struct qat_qp {
|
||||
void *mmap_bar_addr;
|
||||
struct qat_queue tx_q;
|
||||
@ -44,6 +89,7 @@ struct qat_qp {
|
||||
struct rte_mempool *op_cookie_pool;
|
||||
void **op_cookies;
|
||||
uint32_t nb_descriptors;
|
||||
uint64_t opaque[QAT_BUILD_REQUEST_MAX_OPAQUE_SIZE];
|
||||
enum qat_device_gen qat_dev_gen;
|
||||
enum qat_service_type service_type;
|
||||
struct qat_pci_device *qat_dev;
|
||||
@ -78,13 +124,15 @@ struct qat_qp_config {
|
||||
};
|
||||
|
||||
uint16_t
|
||||
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
|
||||
qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
|
||||
void **ops, uint16_t nb_ops);
|
||||
|
||||
uint16_t
|
||||
qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
|
||||
|
||||
uint16_t
|
||||
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
|
||||
qat_dequeue_op_burst(void *qp, void **ops,
|
||||
qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
|
||||
|
||||
int
|
||||
qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2015-2019 Intel Corporation
|
||||
* Copyright(c) 2015-2022 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <rte_malloc.h>
|
||||
@ -620,7 +620,7 @@ static uint16_t
|
||||
qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
|
||||
uint16_t nb_ops)
|
||||
{
|
||||
uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
|
||||
uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
|
||||
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
|
||||
|
||||
if (ret) {
|
||||
@ -639,7 +639,7 @@ qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
|
||||
} else {
|
||||
tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
|
||||
(compressdev_dequeue_pkt_burst_t)
|
||||
qat_dequeue_op_burst;
|
||||
qat_comp_pmd_enq_deq_dummy_op_burst;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
|
@ -62,13 +62,13 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
|
||||
uint16_t qat_asym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
|
||||
uint16_t nb_ops)
|
||||
{
|
||||
return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
|
||||
return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
|
||||
}
|
||||
|
||||
uint16_t qat_asym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
|
||||
uint16_t nb_ops)
|
||||
{
|
||||
return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
|
||||
return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
|
||||
}
|
||||
|
||||
/* An rte_driver is needed in the registration of both the device and the driver
|
||||
|
@ -49,14 +49,14 @@ static uint16_t
|
||||
qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
|
||||
uint16_t nb_ops)
|
||||
{
|
||||
return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
|
||||
return qat_enqueue_op_burst(qp, NULL, (void **)ops, nb_ops);
|
||||
}
|
||||
|
||||
static uint16_t
|
||||
qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
|
||||
uint16_t nb_ops)
|
||||
{
|
||||
return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
|
||||
return qat_dequeue_op_burst(qp, (void **)ops, NULL, nb_ops);
|
||||
}
|
||||
|
||||
/* An rte_driver is needed in the registration of both the device and the driver
|
||||
|
@ -1,5 +1,5 @@
|
||||
/* SPDX-License-Identifier: BSD-3-Clause
|
||||
* Copyright(c) 2015-2019 Intel Corporation
|
||||
* Copyright(c) 2015-2022 Intel Corporation
|
||||
*/
|
||||
#ifndef _QAT_SYM_SESSION_H_
|
||||
#define _QAT_SYM_SESSION_H_
|
||||
@ -63,6 +63,16 @@ enum qat_sym_proto_flag {
|
||||
QAT_CRYPTO_PROTO_FLAG_ZUC = 4
|
||||
};
|
||||
|
||||
struct qat_sym_session;
|
||||
|
||||
/*
|
||||
* typedef qat_op_build_request_t function pointer, passed in as argument
|
||||
 * in enqueue op burst, where a build request is assigned based on the type of
|
||||
* crypto op.
|
||||
*/
|
||||
typedef int (*qat_sym_build_request_t)(void *in_op, struct qat_sym_session *ctx,
|
||||
uint8_t *out_msg, void *op_cookie);
|
||||
|
||||
/* Common content descriptor */
|
||||
struct qat_sym_cd {
|
||||
struct icp_qat_hw_cipher_algo_blk cipher;
|
||||
@ -107,6 +117,7 @@ struct qat_sym_session {
|
||||
/* Some generations need different setup of counter */
|
||||
uint32_t slice_types;
|
||||
enum qat_sym_proto_flag qat_proto_flag;
|
||||
qat_sym_build_request_t build_request[2];
|
||||
};
|
||||
|
||||
int
|
||||
|
Loading…
Reference in New Issue
Block a user