crypto/qat: make dequeue function generic

The queue-handling code in the dequeue path is made generic, so it
can be used by other services in future. This is done by:
 - Removing all sym-specific references from the input parameters and
   replacing them with void pointers.
 - Wrapping the generic dequeue with the sym-specific dequeue that is
   called through the API.
 - Extracting the sym-specific response processing into a new function.
 - Setting a function pointer for process_response in the qp at qp
   creation time, as sketched below.
 - Passing void * parameters to it; the service-specific
   implementation qat_sym_process_response casts them back to sym
   structures.

Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
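
To make the dispatch concrete, here is a minimal, self-contained sketch of
the function-pointer pattern this commit introduces. The "toy_" names and
main() are invented for illustration, and the device-generation enum is
simplified to an int; only the handler shape mirrors the process_response_t
typedef added in the header below.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in: the real typedef and struct qat_qp live in
 * the QAT PMD header changed below.
 */
typedef int (*process_response_t)(void **op, uint8_t *resp,
		void *op_cookie, int qat_dev_gen);

struct toy_qp {
	process_response_t process_response; /* set once at qp creation */
};

/* A service-specific handler: it alone knows how to turn the raw
 * response descriptor back into that service's op type.
 */
static int
toy_process_response(void **op, uint8_t *resp, void *op_cookie,
		int qat_dev_gen)
{
	(void)op_cookie;
	(void)qat_dev_gen;
	*op = resp;	/* "parse" the descriptor */
	return 0;
}

int
main(void)
{
	struct toy_qp qp = { .process_response = toy_process_response };
	uint8_t desc[] = "response descriptor";
	void *op = NULL;

	/* Generic queue code needs no service-specific types: it just
	 * calls through the pointer installed at qp-setup time.
	 */
	qp.process_response(&op, desc, NULL, 0);
	printf("dequeued: %s\n", (char *)op);
	return 0;
}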
Fiona Trahe, 2018-06-13 14:13:53 +02:00, committed by Pablo de Lara
parent 54f0884d29
commit 0bdd36e122
3 changed files, 72 insertions(+), 40 deletions(-)


@@ -198,6 +198,7 @@ int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 			= dev->data->dev_private;
 	qp->qat_dev_gen = internals->qat_dev_gen;
 	qp->build_request = qat_sym_build_request;
+	qp->process_response = qat_sym_process_response;
 	dev->data->queue_pairs[queue_pair_id] = qp;
 	return 0;

@@ -300,70 +300,91 @@ qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
 }
 
-uint16_t
-qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
-		uint16_t nb_ops)
+int
+qat_sym_process_response(void **op, uint8_t *resp,
+		__rte_unused void *op_cookie,
+		__rte_unused enum qat_device_gen qat_dev_gen)
 {
+	struct icp_qat_fw_comn_resp *resp_msg =
+			(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+	rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+			sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+			resp_msg->comn_hdr.comn_status)) {
+		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	} else {
+		struct qat_sym_session *sess = (struct qat_sym_session *)
+				get_session_private_data(
+				rx_op->sym->session,
+				cryptodev_qat_driver_id);
+
+		if (sess->bpi_ctx)
+			qat_bpicipher_postprocess(sess, rx_op);
+		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	}
+	*op = (void *)rx_op;
+
+	return 0;
+}
+
+static uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
 	struct qat_queue *rx_queue, *tx_queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
-	uint32_t msg_counter = 0;
-	struct rte_crypto_op *rx_op;
-	struct icp_qat_fw_comn_resp *resp_msg;
 	uint32_t head;
+	uint32_t resp_counter = 0;
+	uint8_t *resp_msg;
 
 	rx_queue = &(tmp_qp->rx_q);
 	tx_queue = &(tmp_qp->tx_q);
 	head = rx_queue->head;
-	resp_msg = (struct icp_qat_fw_comn_resp *)
-			((uint8_t *)rx_queue->base_addr + head);
+	resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
 
 	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
-			msg_counter != nb_ops) {
-		rx_op = (struct rte_crypto_op *)(uintptr_t)
-				(resp_msg->opaque_data);
+			resp_counter != nb_ops) {
 
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
-		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
-			sizeof(struct icp_qat_fw_comn_resp));
-#endif
-		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
-				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
-					resp_msg->comn_hdr.comn_status)) {
-			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-		} else {
-			struct qat_sym_session *sess =
-				(struct qat_sym_session *)
-					get_session_private_data(
-					rx_op->sym->session,
-					cryptodev_qat_driver_id);
-
-			if (sess->bpi_ctx)
-				qat_bpicipher_postprocess(sess, rx_op);
-			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-		}
+		tmp_qp->process_response(ops, resp_msg,
+			tmp_qp->op_cookies[head / rx_queue->msg_size],
+			tmp_qp->qat_dev_gen);
 
 		head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
-		resp_msg = (struct icp_qat_fw_comn_resp *)
-				((uint8_t *)rx_queue->base_addr + head);
-		*ops = rx_op;
+
+		resp_msg = (uint8_t *)rx_queue->base_addr + head;
 		ops++;
-		msg_counter++;
+		resp_counter++;
 	}
-	if (msg_counter > 0) {
+	if (resp_counter > 0) {
 		rx_queue->head = head;
-		tmp_qp->stats.dequeued_count += msg_counter;
-		rx_queue->nb_processed_responses += msg_counter;
-		tmp_qp->inflights16 -= msg_counter;
+		tmp_qp->stats.dequeued_count += resp_counter;
+		rx_queue->nb_processed_responses += resp_counter;
+		tmp_qp->inflights16 -= resp_counter;
 
 		if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
 			rxq_free_desc(tmp_qp, rx_queue);
 	}
 	/* also check if tail needs to be advanced */
 	if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
-		tx_queue->tail != tx_queue->csr_tail) {
+			tx_queue->tail != tx_queue->csr_tail) {
 		txq_write_tail(tmp_qp, tx_queue);
 	}
-	return msg_counter;
+	return resp_counter;
+}
+
+uint16_t
+qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
 }
 
 static inline int
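
One detail worth spelling out from the loop above: head is a byte offset
into the response ring, so head / rx_queue->msg_size recovers the descriptor
slot index used for op_cookies[]. Below is a toy walk of that arithmetic
with assumed sizes; the real msg_size, ring size and the exact adf_modulo()
wraparound come from the PMD's queue configuration.

#include <stdint.h>
#include <stdio.h>

/* Assumed example sizes, for illustration only. */
#define MSG_SIZE   64u			/* bytes per response descriptor */
#define NB_DESC     8u			/* descriptors in the ring */
#define RING_BYTES (MSG_SIZE * NB_DESC)

int
main(void)
{
	uint32_t head = 0;

	for (int i = 0; i < 10; i++) {
		/* Cookie slot for this response: byte offset / descriptor size. */
		printf("head=%3u -> op_cookies[%u]\n", head, head / MSG_SIZE);
		/* Advance one descriptor and wrap at the ring end, which is
		 * what adf_modulo(head + msg_size, modulo) achieves.
		 */
		head = (head + MSG_SIZE) % RING_BYTES;
	}
	return 0;
}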


@@ -32,6 +32,11 @@ typedef int (*build_request_t)(void *op,
 		enum qat_device_gen qat_dev_gen);
 /**< Build a request from an op. */
 
+typedef int (*process_response_t)(void **ops,
+		uint8_t *resp, void *op_cookie,
+		enum qat_device_gen qat_dev_gen);
+/**< Process a response descriptor and return the associated op. */
+
 struct qat_sym_session;
 
 /**
@@ -69,6 +74,7 @@ struct qat_qp {
 	uint32_t nb_descriptors;
 	enum qat_device_gen qat_dev_gen;
 	build_request_t build_request;
+	process_response_t process_response;
 } __rte_cache_aligned;
@@ -76,6 +82,10 @@ int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
 
+int
+qat_sym_process_response(void **op, uint8_t *resp,
+	__rte_unused void *op_cookie, enum qat_device_gen qat_dev_gen);
+
 void qat_sym_stats_get(struct rte_cryptodev *dev,
 		struct rte_cryptodev_stats *stats);
 void qat_sym_stats_reset(struct rte_cryptodev *dev);
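
The typedef above is what lets other services plug in later. Purely as a
hypothetical illustration, a future service's handler could look like the
sketch below; every "dummy_" name and the descriptor layout are invented,
and only the opaque_data convention (the firmware echoing back the pointer
stored at enqueue) matches what qat_sym_process_response relies on.

#include <stdint.h>

/* Stand-ins so this sketch compiles on its own; the real definitions
 * are in the QAT PMD headers.
 */
enum qat_device_gen { QAT_GEN1, QAT_GEN2 };

typedef int (*process_response_t)(void **ops, uint8_t *resp,
		void *op_cookie, enum qat_device_gen qat_dev_gen);

/* Invented response layout for a hypothetical future service. */
struct dummy_resp {
	uint64_t opaque_data;	/* op pointer stored at enqueue, echoed back */
	uint8_t status;
};

static int
dummy_process_response(void **op, uint8_t *resp, void *op_cookie,
		enum qat_device_gen qat_dev_gen)
{
	struct dummy_resp *r = (struct dummy_resp *)resp;

	(void)op_cookie;
	(void)qat_dev_gen;

	/* Recover the op the same way the sym handler does. */
	*op = (void *)(uintptr_t)r->opaque_data;
	return r->status == 0 ? 0 : -1;
}

/* The service would then install the handler at qp setup, mirroring
 * the sym hunk above:
 *
 *	qp->process_response = dummy_process_response;
 */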