crypto/qat: move generic qp function to qp file

Move the generic enqueue and dequeue functions from
qat_sym.c to qat_qp.c.
Move the generic qp structs to a new qat_qp.h file.

Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
4 files changed, 216 insertions(+), 197 deletions(-)

qat_qp.c

@@ -13,7 +13,9 @@
#include <rte_prefetch.h>
#include "qat_logs.h"
#include "qat_qp.h"
#include "qat_sym.h"
#include "adf_transport_access_macros.h"
#define ADF_MAX_SYM_DESC 4096
@@ -450,3 +452,153 @@ static void adf_configure_queues(struct qat_qp *qp)
WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
queue->hw_queue_number, queue_config);
}
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
uint32_t div = data >> shift;
uint32_t mult = div << shift;
return data - mult;
}
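/*
 * Note (illustrative, not part of this commit): adf_modulo() computes
 * data % (1u << shift) using only shifts, which is cheap on the fast
 * path; queue->modulo presumably holds log2 of the ring size in bytes,
 * so the callers below use it to wrap byte offsets around the ring:
 *
 *   adf_modulo(70, 6) == 70 - ((70 >> 6) << 6) == 70 - 64 == 6
 *   adf_modulo(x, s)  == x & ((1u << s) - 1)    (bitmask form)
 */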
static inline void
txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
q->hw_queue_number, q->tail);
q->nb_pending_requests = 0;
q->csr_tail = q->tail;
}
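/*
 * Note (illustrative, not part of this commit): q->tail is a shadow
 * copy advanced per request, while q->csr_tail records what was last
 * pushed to hardware. The tail CSR write is an MMIO access, so callers
 * batch several requests per txq_write_tail() call rather than ringing
 * the doorbell once per op.
 */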
static inline
void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
uint32_t old_head, new_head;
uint32_t max_head;
old_head = q->csr_head;
new_head = q->head;
max_head = qp->nb_descriptors * q->msg_size;
/* write out free descriptors */
void *cur_desc = (uint8_t *)q->base_addr + old_head;
if (new_head < old_head) {
memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
} else {
memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
}
q->nb_processed_responses = 0;
q->csr_head = new_head;
/* write current head to CSR */
WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
q->hw_queue_number, new_head);
}
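/*
 * Worked example (illustrative, not part of this commit): with
 * nb_descriptors = 64 and msg_size = 64, max_head = 4096 bytes. If
 * csr_head = 4032 and head has wrapped around to 64, new_head < old_head,
 * so the two memsets repaint [4032, 4096) and then [0, 64) with the
 * empty-signature byte, marking those response slots free before the
 * head CSR is updated.
 */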
uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
register uint32_t nb_ops_sent = 0;
register int ret;
uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
if (unlikely(nb_ops == 0))
return 0;
/* read params used a lot in main loop into registers */
queue = &(tmp_qp->tx_q);
base_addr = (uint8_t *)queue->base_addr;
tail = queue->tail;
/* Find how many can actually fit on the ring */
tmp_qp->inflights16 += nb_ops;
overflow = tmp_qp->inflights16 - queue->max_inflights;
if (overflow > 0) {
tmp_qp->inflights16 -= overflow;
nb_ops_possible = nb_ops - overflow;
if (nb_ops_possible == 0)
return 0;
}
while (nb_ops_sent != nb_ops_possible) {
ret = tmp_qp->build_request(*ops, base_addr + tail,
tmp_qp->op_cookies[tail / queue->msg_size],
tmp_qp->qat_dev_gen);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
/*
 * This message cannot be enqueued;
 * remove the unsent ops from the
 * in-flight count
 */
tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
if (nb_ops_sent == 0)
return 0;
goto kick_tail;
}
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
ops++;
nb_ops_sent++;
}
kick_tail:
queue->tail = tail;
tmp_qp->stats.enqueued_count += nb_ops_sent;
queue->nb_pending_requests += nb_ops_sent;
if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
txq_write_tail(tmp_qp, queue);
}
return nb_ops_sent;
}
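/*
 * Worked example (illustrative, not part of this commit): with
 * inflights16 = 4000, max_inflights = 4096 and nb_ops = 200, the
 * speculative add gives inflights16 = 4200, so overflow = 104, the
 * count is trimmed back to 4096 and only nb_ops_possible = 96 ops are
 * built. The tail CSR is then written either because the ring is nearly
 * idle (inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH) or because
 * enough requests have accumulated (> QAT_CSR_TAIL_WRITE_THRESH).
 */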
uint16_t
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
struct qat_queue *rx_queue, *tx_queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t head;
uint32_t resp_counter = 0;
uint8_t *resp_msg;
rx_queue = &(tmp_qp->rx_q);
tx_queue = &(tmp_qp->tx_q);
head = rx_queue->head;
resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
resp_counter != nb_ops) {
tmp_qp->process_response(ops, resp_msg,
tmp_qp->op_cookies[head / rx_queue->msg_size],
tmp_qp->qat_dev_gen);
head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
resp_msg = (uint8_t *)rx_queue->base_addr + head;
ops++;
resp_counter++;
}
if (resp_counter > 0) {
rx_queue->head = head;
tmp_qp->stats.dequeued_count += resp_counter;
rx_queue->nb_processed_responses += resp_counter;
tmp_qp->inflights16 -= resp_counter;
if (rx_queue->nb_processed_responses >
QAT_CSR_HEAD_WRITE_THRESH)
rxq_free_desc(tmp_qp, rx_queue);
}
/* also check if tail needs to be advanced */
if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
tx_queue->tail != tx_queue->csr_tail) {
txq_write_tail(tmp_qp, tx_queue);
}
return resp_counter;
}
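/*
 * Note (illustrative, not part of this commit): the head CSR write is
 * coalesced the same way: rxq_free_desc() only runs once more than
 * QAT_CSR_HEAD_WRITE_THRESH responses have been processed. The final
 * tail check covers the case where an earlier enqueue deferred its
 * doorbell; once few enough requests remain in flight, any pending
 * tail update is flushed so those requests are not stranded.
 */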

qat_qp.h (new file)

@@ -0,0 +1,63 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2018 Intel Corporation
*/
#ifndef _QAT_QP_H_
#define _QAT_QP_H_
#include "qat_common.h"
typedef int (*build_request_t)(void *op,
uint8_t *req, void *op_cookie,
enum qat_device_gen qat_dev_gen);
/**< Build a request from an op. */
typedef int (*process_response_t)(void **ops,
uint8_t *resp, void *op_cookie,
enum qat_device_gen qat_dev_gen);
/**< Process a response descriptor and return the associated op. */
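/*
 * Note (illustrative, not part of this commit): these two callbacks are
 * what keep qat_enqueue_op_burst()/qat_dequeue_op_burst() service
 * agnostic. The generic qp code never inspects the op itself; each
 * service (e.g. the symmetric crypto PMD) plugs in its own request
 * builder and response parser when the qp is created.
 */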
/**
* Structure associated with each queue.
*/
struct qat_queue {
char memz_name[RTE_MEMZONE_NAMESIZE];
void *base_addr; /* Base address */
rte_iova_t base_phys_addr; /* Queue physical address */
uint32_t head; /* Shadow copy of the head */
uint32_t tail; /* Shadow copy of the tail */
uint32_t modulo;
uint32_t msg_size;
uint16_t max_inflights;
uint32_t queue_size;
uint8_t hw_bundle_number;
uint8_t hw_queue_number;
/* HW queue aka ring offset on bundle */
uint32_t csr_head; /* last written head value */
uint32_t csr_tail; /* last written tail value */
uint16_t nb_processed_responses;
/* number of responses processed since last CSR head write */
uint16_t nb_pending_requests;
/* number of requests pending since last CSR tail write */
};
struct qat_qp {
void *mmap_bar_addr;
uint16_t inflights16;
struct qat_queue tx_q;
struct qat_queue rx_q;
struct rte_cryptodev_stats stats;
struct rte_mempool *op_cookie_pool;
void **op_cookies;
uint32_t nb_descriptors;
enum qat_device_gen qat_dev_gen;
build_request_t build_request;
process_response_t process_response;
} __rte_cache_aligned;
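/*
 * Note (illustrative, not part of this commit): op_cookies holds one
 * per-descriptor scratch buffer; the burst functions index it as
 * byte_offset / msg_size, so cookie i always pairs with ring slot i
 * for both requests and responses.
 */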
uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
uint16_t
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
#endif /* _QAT_QP_H_ */
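
For context, here is a minimal sketch of how a service could wire itself into the generic qp and reuse the burst functions. sym_qp_attach_callbacks and sym_enqueue_sketch are hypothetical names for illustration, and the wrapper is merely in the spirit of qat_sym_pmd_enqueue_op_burst() in qat_sym.c, not its confirmed body:

#include <rte_crypto.h>
#include "qat_qp.h"
#include "qat_sym.h"

/* Hypothetical helper (illustration only, assuming qat_sym_process_response
 * is visible here): point a freshly created qp at the symmetric crypto
 * callbacks so the generic burst functions can drive it without knowing
 * what the ops are. */
static void
sym_qp_attach_callbacks(struct qat_qp *qp)
{
	qp->build_request = qat_sym_build_request;
	qp->process_response = qat_sym_process_response;
}

/* A service-facing burst function then reduces to a cast-and-call
 * wrapper around the generic path. */
static uint16_t
sym_enqueue_sketch(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}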

qat_sym.c

@@ -14,6 +14,7 @@
#include "qat_logs.h"
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_qp.h"
#include "adf_transport_access_macros.h"
#define BYTE_LENGTH 8
@@ -83,8 +84,6 @@ cipher_decrypt_err:
/** Creates a context in either AES or DES in ECB mode
* Depends on openssl libcrypto
*/
static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline uint32_t
qat_bpicipher_preprocess(struct qat_sym_session *ctx,
@@ -197,102 +196,6 @@ qat_bpicipher_postprocess(struct qat_sym_session *ctx,
return sym_op->cipher.data.length - last_block_len;
}
static inline void
txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
q->hw_queue_number, q->tail);
q->nb_pending_requests = 0;
q->csr_tail = q->tail;
}
static uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
register uint32_t nb_ops_sent = 0;
register int ret;
uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
if (unlikely(nb_ops == 0))
return 0;
/* read params used a lot in main loop into registers */
queue = &(tmp_qp->tx_q);
base_addr = (uint8_t *)queue->base_addr;
tail = queue->tail;
/* Find how many can actually fit on the ring */
tmp_qp->inflights16 += nb_ops;
overflow = tmp_qp->inflights16 - queue->max_inflights;
if (overflow > 0) {
tmp_qp->inflights16 -= overflow;
nb_ops_possible = nb_ops - overflow;
if (nb_ops_possible == 0)
return 0;
}
while (nb_ops_sent != nb_ops_possible) {
ret = tmp_qp->build_request(*ops, base_addr + tail,
tmp_qp->op_cookies[tail / queue->msg_size],
tmp_qp->qat_dev_gen);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
/*
 * This message cannot be enqueued;
 * remove the unsent ops from the
 * in-flight count
 */
tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
if (nb_ops_sent == 0)
return 0;
goto kick_tail;
}
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
ops++;
nb_ops_sent++;
}
kick_tail:
queue->tail = tail;
tmp_qp->stats.enqueued_count += nb_ops_sent;
queue->nb_pending_requests += nb_ops_sent;
if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
txq_write_tail(tmp_qp, queue);
}
return nb_ops_sent;
}
static inline
void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
uint32_t old_head, new_head;
uint32_t max_head;
old_head = q->csr_head;
new_head = q->head;
max_head = qp->nb_descriptors * q->msg_size;
/* write out free descriptors */
void *cur_desc = (uint8_t *)q->base_addr + old_head;
if (new_head < old_head) {
memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
} else {
memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
}
q->nb_processed_responses = 0;
q->csr_head = new_head;
/* write current head to CSR */
WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
q->hw_queue_number, new_head);
}
uint16_t
qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
@@ -336,49 +239,6 @@ qat_sym_process_response(void **op, uint8_t *resp,
return 0;
}
static uint16_t
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
struct qat_queue *rx_queue, *tx_queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t head;
uint32_t resp_counter = 0;
uint8_t *resp_msg;
rx_queue = &(tmp_qp->rx_q);
tx_queue = &(tmp_qp->tx_q);
head = rx_queue->head;
resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
resp_counter != nb_ops) {
tmp_qp->process_response(ops, resp_msg,
tmp_qp->op_cookies[head / rx_queue->msg_size],
tmp_qp->qat_dev_gen);
head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
resp_msg = (uint8_t *)rx_queue->base_addr + head;
ops++;
resp_counter++;
}
if (resp_counter > 0) {
rx_queue->head = head;
tmp_qp->stats.dequeued_count += resp_counter;
rx_queue->nb_processed_responses += resp_counter;
tmp_qp->inflights16 -= resp_counter;
if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
rxq_free_desc(tmp_qp, rx_queue);
}
/* also check if tail needs to be advanced */
if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
tx_queue->tail != tx_queue->csr_tail) {
txq_write_tail(tmp_qp, tx_queue);
}
return resp_counter;
}
uint16_t
qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
@@ -903,13 +763,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
return 0;
}
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
uint32_t div = data >> shift;
uint32_t mult = div << shift;
return data - mult;
}
void qat_sym_stats_get(struct rte_cryptodev *dev,
struct rte_cryptodev_stats *stats)

qat_sym.h

@@ -27,57 +27,8 @@
#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
/* number of inflights below which no tail write coalescing should occur */
typedef int (*build_request_t)(void *op,
uint8_t *req, void *op_cookie,
enum qat_device_gen qat_dev_gen);
/**< Build a request from an op. */
typedef int (*process_response_t)(void **ops,
uint8_t *resp, void *op_cookie,
enum qat_device_gen qat_dev_gen);
/**< Process a response descriptor and return the associated op. */
struct qat_sym_session;
/**
* Structure associated with each queue.
*/
struct qat_queue {
char memz_name[RTE_MEMZONE_NAMESIZE];
void *base_addr; /* Base address */
rte_iova_t base_phys_addr; /* Queue physical address */
uint32_t head; /* Shadow copy of the head */
uint32_t tail; /* Shadow copy of the tail */
uint32_t modulo;
uint32_t msg_size;
uint16_t max_inflights;
uint32_t queue_size;
uint8_t hw_bundle_number;
uint8_t hw_queue_number;
/* HW queue aka ring offset on bundle */
uint32_t csr_head; /* last written head value */
uint32_t csr_tail; /* last written tail value */
uint16_t nb_processed_responses;
/* number of responses processed since last CSR head write */
uint16_t nb_pending_requests;
/* number of requests pending since last CSR tail write */
};
struct qat_qp {
void *mmap_bar_addr;
uint16_t inflights16;
struct qat_queue tx_q;
struct qat_queue rx_q;
struct rte_cryptodev_stats stats;
struct rte_mempool *op_cookie_pool;
void **op_cookies;
uint32_t nb_descriptors;
enum qat_device_gen qat_dev_gen;
build_request_t build_request;
process_response_t process_response;
} __rte_cache_aligned;
int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
void *op_cookie, enum qat_device_gen qat_dev_gen);