qat: Add support for separate AAD and output buffers

MFC after:	1 week
Sponsored by:	Rubicon Communications, LLC ("Netgate")
Mark Johnston 2021-01-27 15:30:58 -05:00
parent d0d2e523ba
commit bd674d8b1f
5 changed files with 287 additions and 106 deletions
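As context for the diff below: these are the two crypto(9) features the driver learns to handle, seen from the consumer side. The sketch is illustrative only (the helper, its parameters, and the omitted IV and error handling are hypothetical); the flags and cryptop fields are the ones qat_probesession() and qat_crypto_load() check in this change.

#include <sys/param.h>
#include <sys/malloc.h>
#include <opencrypto/cryptodev.h>

/*
 * Hypothetical consumer: AES-GCM with the AAD and the ciphertext in their
 * own buffers.  The session is assumed to have been created with
 * csp_flags = CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD.
 */
static int
example_gcm_encrypt(crypto_session_t cses, void *in, void *out, size_t len,
    void *aad, size_t aadlen, int (*done)(struct cryptop *))
{
        struct cryptop *crp;

        crp = crypto_getreq(cses, M_WAITOK);
        crypto_use_buf(crp, in, len);           /* input payload */
        crypto_use_output_buf(crp, out, len);   /* separate output buffer */
        crp->crp_payload_start = 0;
        crp->crp_payload_length = len;
        crp->crp_payload_output_start = 0;
        crp->crp_aad = aad;                     /* separate AAD buffer */
        crp->crp_aad_length = aadlen;
        crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
        crp->crp_callback = done;
        return (crypto_dispatch(crp));
}

With crp_aad set, the AAD is no longer reachable through crp_buf, so the driver must DMA-load it separately; with crp_obuf set, the firmware needs distinct source and destination scatter/gather lists. Those are the two paths added below.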


@@ -1172,9 +1172,8 @@ static void
qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb,
struct qat_sym_cookie *qsc)
{
explicit_bzero(qsc->qsc_iv_buf, sizeof(qsc->qsc_iv_buf));
explicit_bzero(qsc->qsc_auth_res, sizeof(qsc->qsc_auth_res));
explicit_bzero(qsc->qsc_iv_buf, EALG_MAX_BLOCK_LEN);
explicit_bzero(qsc->qsc_auth_res, QAT_SYM_HASH_BUFFER_LEN);
mtx_lock(&qcb->qcb_bank_mtx);
qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc;
@@ -1350,48 +1349,17 @@ struct qat_crypto_load_cb_arg {
int error;
};
static void
qat_crypto_load_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
int error)
static int
qat_crypto_populate_buf_list(struct buffer_list_desc *buffers,
bus_dma_segment_t *segs, int niseg, int noseg, int skip)
{
struct cryptop *crp;
struct flat_buffer_desc *flatbuf;
struct qat_crypto_load_cb_arg *arg;
struct qat_session *qs;
struct qat_sym_cookie *qsc;
bus_addr_t addr;
bus_size_t len;
int iseg, oseg, skip;
int iseg, oseg;
arg = _arg;
if (error != 0) {
arg->error = error;
return;
}
crp = arg->crp;
qs = arg->qs;
qsc = arg->qsc;
if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
/*
* The firmware expects AAD to be in a contiguous buffer and
* padded to a multiple of 16 bytes. To satisfy these
* constraints we bounce the AAD into a per-request buffer.
*/
crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length,
qsc->qsc_gcm_aad);
memset(qsc->qsc_gcm_aad + crp->crp_aad_length, 0,
roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN) -
crp->crp_aad_length);
skip = crp->crp_payload_start;
} else if (crp->crp_aad_length > 0) {
skip = crp->crp_aad_start;
} else {
skip = crp->crp_payload_start;
}
for (iseg = oseg = 0; iseg < nseg; iseg++) {
for (iseg = 0, oseg = noseg; iseg < niseg && oseg < QAT_MAXSEG;
iseg++) {
addr = segs[iseg].ds_addr;
len = segs[iseg].ds_len;
@@ -1406,11 +1374,117 @@ qat_crypto_load_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
}
}
flatbuf = &qsc->qsc_flat_bufs[oseg++];
flatbuf = &buffers->flat_bufs[oseg++];
flatbuf->data_len_in_bytes = (uint32_t)len;
flatbuf->phy_buffer = (uint64_t)addr;
}
qsc->qsc_buf_list.num_buffers = oseg;
buffers->num_buffers = oseg;
return iseg < niseg ? E2BIG : 0;
}
static void
qat_crypto_load_aadbuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
int error)
{
struct qat_crypto_load_cb_arg *arg;
struct qat_sym_cookie *qsc;
arg = _arg;
if (error != 0) {
arg->error = error;
return;
}
qsc = arg->qsc;
arg->error = qat_crypto_populate_buf_list(&qsc->qsc_buf_list, segs,
nseg, 0, 0);
}
static void
qat_crypto_load_buf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
int error)
{
struct cryptop *crp;
struct qat_crypto_load_cb_arg *arg;
struct qat_session *qs;
struct qat_sym_cookie *qsc;
int noseg, skip;
arg = _arg;
if (error != 0) {
arg->error = error;
return;
}
crp = arg->crp;
qs = arg->qs;
qsc = arg->qsc;
if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
/* AAD was handled in qat_crypto_load(). */
skip = crp->crp_payload_start;
noseg = 0;
} else if (crp->crp_aad == NULL && crp->crp_aad_length > 0) {
skip = crp->crp_aad_start;
noseg = 0;
} else {
skip = crp->crp_payload_start;
noseg = crp->crp_aad == NULL ?
0 : qsc->qsc_buf_list.num_buffers;
}
arg->error = qat_crypto_populate_buf_list(&qsc->qsc_buf_list, segs,
nseg, noseg, skip);
}
static void
qat_crypto_load_obuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
int error)
{
struct buffer_list_desc *ibufs, *obufs;
struct flat_buffer_desc *ibuf, *obuf;
struct cryptop *crp;
struct qat_crypto_load_cb_arg *arg;
struct qat_session *qs;
struct qat_sym_cookie *qsc;
int buflen, osegs, tocopy;
arg = _arg;
if (error != 0) {
arg->error = error;
return;
}
crp = arg->crp;
qs = arg->qs;
qsc = arg->qsc;
/*
* The payload must start at the same offset in the output SG list as in
* the input SG list. Copy over SG entries from the input corresponding
* to the AAD buffer.
*/
osegs = 0;
if (qs->qs_auth_algo != HW_AUTH_ALGO_GALOIS_128 &&
crp->crp_aad_length > 0) {
tocopy = crp->crp_aad == NULL ?
crp->crp_payload_start - crp->crp_aad_start :
crp->crp_aad_length;
ibufs = &qsc->qsc_buf_list;
obufs = &qsc->qsc_obuf_list;
for (; osegs < ibufs->num_buffers && tocopy > 0; osegs++) {
ibuf = &ibufs->flat_bufs[osegs];
obuf = &obufs->flat_bufs[osegs];
obuf->phy_buffer = ibuf->phy_buffer;
buflen = imin(ibuf->data_len_in_bytes, tocopy);
obuf->data_len_in_bytes = buflen;
tocopy -= buflen;
}
}
arg->error = qat_crypto_populate_buf_list(&qsc->qsc_obuf_list, segs,
nseg, osegs, crp->crp_payload_output_start);
}
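To make the comment in qat_crypto_load_obuf_cb() concrete, a worked example with invented sizes: 20 bytes of AAD ahead of a 1480-byte payload, with a separate output buffer supplied by the caller.

/*
 * Illustrative layout only (sizes invented):
 *
 *   qsc_buf_list  (source):      [0] 20 bytes   AAD
 *                                [1] 1480 bytes payload from crp_buf
 *
 *   qsc_obuf_list (destination): [0] 20 bytes   copied from qsc_buf_list[0]
 *                                [1] 1480 bytes payload from crp_obuf
 *
 * The leading entry is copied (clamped to the AAD length by the imin()
 * above) so that the payload starts at the same buffer-list position in
 * both lists, satisfying the requirement stated in the comment (the
 * firmware presumably applies the same cipher offset to both lists).
 */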
static int
@@ -1426,10 +1500,52 @@ qat_crypto_load(struct qat_session *qs, struct qat_sym_cookie *qsc,
arg.qs = qs;
arg.qsc = qsc;
arg.error = 0;
error = bus_dmamap_load_crp(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap,
crp, qat_crypto_load_cb, &arg, BUS_DMA_NOWAIT);
if (error == 0)
error = arg.error;
error = 0;
if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128 &&
crp->crp_aad_length > 0) {
/*
* The firmware expects AAD to be in a contiguous buffer and
* padded to a multiple of 16 bytes. To satisfy these
* constraints we bounce the AAD into a per-request buffer.
* There is a small limit on the AAD size so this is not too
* onerous.
*/
memset(qsc->qsc_gcm_aad, 0, QAT_GCM_AAD_SIZE_MAX);
if (crp->crp_aad == NULL) {
crypto_copydata(crp, crp->crp_aad_start,
crp->crp_aad_length, qsc->qsc_gcm_aad);
} else {
memcpy(qsc->qsc_gcm_aad, crp->crp_aad,
crp->crp_aad_length);
}
} else if (crp->crp_aad != NULL) {
error = bus_dmamap_load(
qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dma_tag,
qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dmamap,
crp->crp_aad, crp->crp_aad_length,
qat_crypto_load_aadbuf_cb, &arg, BUS_DMA_NOWAIT);
if (error == 0)
error = arg.error;
}
if (error == 0) {
error = bus_dmamap_load_crp_buffer(
qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dma_tag,
qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dmamap,
&crp->crp_buf, qat_crypto_load_buf_cb, &arg,
BUS_DMA_NOWAIT);
if (error == 0)
error = arg.error;
}
if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
error = bus_dmamap_load_crp_buffer(
qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dma_tag,
qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dmamap,
&crp->crp_obuf, qat_crypto_load_obuf_cb, &arg,
BUS_DMA_NOWAIT);
if (error == 0)
error = arg.error;
}
return error;
}
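As the comment in qat_crypto_load() notes, GCM AAD must reach the firmware contiguous and padded to a 16-byte multiple; zeroing all of qsc_gcm_aad up front satisfies the padding without computing a per-request padded length. A minimal sketch of the arithmetic involved, assuming QAT_AES_GCM_AAD_ALIGN is 16 as the comment implies (the helper itself is hypothetical):

#include <sys/param.h>  /* roundup2() */

/* Padded AAD size as consumed by the firmware: 0 -> 0, 16 -> 16, 20 -> 32. */
static inline size_t
gcm_aad_padded_size(size_t aad_len)
{
        return (roundup2(aad_len, 16));
}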
@@ -1444,11 +1560,11 @@ qat_crypto_select_bank(struct qat_crypto *qcy)
static int
qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
{
int error, i, bank;
int curname = 0;
char *name;
int bank, curname, error, i, j;
bank = qcb->qcb_bank;
curname = 0;
name = qcb->qcb_ring_names[curname++];
snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank);
@@ -1480,10 +1596,16 @@ qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
qsc->qsc_self_dma_tag = qdm->qdm_dma_tag;
qsc->qsc_bulk_req_params_buf_paddr =
qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
u.qsc_bulk_cookie.qsbc_req_params_buf);
qsc_bulk_cookie.qsbc_req_params_buf);
qsc->qsc_buffer_list_desc_paddr =
qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
qsc_buf_list);
qsc->qsc_obuffer_list_desc_paddr =
qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
qsc_obuf_list);
qsc->qsc_obuffer_list_desc_paddr =
qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
qsc_obuf_list);
qsc->qsc_iv_buf_paddr =
qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
qsc_iv_buf);
@@ -1499,24 +1621,25 @@ qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
qcb->qcb_symck_free[i] = qsc;
qcb->qcb_symck_free_count++;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
QAT_MAXLEN, /* maxsize */
QAT_MAXSEG, /* nsegments */
QAT_MAXLEN, /* maxsegsize */
BUS_DMA_COHERENT, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&qsc->qsc_buf_dma_tag);
if (error != 0)
return error;
error = bus_dmamap_create(qsc->qsc_buf_dma_tag,
BUS_DMA_COHERENT, &qsc->qsc_buf_dmamap);
if (error)
return error;
for (j = 0; j < QAT_SYM_DMA_COUNT; j++) {
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
1, 0, /* alignment, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
QAT_MAXLEN, /* maxsize */
QAT_MAXSEG, /* nsegments */
QAT_MAXLEN, /* maxsegsize */
BUS_DMA_COHERENT, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&qsc->qsc_dma[j].qsd_dma_tag);
if (error != 0)
return error;
error = bus_dmamap_create(qsc->qsc_dma[j].qsd_dma_tag,
BUS_DMA_COHERENT, &qsc->qsc_dma[j].qsd_dmamap);
if (error != 0)
return error;
}
}
return 0;
@@ -1534,10 +1657,17 @@ static void
qat_crypto_bank_deinit(struct qat_softc *sc, struct qat_crypto_bank *qcb)
{
struct qat_dmamem *qdm;
int i;
struct qat_sym_cookie *qsc;
int i, j;
for (i = 0; i < QAT_NSYMCOOKIE; i++) {
qdm = &qcb->qcb_symck_dmamems[i];
qsc = qcb->qcb_symck_free[i];
for (j = 0; j < QAT_SYM_DMA_COUNT; j++) {
bus_dmamap_destroy(qsc->qsc_dma[j].qsd_dma_tag,
qsc->qsc_dma[j].qsd_dmamap);
bus_dma_tag_destroy(qsc->qsc_dma[j].qsd_dma_tag);
}
qat_free_dmamem(sc, qdm);
}
qat_free_dmamem(sc, &qcb->qcb_sym_tx->qr_dma);
@@ -1653,6 +1783,15 @@ qat_crypto_stop(struct qat_softc *sc)
(void)crypto_unregister_all(qcy->qcy_cid);
}
static void
qat_crypto_sym_dma_unload(struct qat_sym_cookie *qsc, enum qat_sym_dma i)
{
bus_dmamap_sync(qsc->qsc_dma[i].qsd_dma_tag, qsc->qsc_dma[i].qsd_dmamap,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(qsc->qsc_dma[i].qsd_dma_tag,
qsc->qsc_dma[i].qsd_dmamap);
}
static int
qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
{
@@ -1669,16 +1808,19 @@ qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset);
qsbc = &qsc->u.qsc_bulk_cookie;
qsbc = &qsc->qsc_bulk_cookie;
qcy = qsbc->qsbc_crypto;
qs = qsbc->qsbc_session;
crp = qsbc->qsbc_cb_tag;
bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_sync(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap);
if (crp->crp_aad != NULL)
qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_AADBUF);
qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_BUF);
if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_OBUF);
error = 0;
if ((auth_sz = qs->qs_auth_mlen) != 0) {
@@ -1719,6 +1861,10 @@ qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
static int
qat_probesession(device_t dev, const struct crypto_session_params *csp)
{
if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
0)
return EINVAL;
if (csp->csp_cipher_alg == CRYPTO_AES_XTS &&
qat_lookup(dev)->qatp_chip == QAT_CHIP_C2XXX) {
/*
@@ -2092,15 +2238,26 @@ qat_process(device_t dev, struct cryptop *crp, int hint)
if (error != 0)
goto fail2;
qsbc = &qsc->u.qsc_bulk_cookie;
qsbc = &qsc->qsc_bulk_cookie;
qsbc->qsbc_crypto = qcy;
qsbc->qsbc_session = qs;
qsbc->qsbc_cb_tag = crp;
sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc, crp);
bus_dmamap_sync(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap,
if (crp->crp_aad != NULL) {
bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dma_tag,
qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dmamap,
BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
}
bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dma_tag,
qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dmamap,
BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dma_tag,
qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dmamap,
BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
}
bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap,
BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);


@@ -818,7 +818,7 @@ qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *qcb,
enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
enum fw_slice next_slice;
qsbc = &qsc->u.qsc_bulk_cookie;
qsbc = &qsc->qsc_bulk_cookie;
bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
memcpy(bulk_req, &desc->qcd_req_cache, QAT_HW15_SESSION_REQ_CACHE_SIZE);
@ -826,7 +826,13 @@ qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *qcb,
bulk_req->comn_hdr.comn_req_flags =
qat_hw15_get_comn_req_flags(qcb->qcb_bank % 2);
bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr;
if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
bulk_req->comn_mid.dest_data_addr =
qsc->qsc_obuffer_list_desc_paddr;
} else {
bulk_req->comn_mid.dest_data_addr =
qsc->qsc_buffer_list_desc_paddr;
}
bulk_req->req_params_addr = qsc->qsc_bulk_req_params_buf_paddr;
bulk_req->comn_ftr.next_request_addr = 0;
bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
@ -918,8 +924,14 @@ qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *qcb,
cipher_req->curr_id = FW_SLICE_CIPHER;
cipher_req->next_id = next_slice;
cipher_req->cipher_off = crp->crp_aad_length == 0 ? 0 :
crp->crp_payload_start - crp->crp_aad_start;
if (crp->crp_aad_length == 0) {
cipher_req->cipher_off = 0;
} else if (crp->crp_aad == NULL) {
cipher_req->cipher_off =
crp->crp_payload_start - crp->crp_aad_start;
} else {
cipher_req->cipher_off = crp->crp_aad_length;
}
cipher_req->cipher_len = crp->crp_payload_length;
cipher_req->state_address = qsc->qsc_iv_buf_paddr;
}


@@ -581,13 +581,19 @@ qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *qcb __unused,
uint8_t *req_params_ptr;
enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
qsbc = &qsc->u.qsc_bulk_cookie;
qsbc = &qsc->qsc_bulk_cookie;
bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
memcpy(bulk_req, desc->qcd_req_cache, sizeof(struct fw_la_bulk_req));
bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
bulk_req->comn_mid.dest_data_addr = qsc->qsc_buffer_list_desc_paddr;
if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
bulk_req->comn_mid.dest_data_addr =
qsc->qsc_obuffer_list_desc_paddr;
} else {
bulk_req->comn_mid.dest_data_addr =
qsc->qsc_buffer_list_desc_paddr;
}
if (__predict_false(crp->crp_cipher_key != NULL ||
crp->crp_auth_key != NULL))
qat_hw17_crypto_req_setkey(desc, qs, qsc, bulk_req, crp);
@@ -643,9 +649,15 @@ }
}
} else {
if (cmd_id != FW_LA_CMD_AUTH) {
cipher_param->cipher_offset =
crp->crp_aad_length == 0 ? 0 :
crp->crp_payload_start - crp->crp_aad_start;
if (crp->crp_aad_length == 0) {
cipher_param->cipher_offset = 0;
} else if (crp->crp_aad == NULL) {
cipher_param->cipher_offset =
crp->crp_payload_start - crp->crp_aad_start;
} else {
cipher_param->cipher_offset =
crp->crp_aad_length;
}
cipher_param->cipher_length = crp->crp_payload_length;
}
if (cmd_id != FW_LA_CMD_CIPHER) {
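Both firmware back ends (hw15 above and hw17 here) now choose the cipher offset with the same three-way rule. A hypothetical helper, shown only to summarize the shared logic; it does not exist in the driver:

#include <opencrypto/cryptodev.h>

static uint32_t
qat_cipher_offset(const struct cryptop *crp)
{
        if (crp->crp_aad_length == 0)
                return (0);
        if (crp->crp_aad == NULL) {
                /* Inline AAD: the payload follows it in the same buffer. */
                return (crp->crp_payload_start - crp->crp_aad_start);
        }
        /* Separate AAD occupies the leading buffer-list entries. */
        return (crp->crp_aad_length);
}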


@@ -1550,10 +1550,13 @@ struct flat_buffer_desc {
uint64_t phy_buffer;
};
#define HW_MAXSEG 32
struct buffer_list_desc {
uint64_t resrvd;
uint32_t num_buffers;
uint32_t reserved;
struct flat_buffer_desc flat_bufs[HW_MAXSEG];
};
/* -------------------------------------------------------------------------- */


@@ -69,13 +69,11 @@
#define QAT_NSYMREQ 256
#define QAT_NSYMCOOKIE ((QAT_NSYMREQ * 2 + 1) * 2)
#define QAT_NASYMREQ 64
#define QAT_BATCH_SUBMIT_FREE_SPACE 2
#define QAT_EV_NAME_SIZE 32
#define QAT_RING_NAME_SIZE 32
#define QAT_MAXSEG 32 /* max segments for sg dma */
#define QAT_MAXSEG HW_MAXSEG /* max segments for sg dma */
#define QAT_MAXLEN 65535 /* IP_MAXPACKET */
#define QAT_HB_INTERVAL 500 /* heartbeat msec */
@@ -519,7 +517,7 @@ struct qat_sym_hash_def {
struct qat_sym_bulk_cookie {
uint8_t qsbc_req_params_buf[QAT_SYM_REQ_PARAMS_SIZE_PADDED];
/* memory block reserved for request params
/* memory block reserved for request params, QAT 1.5 only
* NOTE: Field must be correctly aligned in memory for access by QAT
* engine */
struct qat_crypto *qsbc_crypto;
@@ -539,25 +537,26 @@ struct qat_sym_bulk_cookie {
HASH_CONTENT_DESC_SIZE + CIPHER_CONTENT_DESC_SIZE, \
QAT_OPTIMAL_ALIGN)
enum qat_sym_dma {
QAT_SYM_DMA_AADBUF = 0,
QAT_SYM_DMA_BUF,
QAT_SYM_DMA_OBUF,
QAT_SYM_DMA_COUNT,
};
struct qat_sym_dmamap {
bus_dmamap_t qsd_dmamap;
bus_dma_tag_t qsd_dma_tag;
};
struct qat_sym_cookie {
union qat_sym_cookie_u {
/* should be 64byte aligned */
struct qat_sym_bulk_cookie qsc_bulk_cookie;
/* symmetric bulk cookie */
#ifdef notyet
struct qat_sym_key_cookie qsc_key_cookie;
/* symmetric key cookie */
struct qat_sym_nrbg_cookie qsc_nrbg_cookie;
/* symmetric NRBG cookie */
#endif
} u;
struct qat_sym_bulk_cookie qsc_bulk_cookie;
/* should be 64-byte aligned */
struct buffer_list_desc qsc_buf_list;
struct flat_buffer_desc qsc_flat_bufs[QAT_MAXSEG]; /* should be here */
struct buffer_list_desc qsc_obuf_list;
bus_dmamap_t qsc_self_dmamap; /* self DMA mapping and
end of DMA region */
bus_dmamap_t qsc_self_dmamap;
bus_dma_tag_t qsc_self_dma_tag;
uint8_t qsc_iv_buf[EALG_MAX_BLOCK_LEN];
@@ -565,12 +564,11 @@ struct qat_sym_cookie {
uint8_t qsc_gcm_aad[QAT_GCM_AAD_SIZE_MAX];
uint8_t qsc_content_desc[CONTENT_DESC_MAX_SIZE];
bus_dmamap_t qsc_buf_dmamap; /* qsc_flat_bufs DMA mapping */
bus_dma_tag_t qsc_buf_dma_tag;
void *qsc_buf;
struct qat_sym_dmamap qsc_dma[QAT_SYM_DMA_COUNT];
bus_addr_t qsc_bulk_req_params_buf_paddr;
bus_addr_t qsc_buffer_list_desc_paddr;
bus_addr_t qsc_obuffer_list_desc_paddr;
bus_addr_t qsc_iv_buf_paddr;
bus_addr_t qsc_auth_res_paddr;
bus_addr_t qsc_gcm_aad_paddr;
@@ -578,7 +576,7 @@ };
};
CTASSERT(offsetof(struct qat_sym_cookie,
u.qsc_bulk_cookie.qsbc_req_params_buf) % QAT_OPTIMAL_ALIGN == 0);
qsc_bulk_cookie.qsbc_req_params_buf) % QAT_OPTIMAL_ALIGN == 0);
CTASSERT(offsetof(struct qat_sym_cookie, qsc_buf_list) % QAT_OPTIMAL_ALIGN == 0);
#define MAX_CIPHER_SETUP_BLK_SZ \
@@ -614,7 +612,6 @@ struct qat_crypto_desc {
uint8_t qcd_req_cache[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)];
} __aligned(QAT_OPTIMAL_ALIGN);
/* should be aligned to 64bytes */
struct qat_session {
struct qat_crypto_desc *qs_dec_desc; /* should be at top of struct*/
/* decrypt or auth then decrypt or auth */