Don't dynamically allocate data structures for KTLS crypto requests.

Allocate iovec arrays and struct cryptop and struct ocf_operation
objects on the stack to avoid the overhead of malloc() (see the
sketch below).

These structures are all small enough to fit on the stack of the KTLS
worker threads.

Reviewed by:	gallatin
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D25692
John Baldwin 2020-07-20 22:32:39 +00:00
parent ac11d85740
commit 33a1a488d5
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=363379
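
In outline, each request now lives entirely on the worker thread's stack:
crypto_initreq()/crypto_destroyreq() replace crypto_getreq()/crypto_freereq(),
and the struct ocf_operation plus the iovec array become locals instead of a
single malloc()'d allocation. A condensed sketch of the resulting dispatch
pattern (illustrative only, not the committed code; the helper name
ktls_ocf_dispatch_sketch is invented, and the EAGAIN retry loop from the diff
is omitted):

static int
ktls_ocf_dispatch_sketch(struct ocf_session *os, struct uio *uio)
{
	struct cryptop crp;		/* request lives on the stack */
	struct ocf_operation oo;	/* completion state, also on the stack */
	int error;

	oo.os = os;
	oo.done = false;

	/* Replaces crypto_getreq(os->sid, M_WAITOK). */
	crypto_initreq(&crp, os->sid);
	crypto_use_uio(&crp, uio);
	/* (AAD, IV, and payload/digest ranges would be set up here.) */
	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crp.crp_opaque = &oo;
	crp.crp_callback = ktls_ocf_callback;

	error = crypto_dispatch(&crp);
	if (error == 0) {
		/* Wait for the callback to mark the operation done. */
		mtx_lock(&os->lock);
		while (!oo.done)
			mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
		mtx_unlock(&os->lock);
		error = crp.crp_etype;
	}

	/* Replaces crypto_freereq(crp) and free(oo, M_KTLS_OCF). */
	crypto_destroyreq(&crp);
	return (error);
}

The full diff below applies this pattern to both the TLS 1.2 and TLS 1.3
GCM encrypt paths.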


@@ -51,7 +51,6 @@ struct ocf_session {
 struct ocf_operation {
 	struct ocf_session *os;
 	bool done;
-	struct iovec iov[0];
 };
 
 static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");
@@ -109,18 +108,18 @@ ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls,
 {
 	struct uio uio, out_uio, *tag_uio;
 	struct tls_aead_data ad;
-	struct cryptop *crp;
+	struct cryptop crp;
 	struct ocf_session *os;
-	struct ocf_operation *oo;
+	struct ocf_operation oo;
+	struct iovec iov[iovcnt + 1];
 	int i, error;
 	uint16_t tls_comp_len;
 	bool inplace;
 
 	os = tls->cipher;
 
-	oo = malloc(sizeof(*oo) + (iovcnt + 1) * sizeof(struct iovec),
-	    M_KTLS_OCF, M_WAITOK | M_ZERO);
-	oo->os = os;
+	oo.os = os;
+	oo.done = false;
 
 	uio.uio_iov = iniov;
 	uio.uio_iovcnt = iovcnt;
@@ -134,11 +133,11 @@ ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls,
 	out_uio.uio_segflg = UIO_SYSSPACE;
 	out_uio.uio_td = curthread;
 
-	crp = crypto_getreq(os->sid, M_WAITOK);
+	crypto_initreq(&crp, os->sid);
 
 	/* Setup the IV. */
-	memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
-	memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
+	memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
+	memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
 
 	/* Setup the AAD. */
 	tls_comp_len = ntohs(hdr->tls_length) -
@@ -148,19 +147,19 @@ ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls,
 	ad.tls_vmajor = hdr->tls_vmajor;
 	ad.tls_vminor = hdr->tls_vminor;
 	ad.tls_length = htons(tls_comp_len);
-	crp->crp_aad = &ad;
-	crp->crp_aad_length = sizeof(ad);
+	crp.crp_aad = &ad;
+	crp.crp_aad_length = sizeof(ad);
 
 	/* Compute payload length and determine if encryption is in place. */
 	inplace = true;
-	crp->crp_payload_start = 0;
+	crp.crp_payload_start = 0;
 	for (i = 0; i < iovcnt; i++) {
 		if (iniov[i].iov_base != outiov[i].iov_base)
 			inplace = false;
-		crp->crp_payload_length += iniov[i].iov_len;
+		crp.crp_payload_length += iniov[i].iov_len;
 	}
-	uio.uio_resid = crp->crp_payload_length;
-	out_uio.uio_resid = crp->crp_payload_length;
+	uio.uio_resid = crp.crp_payload_length;
+	out_uio.uio_resid = crp.crp_payload_length;
 
 	if (inplace)
 		tag_uio = &uio;
@@ -168,21 +167,21 @@ ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls,
 		tag_uio = &out_uio;
 
 	/* Duplicate iovec and append vector for tag. */
-	memcpy(oo->iov, tag_uio->uio_iov, iovcnt * sizeof(struct iovec));
-	tag_uio->uio_iov = oo->iov;
-	tag_uio->uio_iov[iovcnt].iov_base = trailer;
-	tag_uio->uio_iov[iovcnt].iov_len = AES_GMAC_HASH_LEN;
+	memcpy(iov, tag_uio->uio_iov, iovcnt * sizeof(struct iovec));
+	iov[iovcnt].iov_base = trailer;
+	iov[iovcnt].iov_len = AES_GMAC_HASH_LEN;
+	tag_uio->uio_iov = iov;
 	tag_uio->uio_iovcnt++;
-	crp->crp_digest_start = tag_uio->uio_resid;
+	crp.crp_digest_start = tag_uio->uio_resid;
 	tag_uio->uio_resid += AES_GMAC_HASH_LEN;
 
-	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
-	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
-	crypto_use_uio(crp, &uio);
+	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
+	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
+	crypto_use_uio(&crp, &uio);
 	if (!inplace)
-		crypto_use_output_uio(crp, &out_uio);
-	crp->crp_opaque = oo;
-	crp->crp_callback = ktls_ocf_callback;
+		crypto_use_output_uio(&crp, &out_uio);
+	crp.crp_opaque = &oo;
+	crp.crp_callback = ktls_ocf_callback;
 
 	counter_u64_add(ocf_tls12_gcm_crypts, 1);
 	if (inplace)
@@ -190,28 +189,27 @@ ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls,
 	else
 		counter_u64_add(ocf_separate_output, 1);
 
 	for (;;) {
-		error = crypto_dispatch(crp);
+		error = crypto_dispatch(&crp);
 		if (error)
 			break;
 
 		mtx_lock(&os->lock);
-		while (!oo->done)
-			mtx_sleep(oo, &os->lock, 0, "ocfktls", 0);
+		while (!oo.done)
+			mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
 		mtx_unlock(&os->lock);
 
-		if (crp->crp_etype != EAGAIN) {
-			error = crp->crp_etype;
+		if (crp.crp_etype != EAGAIN) {
+			error = crp.crp_etype;
 			break;
 		}
 
-		crp->crp_etype = 0;
-		crp->crp_flags &= ~CRYPTO_F_DONE;
-		oo->done = false;
+		crp.crp_etype = 0;
+		crp.crp_flags &= ~CRYPTO_F_DONE;
+		oo.done = false;
 		counter_u64_add(ocf_retries, 1);
 	}
 
-	crypto_freereq(crp);
-	free(oo, M_KTLS_OCF);
+	crypto_destroyreq(&crp);
 	return (error);
 }
@@ -223,22 +221,19 @@ ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls,
 	struct uio uio, out_uio;
 	struct tls_aead_data_13 ad;
 	char nonce[12];
-	struct cryptop *crp;
+	struct cryptop crp;
 	struct ocf_session *os;
-	struct ocf_operation *oo;
-	struct iovec *iov, *out_iov;
+	struct ocf_operation oo;
+	struct iovec iov[iovcnt + 1], out_iov[iovcnt + 1];
 	int i, error;
 	bool inplace;
 
 	os = tls->cipher;
 
-	oo = malloc(sizeof(*oo) + (iovcnt + 1) * sizeof(*iov) * 2, M_KTLS_OCF,
-	    M_WAITOK | M_ZERO);
-	oo->os = os;
-	iov = oo->iov;
-	out_iov = iov + iovcnt + 2;
+	oo.os = os;
+	oo.done = false;
 
-	crp = crypto_getreq(os->sid, M_WAITOK);
+	crypto_initreq(&crp, os->sid);
 
 	/* Setup the nonce. */
 	memcpy(nonce, tls->params.iv, tls->params.iv_len);
@@ -249,22 +244,22 @@ ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls,
 	ad.tls_vmajor = hdr->tls_vmajor;
 	ad.tls_vminor = hdr->tls_vminor;
 	ad.tls_length = hdr->tls_length;
-	crp->crp_aad = &ad;
-	crp->crp_aad_length = sizeof(ad);
+	crp.crp_aad = &ad;
+	crp.crp_aad_length = sizeof(ad);
 
 	/* Compute payload length and determine if encryption is in place. */
 	inplace = true;
-	crp->crp_payload_start = 0;
+	crp.crp_payload_start = 0;
 	for (i = 0; i < iovcnt; i++) {
 		if (iniov[i].iov_base != outiov[i].iov_base)
 			inplace = false;
-		crp->crp_payload_length += iniov[i].iov_len;
+		crp.crp_payload_length += iniov[i].iov_len;
 	}
 
 	/* Store the record type as the first byte of the trailer. */
 	trailer[0] = record_type;
-	crp->crp_payload_length++;
-	crp->crp_digest_start = crp->crp_payload_length;
+	crp.crp_payload_length++;
+	crp.crp_digest_start = crp.crp_payload_length;
 
 	/*
 	 * Duplicate the input iov to append the trailer. Always
@@ -277,10 +272,10 @@ ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls,
 	uio.uio_iov = iov;
 	uio.uio_iovcnt = iovcnt + 1;
 	uio.uio_offset = 0;
-	uio.uio_resid = crp->crp_payload_length + AES_GMAC_HASH_LEN;
+	uio.uio_resid = crp.crp_payload_length + AES_GMAC_HASH_LEN;
 	uio.uio_segflg = UIO_SYSSPACE;
 	uio.uio_td = curthread;
-	crypto_use_uio(crp, &uio);
+	crypto_use_uio(&crp, &uio);
 
 	if (!inplace) {
 		/* Duplicate the output iov to append the trailer. */
@@ -290,19 +285,19 @@ ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls,
 		out_uio.uio_iov = out_iov;
 		out_uio.uio_iovcnt = iovcnt + 1;
 		out_uio.uio_offset = 0;
-		out_uio.uio_resid = crp->crp_payload_length +
+		out_uio.uio_resid = crp.crp_payload_length +
 		    AES_GMAC_HASH_LEN;
 		out_uio.uio_segflg = UIO_SYSSPACE;
 		out_uio.uio_td = curthread;
-		crypto_use_output_uio(crp, &out_uio);
+		crypto_use_output_uio(&crp, &out_uio);
 	}
 
-	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
-	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
-	crp->crp_opaque = oo;
-	crp->crp_callback = ktls_ocf_callback;
+	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
+	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
+	crp.crp_opaque = &oo;
+	crp.crp_callback = ktls_ocf_callback;
 
-	memcpy(crp->crp_iv, nonce, sizeof(nonce));
+	memcpy(crp.crp_iv, nonce, sizeof(nonce));
 
 	counter_u64_add(ocf_tls13_gcm_crypts, 1);
 	if (inplace)
@@ -310,28 +305,27 @@ ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls,
 	else
 		counter_u64_add(ocf_separate_output, 1);
 
 	for (;;) {
-		error = crypto_dispatch(crp);
+		error = crypto_dispatch(&crp);
 		if (error)
 			break;
 
 		mtx_lock(&os->lock);
-		while (!oo->done)
-			mtx_sleep(oo, &os->lock, 0, "ocfktls", 0);
+		while (!oo.done)
+			mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
 		mtx_unlock(&os->lock);
 
-		if (crp->crp_etype != EAGAIN) {
-			error = crp->crp_etype;
+		if (crp.crp_etype != EAGAIN) {
+			error = crp.crp_etype;
 			break;
 		}
 
-		crp->crp_etype = 0;
-		crp->crp_flags &= ~CRYPTO_F_DONE;
-		oo->done = false;
+		crp.crp_etype = 0;
+		crp.crp_flags &= ~CRYPTO_F_DONE;
+		oo.done = false;
 		counter_u64_add(ocf_retries, 1);
 	}
 
-	crypto_freereq(crp);
-	free(oo, M_KTLS_OCF);
+	crypto_destroyreq(&crp);
 	return (error);
 }
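
For reference, the ktls_ocf_callback routine wired up in the diff follows the
usual OCF completion pattern implied by the mtx_sleep() wait loops above: it
marks the operation done under the session lock and wakes the sleeping worker.
A sketch of that pattern (assumed, not copied from the file):

static int
ktls_ocf_callback(struct cryptop *crp)
{
	struct ocf_operation *oo;

	oo = crp->crp_opaque;

	/* Mark the request complete and wake the worker sleeping on 'oo'. */
	mtx_lock(&oo->os->lock);
	oo->done = true;
	mtx_unlock(&oo->os->lock);
	wakeup(oo);
	return (0);
}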