ktls: Support asynchronous dispatch of AEAD ciphers.
KTLS OCF support was originally targeted at software backends that used
host CPU cycles to encrypt TLS records. As a result, each KTLS worker
thread queued a single TLS record at a time and waited for it to be
encrypted before processing another TLS record. This works well for
software backends, but limits throughput on OCF drivers for
coprocessors that support asynchronous operation, such as qat(4) or
ccr(4).

This change uses an alternate function (ktls_encrypt_async) when
encrypting TLS records via a coprocessor. This function queues TLS
records for encryption and returns. The work done after a TLS record
has been encrypted (such as marking the mbufs ready) is deferred to a
callback invoked asynchronously by the coprocessor driver when a record
has been encrypted.

- Add a struct ktls_ocf_encrypt_state that holds the per-request state.
  Synchronous requests continue to allocate this structure on the
  stack; asynchronous requests malloc it instead.

- Add a ktls_encrypt_async() variant of ktls_encrypt() which does not
  perform request completion after dispatching a request to OCF.
  Instead, the ktls_ocf backends invoke ktls_encrypt_cb() when a TLS
  record request completes for an asynchronous request.

- Flag AEAD software TLS sessions as async if the backend driver
  selected by OCF is an async driver.

- Pull the code to create and dispatch an OCF request out of
  ktls_encrypt() into a new ktls_encrypt_record() function used by both
  ktls_encrypt() and ktls_encrypt_async().

- Pull the code to "finish" the VM page shuffling for a file-backed TLS
  record into a helper function, ktls_finish_nonanon(), used by both
  ktls_encrypt() and ktls_encrypt_cb().

Reviewed by:	markj
Tested on:	ccr(4) (jhb), qat(4) (markj)
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D31665
commit 470e851c4b
parent 35a0342508
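In outline, the new asynchronous path hands each record to the
coprocessor driver and finishes it from the driver's completion
callback. A condensed sketch of that flow, drawn from the ktls_ocf.c
changes below (EAGAIN retry and error unwinding omitted):

	/* Queue the request with OCF and return without waiting. */
	static int
	ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state,
	    struct cryptop *crp)
	{
		crp->crp_opaque = state;
		crp->crp_callback = ktls_ocf_dispatch_async_cb;
		return (crypto_dispatch(crp));
	}

	/* Invoked by the coprocessor driver once the record is encrypted. */
	static int
	ktls_ocf_dispatch_async_cb(struct cryptop *crp)
	{
		struct ktls_ocf_encrypt_state *state = crp->crp_opaque;
		int error = crp->crp_etype;

		crypto_destroyreq(crp);
		ktls_encrypt_cb(state, error);	/* marks the mbufs ready */
		return (0);
	}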
sys/kern/uipc_ktls.c
@@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/domainset.h>
+#include <sys/endian.h>
 #include <sys/ktls.h>
 #include <sys/lock.h>
 #include <sys/mbuf.h>
@@ -73,7 +74,8 @@ __FBSDID("$FreeBSD$");
 #ifdef TCP_OFFLOAD
 #include <netinet/tcp_offload.h>
 #endif
 #include <opencrypto/xform.h>
 #include <opencrypto/cryptodev.h>
+#include <opencrypto/ktls.h>
 #include <vm/uma_dbg.h>
 #include <vm/vm.h>
 #include <vm/vm_pageout.h>
@@ -2024,6 +2026,72 @@ ktls_buffer_alloc(struct ktls_wq *wq, struct mbuf *m)
 	return (buf);
 }
 
+static int
+ktls_encrypt_record(struct ktls_wq *wq, struct mbuf *m,
+    struct ktls_session *tls, struct ktls_ocf_encrypt_state *state)
+{
+	vm_page_t pg;
+	int error, i, len, off;
+
+	KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) == (M_EXTPG | M_NOTREADY),
+	    ("%p not unready & nomap mbuf\n", m));
+	KASSERT(ptoa(m->m_epg_npgs) <= ktls_maxlen,
+	    ("page count %d larger than maximum frame length %d", m->m_epg_npgs,
+	    ktls_maxlen));
+
+	/* Anonymous mbufs are encrypted in place. */
+	if ((m->m_epg_flags & EPG_FLAG_ANON) != 0)
+		return (tls->sw_encrypt(state, tls, m, NULL, 0));
+
+	/*
+	 * For file-backed mbufs (from sendfile), anonymous wired
+	 * pages are allocated and used as the encryption destination.
+	 */
+	if ((state->cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
+		len = ptoa(m->m_epg_npgs - 1) + m->m_epg_last_len -
+		    m->m_epg_1st_off;
+		state->dst_iov[0].iov_base = (char *)state->cbuf +
+		    m->m_epg_1st_off;
+		state->dst_iov[0].iov_len = len;
+		state->parray[0] = DMAP_TO_PHYS((vm_offset_t)state->cbuf);
+		i = 1;
+	} else {
+		off = m->m_epg_1st_off;
+		for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
+			do {
+				pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
+				    VM_ALLOC_NOOBJ | VM_ALLOC_NODUMP |
+				    VM_ALLOC_WIRED | VM_ALLOC_WAITFAIL);
+			} while (pg == NULL);
+
+			len = m_epg_pagelen(m, i, off);
+			state->parray[i] = VM_PAGE_TO_PHYS(pg);
+			state->dst_iov[i].iov_base =
+			    (char *)PHYS_TO_DMAP(state->parray[i]) + off;
+			state->dst_iov[i].iov_len = len;
+		}
+	}
+	KASSERT(i + 1 <= nitems(state->dst_iov), ("dst_iov is too small"));
+	state->dst_iov[i].iov_base = m->m_epg_trail;
+	state->dst_iov[i].iov_len = m->m_epg_trllen;
+
+	error = tls->sw_encrypt(state, tls, m, state->dst_iov, i + 1);
+
+	if (__predict_false(error != 0)) {
+		/* Free the anonymous pages. */
+		if (state->cbuf != NULL)
+			uma_zfree(ktls_buffer_zone, state->cbuf);
+		else {
+			for (i = 0; i < m->m_epg_npgs; i++) {
+				pg = PHYS_TO_VM_PAGE(state->parray[i]);
+				(void)vm_page_unwire_noq(pg);
+				vm_page_free(pg);
+			}
+		}
+	}
+	return (error);
+}
+
 void
 ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
 {
@@ -2055,19 +2123,48 @@ ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
 	counter_u64_add(ktls_cnt_tx_queued, 1);
 }
 
-#define	MAX_TLS_PAGES	(1 + btoc(TLS_MAX_MSG_SIZE_V10_2))
+/*
+ * Once a file-backed mbuf (from sendfile) has been encrypted, free
+ * the pages from the file and replace them with the anonymous pages
+ * allocated in ktls_encrypt_record().
+ */
+static void
+ktls_finish_nonanon(struct mbuf *m, struct ktls_ocf_encrypt_state *state)
+{
+	int i;
+
+	MPASS((m->m_epg_flags & EPG_FLAG_ANON) == 0);
+
+	/* Free the old pages. */
+	m->m_ext.ext_free(m);
+
+	/* Replace them with the new pages. */
+	if (state->cbuf != NULL) {
+		for (i = 0; i < m->m_epg_npgs; i++)
+			m->m_epg_pa[i] = state->parray[0] + ptoa(i);
+
+		/* Contig pages should go back to the cache. */
+		m->m_ext.ext_free = ktls_free_mext_contig;
+	} else {
+		for (i = 0; i < m->m_epg_npgs; i++)
+			m->m_epg_pa[i] = state->parray[i];
+
+		/* Use the basic free routine. */
+		m->m_ext.ext_free = mb_free_mext_pgs;
+	}
+
+	/* Pages are now writable. */
+	m->m_epg_flags |= EPG_FLAG_ANON;
+}
 
 static __noinline void
 ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
 {
+	struct ktls_ocf_encrypt_state state;
 	struct ktls_session *tls;
 	struct socket *so;
 	struct mbuf *m;
-	vm_paddr_t parray[MAX_TLS_PAGES + 1];
-	struct iovec dst_iov[MAX_TLS_PAGES + 2];
-	vm_page_t pg;
-	void *cbuf;
-	int error, i, len, npages, off, total_pages;
+	int error, npages, total_pages;
 
 	so = top->m_epg_so;
 	tls = top->m_epg_tls;
@@ -2101,86 +2198,19 @@ ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
 		KASSERT(m->m_epg_tls == tls,
 		    ("different TLS sessions in a single mbuf chain: %p vs %p",
 		    tls, m->m_epg_tls));
-		KASSERT((m->m_flags & (M_EXTPG | M_NOTREADY)) ==
-		    (M_EXTPG | M_NOTREADY),
-		    ("%p not unready & nomap mbuf (top = %p)\n", m, top));
 		KASSERT(npages + m->m_epg_npgs <= total_pages,
 		    ("page count mismatch: top %p, total_pages %d, m %p", top,
 		    total_pages, m));
-		KASSERT(ptoa(m->m_epg_npgs) <= ktls_maxlen,
-		    ("page count %d larger than maximum frame length %d",
-		    m->m_epg_npgs, ktls_maxlen));
-
-		/*
-		 * For anonymous mbufs, encryption is done in place.
-		 * For file-backed mbufs (from sendfile), anonymous
-		 * wired pages are allocated and used as the
-		 * encryption destination.
-		 */
-		if ((m->m_epg_flags & EPG_FLAG_ANON) != 0) {
-			error = (*tls->sw_encrypt)(tls, m, NULL, 0);
-		} else {
-			if ((cbuf = ktls_buffer_alloc(wq, m)) != NULL) {
-				len = ptoa(m->m_epg_npgs - 1) +
-				    m->m_epg_last_len - m->m_epg_1st_off;
-				dst_iov[0].iov_base = (char *)cbuf +
-				    m->m_epg_1st_off;
-				dst_iov[0].iov_len = len;
-				parray[0] = DMAP_TO_PHYS((vm_offset_t)cbuf);
-				i = 1;
-			} else {
-				off = m->m_epg_1st_off;
-				for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
-					do {
-						pg = vm_page_alloc(NULL, 0,
-						    VM_ALLOC_NORMAL |
-						    VM_ALLOC_NOOBJ |
-						    VM_ALLOC_NODUMP |
-						    VM_ALLOC_WIRED |
-						    VM_ALLOC_WAITFAIL);
-					} while (pg == NULL);
-
-					len = m_epg_pagelen(m, i, off);
-					parray[i] = VM_PAGE_TO_PHYS(pg);
-					dst_iov[i].iov_base =
-					    (char *)(void *)PHYS_TO_DMAP(
-					    parray[i]) + off;
-					dst_iov[i].iov_len = len;
-				}
-			}
-			KASSERT(i + 1 <= nitems(dst_iov),
-			    ("dst_iov is too small"));
-			dst_iov[i].iov_base = m->m_epg_trail;
-			dst_iov[i].iov_len = m->m_epg_trllen;
-
-			error = (*tls->sw_encrypt)(tls, m, dst_iov, i + 1);
-
-			/* Free the old pages. */
-			m->m_ext.ext_free(m);
-
-			/* Replace them with the new pages. */
-			if (cbuf != NULL) {
-				for (i = 0; i < m->m_epg_npgs; i++)
-					m->m_epg_pa[i] = parray[0] + ptoa(i);
-
-				/* Contig pages should go back to the cache. */
-				m->m_ext.ext_free = ktls_free_mext_contig;
-			} else {
-				for (i = 0; i < m->m_epg_npgs; i++)
-					m->m_epg_pa[i] = parray[i];
-
-				/* Use the basic free routine. */
-				m->m_ext.ext_free = mb_free_mext_pgs;
-			}
-
-			/* Pages are now writable. */
-			m->m_epg_flags |= EPG_FLAG_ANON;
-		}
+		error = ktls_encrypt_record(wq, m, tls, &state);
 		if (error) {
 			counter_u64_add(ktls_offload_failed_crypto, 1);
 			break;
 		}
 
+		if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
+			ktls_finish_nonanon(m, &state);
+
 		npages += m->m_epg_nrdy;
 
 		/*
@@ -2208,6 +2238,118 @@ ktls_encrypt(struct ktls_wq *wq, struct mbuf *top)
 	CURVNET_RESTORE();
 }
 
+void
+ktls_encrypt_cb(struct ktls_ocf_encrypt_state *state, int error)
+{
+	struct ktls_session *tls;
+	struct socket *so;
+	struct mbuf *m;
+	int npages;
+
+	m = state->m;
+
+	if ((m->m_epg_flags & EPG_FLAG_ANON) == 0)
+		ktls_finish_nonanon(m, state);
+
+	so = state->so;
+	free(state, M_KTLS);
+
+	/*
+	 * Drop a reference to the session now that it is no longer
+	 * needed.  Existing code depends on encrypted records having
+	 * no associated session vs yet-to-be-encrypted records having
+	 * an associated session.
+	 */
+	tls = m->m_epg_tls;
+	m->m_epg_tls = NULL;
+	ktls_free(tls);
+
+	if (error != 0)
+		counter_u64_add(ktls_offload_failed_crypto, 1);
+
+	CURVNET_SET(so->so_vnet);
+	npages = m->m_epg_nrdy;
+
+	if (error == 0) {
+		(void)(*so->so_proto->pr_usrreqs->pru_ready)(so, m, npages);
+	} else {
+		so->so_proto->pr_usrreqs->pru_abort(so);
+		so->so_error = EIO;
+		mb_free_notready(m, npages);
+	}
+
+	SOCK_LOCK(so);
+	sorele(so);
+	CURVNET_RESTORE();
+}
+
+/*
+ * Similar to ktls_encrypt, but used with asynchronous OCF backends
+ * (coprocessors) where encryption does not use host CPU resources and
+ * it can be beneficial to queue more requests than CPUs.
+ */
+static __noinline void
+ktls_encrypt_async(struct ktls_wq *wq, struct mbuf *top)
+{
+	struct ktls_ocf_encrypt_state *state;
+	struct ktls_session *tls;
+	struct socket *so;
+	struct mbuf *m, *n;
+	int error, mpages, npages, total_pages;
+
+	so = top->m_epg_so;
+	tls = top->m_epg_tls;
+	KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
+	KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
+#ifdef INVARIANTS
+	top->m_epg_so = NULL;
+#endif
+	total_pages = top->m_epg_enc_cnt;
+	npages = 0;
+
+	error = 0;
+	for (m = top; npages != total_pages; m = n) {
+		KASSERT(m->m_epg_tls == tls,
+		    ("different TLS sessions in a single mbuf chain: %p vs %p",
+		    tls, m->m_epg_tls));
+		KASSERT(npages + m->m_epg_npgs <= total_pages,
+		    ("page count mismatch: top %p, total_pages %d, m %p", top,
+		    total_pages, m));
+
+		state = malloc(sizeof(*state), M_KTLS, M_WAITOK | M_ZERO);
+		soref(so);
+		state->so = so;
+		state->m = m;
+
+		mpages = m->m_epg_nrdy;
+		n = m->m_next;
+
+		error = ktls_encrypt_record(wq, m, tls, state);
+		if (error) {
+			counter_u64_add(ktls_offload_failed_crypto, 1);
+			free(state, M_KTLS);
+			CURVNET_SET(so->so_vnet);
+			SOCK_LOCK(so);
+			sorele(so);
+			CURVNET_RESTORE();
+			break;
+		}
+
+		npages += mpages;
+	}
+
+	CURVNET_SET(so->so_vnet);
+	if (error != 0) {
+		so->so_proto->pr_usrreqs->pru_abort(so);
+		so->so_error = EIO;
+		mb_free_notready(m, total_pages - npages);
+	}
+
+	SOCK_LOCK(so);
+	sorele(so);
+	CURVNET_RESTORE();
+}
+
 static void
 ktls_alloc_thread(void *ctx)
 {
@@ -2306,7 +2448,10 @@ ktls_work_thread(void *ctx)
 				ktls_free(m->m_epg_tls);
 				m_free_raw(m);
 			} else {
-				ktls_encrypt(wq, m);
+				if (m->m_epg_tls->sync_dispatch)
+					ktls_encrypt(wq, m);
+				else
+					ktls_encrypt_async(wq, m);
 				counter_u64_add(ktls_cnt_tx_queued, -1);
 			}
 		}
sys/opencrypto/ktls.h (new file, 53 lines)
@@ -0,0 +1,53 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Netflix Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __OPENCRYPTO_KTLS_H__
+#define	__OPENCRYPTO_KTLS_H__
+
+#define	MAX_TLS_PAGES	(1 + btoc(TLS_MAX_MSG_SIZE_V10_2))
+
+struct ktls_ocf_encrypt_state {
+	struct socket *so;
+	struct mbuf *m;
+	void *cbuf;
+	struct iovec dst_iov[MAX_TLS_PAGES + 2];
+	vm_paddr_t parray[MAX_TLS_PAGES + 1];
+
+	struct cryptop crp;
+	struct uio uio;
+	union {
+		struct tls_mac_data mac;
+		struct tls_aead_data aead;
+		struct tls_aead_data_13 aead13;
+	};
+};
+
+void ktls_encrypt_cb(struct ktls_ocf_encrypt_state *state, int error);
+void ktls_ocf_free(struct ktls_session *tls);
+int ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction);
+
+#endif	/* !__OPENCRYPTO_KTLS_H__ */
sys/opencrypto/ktls_ocf.c
@@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/pmap.h>
 #include <vm/vm_param.h>
 #include <opencrypto/cryptodev.h>
+#include <opencrypto/ktls.h>
 
 struct ocf_session {
 	crypto_session_t sid;
@@ -180,13 +181,53 @@ ktls_ocf_dispatch(struct ocf_session *os, struct cryptop *crp)
 }
 
 static int
-ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls, struct mbuf *m,
-    struct iovec *outiov, int outiovcnt)
+ktls_ocf_dispatch_async_cb(struct cryptop *crp)
+{
+	struct ktls_ocf_encrypt_state *state;
+	int error;
+
+	state = crp->crp_opaque;
+	if (crp->crp_etype == EAGAIN) {
+		crp->crp_etype = 0;
+		crp->crp_flags &= ~CRYPTO_F_DONE;
+		counter_u64_add(ocf_retries, 1);
+		error = crypto_dispatch(crp);
+		if (error != 0) {
+			crypto_destroyreq(crp);
+			ktls_encrypt_cb(state, error);
+		}
+		return (0);
+	}
+
+	error = crp->crp_etype;
+	crypto_destroyreq(crp);
+	ktls_encrypt_cb(state, error);
+	return (0);
+}
+
+static int
+ktls_ocf_dispatch_async(struct ktls_ocf_encrypt_state *state,
+    struct cryptop *crp)
+{
+	int error;
+
+	crp->crp_opaque = state;
+	crp->crp_callback = ktls_ocf_dispatch_async_cb;
+	error = crypto_dispatch(crp);
+	if (error != 0)
+		crypto_destroyreq(crp);
+	return (error);
+}
+
+static int
+ktls_ocf_tls_cbc_encrypt(struct ktls_ocf_encrypt_state *state,
+    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
+    int outiovcnt)
 {
 	const struct tls_record_layer *hdr;
-	struct uio uio;
-	struct tls_mac_data ad;
-	struct cryptop crp;
+	struct uio *uio;
+	struct tls_mac_data *ad;
+	struct cryptop *crp;
 	struct ocf_session *os;
 	struct iovec iov[m->m_epg_npgs + 2];
 	u_int pgoff;
@@ -198,6 +239,9 @@ ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls, struct mbuf *m,
 
 	os = tls->cipher;
 	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
+	crp = &state->crp;
+	uio = &state->uio;
+	MPASS(tls->sync_dispatch);
 
 #ifdef INVARIANTS
 	if (os->implicit_iv) {
@@ -221,15 +265,16 @@ ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls, struct mbuf *m,
 	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
 
 	/* Initialize the AAD. */
-	ad.seq = htobe64(m->m_epg_seqno);
-	ad.type = hdr->tls_type;
-	ad.tls_vmajor = hdr->tls_vmajor;
-	ad.tls_vminor = hdr->tls_vminor;
-	ad.tls_length = htons(tls_comp_len);
+	ad = &state->mac;
+	ad->seq = htobe64(m->m_epg_seqno);
+	ad->type = hdr->tls_type;
+	ad->tls_vmajor = hdr->tls_vmajor;
+	ad->tls_vminor = hdr->tls_vminor;
+	ad->tls_length = htons(tls_comp_len);
 
 	/* First, compute the MAC. */
-	iov[0].iov_base = &ad;
-	iov[0].iov_len = sizeof(ad);
+	iov[0].iov_base = ad;
+	iov[0].iov_len = sizeof(*ad);
 	pgoff = m->m_epg_1st_off;
 	for (i = 0; i < m->m_epg_npgs; i++, pgoff = 0) {
 		iov[i + 1].iov_base = (void *)PHYS_TO_DMAP(m->m_epg_pa[i] +
@@ -238,23 +283,23 @@ ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls, struct mbuf *m,
 	}
 	iov[m->m_epg_npgs + 1].iov_base = m->m_epg_trail;
 	iov[m->m_epg_npgs + 1].iov_len = os->mac_len;
-	uio.uio_iov = iov;
-	uio.uio_iovcnt = m->m_epg_npgs + 2;
-	uio.uio_offset = 0;
-	uio.uio_segflg = UIO_SYSSPACE;
-	uio.uio_td = curthread;
-	uio.uio_resid = sizeof(ad) + tls_comp_len + os->mac_len;
+	uio->uio_iov = iov;
+	uio->uio_iovcnt = m->m_epg_npgs + 2;
+	uio->uio_offset = 0;
+	uio->uio_segflg = UIO_SYSSPACE;
+	uio->uio_td = curthread;
+	uio->uio_resid = sizeof(*ad) + tls_comp_len + os->mac_len;
 
-	crypto_initreq(&crp, os->mac_sid);
-	crp.crp_payload_start = 0;
-	crp.crp_payload_length = sizeof(ad) + tls_comp_len;
-	crp.crp_digest_start = crp.crp_payload_length;
-	crp.crp_op = CRYPTO_OP_COMPUTE_DIGEST;
-	crp.crp_flags = CRYPTO_F_CBIMM;
-	crypto_use_uio(&crp, &uio);
-	error = ktls_ocf_dispatch(os, &crp);
+	crypto_initreq(crp, os->mac_sid);
+	crp->crp_payload_start = 0;
+	crp->crp_payload_length = sizeof(*ad) + tls_comp_len;
+	crp->crp_digest_start = crp->crp_payload_length;
+	crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
+	crp->crp_flags = CRYPTO_F_CBIMM;
+	crypto_use_uio(crp, uio);
+	error = ktls_ocf_dispatch(os, crp);
 
-	crypto_destroyreq(&crp);
+	crypto_destroyreq(crp);
 	if (error) {
 #ifdef INVARIANTS
 		if (os->implicit_iv) {
@@ -272,27 +317,27 @@ ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls, struct mbuf *m,
 			m->m_epg_trail[os->mac_len + i] = pad;
 
 	/* Finally, encrypt the record. */
-	crypto_initreq(&crp, os->sid);
-	crp.crp_payload_start = m->m_epg_hdrlen;
-	crp.crp_payload_length = tls_comp_len + m->m_epg_trllen;
-	KASSERT(crp.crp_payload_length % AES_BLOCK_LEN == 0,
+	crypto_initreq(crp, os->sid);
+	crp->crp_payload_start = m->m_epg_hdrlen;
+	crp->crp_payload_length = tls_comp_len + m->m_epg_trllen;
+	KASSERT(crp->crp_payload_length % AES_BLOCK_LEN == 0,
 	    ("invalid encryption size"));
-	crypto_use_single_mbuf(&crp, m);
-	crp.crp_op = CRYPTO_OP_ENCRYPT;
-	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
+	crypto_use_single_mbuf(crp, m);
+	crp->crp_op = CRYPTO_OP_ENCRYPT;
+	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
 	if (os->implicit_iv)
-		memcpy(crp.crp_iv, os->iv, AES_BLOCK_LEN);
+		memcpy(crp->crp_iv, os->iv, AES_BLOCK_LEN);
 	else
-		memcpy(crp.crp_iv, hdr + 1, AES_BLOCK_LEN);
+		memcpy(crp->crp_iv, hdr + 1, AES_BLOCK_LEN);
 
 	if (outiov != NULL) {
-		uio.uio_iov = outiov;
-		uio.uio_iovcnt = outiovcnt;
-		uio.uio_offset = 0;
-		uio.uio_segflg = UIO_SYSSPACE;
-		uio.uio_td = curthread;
-		uio.uio_resid = crp.crp_payload_length;
-		crypto_use_output_uio(&crp, &uio);
+		uio->uio_iov = outiov;
+		uio->uio_iovcnt = outiovcnt;
+		uio->uio_offset = 0;
+		uio->uio_segflg = UIO_SYSSPACE;
+		uio->uio_td = curthread;
+		uio->uio_resid = crp->crp_payload_length;
+		crypto_use_output_uio(crp, uio);
 	}
 
 	if (os->implicit_iv)
@@ -303,9 +348,9 @@ ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls, struct mbuf *m,
 		counter_u64_add(ocf_separate_output, 1);
 	else
 		counter_u64_add(ocf_inplace, 1);
-	error = ktls_ocf_dispatch(os, &crp);
+	error = ktls_ocf_dispatch(os, crp);
 
-	crypto_destroyreq(&crp);
+	crypto_destroyreq(crp);
 
 	if (os->implicit_iv) {
 		KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
@@ -323,26 +368,29 @@ ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls, struct mbuf *m,
 }
 
 static int
-ktls_ocf_tls12_aead_encrypt(struct ktls_session *tls, struct mbuf *m,
-    struct iovec *outiov, int outiovcnt)
+ktls_ocf_tls12_aead_encrypt(struct ktls_ocf_encrypt_state *state,
+    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
+    int outiovcnt)
 {
 	const struct tls_record_layer *hdr;
-	struct uio uio;
-	struct tls_aead_data ad;
-	struct cryptop crp;
+	struct uio *uio;
+	struct tls_aead_data *ad;
+	struct cryptop *crp;
 	struct ocf_session *os;
 	int error;
 	uint16_t tls_comp_len;
 
 	os = tls->cipher;
 	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
+	crp = &state->crp;
+	uio = &state->uio;
 
-	crypto_initreq(&crp, os->sid);
+	crypto_initreq(crp, os->sid);
 
 	/* Setup the IV. */
 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
-		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
-		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
+		memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
+		memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
 		    sizeof(uint64_t));
 	} else {
 		/*
@@ -350,41 +398,42 @@ ktls_ocf_tls12_aead_encrypt(struct ktls_session *tls, struct mbuf *m,
 		 * identically to constructing the IV for AEAD in TLS
 		 * 1.3.
 		 */
-		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
-		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(m->m_epg_seqno);
+		memcpy(crp->crp_iv, tls->params.iv, tls->params.iv_len);
+		*(uint64_t *)(crp->crp_iv + 4) ^= htobe64(m->m_epg_seqno);
 	}
 
 	/* Setup the AAD. */
+	ad = &state->aead;
 	tls_comp_len = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
-	ad.seq = htobe64(m->m_epg_seqno);
-	ad.type = hdr->tls_type;
-	ad.tls_vmajor = hdr->tls_vmajor;
-	ad.tls_vminor = hdr->tls_vminor;
-	ad.tls_length = htons(tls_comp_len);
-	crp.crp_aad = &ad;
-	crp.crp_aad_length = sizeof(ad);
+	ad->seq = htobe64(m->m_epg_seqno);
+	ad->type = hdr->tls_type;
+	ad->tls_vmajor = hdr->tls_vmajor;
+	ad->tls_vminor = hdr->tls_vminor;
+	ad->tls_length = htons(tls_comp_len);
+	crp->crp_aad = ad;
+	crp->crp_aad_length = sizeof(*ad);
 
 	/* Set fields for input payload. */
-	crypto_use_single_mbuf(&crp, m);
-	crp.crp_payload_start = m->m_epg_hdrlen;
-	crp.crp_payload_length = tls_comp_len;
+	crypto_use_single_mbuf(crp, m);
+	crp->crp_payload_start = m->m_epg_hdrlen;
+	crp->crp_payload_length = tls_comp_len;
 
 	if (outiov != NULL) {
-		crp.crp_digest_start = crp.crp_payload_length;
+		crp->crp_digest_start = crp->crp_payload_length;
 
-		uio.uio_iov = outiov;
-		uio.uio_iovcnt = outiovcnt;
-		uio.uio_offset = 0;
-		uio.uio_segflg = UIO_SYSSPACE;
-		uio.uio_td = curthread;
-		uio.uio_resid = crp.crp_payload_length + tls->params.tls_tlen;
-		crypto_use_output_uio(&crp, &uio);
+		uio->uio_iov = outiov;
+		uio->uio_iovcnt = outiovcnt;
+		uio->uio_offset = 0;
+		uio->uio_segflg = UIO_SYSSPACE;
+		uio->uio_td = curthread;
+		uio->uio_resid = crp->crp_payload_length + tls->params.tls_tlen;
+		crypto_use_output_uio(crp, uio);
 	} else
-		crp.crp_digest_start = crp.crp_payload_start +
-		    crp.crp_payload_length;
+		crp->crp_digest_start = crp->crp_payload_start +
+		    crp->crp_payload_length;
 
-	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
-	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
+	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
+	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
 		counter_u64_add(ocf_tls12_gcm_crypts, 1);
 	else
@@ -393,9 +442,11 @@ ktls_ocf_tls12_aead_encrypt(struct ktls_session *tls, struct mbuf *m,
 		counter_u64_add(ocf_separate_output, 1);
 	else
 		counter_u64_add(ocf_inplace, 1);
-	error = ktls_ocf_dispatch(os, &crp);
-
-	crypto_destroyreq(&crp);
+	if (tls->sync_dispatch) {
+		error = ktls_ocf_dispatch(os, crp);
+		crypto_destroyreq(crp);
+	} else
+		error = ktls_ocf_dispatch_async(state, crp);
 	return (error);
 }
 
@@ -467,61 +518,66 @@ ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
 }
 
 static int
-ktls_ocf_tls13_aead_encrypt(struct ktls_session *tls, struct mbuf *m,
-    struct iovec *outiov, int outiovcnt)
+ktls_ocf_tls13_aead_encrypt(struct ktls_ocf_encrypt_state *state,
+    struct ktls_session *tls, struct mbuf *m, struct iovec *outiov,
+    int outiovcnt)
 {
 	const struct tls_record_layer *hdr;
-	struct uio uio;
-	struct tls_aead_data_13 ad;
-	char nonce[12];
-	struct cryptop crp;
+	struct uio *uio;
+	struct tls_aead_data_13 *ad;
+	struct cryptop *crp;
 	struct ocf_session *os;
+	char nonce[12];
 	int error;
 
 	os = tls->cipher;
 	hdr = (const struct tls_record_layer *)m->m_epg_hdr;
+	crp = &state->crp;
+	uio = &state->uio;
 
-	crypto_initreq(&crp, os->sid);
+	crypto_initreq(crp, os->sid);
 
 	/* Setup the nonce. */
 	memcpy(nonce, tls->params.iv, tls->params.iv_len);
 	*(uint64_t *)(nonce + 4) ^= htobe64(m->m_epg_seqno);
 
 	/* Setup the AAD. */
-	ad.type = hdr->tls_type;
-	ad.tls_vmajor = hdr->tls_vmajor;
-	ad.tls_vminor = hdr->tls_vminor;
-	ad.tls_length = hdr->tls_length;
-	crp.crp_aad = &ad;
-	crp.crp_aad_length = sizeof(ad);
+	ad = &state->aead13;
+	ad->type = hdr->tls_type;
+	ad->tls_vmajor = hdr->tls_vmajor;
+	ad->tls_vminor = hdr->tls_vminor;
+	ad->tls_length = hdr->tls_length;
+	crp->crp_aad = ad;
+	crp->crp_aad_length = sizeof(*ad);
 
 	/* Set fields for input payload. */
-	crypto_use_single_mbuf(&crp, m);
-	crp.crp_payload_start = m->m_epg_hdrlen;
-	crp.crp_payload_length = m->m_len - (m->m_epg_hdrlen + m->m_epg_trllen);
+	crypto_use_single_mbuf(crp, m);
+	crp->crp_payload_start = m->m_epg_hdrlen;
+	crp->crp_payload_length = m->m_len -
+	    (m->m_epg_hdrlen + m->m_epg_trllen);
 
 	/* Store the record type as the first byte of the trailer. */
 	m->m_epg_trail[0] = m->m_epg_record_type;
-	crp.crp_payload_length++;
+	crp->crp_payload_length++;
 
 	if (outiov != NULL) {
-		crp.crp_digest_start = crp.crp_payload_length;
+		crp->crp_digest_start = crp->crp_payload_length;
 
-		uio.uio_iov = outiov;
-		uio.uio_iovcnt = outiovcnt;
-		uio.uio_offset = 0;
-		uio.uio_segflg = UIO_SYSSPACE;
-		uio.uio_td = curthread;
-		uio.uio_resid = m->m_len - m->m_epg_hdrlen;
-		crypto_use_output_uio(&crp, &uio);
+		uio->uio_iov = outiov;
+		uio->uio_iovcnt = outiovcnt;
+		uio->uio_offset = 0;
+		uio->uio_segflg = UIO_SYSSPACE;
+		uio->uio_td = curthread;
+		uio->uio_resid = m->m_len - m->m_epg_hdrlen;
+		crypto_use_output_uio(crp, uio);
 	} else
-		crp.crp_digest_start = crp.crp_payload_start +
-		    crp.crp_payload_length;
+		crp->crp_digest_start = crp->crp_payload_start +
+		    crp->crp_payload_length;
 
-	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
-	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
+	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
+	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
 
-	memcpy(crp.crp_iv, nonce, sizeof(nonce));
+	memcpy(crp->crp_iv, nonce, sizeof(nonce));
 
 	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
 		counter_u64_add(ocf_tls13_gcm_crypts, 1);
@@ -531,9 +587,11 @@ ktls_ocf_tls13_aead_encrypt(struct ktls_session *tls, struct mbuf *m,
 		counter_u64_add(ocf_separate_output, 1);
 	else
 		counter_u64_add(ocf_inplace, 1);
-	error = ktls_ocf_dispatch(os, &crp);
-
-	crypto_destroyreq(&crp);
+	if (tls->sync_dispatch) {
+		error = ktls_ocf_dispatch(os, crp);
+		crypto_destroyreq(crp);
+	} else
+		error = ktls_ocf_dispatch_async(state, crp);
 	return (error);
 }
 
@@ -705,5 +763,13 @@ ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
 			memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
 		}
 	}
+
+	/*
+	 * AES-CBC is always synchronous currently.  Asynchronous
+	 * operation would require multiple callbacks and an additional
+	 * iovec array in ktls_ocf_encrypt_state.
+	 */
+	tls->sync_dispatch = CRYPTO_SESS_SYNC(os->sid) ||
+	    tls->params.cipher_algorithm == CRYPTO_AES_CBC;
 	return (0);
 }
sys/sys/ktls.h
@@ -167,6 +167,7 @@ struct tls_session_params {
 #define	KTLS_RX		2
 
 struct iovec;
+struct ktls_ocf_encrypt_state;
 struct ktls_session;
 struct m_snd_tag;
 struct mbuf;
@@ -175,8 +176,9 @@ struct socket;
 
 struct ktls_session {
 	union {
-		int	(*sw_encrypt)(struct ktls_session *tls, struct mbuf *m,
-		    struct iovec *dst, int iovcnt);
+		int	(*sw_encrypt)(struct ktls_ocf_encrypt_state *state,
+		    struct ktls_session *tls, struct mbuf *m,
+		    struct iovec *outiov, int outiovcnt);
 		int	(*sw_decrypt)(struct ktls_session *tls,
 		    const struct tls_record_layer *hdr, struct mbuf *m,
 		    uint64_t seqno, int *trailer_len);
@@ -195,6 +197,7 @@ struct ktls_session {
 	struct inpcb *inp;
 	bool reset_pending;
 	bool disable_ifnet_pending;
+	bool sync_dispatch;
 } __aligned(CACHE_LINE_SIZE);
 
 extern unsigned int ktls_ifnet_max_rexmit_pct;
@@ -206,8 +209,6 @@ int ktls_enable_tx(struct socket *so, struct tls_enable *en);
 void ktls_destroy(struct ktls_session *tls);
 void ktls_frame(struct mbuf *m, struct ktls_session *tls, int *enqueue_cnt,
     uint8_t record_type);
-void ktls_ocf_free(struct ktls_session *tls);
-int ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction);
 void ktls_seq(struct sockbuf *sb, struct mbuf *m);
 void ktls_enqueue(struct mbuf *m, struct socket *so, int page_count);
 void ktls_enqueue_to_free(struct mbuf *m);