Add accelerated AES using the ARMv8 crypto instructions. This is based

on the AES-NI code, and modified as needed for use on ARMv8. When loaded
the driver will check the appropriate field in the id_aa64isar0_el1
register to see if AES is supported, and if so the probe function will
signal the driver should attach.

With this I have seen up to 2000Mb/s from the cryptotest test with a single
thread on a ThunderX Pass 2.0.

Reviewed by:	imp
Obtained from:	ABT Systems Ltd
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D8297
This commit is contained in:
Andrew Turner 2016-11-21 11:18:00 +00:00
parent f55d404d45
commit d6699d292b
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=308921
8 changed files with 864 additions and 0 deletions

View File

@ -53,6 +53,7 @@ MAN= aac.4 \
${_aout.4} \
${_apic.4} \
arcmsr.4 \
${_armv8crypto.4} \
${_asmc.4} \
ata.4 \
ath.4 \
@ -746,6 +747,10 @@ MLINKS+=xe.4 if_xe.4
MLINKS+=xl.4 if_xl.4
MLINKS+=zyd.4 if_zyd.4
.if ${MACHINE_CPUARCH} == "aarch64"
_armv8crypto.4= armv8crypto.4
.endif
.if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386"
_acpi_asus.4= acpi_asus.4
_acpi_asus_wmi.4= acpi_asus_wmi.4

View File

@ -0,0 +1,83 @@
.\" Copyright (c) 2016 The FreeBSD Foundation
.\" All rights reserved.
.\"
.\" This software was developed by Andrew Turner under
.\" sponsorship from the FreeBSD Foundation.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd October 20, 2016
.Dt ARMV8CRYPTO 4
.Os
.Sh NAME
.Nm armv8crypto
.Nd "driver for the AES accelerator on ARM CPUs"
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following lines in your
kernel configuration file:
.Bd -ragged -offset indent
.Cd "device crypto"
.Cd "device armv8crypto"
.Ed
.Pp
Alternatively, to load the driver as a
module at boot time, place the following line in
.Xr loader.conf 5 :
.Bd -literal -offset indent
armv8crypto_load="YES"
.Ed
.Sh DESCRIPTION
Starting with the ARMv8 architecture, ARM Limited has added optional
cryptography instructions to accelerate AES, SHA-1, SHA-2, and
finite field arithmetic.
.Pp
The processor capability is reported as AES in the Instruction Set
Attributes 0 line at boot.
The
.Nm
driver does not attach on systems that lack the required CPU capability.
.Pp
The
.Nm
driver registers itself to accelerate AES operations for
.Xr crypto 4 .
.Sh SEE ALSO
.Xr crypt 3 ,
.Xr crypto 4 ,
.Xr intro 4 ,
.Xr ipsec 4 ,
.Xr random 4 ,
.Xr crypto 9
.Sh HISTORY
The
.Nm
driver first appeared in
.Fx 11.0 .
.Sh AUTHORS
.An -nosplit
The
.Nm
driver was written by
.An Andrew Turner Aq Mt andrew@FreeBSD.org .

View File

@ -136,6 +136,12 @@ contrib/vchiq/interface/vchiq_arm/vchiq_shim.c optional vchiq soc_brcm_bcm2837 \
compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_util.c optional vchiq soc_brcm_bcm2837 \
compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
crypto/armv8/armv8_crypto.c optional armv8crypto
armv8_crypto_wrap.o optional armv8crypto \
dependency "$S/crypto/armv8/armv8_crypto_wrap.c" \
compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -march=armv8a+crypto ${.IMPSRC}" \
no-implicit-rule \
clean "armv8_crypto_wrap.o"
crypto/blowfish/bf_enc.c optional crypto | ipsec
crypto/des/des_enc.c optional crypto | ipsec | netsmb
dev/acpica/acpi_if.m optional acpi

View File

@ -0,0 +1,565 @@
/*-
* Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
* Copyright (c) 2014,2016 The FreeBSD Foundation
* All rights reserved.
*
* Portions of this software were developed by John-Mark Gurney
* under sponsorship of the FreeBSD Foundation and
* Rubicon Communications, LLC (Netgate).
*
* This software was developed by Andrew Turner under
* sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This is based on the aesni code.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/uio.h>
#include <machine/vfp.h>
#include <opencrypto/cryptodev.h>
#include <cryptodev_if.h>
#include <crypto/armv8/armv8_crypto.h>
#include <crypto/rijndael/rijndael.h>
/*
 * Per-device driver state.  The session list keeps free sessions at the
 * head so armv8_crypto_newsession() can reuse them cheaply.
 */
struct armv8_crypto_softc {
	int	dieing;		/* set during detach; refuses new sessions */
	int32_t cid;		/* opencrypto driver id */
	uint32_t sid;		/* next session id to hand out */
	TAILQ_HEAD(armv8_crypto_sessions_head, armv8_crypto_session) sessions;
	struct rwlock lock;	/* protects sessions, dieing, sid */
};

/*
 * Per-CPU FPU (VFP/NEON) kernel contexts, each guarded by its own mutex;
 * allocated at attach, sized by mp_maxid + 1.
 */
static struct mtx *ctx_mtx;
static struct fpu_kern_ctx **ctx_vfp;

/*
 * Pin to the current CPU and take its VFP context.  The mutex is held
 * until RELEASE_CTX so the context cannot be reused concurrently.
 * NOTE(review): "AQUIRE" is a misspelling of "ACQUIRE" (kept as-is since
 * the use site depends on the name).
 */
#define AQUIRE_CTX(i, ctx)					\
	do {							\
		(i) = PCPU_GET(cpuid);				\
		mtx_lock(&ctx_mtx[(i)]);			\
		(ctx) = ctx_vfp[(i)];				\
	} while (0)
#define RELEASE_CTX(i, ctx)					\
	do {							\
		mtx_unlock(&ctx_mtx[(i)]);			\
		(i) = -1;					\
		(ctx) = NULL;					\
	} while (0)

static void armv8_crypto_freesession_locked(struct armv8_crypto_softc *,
    struct armv8_crypto_session *);
static int armv8_crypto_cipher_process(struct armv8_crypto_session *,
    struct cryptodesc *, struct cryptop *);

MALLOC_DEFINE(M_ARMV8_CRYPTO, "armv8_crypto", "ARMv8 Crypto Data");
/*
 * Bus identify method: add a single armv8crypto child to the parent if
 * one does not already exist.  Order 10 places it after hardware devices.
 */
static void
armv8_crypto_identify(driver_t *drv, device_t parent)
{

	/* Already added on a previous pass? */
	if (device_find_child(parent, "armv8crypto", -1) != NULL)
		return;

	/* NB: order 10 is so we get attached after h/w devices */
	if (BUS_ADD_CHILD(parent, 10, "armv8crypto", -1) == 0)
		panic("ARMv8 crypto: could not attach");
}
/*
 * Probe: succeed only when id_aa64isar0_el1 reports that the AES
 * instructions (with or without PMULL) are implemented.
 */
static int
armv8_crypto_probe(device_t dev)
{
	uint64_t isar0;
	int error;

	error = ENXIO;
	isar0 = READ_SPECIALREG(id_aa64isar0_el1);
	if (ID_AA64ISAR0_AES(isar0) == ID_AA64ISAR0_AES_BASE ||
	    ID_AA64ISAR0_AES(isar0) == ID_AA64ISAR0_AES_PMULL)
		error = 0;

	device_set_desc_copy(dev, "AES-CBC");

	/* TODO: Check more fields as we support more features */

	return (error);
}
/*
 * Attach: register with the crypto framework and allocate the per-CPU
 * FPU kernel contexts used while running the AES instructions.
 */
static int
armv8_crypto_attach(device_t dev)
{
	struct armv8_crypto_softc *sc;
	int i;

	sc = device_get_softc(dev);
	TAILQ_INIT(&sc->sessions);
	sc->dieing = 0;
	sc->sid = 1;

	/* Synchronous hardware driver: requests complete before returning. */
	sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE |
	    CRYPTOCAP_F_SYNC);
	if (sc->cid < 0) {
		device_printf(dev, "Could not get crypto driver id.\n");
		return (ENOMEM);
	}

	rw_init(&sc->lock, "armv8crypto");

	/* One FPU context and mutex per possible CPU. */
	ctx_mtx = malloc(sizeof(*ctx_mtx) * (mp_maxid + 1), M_ARMV8_CRYPTO,
	    M_WAITOK|M_ZERO);
	ctx_vfp = malloc(sizeof(*ctx_vfp) * (mp_maxid + 1), M_ARMV8_CRYPTO,
	    M_WAITOK|M_ZERO);

	CPU_FOREACH(i) {
		ctx_vfp[i] = fpu_kern_alloc_ctx(0);
		mtx_init(&ctx_mtx[i], "armv8cryptoctx", NULL, MTX_DEF|MTX_NEW);
	}

	/* AES-CBC is the only algorithm supported so far. */
	crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);

	return (0);
}
/*
 * Detach: refuse if any session is still in use, otherwise tear down the
 * session list, unregister from the crypto framework, and free the
 * per-CPU FPU contexts.
 */
static int
armv8_crypto_detach(device_t dev)
{
	struct armv8_crypto_softc *sc;
	struct armv8_crypto_session *ses;
	int i;

	sc = device_get_softc(dev);

	rw_wlock(&sc->lock);
	/* Any active session blocks detach. */
	TAILQ_FOREACH(ses, &sc->sessions, next) {
		if (ses->used) {
			rw_wunlock(&sc->lock);
			device_printf(dev,
			    "Cannot detach, sessions still active.\n");
			return (EBUSY);
		}
	}
	/* Stop new sessions from being created while we drain the list. */
	sc->dieing = 1;
	while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) {
		TAILQ_REMOVE(&sc->sessions, ses, next);
		free(ses, M_ARMV8_CRYPTO);
	}
	rw_wunlock(&sc->lock);
	crypto_unregister_all(sc->cid);

	rw_destroy(&sc->lock);

	/* Free the per-CPU contexts allocated in attach. */
	CPU_FOREACH(i) {
		if (ctx_vfp[i] != NULL) {
			mtx_destroy(&ctx_mtx[i]);
			fpu_kern_free_ctx(ctx_vfp[i]);
		}
		ctx_vfp[i] = NULL;
	}
	free(ctx_mtx, M_ARMV8_CRYPTO);
	ctx_mtx = NULL;
	free(ctx_vfp, M_ARMV8_CRYPTO);
	ctx_vfp = NULL;

	return (0);
}
/*
 * Validate the key length for the session's algorithm and expand the AES
 * encrypt and decrypt key schedules.
 *
 * Returns 0 on success or EINVAL for an unsupported algorithm/key length.
 */
static int
armv8_crypto_cipher_setup(struct armv8_crypto_session *ses,
    struct cryptoini *encini)
{
	int i;

	switch (ses->algo) {
	case CRYPTO_AES_CBC:
		/* cri_klen is in bits. */
		switch (encini->cri_klen) {
		case 128:
			ses->rounds = AES128_ROUNDS;
			break;
		case 192:
			ses->rounds = AES192_ROUNDS;
			break;
		case 256:
			ses->rounds = AES256_ROUNDS;
			break;
		default:
			CRYPTDEB("invalid CBC/ICM/GCM key length");
			return (EINVAL);
		}
		break;
	default:
		return (EINVAL);
	}

	rijndaelKeySetupEnc(ses->enc_schedule, encini->cri_key,
	    encini->cri_klen);
	rijndaelKeySetupDec(ses->dec_schedule, encini->cri_key,
	    encini->cri_klen);
	/*
	 * Byte-swap each 32-bit schedule word; presumably this converts the
	 * rijndael word layout into the byte order the NEON code expects
	 * when it loads the schedule with vld1q_u8 -- TODO confirm.
	 */
	for (i = 0; i < nitems(ses->enc_schedule); i++) {
		ses->enc_schedule[i] = bswap32(ses->enc_schedule[i]);
		ses->dec_schedule[i] = bswap32(ses->dec_schedule[i]);
	}

	return (0);
}
/*
 * Create a new session for the crypto framework.  Exactly one AES-CBC
 * cipher descriptor must be present in the cri chain.  A free session is
 * reused from the head of the list when available, otherwise a new one is
 * allocated.  On success the session id is returned via sidp.
 */
static int
armv8_crypto_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct armv8_crypto_softc *sc;
	struct armv8_crypto_session *ses;
	struct cryptoini *encini;
	int error;

	if (sidp == NULL || cri == NULL) {
		CRYPTDEB("no sidp or cri");
		return (EINVAL);
	}

	sc = device_get_softc(dev);
	if (sc->dieing)
		return (EINVAL);

	ses = NULL;
	encini = NULL;
	/* Accept exactly one AES-CBC descriptor; reject anything else. */
	for (; cri != NULL; cri = cri->cri_next) {
		switch (cri->cri_alg) {
		case CRYPTO_AES_CBC:
			if (encini != NULL) {
				CRYPTDEB("encini already set");
				return (EINVAL);
			}
			encini = cri;
			break;
		default:
			CRYPTDEB("unhandled algorithm");
			return (EINVAL);
		}
	}
	if (encini == NULL) {
		CRYPTDEB("no cipher");
		return (EINVAL);
	}

	rw_wlock(&sc->lock);
	/* Re-check under the lock: detach may have started meanwhile. */
	if (sc->dieing) {
		rw_wunlock(&sc->lock);
		return (EINVAL);
	}

	/*
	 * Free sessions goes first, so if first session is used, we need to
	 * allocate one.
	 */
	ses = TAILQ_FIRST(&sc->sessions);
	if (ses == NULL || ses->used) {
		ses = malloc(sizeof(*ses), M_ARMV8_CRYPTO, M_NOWAIT | M_ZERO);
		if (ses == NULL) {
			rw_wunlock(&sc->lock);
			return (ENOMEM);
		}
		ses->id = sc->sid++;
	} else {
		TAILQ_REMOVE(&sc->sessions, ses, next);
	}
	ses->used = 1;
	TAILQ_INSERT_TAIL(&sc->sessions, ses, next);
	rw_wunlock(&sc->lock);

	/* Key setup runs unlocked; the session is already marked used. */
	ses->algo = encini->cri_alg;
	error = armv8_crypto_cipher_setup(ses, encini);
	if (error != 0) {
		CRYPTDEB("setup failed");
		rw_wlock(&sc->lock);
		armv8_crypto_freesession_locked(sc, ses);
		rw_wunlock(&sc->lock);
		return (error);
	}

	*sidp = ses->id;
	return (0);
}
static void
armv8_crypto_freesession_locked(struct armv8_crypto_softc *sc,
struct armv8_crypto_session *ses)
{
uint32_t sid;
rw_assert(&sc->lock, RA_WLOCKED);
sid = ses->id;
TAILQ_REMOVE(&sc->sessions, ses, next);
*ses = (struct armv8_crypto_session){};
ses->id = sid;
TAILQ_INSERT_HEAD(&sc->sessions, ses, next);
}
/*
 * Free the session identified by the low 32 bits of tid.  Returns EINVAL
 * if no such session exists.
 */
static int
armv8_crypto_freesession(device_t dev, uint64_t tid)
{
	struct armv8_crypto_softc *sc;
	struct armv8_crypto_session *ses;
	uint32_t sid;
	int error;

	sc = device_get_softc(dev);
	sid = (uint32_t)tid;
	error = EINVAL;

	/* Used sessions live at the tail, so search from the back. */
	rw_wlock(&sc->lock);
	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, armv8_crypto_sessions_head,
	    next) {
		if (ses->id == sid) {
			armv8_crypto_freesession_locked(sc, ses);
			error = 0;
			break;
		}
	}
	rw_wunlock(&sc->lock);
	return (error);
}
/*
 * Process one crypto request.  Validates that the request holds exactly
 * one AES-CBC descriptor covering whole blocks, looks up the session from
 * crp_sid, and hands off to armv8_crypto_cipher_process().  Completion
 * (crypto_done) is always signalled, with the error in crp_etype.
 */
static int
armv8_crypto_process(device_t dev, struct cryptop *crp, int hint __unused)
{
	struct armv8_crypto_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd, *enccrd;
	struct armv8_crypto_session *ses;
	int error;

	error = 0;
	enccrd = NULL;

	/* Sanity check. */
	if (crp == NULL)
		return (EINVAL);

	if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
		error = EINVAL;
		goto out;
	}

	/* Exactly one AES-CBC descriptor is supported. */
	for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
			if (enccrd != NULL) {
				error = EINVAL;
				goto out;
			}
			enccrd = crd;
			break;
		default:
			error = EINVAL;
			goto out;
		}
	}

	if (enccrd == NULL) {
		error = EINVAL;
		goto out;
	}

	/* We can only handle full blocks for now */
	if ((enccrd->crd_len % AES_BLOCK_LEN) != 0) {
		error = EINVAL;
		goto out;
	}

	/* Used sessions live at the tail, so search from the back. */
	rw_rlock(&sc->lock);
	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, armv8_crypto_sessions_head,
	    next) {
		if (ses->id == (crp->crp_sid & 0xffffffff))
			break;
	}
	rw_runlock(&sc->lock);
	if (ses == NULL) {
		error = EINVAL;
		goto out;
	}

	error = armv8_crypto_cipher_process(ses, enccrd, crp);

out:
	crp->crp_etype = error;
	crypto_done(crp);
	return (error);
}
/*
 * Return a pointer to the descriptor's payload.  When the request buffer
 * is contiguous (single mbuf, single iovec, or a plain buffer) the data is
 * used in place and *allocated is set to 0.  Otherwise the payload is
 * copied into a freshly-malloc'd bounce buffer and *allocated is set to 1;
 * the caller must copy results back and free it.  Returns NULL when the
 * bounce allocation fails.
 */
static uint8_t *
armv8_crypto_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
    int *allocated)
{
	struct mbuf *m;
	struct uio *uio;
	struct iovec *iov;
	uint8_t *addr;

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		m = (struct mbuf *)crp->crp_buf;
		/* Chained mbufs are not contiguous; bounce. */
		if (m->m_next != NULL)
			goto alloc;
		addr = mtod(m, uint8_t *);
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		uio = (struct uio *)crp->crp_buf;
		if (uio->uio_iovcnt != 1)
			goto alloc;
		iov = uio->uio_iov;
		addr = (uint8_t *)iov->iov_base;
	} else
		addr = (uint8_t *)crp->crp_buf;
	*allocated = 0;
	addr += enccrd->crd_skip;
	return (addr);

alloc:
	addr = malloc(enccrd->crd_len, M_ARMV8_CRYPTO, M_NOWAIT);
	if (addr != NULL) {
		*allocated = 1;
		crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, addr);
	} else
		*allocated = 0;
	return (addr);
}
/*
 * Run the cipher for one descriptor.  Maps (or bounces) the payload,
 * enters an FPU kernel context if needed, sets up the IV, performs the
 * AES-CBC operation in place, and copies the result back when a bounce
 * buffer was used.
 *
 * Returns 0 on success or an errno; the caller reports it via crp_etype.
 */
static int
armv8_crypto_cipher_process(struct armv8_crypto_session *ses,
    struct cryptodesc *enccrd, struct cryptop *crp)
{
	struct fpu_kern_ctx *ctx;
	uint8_t *buf;
	uint8_t iv[AES_BLOCK_LEN];
	int allocated, error, i;
	int encflag, ivlen;
	int kt;

	encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT;

	buf = armv8_crypto_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	error = 0;

	/*
	 * The AES instructions use the VFP/NEON registers, so enter an FPU
	 * kernel context unless this thread already owns one.
	 */
	kt = is_fpu_kern_thread(0);
	if (!kt) {
		AQUIRE_CTX(i, ctx);
		error = fpu_kern_enter(curthread, ctx,
		    FPU_KERN_NORMAL | FPU_KERN_KTHR);
		if (error != 0)
			goto out;
	}

	/* Rekeying via the descriptor is not supported. */
	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		panic("CRD_F_KEY_EXPLICIT");
	}

	switch (enccrd->crd_alg) {
	case CRYPTO_AES_CBC:
		ivlen = AES_BLOCK_LEN;
		break;
	default:
		/*
		 * armv8_crypto_process() only passes AES-CBC descriptors;
		 * an explicit default keeps ivlen from ever being read
		 * uninitialized should that invariant change.
		 */
		panic("armv8crypto: unsupported algorithm %d",
		    enccrd->crd_alg);
	}

	/* Setup iv: explicit/generated on encrypt, explicit/in-buffer on
	 * decrypt.  On encrypt, inject the IV into the output unless the
	 * caller says it is already present. */
	if (encflag) {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, ivlen, iv);
	} else {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, iv, ivlen);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, ivlen, iv);
	}

	/* Do work */
	switch (ses->algo) {
	case CRYPTO_AES_CBC:
		if (encflag)
			armv8_aes_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    enccrd->crd_len, buf, buf, iv);
		else
			armv8_aes_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    enccrd->crd_len, buf, iv);
		break;
	}

	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);

	if (!kt) {
		fpu_kern_leave(curthread, ctx);
out:
		RELEASE_CTX(i, ctx);
	}

	if (allocated) {
		/* Scrub the bounce buffer before freeing; it holds data. */
		bzero(buf, enccrd->crd_len);
		free(buf, M_ARMV8_CRYPTO);
	}

	return (error);
}
/* Device and cryptodev method tables. */
static device_method_t armv8_crypto_methods[] = {
	DEVMETHOD(device_identify,	armv8_crypto_identify),
	DEVMETHOD(device_probe,		armv8_crypto_probe),
	DEVMETHOD(device_attach,	armv8_crypto_attach),
	DEVMETHOD(device_detach,	armv8_crypto_detach),

	DEVMETHOD(cryptodev_newsession,	armv8_crypto_newsession),
	DEVMETHOD(cryptodev_freesession, armv8_crypto_freesession),
	DEVMETHOD(cryptodev_process,	armv8_crypto_process),

	DEVMETHOD_END,
};

static DEFINE_CLASS_0(armv8crypto, armv8_crypto_driver, armv8_crypto_methods,
    sizeof(struct armv8_crypto_softc));
static devclass_t armv8_crypto_devclass;

/* Attach under nexus; identify() adds the child device. */
DRIVER_MODULE(armv8crypto, nexus, armv8_crypto_driver, armv8_crypto_devclass,
    0, 0);

View File

@ -0,0 +1,55 @@
/*-
* Copyright (c) 2016 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by Andrew Turner under
* sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _ARMV8_CRYPTO_H_
#define	_ARMV8_CRYPTO_H_

/* AES round counts for 128/192/256-bit keys. */
#define	AES128_ROUNDS	10
#define	AES192_ROUNDS	12
#define	AES256_ROUNDS	14

/* Key schedule size in bytes, sized for the largest (256-bit) key. */
#define	AES_SCHED_LEN	((AES256_ROUNDS + 1) * AES_BLOCK_LEN)

struct armv8_crypto_session {
	uint32_t enc_schedule[AES_SCHED_LEN/4];	/* expanded encrypt keys */
	uint32_t dec_schedule[AES_SCHED_LEN/4];	/* expanded decrypt keys */
	int algo;		/* CRYPTO_* algorithm */
	int rounds;		/* AES rounds for the key length */
	int used;		/* non-zero while a client owns this session */
	uint32_t id;		/* session id returned to the framework */
	TAILQ_ENTRY(armv8_crypto_session) next;
};

/* CBC encrypt: from -> to.  CBC decrypt works in place on buf. */
void armv8_aes_encrypt_cbc(int, const void *, size_t, const uint8_t *,
    uint8_t *, const uint8_t[static AES_BLOCK_LEN]);
void armv8_aes_decrypt_cbc(int, const void *, size_t, uint8_t *,
    const uint8_t[static AES_BLOCK_LEN]);

#endif /* _ARMV8_CRYPTO_H_ */

View File

@ -0,0 +1,128 @@
/*-
* Copyright (c) 2016 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by Andrew Turner under
* sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This code is built with floating-point enabled. Make sure to have entered
* into floating-point context before calling any of these functions.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <opencrypto/cryptodev.h>
#include <crypto/armv8/armv8_crypto.h>
#include <arm_neon.h>
/*
 * Encrypt a single 16-byte block.  "rounds" is the number of AESE+AESMC
 * rounds to run before the final round; the callers pass (AES rounds - 1),
 * so keysched[0 .. rounds+1] round keys are consumed.  Must be called with
 * the FPU/NEON context held.
 */
static uint8x16_t
armv8_aes_enc(int rounds, const uint8x16_t *keysched, const uint8x16_t from)
{
	uint8x16_t tmp;
	int i;

	tmp = from;
	/* Two rounds per iteration; rounds-1 is odd so one round remains. */
	for (i = 0; i < rounds - 1; i += 2) {
		tmp = vaeseq_u8(tmp, keysched[i]);
		tmp = vaesmcq_u8(tmp);
		tmp = vaeseq_u8(tmp, keysched[i + 1]);
		tmp = vaesmcq_u8(tmp);
	}

	tmp = vaeseq_u8(tmp, keysched[rounds - 1]);
	tmp = vaesmcq_u8(tmp);
	/* Final round: AddRoundKey without MixColumns. */
	tmp = vaeseq_u8(tmp, keysched[rounds]);
	tmp = veorq_u8(tmp, keysched[rounds + 1]);

	return (tmp);
}
/*
 * Decrypt a single 16-byte block; mirror of armv8_aes_enc() using the
 * inverse-cipher AESD/AESIMC instructions and the decrypt key schedule.
 * Must be called with the FPU/NEON context held.
 */
static uint8x16_t
armv8_aes_dec(int rounds, const uint8x16_t *keysched, const uint8x16_t from)
{
	uint8x16_t tmp;
	int i;

	tmp = from;
	for (i = 0; i < rounds - 1; i += 2) {
		tmp = vaesdq_u8(tmp, keysched[i]);
		tmp = vaesimcq_u8(tmp);
		tmp = vaesdq_u8(tmp, keysched[i+1]);
		tmp = vaesimcq_u8(tmp);
	}

	tmp = vaesdq_u8(tmp, keysched[rounds - 1]);
	tmp = vaesimcq_u8(tmp);
	/* Final round: AddRoundKey without InvMixColumns. */
	tmp = vaesdq_u8(tmp, keysched[rounds]);
	tmp = veorq_u8(tmp, keysched[rounds + 1]);

	return (tmp);
}
/*
 * AES-CBC encrypt len bytes (a whole number of blocks) from "from" into
 * "to".  Each plaintext block is XORed with the previous ciphertext block
 * (the IV for the first) before encryption.  Must be called with the
 * FPU/NEON context held.
 */
void
armv8_aes_encrypt_cbc(int rounds, const void *key_schedule, size_t len,
    const uint8_t *from, uint8_t *to, const uint8_t iv[static AES_BLOCK_LEN])
{
	uint8x16_t chain, block;
	size_t nblocks, n;

	nblocks = len / AES_BLOCK_LEN;
	chain = vld1q_u8(iv);
	for (n = 0; n < nblocks; n++) {
		block = vld1q_u8(from + n * AES_BLOCK_LEN);
		chain = armv8_aes_enc(rounds - 1, key_schedule,
		    veorq_u8(block, chain));
		vst1q_u8(to + n * AES_BLOCK_LEN, chain);
	}
}
/*
 * AES-CBC decrypt len bytes (a whole number of blocks) in place in "buf".
 * Each decrypted block is XORed with the previous ciphertext block (the
 * IV for the first).  The ciphertext block is saved before it is
 * overwritten so the chain survives in-place operation.  Must be called
 * with the FPU/NEON context held.
 */
void
armv8_aes_decrypt_cbc(int rounds, const void *key_schedule, size_t len,
    uint8_t *buf, const uint8_t iv[static AES_BLOCK_LEN])
{
	uint8x16_t chain, ct, pt;
	size_t nblocks;

	chain = vld1q_u8(iv);
	for (nblocks = len / AES_BLOCK_LEN; nblocks > 0; nblocks--) {
		ct = vld1q_u8(buf);
		pt = armv8_aes_dec(rounds - 1, key_schedule, ct);
		vst1q_u8(buf, veorq_u8(pt, chain));
		chain = ct;
		buf += AES_BLOCK_LEN;
	}
}

View File

@ -42,6 +42,7 @@ SUBDIR= \
${_apm} \
${_arcmsr} \
${_arcnet} \
${_armv8crypto} \
${_asmc} \
ata \
ath \
@ -539,6 +540,7 @@ _cxgb= cxgb
.endif
.if ${MACHINE_CPUARCH} == "aarch64"
_armv8crypto= armv8crypto
_em= em
_igb= igb
.endif

View File

@ -0,0 +1,20 @@
# $FreeBSD$

# Kernel module build for the ARMv8 AES crypto driver.  armv8_crypto_wrap.c
# uses NEON intrinsics, so it is compiled with a custom rule that enables
# the crypto target feature and allows SIMD register use.
.PATH: ${.CURDIR}/../../crypto/armv8

KMOD=	armv8crypto
SRCS=	armv8_crypto.c
SRCS+=	device_if.h bus_if.h opt_bus.h cryptodev_if.h
OBJS+=	armv8_crypto_wrap.o

# Remove -nostdinc so we can get the intrinsics.
# NOTE(review): GCC spells this target "-march=armv8-a+crypto"; verify
# "armv8a+crypto" is accepted by all supported compilers.
armv8_crypto_wrap.o: armv8_crypto_wrap.c
	${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} \
	    ${WERROR} ${PROF} \
	    -march=armv8a+crypto ${.IMPSRC}
	${CTFCONVERT_CMD}

armv8_crypto_wrap.o: armv8_crypto.h

.include <bsd.kmod.mk>