Put the aesni_cipher_setup() and aesni_cipher_process() functions into
the file which is compiled with SSE disabled.  The functions set up
the FPU context for the kernel; compiler optimizations that could lead
to the use of XMM registers before fpu_kern_enter(9) is called, or
after fpu_kern_leave(9) returns, would panic the machine.

Discussed with:	jmg
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Konstantin Belousov 2014-06-24 06:55:49 +00:00
parent 4af58157b6
commit 27007c6576
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=267815
3 changed files with 95 additions and 94 deletions
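
For reference, below is a minimal sketch of the fpu_kern_enter(9)/
fpu_kern_leave(9) bracketing pattern the moved functions rely on.  The
consumer do_fpu_work() and its context argument are hypothetical, not
part of this commit; the point is that the bracketing function itself
must live in a compilation unit built with SSE disabled, so the
optimizer cannot schedule XMM register accesses outside the bracket.

/*
 * Minimal sketch, assuming an amd64 kernel of this commit's vintage;
 * i386 spells the header <machine/npx.h> instead.  do_fpu_work() is
 * a hypothetical consumer, not driver code.
 */
#include <sys/param.h>
#include <sys/proc.h>
#include <machine/fpu.h>

static int
do_fpu_work(struct fpu_kern_ctx *ctx)
{
        struct thread *td;
        int error;

        td = curthread;
        /* Enter a kernel FPU section; this can fail, so check the result. */
        error = fpu_kern_enter(td, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR);
        if (error != 0)
                return (error);
        /*
         * Only here, inside the bracket, is it safe to execute SSE/AES-NI
         * instructions -- and only via a helper compiled in a separate,
         * SSE-enabled file, so this function itself never touches XMM.
         */
        fpu_kern_leave(td, ctx);
        return (0);
}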

--- a/sys/crypto/aesni/aesni.c
+++ b/sys/crypto/aesni/aesni.c

@@ -53,6 +53,10 @@ static int aesni_newsession(device_t, uint32_t *sidp, struct cryptoini *cri);
 static int aesni_freesession(device_t, uint64_t tid);
 static void aesni_freesession_locked(struct aesni_softc *sc,
     struct aesni_session *ses);
+static int aesni_cipher_setup(struct aesni_session *ses,
+    struct cryptoini *encini);
+static int aesni_cipher_process(struct aesni_session *ses,
+    struct cryptodesc *enccrd, struct cryptop *crp);
 
 MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");
@@ -354,3 +358,91 @@ static devclass_t aesni_devclass;
 DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0);
 MODULE_VERSION(aesni, 1);
 MODULE_DEPEND(aesni, crypto, 1, 1, 1);
+
+static int
+aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
+{
+        struct thread *td;
+        int error;
+
+        td = curthread;
+        error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL |
+            FPU_KERN_KTHR);
+        if (error != 0)
+                return (error);
+        error = aesni_cipher_setup_common(ses, encini->cri_key,
+            encini->cri_klen);
+        fpu_kern_leave(td, ses->fpu_ctx);
+        return (error);
+}
+
+static int
+aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
+    struct cryptop *crp)
+{
+        struct thread *td;
+        uint8_t *buf;
+        int error, allocated;
+
+        buf = aesni_cipher_alloc(enccrd, crp, &allocated);
+        if (buf == NULL)
+                return (ENOMEM);
+
+        td = curthread;
+        error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL |
+            FPU_KERN_KTHR);
+        if (error != 0)
+                goto out1;
+
+        if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
+                error = aesni_cipher_setup_common(ses, enccrd->crd_key,
+                    enccrd->crd_klen);
+                if (error != 0)
+                        goto out;
+        }
+
+        if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
+                if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
+                        bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
+                if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
+                        crypto_copyback(crp->crp_flags, crp->crp_buf,
+                            enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
+                if (ses->algo == CRYPTO_AES_CBC) {
+                        aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
+                            enccrd->crd_len, buf, buf, ses->iv);
+                } else /* if (ses->algo == CRYPTO_AES_XTS) */ {
+                        aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
+                            ses->xts_schedule, enccrd->crd_len, buf, buf,
+                            ses->iv);
+                }
+        } else {
+                if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
+                        bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
+                else
+                        crypto_copydata(crp->crp_flags, crp->crp_buf,
+                            enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
+                if (ses->algo == CRYPTO_AES_CBC) {
+                        aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
+                            enccrd->crd_len, buf, ses->iv);
+                } else /* if (ses->algo == CRYPTO_AES_XTS) */ {
+                        aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
+                            ses->xts_schedule, enccrd->crd_len, buf, buf,
+                            ses->iv);
+                }
+        }
+
+        if (allocated)
+                crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
+                    enccrd->crd_len, buf);
+        if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
+                crypto_copydata(crp->crp_flags, crp->crp_buf,
+                    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
+                    AES_BLOCK_LEN, ses->iv);
+
+out:
+        fpu_kern_leave(td, ses->fpu_ctx);
+out1:
+        if (allocated) {
+                bzero(buf, enccrd->crd_len);
+                free(buf, M_AESNI);
+        }
+        return (error);
+}

--- a/sys/crypto/aesni/aesni.h
+++ b/sys/crypto/aesni/aesni.h

@@ -96,11 +96,8 @@ void aesni_decrypt_xts(int rounds, const void *data_schedule /*__aligned(16)*/,
     const void *tweak_schedule /*__aligned(16)*/, size_t len,
     const uint8_t *from, uint8_t *to, const uint8_t iv[AES_BLOCK_LEN]);
 
-int aesni_cipher_setup(struct aesni_session *ses,
-    struct cryptoini *encini);
-int aesni_cipher_process(struct aesni_session *ses,
-    struct cryptodesc *enccrd, struct cryptop *crp);
+int aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
+    int keylen);
 uint8_t *aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
     int *allocated);

--- a/sys/crypto/aesni/aesni_wrap.c
+++ b/sys/crypto/aesni/aesni_wrap.c

@@ -329,7 +329,7 @@ aesni_decrypt_xts(int rounds, const void *data_schedule,
             iv, 0);
 }
 
-static int
+int
 aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
     int keylen)
 {
@@ -377,91 +377,3 @@ aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
 
         return (0);
 }
-
-int
-aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
-{
-        struct thread *td;
-        int error;
-
-        td = curthread;
-        error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL |
-            FPU_KERN_KTHR);
-        if (error != 0)
-                return (error);
-        error = aesni_cipher_setup_common(ses, encini->cri_key,
-            encini->cri_klen);
-        fpu_kern_leave(td, ses->fpu_ctx);
-        return (error);
-}
-
-int
-aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
-    struct cryptop *crp)
-{
-        struct thread *td;
-        uint8_t *buf;
-        int error, allocated;
-
-        buf = aesni_cipher_alloc(enccrd, crp, &allocated);
-        if (buf == NULL)
-                return (ENOMEM);
-
-        td = curthread;
-        error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL |
-            FPU_KERN_KTHR);
-        if (error != 0)
-                goto out1;
-
-        if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
-                error = aesni_cipher_setup_common(ses, enccrd->crd_key,
-                    enccrd->crd_klen);
-                if (error != 0)
-                        goto out;
-        }
-
-        if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
-                if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
-                        bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
-                if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
-                        crypto_copyback(crp->crp_flags, crp->crp_buf,
-                            enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
-                if (ses->algo == CRYPTO_AES_CBC) {
-                        aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
-                            enccrd->crd_len, buf, buf, ses->iv);
-                } else /* if (ses->algo == CRYPTO_AES_XTS) */ {
-                        aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
-                            ses->xts_schedule, enccrd->crd_len, buf, buf,
-                            ses->iv);
-                }
-        } else {
-                if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
-                        bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
-                else
-                        crypto_copydata(crp->crp_flags, crp->crp_buf,
-                            enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
-                if (ses->algo == CRYPTO_AES_CBC) {
-                        aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
-                            enccrd->crd_len, buf, ses->iv);
-                } else /* if (ses->algo == CRYPTO_AES_XTS) */ {
-                        aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
-                            ses->xts_schedule, enccrd->crd_len, buf, buf,
-                            ses->iv);
-                }
-        }
-
-        if (allocated)
-                crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
-                    enccrd->crd_len, buf);
-        if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
-                crypto_copydata(crp->crp_flags, crp->crp_buf,
-                    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
-                    AES_BLOCK_LEN, ses->iv);
-
-out:
-        fpu_kern_leave(td, ses->fpu_ctx);
-out1:
-        if (allocated) {
-                bzero(buf, enccrd->crd_len);
-                free(buf, M_AESNI);
-        }
-        return (error);
-}