Address an issue where consumers, like IPsec, can reuse the same
session in multiple threads without locking. There was a single FPU
context shared per session; if multiple threads were using the session
and both migrated away, they could corrupt each other's FPU context.

This patch adds a per cpu context and a lock to protect it...

It also tries to better address unloading of the aesni module...
The pause will be removed once the OpenCrypto Framework provides a
better method for draining callers into _newsession...

I first discovered the fpu context sharing issue w/ a flood ping over
an IPsec tunnel between two bhyve machines...  The patch in D3015
was used to verify that this fix does fix the issue...

Reviewed by:	gnn, kib (both earlier versions)
Differential Revision:        https://reviews.freebsd.org/D3016
This commit is contained in:
John-Mark Gurney 2015-07-08 19:15:29 +00:00
parent 4cbf30133e
commit 9d38fd076e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=285289
2 changed files with 102 additions and 26 deletions

View File

@ -45,17 +45,35 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/uio.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <crypto/aesni/aesni.h>
#include <cryptodev_if.h>
#include <opencrypto/gmac.h>
static struct mtx_padalign *ctx_mtx;
static struct fpu_kern_ctx **ctx_fpu;
/*
 * Per-device driver state (softc) for the AES-NI crypto driver.
 * NOTE(review): indentation of this span was lost in extraction.
 */
struct aesni_softc {
int dieing; /* set during detach so aesni_newsession() bails out */
int32_t cid; /* crypto driver id from crypto_get_driverid() */
uint32_t sid; /* next session id to hand out */
TAILQ_HEAD(aesni_sessions_head, aesni_session) sessions; /* free entries kept at the head (see newsession) */
struct rwlock lock; /* protects sessions, sid and dieing */
};
/*
 * Grab the per-CPU FPU kernel context, serialized by its per-CPU mutex so
 * threads that migrate between CPUs cannot corrupt each other's FPU state.
 * (i) receives the cpuid used as the index; (ctx) receives the context.
 * Even if the thread migrates after PCPU_GET(), the mutex still protects
 * the context it ends up locking — presumably intentional; verify.
 * NOTE(review): "AQUIRE" is a misspelling of "ACQUIRE"; renaming would
 * require touching every call site, so it is kept as-is here.
 */
#define AQUIRE_CTX(i, ctx) \
do { \
(i) = PCPU_GET(cpuid); \
mtx_lock(&ctx_mtx[(i)]); \
(ctx) = ctx_fpu[(i)]; \
} while (0)
/* Release the context obtained via AQUIRE_CTX and poison (i)/(ctx). */
#define RELEASE_CTX(i, ctx) \
do { \
mtx_unlock(&ctx_mtx[(i)]); \
(i) = -1; \
(ctx) = NULL; \
} while (0)
static int aesni_newsession(device_t, uint32_t *sidp, struct cryptoini *cri);
static int aesni_freesession(device_t, uint64_t tid);
static void aesni_freesession_locked(struct aesni_softc *sc,
@ -95,14 +113,36 @@ aesni_probe(device_t dev)
return (0);
}
/*
 * Tear down the per-CPU FPU contexts and mutexes allocated in
 * aesni_attach(): destroy each CPU's mutex, free its FPU context, then
 * release and NULL out the two backing arrays.
 * NOTE(review): function name misspells "aesni" as "aensi"; the caller
 * in aesni_detach() uses the same spelling, so a rename must fix both.
 */
static void
aensi_cleanctx(void)
{
int i;
/* XXX - no way to return driverid */
CPU_FOREACH(i) {
if (ctx_fpu[i] != NULL) {
/* mtx_init/fpu_kern_alloc_ctx are paired per CPU in attach */
mtx_destroy(&ctx_mtx[i]);
fpu_kern_free_ctx(ctx_fpu[i]);
}
ctx_fpu[i] = NULL;
}
free(ctx_mtx, M_AESNI);
ctx_mtx = NULL;
free(ctx_fpu, M_AESNI);
ctx_fpu = NULL;
}
static int
aesni_attach(device_t dev)
{
struct aesni_softc *sc;
int i;
sc = device_get_softc(dev);
sc->dieing = 0;
TAILQ_INIT(&sc->sessions);
sc->sid = 1;
sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE |
CRYPTOCAP_F_SYNC);
if (sc->cid < 0) {
@ -110,6 +150,16 @@ aesni_attach(device_t dev)
return (ENOMEM);
}
ctx_mtx = malloc(sizeof *ctx_mtx * (mp_maxid + 1), M_AESNI,
M_WAITOK|M_ZERO);
ctx_fpu = malloc(sizeof *ctx_fpu * (mp_maxid + 1), M_AESNI,
M_WAITOK|M_ZERO);
CPU_FOREACH(i) {
ctx_fpu[i] = fpu_kern_alloc_ctx(0);
mtx_init(&ctx_mtx[i], "anifpumtx", NULL, MTX_DEF|MTX_NEW);
}
rw_init(&sc->lock, "aesni_lock");
crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
@ -128,6 +178,7 @@ aesni_detach(device_t dev)
struct aesni_session *ses;
sc = device_get_softc(dev);
rw_wlock(&sc->lock);
TAILQ_FOREACH(ses, &sc->sessions, next) {
if (ses->used) {
@ -137,14 +188,21 @@ aesni_detach(device_t dev)
return (EBUSY);
}
}
sc->dieing = 1;
while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) {
TAILQ_REMOVE(&sc->sessions, ses, next);
fpu_kern_free_ctx(ses->fpu_ctx);
free(ses, M_AESNI);
}
rw_wunlock(&sc->lock);
rw_destroy(&sc->lock);
crypto_unregister_all(sc->cid);
/* XXX - wait for anyone in _newsession to leave */
pause("aniwait", 1);
rw_destroy(&sc->lock);
aensi_cleanctx();
return (0);
}
@ -162,6 +220,9 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
}
sc = device_get_softc(dev);
if (sc->dieing)
return (EINVAL);
ses = NULL;
encini = NULL;
for (; cri != NULL; cri = cri->cri_next) {
@ -195,6 +256,10 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
}
rw_wlock(&sc->lock);
if (sc->dieing) {
rw_wunlock(&sc->lock);
return (EINVAL);
}
/*
* Free sessions goes first, so if first session is used, we need to
* allocate one.
@ -206,12 +271,6 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
rw_wunlock(&sc->lock);
return (ENOMEM);
}
ses->fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NOWAIT);
if (ses->fpu_ctx == NULL) {
free(ses, M_AESNI);
rw_wunlock(&sc->lock);
return (ENOMEM);
}
ses->id = sc->sid++;
} else {
TAILQ_REMOVE(&sc->sessions, ses, next);
@ -237,15 +296,14 @@ aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
/*
 * Recycle a session: unlink it, scrub its contents (including any key
 * material) via struct assignment from an empty compound literal,
 * restore its id, and reinsert it at the head of the list as free.
 * Caller must hold the softc write lock (asserted below).
 * NOTE(review): this span comes from a diff view whose +/- markers were
 * lost — the fpu_ctx save/restore lines appear to be the pre-patch
 * version and the struct-zeroing the post-patch one; they would not
 * coexist in either version of the file.  Confirm against the repo.
 */
static void
aesni_freesession_locked(struct aesni_softc *sc, struct aesni_session *ses)
{
struct fpu_kern_ctx *ctx;
uint32_t sid;
rw_assert(&sc->lock, RA_WLOCKED);
sid = ses->id;
TAILQ_REMOVE(&sc->sessions, ses, next);
ctx = ses->fpu_ctx;
*ses = (struct aesni_session){};
ses->id = sid; /* id survives the scrub so it can be reused */
ses->fpu_ctx = ctx;
TAILQ_INSERT_HEAD(&sc->sessions, ses, next);
}
@ -429,17 +487,27 @@ MODULE_DEPEND(aesni, crypto, 1, 1, 1);
/*
 * Expand the session key, wrapping the key schedule in an FPU-kernel
 * section.  Threads already running as FPU kernel threads
 * (is_fpu_kern_thread(0) != 0) skip the per-CPU context acquisition.
 * Returns 0 on success or the error from fpu_kern_enter().
 * NOTE(review): diff view with +/- markers lost — the td/ses->fpu_ctx
 * path is the pre-patch code and the kt/AQUIRE_CTX path its post-patch
 * replacement; both appear interleaved below and would not compile
 * together as shown.  Confirm the final form against the repository.
 */
static int
aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
{
struct thread *td;
struct fpu_kern_ctx *ctx;
int error;
int kt, ctxidx;
kt = is_fpu_kern_thread(0);
if (!kt) {
AQUIRE_CTX(ctxidx, ctx);
error = fpu_kern_enter(curthread, ctx,
FPU_KERN_NORMAL | FPU_KERN_KTHR);
if (error != 0)
goto out;
}
td = curthread;
error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL |
FPU_KERN_KTHR);
if (error != 0)
return (error);
error = aesni_cipher_setup_common(ses, encini->cri_key,
encini->cri_klen);
fpu_kern_leave(td, ses->fpu_ctx);
if (!kt) {
fpu_kern_leave(curthread, ctx);
out:
RELEASE_CTX(ctxidx, ctx);
}
return (error);
}
@ -450,12 +518,13 @@ static int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
struct cryptodesc *authcrd, struct cryptop *crp)
{
struct fpu_kern_ctx *ctx;
uint8_t iv[AES_BLOCK_LEN];
uint8_t tag[GMAC_DIGEST_LEN];
struct thread *td;
uint8_t *buf, *authbuf;
int error, allocated, authallocated;
int ivlen, encflag;
int kt, ctxidx;
encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT;
@ -478,11 +547,14 @@ aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
}
}
td = curthread;
error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL |
FPU_KERN_KTHR);
if (error != 0)
goto out1;
kt = is_fpu_kern_thread(0);
if (!kt) {
AQUIRE_CTX(ctxidx, ctx);
error = fpu_kern_enter(curthread, ctx,
FPU_KERN_NORMAL|FPU_KERN_KTHR);
if (error != 0)
goto out2;
}
if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
error = aesni_cipher_setup_common(ses, enccrd->crd_key,
@ -578,7 +650,12 @@ aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
}
out:
fpu_kern_leave(td, ses->fpu_ctx);
if (!kt) {
fpu_kern_leave(curthread, ctx);
out2:
RELEASE_CTX(ctxidx, ctx);
}
out1:
if (allocated) {
bzero(buf, enccrd->crd_len);

View File

@ -64,7 +64,6 @@ struct aesni_session {
int used;
uint32_t id;
TAILQ_ENTRY(aesni_session) next;
struct fpu_kern_ctx *fpu_ctx;
};
/*