- Pretend to accelerate various HMAC algorithms, so padlock(4) can be used
  with fast_ipsec(4) and geli(8) authentication (coming soon).
  If a consumer requests only an HMAC algorithm (without encryption),
  return EINVAL. (A sketch of the HMAC precomputation this relies on
  follows below.)
- Add support for the CRD_F_KEY_EXPLICIT flag, for both encryption and
  authentication.
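
For reference, here is a minimal userland sketch of the HMAC ipad/opad
precomputation that padlock_setup_mackey() and padlock_authcompute()
build on: the inner and outer hash contexts are initialized once per
key and merely copied for each request. The OpenSSL SHA1_* calls and
all names below are illustrative stand-ins, not part of this commit,
and the sketch assumes a key no longer than the hash block size.

#include <openssl/sha.h>
#include <stddef.h>

#define	HMAC_IPAD_VAL	0x36
#define	HMAC_OPAD_VAL	0x5C
#define	SHA1_BLOCK_LEN	64

static SHA_CTX ictx, octx;	/* per-key templates, like ses_ictx/ses_octx */

/* Precompute the inner and outer contexts from the key. */
static void
hmac_sha1_setup(const unsigned char *key, size_t klen)
{
	unsigned char pad[SHA1_BLOCK_LEN];
	size_t i;

	for (i = 0; i < sizeof(pad); i++)
		pad[i] = (i < klen ? key[i] : 0) ^ HMAC_IPAD_VAL;
	SHA1_Init(&ictx);
	SHA1_Update(&ictx, pad, sizeof(pad));

	for (i = 0; i < sizeof(pad); i++)
		pad[i] = (i < klen ? key[i] : 0) ^ HMAC_OPAD_VAL;
	SHA1_Init(&octx);
	SHA1_Update(&octx, pad, sizeof(pad));
}

/* Per-request work: copy the templates instead of rehashing the key. */
static void
hmac_sha1(const unsigned char *buf, size_t len,
    unsigned char digest[SHA_DIGEST_LENGTH])
{
	SHA_CTX ctx;

	ctx = ictx;			/* inner hash over the data */
	SHA1_Update(&ctx, buf, len);
	SHA1_Final(digest, &ctx);

	ctx = octx;			/* outer hash over the inner digest */
	SHA1_Update(&ctx, digest, SHA_DIGEST_LENGTH);
	SHA1_Final(digest, &ctx);
}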
Pawel Jakub Dawidek 2006-06-05 16:22:04 +00:00
parent 7365463843
commit 64e18040cc
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=159279


@@ -55,14 +55,14 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/libkern.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#if defined(__i386__) && !defined(PC98)
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#endif
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h> /* for hmac_ipad_buffer and hmac_opad_buffer */
#include <opencrypto/xform.h>
#include <crypto/rijndael/rijndael.h>
@@ -113,6 +113,10 @@ struct padlock_session {
uint32_t ses_ekey[4 * (RIJNDAEL_MAXNR + 1) + 4] __aligned(16); /* 128 bit aligned */
uint32_t ses_dkey[4 * (RIJNDAEL_MAXNR + 1) + 4] __aligned(16); /* 128 bit aligned */
uint8_t ses_iv[16] __aligned(16); /* 128 bit aligned */
struct auth_hash *ses_axf;
uint8_t *ses_ictx;
uint8_t *ses_octx;
int ses_mlen;
int ses_used;
uint32_t ses_id;
TAILQ_ENTRY(padlock_session) ses_next;
@@ -194,6 +198,18 @@ padlock_init(void)
mtx_init(&sc->sc_sessions_mtx, "padlock_mtx", NULL, MTX_DEF);
crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0, padlock_newsession,
padlock_freesession, padlock_process, NULL);
crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0, padlock_newsession,
padlock_freesession, padlock_process, NULL);
crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0, padlock_newsession,
padlock_freesession, padlock_process, NULL);
crypto_register(sc->sc_cid, CRYPTO_RIPEMD160_HMAC, 0, 0,
padlock_newsession, padlock_freesession, padlock_process, NULL);
crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0,
padlock_newsession, padlock_freesession, padlock_process, NULL);
crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0,
padlock_newsession, padlock_freesession, padlock_process, NULL);
crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0,
padlock_newsession, padlock_freesession, padlock_process, NULL);
return (0);
}
@@ -229,20 +245,131 @@ padlock_destroy(void)
return (0);
}
static void
padlock_setup_enckey(struct padlock_session *ses, caddr_t key, int klen)
{
union padlock_cw *cw;
int i;
cw = &ses->ses_cw;
if (cw->cw_key_generation == PADLOCK_KEY_GENERATION_SW) {
/* Build expanded keys for both directions */
rijndaelKeySetupEnc(ses->ses_ekey, key, klen);
rijndaelKeySetupDec(ses->ses_dkey, key, klen);
for (i = 0; i < 4 * (RIJNDAEL_MAXNR + 1); i++) {
ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]);
ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]);
}
} else {
bcopy(key, ses->ses_ekey, klen);
bcopy(key, ses->ses_dkey, klen);
}
}
static void
padlock_setup_mackey(struct padlock_session *ses, caddr_t key, int klen)
{
struct auth_hash *axf;
int i;
klen /= 8;
axf = ses->ses_axf;
for (i = 0; i < klen; i++)
key[i] ^= HMAC_IPAD_VAL;
axf->Init(ses->ses_ictx);
axf->Update(ses->ses_ictx, key, klen);
axf->Update(ses->ses_ictx, hmac_ipad_buffer, axf->blocksize - klen);
for (i = 0; i < klen; i++)
key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
axf->Init(ses->ses_octx);
axf->Update(ses->ses_octx, key, klen);
axf->Update(ses->ses_octx, hmac_opad_buffer, axf->blocksize - klen);
for (i = 0; i < klen; i++)
key[i] ^= HMAC_OPAD_VAL;
}
/*
 * Compute keyed-hash authenticator.
 */
static int
padlock_authcompute(struct padlock_session *ses, struct cryptodesc *crd,
caddr_t buf, int flags)
{
u_char hash[HASH_MAX_LEN];
struct auth_hash *axf;
union authctx ctx;
int error;
axf = ses->ses_axf;
bcopy(ses->ses_ictx, &ctx, axf->ctxsize);
error = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
(int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
if (error != 0)
return (error);
axf->Final(hash, &ctx);
bcopy(ses->ses_octx, &ctx, axf->ctxsize);
axf->Update(&ctx, hash, axf->hashsize);
axf->Final(hash, &ctx);
/* Inject the authentication data */
crypto_copyback(flags, buf, crd->crd_inject,
ses->ses_mlen == 0 ? axf->hashsize : ses->ses_mlen, hash);
return (0);
}
static int
padlock_newsession(void *arg __unused, uint32_t *sidp, struct cryptoini *cri)
{
struct padlock_softc *sc = padlock_sc;
struct padlock_session *ses = NULL;
struct cryptoini *encini, *macini;
union padlock_cw *cw;
if (sc == NULL || sidp == NULL || cri == NULL)
return (EINVAL);
encini = macini = NULL;
for (; cri != NULL; cri = cri->cri_next) {
switch (cri->cri_alg) {
case CRYPTO_NULL_HMAC:
case CRYPTO_MD5_HMAC:
case CRYPTO_SHA1_HMAC:
case CRYPTO_RIPEMD160_HMAC:
case CRYPTO_SHA2_256_HMAC:
case CRYPTO_SHA2_384_HMAC:
case CRYPTO_SHA2_512_HMAC:
if (macini != NULL)
return (EINVAL);
macini = cri;
break;
case CRYPTO_AES_CBC:
if (encini != NULL)
return (EINVAL);
encini = cri;
break;
default:
return (EINVAL);
}
}
/*
 * We only support HMAC algorithms to be able to work with
 * fast_ipsec(4), so if we are asked only for authentication without
 * encryption, don't pretend we can accelerate it.
 */
if (encini == NULL)
return (EINVAL);
if (encini->cri_klen != 128 && encini->cri_klen != 192 &&
encini->cri_klen != 256) {
return (EINVAL);
}
@@ -279,7 +406,7 @@ padlock_newsession(void *arg __unused, uint32_t *sidp, struct cryptoini *cri)
cw->cw_algorithm_type = PADLOCK_ALGORITHM_TYPE_AES;
cw->cw_key_generation = PADLOCK_KEY_GENERATION_SW;
cw->cw_intermediate = 0;
switch (encini->cri_klen) {
case 128:
cw->cw_round_count = PADLOCK_ROUND_COUNT_AES128;
cw->cw_key_size = PADLOCK_KEY_SIZE_128;
@@ -297,20 +424,54 @@ padlock_newsession(void *arg __unused, uint32_t *sidp, struct cryptoini *cri)
cw->cw_key_size = PADLOCK_KEY_SIZE_256;
break;
}
if (encini->cri_key != NULL)
padlock_setup_enckey(ses, encini->cri_key, encini->cri_klen);
arc4rand(ses->ses_iv, sizeof(ses->ses_iv), 0);
if (macini != NULL) {
ses->ses_mlen = macini->cri_mlen;
/* Find software structure which describes HMAC algorithm. */
switch (macini->cri_alg) {
case CRYPTO_NULL_HMAC:
ses->ses_axf = &auth_hash_null;
break;
case CRYPTO_MD5_HMAC:
ses->ses_axf = &auth_hash_hmac_md5;
break;
case CRYPTO_SHA1_HMAC:
ses->ses_axf = &auth_hash_hmac_sha1;
break;
case CRYPTO_RIPEMD160_HMAC:
ses->ses_axf = &auth_hash_hmac_ripemd_160;
break;
case CRYPTO_SHA2_256_HMAC:
ses->ses_axf = &auth_hash_hmac_sha2_256;
break;
case CRYPTO_SHA2_384_HMAC:
ses->ses_axf = &auth_hash_hmac_sha2_384;
break;
case CRYPTO_SHA2_512_HMAC:
ses->ses_axf = &auth_hash_hmac_sha2_512;
break;
}
/* Allocate memory for HMAC inner and outer contexts. */
ses->ses_ictx = malloc(ses->ses_axf->ctxsize, M_CRYPTO_DATA,
M_NOWAIT);
ses->ses_octx = malloc(ses->ses_axf->ctxsize, M_CRYPTO_DATA,
M_NOWAIT);
if (ses->ses_ictx == NULL || ses->ses_octx == NULL) {
padlock_freesession(NULL, ses->ses_id);
return (ENOMEM);
}
/* Setup key if given. */
if (macini->cri_key != NULL) {
padlock_setup_mackey(ses, macini->cri_key,
macini->cri_klen);
}
}
*sidp = ses->ses_id;
@@ -336,6 +497,14 @@ padlock_freesession(void *arg __unused, uint64_t tid)
return (EINVAL);
}
TAILQ_REMOVE(&sc->sc_sessions, ses, ses_next);
if (ses->ses_ictx != NULL) {
bzero(ses->ses_ictx, ses->ses_axf->ctxsize);
free(ses->ses_ictx, M_CRYPTO_DATA);
}
if (ses->ses_octx != NULL) {
bzero(ses->ses_octx, ses->ses_axf->ctxsize);
free(ses->ses_octx, M_CRYPTO_DATA);
}
bzero(ses, sizeof(*ses));
ses->ses_used = 0;
TAILQ_INSERT_TAIL(&sc->sc_sessions, ses, ses_next);
@@ -349,25 +518,46 @@ padlock_process(void *arg __unused, struct cryptop *crp, int hint __unused)
struct padlock_softc *sc = padlock_sc;
struct padlock_session *ses;
union padlock_cw *cw;
struct cryptodesc *crd, *enccrd, *maccrd;
uint32_t *key;
u_char *buf, *abuf;
int error = 0;
buf = NULL;
if (crp == NULL || crp->crp_callback == NULL || crp->crp_desc == NULL) {
error = EINVAL;
goto out;
}
enccrd = maccrd = NULL;
for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
switch (crd->crd_alg) {
case CRYPTO_NULL_HMAC:
case CRYPTO_MD5_HMAC:
case CRYPTO_SHA1_HMAC:
case CRYPTO_RIPEMD160_HMAC:
case CRYPTO_SHA2_256_HMAC:
case CRYPTO_SHA2_384_HMAC:
case CRYPTO_SHA2_512_HMAC:
if (maccrd != NULL) {
error = EINVAL;
goto out;
}
maccrd = crd;
break;
case CRYPTO_AES_CBC:
if (enccrd != NULL) {
error = EINVAL;
goto out;
}
enccrd = crd;
break;
default:
error = EINVAL;
goto out;
}
}
if (enccrd == NULL || (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
error = EINVAL;
goto out;
}
@@ -378,103 +568,88 @@ padlock_process(void *arg __unused, struct cryptop *crp, int hint __unused)
}
mtx_unlock(&sc->sc_sessions_mtx);
if (ses == NULL) {
error = EINVAL;
goto out;
}
buf = malloc(enccrd->crd_len + 16, M_DEVBUF, M_NOWAIT);
if (buf == NULL) {
error = ENOMEM;
goto out;
}
/* Buffer has to be 16 bytes aligned. */
abuf = buf + 16 - ((uintptr_t)buf % 16);
if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0)
padlock_setup_enckey(ses, enccrd->crd_key, enccrd->crd_klen);
if (maccrd != NULL && (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0)
padlock_setup_mackey(ses, maccrd->crd_key, maccrd->crd_klen);
cw = &ses->ses_cw;
cw->cw_filler0 = 0;
cw->cw_filler1 = 0;
cw->cw_filler2 = 0;
cw->cw_filler3 = 0;
if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
cw->cw_direction = PADLOCK_DIRECTION_ENCRYPT;
key = ses->ses_ekey;
if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
bcopy(enccrd->crd_iv, ses->ses_iv, 16);
if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
crypto_copyback(crp->crp_flags, crp->crp_buf,
enccrd->crd_inject, AES_BLOCK_LEN, ses->ses_iv);
}
} else {
cw->cw_direction = PADLOCK_DIRECTION_DECRYPT;
key = ses->ses_dkey;
if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
bcopy(enccrd->crd_iv, ses->ses_iv, AES_BLOCK_LEN);
else {
crypto_copydata(crp->crp_flags, crp->crp_buf,
enccrd->crd_inject, AES_BLOCK_LEN, ses->ses_iv);
}
}
/* Perform data authentication if requested before encryption. */
if (maccrd != NULL && maccrd->crd_next == enccrd) {
error = padlock_authcompute(ses, maccrd, crp->crp_buf,
crp->crp_flags);
if (error != 0)
goto out;
}
crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
enccrd->crd_len, abuf);
padlock_cbc(abuf, abuf, enccrd->crd_len / 16, key, cw, ses->ses_iv);
crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
enccrd->crd_len, abuf);
/* Perform data authentication if requested after encryption. */
if (maccrd != NULL && enccrd->crd_next == maccrd) {
error = padlock_authcompute(ses, maccrd, crp->crp_buf,
crp->crp_flags);
if (error != 0)
goto out;
}
/* copy out last block for use as next session IV */
if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
crypto_copydata(crp->crp_flags, crp->crp_buf,
enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
AES_BLOCK_LEN, ses->ses_iv);
}
out:
if (buf != NULL) {
bzero(buf, enccrd->crd_len + 16);
free(buf, M_DEVBUF);
}
crp->crp_etype = error;
crypto_done(crp);
return (error);
}
static int