Improve support for stream ciphers in the software encryption interface.
Add a 'native_blocksize' member to 'struct enc_xform' that ciphers can
set if they support a partial final block.  This is particularly useful
for stream ciphers, but can also apply to other ciphers.  cryptosoft
now passes only whole native blocks to the encrypt and decrypt hooks.
For the final partial block, 'struct enc_xform' gains new
encrypt_last/decrypt_last hooks which accept the length of the final
block.  The multi_block methods are retired.

Mark AES-ICM (AES-CTR) as a stream cipher.  This has some interesting
effects on IPsec: FreeBSD can now properly receive all packets sent by
Linux when using AES-CTR, but FreeBSD can no longer interoperate with
OpenBSD and older versions of FreeBSD, which assume AES-CTR packets
have a payload padded to a 16-byte boundary.  Kornel has offered to
work on a patch adding a compatibility sysctl that enforces additional
padding for AES-CTR in esp_output, to permit interoperability with
OpenBSD and older versions of FreeBSD.

AES-XTS continues to use a block size of a single AES block length.
It could be adjusted to support partial final blocks by implementing
ciphertext stealing via the encrypt_last/decrypt_last hooks, but I have
not done so.

Reviewed by:	cem (earlier version)
Tested by:	Kornel Dulęba <mindal@semihalf.com> (AES-CTR with IPsec)
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D24906
commit 723d87648e
parent 33af263230
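For orientation, the pattern a stream cipher follows under the new interface looks roughly like the sketch below. The "mystream" cipher, its block length, and its placeholder keystream are invented for illustration; only the struct enc_xform fields (blocksize, native_blocksize, encrypt/decrypt, encrypt_last/decrypt_last) come from this change. The real definitions for chacha20 and AES-ICM are in the diff that follows.

#define	MYSTREAM_BLOCK_LEN	64		/* assumed keystream granularity */

/* Encrypt/decrypt the trailing partial block: 1 <= len <= native block. */
static void
mystream_crypt_last(void *ctx, const uint8_t *in, uint8_t *out, size_t len)
{
	uint8_t keystream[MYSTREAM_BLOCK_LEN];
	size_t i;

	/* Placeholder keystream; a real cipher derives this from ctx. */
	memset(keystream, 0, sizeof(keystream));
	for (i = 0; i < len; i++)
		out[i] = in[i] ^ keystream[i];
	explicit_bzero(keystream, sizeof(keystream));
}

/* Encrypt/decrypt one full native block; cryptosoft never passes less. */
static void
mystream_crypt(void *ctx, const uint8_t *in, uint8_t *out)
{
	mystream_crypt_last(ctx, in, out, MYSTREAM_BLOCK_LEN);
}

struct enc_xform enc_xform_mystream = {
	.name = "mystream",
	.blocksize = 1,				/* payloads need no padding */
	.native_blocksize = MYSTREAM_BLOCK_LEN,
	.encrypt = mystream_crypt,
	.decrypt = mystream_crypt,
	.encrypt_last = mystream_crypt_last,
	.decrypt_last = mystream_crypt_last,
	/* .type, .ctxsize, .ivsize, .minkey, .maxkey, .setkey, .reinit elided */
};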
@@ -27,7 +27,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd May 3, 2020
+.Dd May 22, 2020
 .Dt CRYPTO 7
 .Os
 .Sh NAME
@@ -82,7 +82,7 @@ Note: You must provide an IV on every call.
 .It IV size :
 16
 .It Block size :
-1 (aesni), 16 (software)
+1
 .It Key size :
 16, 24 or 32
 .El
@@ -28,11 +28,11 @@ static void
 chacha20_xform_crypt(void *ctx, const uint8_t *in, uint8_t *out)
 {
 
-	chacha_encrypt_bytes(ctx, in, out, 1);
+	chacha_encrypt_bytes(ctx, in, out, CHACHA_BLOCKLEN);
 }
 
 static void
-chacha20_xform_crypt_multi(void *ctx, const uint8_t *in, uint8_t *out,
+chacha20_xform_crypt_last(void *ctx, const uint8_t *in, uint8_t *out,
     size_t len)
 {
 
@@ -44,6 +44,7 @@ struct enc_xform enc_xform_chacha20 = {
 	.name = "chacha20",
 	.ctxsize = sizeof(struct chacha_ctx),
 	.blocksize = 1,
+	.native_blocksize = CHACHA_BLOCKLEN,
 	.ivsize = CHACHA_NONCELEN + CHACHA_CTRLEN,
 	.minkey = CHACHA_MINKEYLEN,
 	.maxkey = 32,
@@ -51,6 +52,6 @@ struct enc_xform enc_xform_chacha20 = {
 	.decrypt = chacha20_xform_crypt,
 	.setkey = chacha20_xform_setkey,
 	.reinit = chacha20_xform_reinit,
-	.encrypt_multi = chacha20_xform_crypt_multi,
-	.decrypt_multi = chacha20_xform_crypt_multi,
+	.encrypt_last = chacha20_xform_crypt_last,
+	.decrypt_last = chacha20_xform_crypt_last,
 };
@@ -117,12 +117,16 @@ swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
 
 	sw = &ses->swcr_encdec;
 	exf = sw->sw_exf;
-	blks = exf->blocksize;
 	ivlen = exf->ivsize;
 
-	/* Check for non-padded data */
-	if ((crp->crp_payload_length % blks) != 0)
-		return EINVAL;
+	if (exf->native_blocksize == 0) {
+		/* Check for non-padded data */
+		if ((crp->crp_payload_length % exf->blocksize) != 0)
+			return (EINVAL);
+
+		blks = exf->blocksize;
+	} else
+		blks = exf->native_blocksize;
 
 	if (exf == &enc_xform_aes_icm &&
 	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
@@ -182,7 +186,7 @@ swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
 	i = crp->crp_payload_length;
 	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
 
-	while (i > 0) {
+	while (i >= blks) {
 		/*
 		 * If there's insufficient data at the end of
 		 * an iovec, we have to do some copying.
@@ -249,31 +253,18 @@ swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
 			break;
 		}
 
-		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
+		while (uio->uio_iov[ind].iov_len >= k + blks && i >= blks) {
 			uint8_t *idat;
-			size_t nb, rem;
 
-			nb = blks;
-			rem = MIN((size_t)i,
-			    uio->uio_iov[ind].iov_len - (size_t)k);
 			idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;
 
 			if (exf->reinit) {
-				if (encrypting && exf->encrypt_multi == NULL)
+				if (encrypting)
 					exf->encrypt(sw->sw_kschedule,
 					    idat, idat);
-				else if (encrypting) {
-					nb = rounddown(rem, blks);
-					exf->encrypt_multi(sw->sw_kschedule,
-					    idat, idat, nb);
-				} else if (exf->decrypt_multi == NULL)
+				else
 					exf->decrypt(sw->sw_kschedule,
 					    idat, idat);
-				else {
-					nb = rounddown(rem, blks);
-					exf->decrypt_multi(sw->sw_kschedule,
-					    idat, idat, nb);
-				}
 			} else if (encrypting) {
 				/* XOR with previous block/IV */
 				for (j = 0; j < blks; j++)
@@ -298,9 +289,9 @@ swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
 				ivp = nivp;
 			}
 
-			count += nb;
-			k += nb;
-			i -= nb;
+			count += blks;
+			k += blks;
+			i -= blks;
 		}
 
 		/*
@@ -319,6 +310,25 @@ swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
 		}
 	}
 
+	/* Handle trailing partial block for stream ciphers. */
+	if (i > 0) {
+		KASSERT(exf->native_blocksize != 0,
+		    ("%s: partial block of %d bytes for cipher %s",
+		    __func__, i, exf->name));
+		KASSERT(exf->reinit != NULL,
+		    ("%s: partial block cipher %s without reinit hook",
+		    __func__, exf->name));
+		KASSERT(i < blks, ("%s: partial block too big", __func__));
+
+		cuio_copydata(uio, count, i, blk);
+		if (encrypting) {
+			exf->encrypt_last(sw->sw_kschedule, blk, blk, i);
+		} else {
+			exf->decrypt_last(sw->sw_kschedule, blk, blk, i);
+		}
+		cuio_copyback(uio, count, i, blk);
+	}
+
 out:
 	if (iovalloc)
 		free(iov, M_CRYPTO_DATA);
@@ -512,6 +522,8 @@ swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
 
 	swe = &ses->swcr_encdec;
 	exf = swe->sw_exf;
+	KASSERT(axf->blocksize == exf->native_blocksize,
+	    ("%s: blocksize mismatch", __func__));
 
 	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
 		return (EINVAL);
@@ -665,6 +677,8 @@ swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
 
 	swe = &ses->swcr_encdec;
 	exf = swe->sw_exf;
+	KASSERT(axf->blocksize == exf->native_blocksize,
+	    ("%s: blocksize mismatch", __func__));
 
 	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
 		return (EINVAL);
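For reference, the new swcr_encdec() control flow for a cipher with native_blocksize set reduces to roughly the following. This is a simplified sketch only: it ignores the iovec walking, CBC/IV chaining, and error handling in the real function, and the function name and buffer/length parameters are stand-ins for the request payload.

static void
stream_encdec_sketch(const struct enc_xform *exf, void *kschedule,
    uint8_t *buf, size_t len, bool encrypting)
{
	size_t blks, off;

	blks = exf->native_blocksize;

	/* Whole native blocks go through the regular hooks. */
	for (off = 0; len - off >= blks; off += blks) {
		if (encrypting)
			exf->encrypt(kschedule, buf + off, buf + off);
		else
			exf->decrypt(kschedule, buf + off, buf + off);
	}

	/* A trailing partial block (1..blks-1 bytes) uses the _last hooks. */
	if (off < len) {
		if (encrypting)
			exf->encrypt_last(kschedule, buf + off, buf + off,
			    len - off);
		else
			exf->decrypt_last(kschedule, buf + off, buf + off,
			    len - off);
	}
}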
@@ -54,6 +54,7 @@ __FBSDID("$FreeBSD$");
 
 static	int aes_icm_setkey(void *, const uint8_t *, int);
 static	void aes_icm_crypt(void *, const uint8_t *, uint8_t *);
+static	void aes_icm_crypt_last(void *, const uint8_t *, uint8_t *, size_t);
 static	void aes_icm_reinit(void *, const uint8_t *);
 static	void aes_gcm_reinit(void *, const uint8_t *);
 static	void aes_ccm_reinit(void *, const uint8_t *);
@@ -63,7 +64,8 @@ struct enc_xform enc_xform_aes_icm = {
 	.type = CRYPTO_AES_ICM,
 	.name = "AES-ICM",
 	.ctxsize = sizeof(struct aes_icm_ctx),
-	.blocksize = AES_BLOCK_LEN,
+	.blocksize = 1,
+	.native_blocksize = AES_BLOCK_LEN,
 	.ivsize = AES_BLOCK_LEN,
 	.minkey = AES_MIN_KEY,
 	.maxkey = AES_MAX_KEY,
@@ -71,13 +73,16 @@ struct enc_xform enc_xform_aes_icm = {
 	.decrypt = aes_icm_crypt,
 	.setkey = aes_icm_setkey,
 	.reinit = aes_icm_reinit,
+	.encrypt_last = aes_icm_crypt_last,
+	.decrypt_last = aes_icm_crypt_last,
 };
 
 struct enc_xform enc_xform_aes_nist_gcm = {
 	.type = CRYPTO_AES_NIST_GCM_16,
 	.name = "AES-GCM",
 	.ctxsize = sizeof(struct aes_icm_ctx),
-	.blocksize = AES_ICM_BLOCK_LEN,
+	.blocksize = 1,
+	.native_blocksize = AES_BLOCK_LEN,
 	.ivsize = AES_GCM_IV_LEN,
 	.minkey = AES_MIN_KEY,
 	.maxkey = AES_MAX_KEY,
@@ -85,18 +90,24 @@ struct enc_xform enc_xform_aes_nist_gcm = {
 	.decrypt = aes_icm_crypt,
 	.setkey = aes_icm_setkey,
 	.reinit = aes_gcm_reinit,
+	.encrypt_last = aes_icm_crypt_last,
+	.decrypt_last = aes_icm_crypt_last,
 };
 
 struct enc_xform enc_xform_ccm = {
 	.type = CRYPTO_AES_CCM_16,
 	.name = "AES-CCM",
 	.ctxsize = sizeof(struct aes_icm_ctx),
-	.blocksize = AES_ICM_BLOCK_LEN, .ivsize = AES_CCM_IV_LEN,
+	.blocksize = 1,
+	.native_blocksize = AES_BLOCK_LEN,
+	.ivsize = AES_CCM_IV_LEN,
 	.minkey = AES_MIN_KEY, .maxkey = AES_MAX_KEY,
 	.encrypt = aes_icm_crypt,
 	.decrypt = aes_icm_crypt,
 	.setkey = aes_icm_setkey,
 	.reinit = aes_ccm_reinit,
+	.encrypt_last = aes_icm_crypt_last,
+	.decrypt_last = aes_icm_crypt_last,
 };
 
 /*
@@ -143,14 +154,10 @@ static void
 aes_icm_crypt(void *key, const uint8_t *in, uint8_t *out)
 {
 	struct aes_icm_ctx *ctx;
-	uint8_t keystream[AESICM_BLOCKSIZE];
 	int i;
 
 	ctx = key;
-	rijndaelEncrypt(ctx->ac_ek, ctx->ac_nr, ctx->ac_block, keystream);
-	for (i = 0; i < AESICM_BLOCKSIZE; i++)
-		out[i] = in[i] ^ keystream[i];
-	explicit_bzero(keystream, sizeof(keystream));
+	aes_icm_crypt_last(key, in, out, AESICM_BLOCKSIZE);
 
 	/* increment counter */
 	for (i = AESICM_BLOCKSIZE - 1;
@@ -159,6 +166,20 @@ aes_icm_crypt(void *key, const uint8_t *in, uint8_t *out)
 			break;
 }
 
+static void
+aes_icm_crypt_last(void *key, const uint8_t *in, uint8_t *out, size_t len)
+{
+	struct aes_icm_ctx *ctx;
+	uint8_t keystream[AESICM_BLOCKSIZE];
+	int i;
+
+	ctx = key;
+	rijndaelEncrypt(ctx->ac_ek, ctx->ac_nr, ctx->ac_block, keystream);
+	for (i = 0; i < len; i++)
+		out[i] = in[i] ^ keystream[i];
+	explicit_bzero(keystream, sizeof(keystream));
+}
+
 static	int
 aes_icm_setkey(void *sched, const uint8_t *key, int len)
 {
@@ -51,20 +51,25 @@ struct enc_xform {
 	char *name;
 	size_t ctxsize;
 	u_int16_t blocksize;	/* Required input block size -- 1 for stream ciphers. */
+	uint16_t native_blocksize;	/* Used for stream ciphers. */
 	u_int16_t ivsize;
 	u_int16_t minkey, maxkey;
+
+	/*
+	 * Encrypt/decrypt a single block.  For stream ciphers this
+	 * encrypts/decrypts a single "native" block.
+	 */
 	void (*encrypt) (void *, const uint8_t *, uint8_t *);
 	void (*decrypt) (void *, const uint8_t *, uint8_t *);
 	int (*setkey) (void *, const uint8_t *, int len);
 	void (*reinit) (void *, const u_int8_t *);
+
 	/*
-	 * Encrypt/decrypt 1+ blocks of input -- total size is 'len' bytes.
-	 * Len is guaranteed to be a multiple of the defined 'blocksize'.
-	 * Optional interface -- most useful for stream ciphers with a small
-	 * blocksize (1).
+	 * For stream ciphers, encrypt/decrypt the final partial block
+	 * of 'len' bytes.
 	 */
-	void (*encrypt_multi) (void *, const uint8_t *, uint8_t *, size_t len);
-	void (*decrypt_multi) (void *, const uint8_t *, uint8_t *, size_t len);
+	void (*encrypt_last) (void *, const uint8_t *, uint8_t *, size_t len);
+	void (*decrypt_last) (void *, const uint8_t *, uint8_t *, size_t len);
 };