Regen assembly files for arm.

Jung-uk Kim 2018-09-22 02:42:51 +00:00
parent 4b7c498f1f
commit 2c17169a65
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/openssl111/; revision=338877
15 changed files with 13420 additions and 3007 deletions

View File

@ -149,22 +149,34 @@ ${s}.S: ${s}.s
.PATH: ${LCRYPTO_SRC}/crypto \
${LCRYPTO_SRC}/crypto/aes/asm \
${LCRYPTO_SRC}/crypto/bn/asm \
${LCRYPTO_SRC}/crypto/chacha/asm \
${LCRYPTO_SRC}/crypto/ec/asm \
${LCRYPTO_SRC}/crypto/modes/asm \
${LCRYPTO_SRC}/crypto/poly1305/asm \
${LCRYPTO_SRC}/crypto/sha/asm
PERLPATH= -I${LCRYPTO_SRC}/crypto/perlasm
# aes
SRCS= aesv8-armx.pl bsaes-armv7.pl
SRCS= aes-armv4.pl aesv8-armx.pl bsaes-armv7.pl
# bn
SRCS+= armv4-mont.pl armv4-gf2m.pl
# chacha
SRCS+= chacha-armv4.pl
# ec
SRCS+= ecp_nistz256-armv4.pl
# modes
SRCS+= ghash-armv4.pl ghashv8-armx.pl
# poly1305
SRCS+= poly1305-armv4.pl
# sha
SRCS+= sha1-armv4-large.pl sha256-armv4.pl sha512-armv4.pl
SRCS+= keccak1600-armv4.pl sha1-armv4-large.pl sha256-armv4.pl sha512-armv4.pl
ASM= aes-armv4.S ${SRCS:R:S/$/.S/}
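
(Note: the .S diffs that follow are machine-generated — each carries a "Do not modify. This file is auto-generated from *.pl" header — and are produced by running the perlasm scripts listed in SRCS above through OpenSSL's arm-xlate translator. A minimal sketch of that regeneration step is below; the rule form and the "linux32" flavour are assumptions for illustration, not the exact rule from this Makefile.)

# Hypothetical sketch only -- not the rule from this Makefile.
# Each 32-bit ARM perlasm script takes a flavour plus an output path;
# "linux32" is the flavour OpenSSL's linux-armv4 target uses.
.for s in ${SRCS}
${s:R}.s: ${s}
	perl ${PERLPATH} ${.ALLSRC} linux32 ${.TARGET}
.endfor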

View File

@ -1,5 +1,12 @@
/* $FreeBSD$ */
/* Do not modify. This file is auto-generated from aes-armv4.pl. */
@ Copyright 2007-2018 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the OpenSSL license (the "License"). You may not use
@ this file except in compliance with the License. You can obtain a copy
@ in the file LICENSE in the source distribution or at
@ https://www.openssl.org/source/license.html
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ -40,15 +47,12 @@
#endif
.text
#if __ARM_ARCH__<7
.code 32
#else
#if defined(__thumb2__) && !defined(__APPLE__)
.syntax unified
# ifdef __thumb2__
.thumb
# else
#else
.code 32
# endif
#undef __thumb2__
#endif
.type AES_Te,%object
@ -159,19 +163,23 @@ AES_Te:
@ void AES_encrypt(const unsigned char *in, unsigned char *out,
@ const AES_KEY *key) {
.global AES_encrypt
.globl AES_encrypt
.type AES_encrypt,%function
.align 5
AES_encrypt:
#if __ARM_ARCH__<7
#ifndef __thumb2__
sub r3,pc,#8 @ AES_encrypt
#else
adr r3,.
#endif
stmdb sp!,{r1,r4-r12,lr}
#if defined(__thumb2__) || defined(__APPLE__)
adr r10,AES_Te
#else
sub r10,r3,#AES_encrypt-AES_Te @ Te
#endif
mov r12,r0 @ inp
mov r11,r2
sub r10,r3,#AES_encrypt-AES_Te @ Te
#if __ARM_ARCH__<7
ldrb r0,[r12,#3] @ load input data in endian-neutral
ldrb r4,[r12,#2] @ manner...
@ -258,12 +266,12 @@ AES_encrypt:
strb r3,[r12,#15]
#endif
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4-r12,lr}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size AES_encrypt,.-AES_encrypt
@ -271,7 +279,7 @@ AES_encrypt:
.align 2
_armv4_AES_encrypt:
str lr,[sp,#-4]! @ push lr
ldmia r11!,{r4-r7}
ldmia r11!,{r4,r5,r6,r7}
eor r0,r0,r4
ldr r12,[r11,#240-16]
eor r1,r1,r5
@ -404,24 +412,24 @@ _armv4_AES_encrypt:
ldr pc,[sp],#4 @ pop and return
.size _armv4_AES_encrypt,.-_armv4_AES_encrypt
.global private_AES_set_encrypt_key
.type private_AES_set_encrypt_key,%function
.globl AES_set_encrypt_key
.type AES_set_encrypt_key,%function
.align 5
private_AES_set_encrypt_key:
AES_set_encrypt_key:
_armv4_AES_set_encrypt_key:
#if __ARM_ARCH__<7
#ifndef __thumb2__
sub r3,pc,#8 @ AES_set_encrypt_key
#else
adr r3,.
#endif
teq r0,#0
#if __ARM_ARCH__>=7
#ifdef __thumb2__
itt eq @ Thumb2 thing, sanity check in ARM
#endif
moveq r0,#-1
beq .Labrt
teq r2,#0
#if __ARM_ARCH__>=7
#ifdef __thumb2__
itt eq @ Thumb2 thing, sanity check in ARM
#endif
moveq r0,#-1
@ -432,19 +440,23 @@ _armv4_AES_set_encrypt_key:
teq r1,#192
beq .Lok
teq r1,#256
#if __ARM_ARCH__>=7
#ifdef __thumb2__
itt ne @ Thumb2 thing, sanity check in ARM
#endif
movne r0,#-1
bne .Labrt
.Lok: stmdb sp!,{r4-r12,lr}
sub r10,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024 @ Te4
.Lok: stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
mov r12,r0 @ inp
mov lr,r1 @ bits
mov r11,r2 @ key
#if defined(__thumb2__) || defined(__APPLE__)
adr r10,AES_Te+1024 @ Te4
#else
sub r10,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024 @ Te4
#endif
#if __ARM_ARCH__<7
ldrb r0,[r12,#3] @ load input data in endian-neutral
ldrb r4,[r12,#2] @ manner...
@ -589,7 +601,7 @@ _armv4_AES_set_encrypt_key:
str r2,[r11,#-16]
subs r12,r12,#1
str r3,[r11,#-12]
#if __ARM_ARCH__>=7
#ifdef __thumb2__
itt eq @ Thumb2 thing, sanity check in ARM
#endif
subeq r2,r11,#216
@ -661,7 +673,7 @@ _armv4_AES_set_encrypt_key:
str r2,[r11,#-24]
subs r12,r12,#1
str r3,[r11,#-20]
#if __ARM_ARCH__>=7
#ifdef __thumb2__
itt eq @ Thumb2 thing, sanity check in ARM
#endif
subeq r2,r11,#256
@ -695,21 +707,21 @@ _armv4_AES_set_encrypt_key:
.align 2
.Ldone: mov r0,#0
ldmia sp!,{r4-r12,lr}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
.Labrt:
#if __ARM_ARCH__>=5
bx lr @ .word 0xe12fff1e
#else
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
.size AES_set_encrypt_key,.-AES_set_encrypt_key
.global private_AES_set_decrypt_key
.type private_AES_set_decrypt_key,%function
.globl AES_set_decrypt_key
.type AES_set_decrypt_key,%function
.align 5
private_AES_set_decrypt_key:
AES_set_decrypt_key:
str lr,[sp,#-4]! @ push lr
bl _armv4_AES_set_encrypt_key
teq r0,#0
@ -719,20 +731,20 @@ private_AES_set_decrypt_key:
mov r0,r2 @ AES_set_encrypt_key preserves r2,
mov r1,r2 @ which is AES_KEY *key
b _armv4_AES_set_enc2dec_key
.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
.size AES_set_decrypt_key,.-AES_set_decrypt_key
@ void AES_set_enc2dec_key(const AES_KEY *inp,AES_KEY *out)
.global AES_set_enc2dec_key
.globl AES_set_enc2dec_key
.type AES_set_enc2dec_key,%function
.align 5
AES_set_enc2dec_key:
_armv4_AES_set_enc2dec_key:
stmdb sp!,{r4-r12,lr}
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
ldr r12,[r0,#240]
mov r7,r0 @ input
add r8,r0,r12,lsl#4
mov r11,r1 @ ouput
mov r11,r1 @ output
add r10,r1,r12,lsl#4
str r12,[r1,#240]
@ -809,12 +821,12 @@ _armv4_AES_set_enc2dec_key:
mov r0,#0
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4-r12,lr}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size AES_set_enc2dec_key,.-AES_set_enc2dec_key
@ -922,19 +934,23 @@ AES_Td:
@ void AES_decrypt(const unsigned char *in, unsigned char *out,
@ const AES_KEY *key) {
.global AES_decrypt
.globl AES_decrypt
.type AES_decrypt,%function
.align 5
AES_decrypt:
#if __ARM_ARCH__<7
#ifndef __thumb2__
sub r3,pc,#8 @ AES_decrypt
#else
adr r3,.
#endif
stmdb sp!,{r1,r4-r12,lr}
#if defined(__thumb2__) || defined(__APPLE__)
adr r10,AES_Td
#else
sub r10,r3,#AES_decrypt-AES_Td @ Td
#endif
mov r12,r0 @ inp
mov r11,r2
sub r10,r3,#AES_decrypt-AES_Td @ Td
#if __ARM_ARCH__<7
ldrb r0,[r12,#3] @ load input data in endian-neutral
ldrb r4,[r12,#2] @ manner...
@ -1021,12 +1037,12 @@ AES_decrypt:
strb r3,[r12,#15]
#endif
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4-r12,lr}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size AES_decrypt,.-AES_decrypt
@ -1034,7 +1050,7 @@ AES_decrypt:
.align 2
_armv4_AES_decrypt:
str lr,[sp,#-4]! @ push lr
ldmia r11!,{r4-r7}
ldmia r11!,{r4,r5,r6,r7}
eor r0,r0,r4
ldr r12,[r11,#240-16]
eor r1,r1,r5
@ -1175,5 +1191,6 @@ _armv4_AES_decrypt:
sub r10,r10,#1024
ldr pc,[sp],#4 @ pop and return
.size _armv4_AES_decrypt,.-_armv4_AES_decrypt
.asciz "AES for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
.byte 65,69,83,32,102,111,114,32,65,82,77,118,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2

View File

@ -4,11 +4,12 @@
#if __ARM_MAX_ARCH__>=7
.text
.arch armv7-a
.arch armv7-a @ don't confuse not-so-latest binutils with argv8 :-)
.fpu neon
.code 32
#undef __thumb2__
.align 5
rcon:
.Lrcon:
.long 0x01,0x01,0x01,0x01
.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat
.long 0x1b,0x1b,0x1b,0x1b
@ -31,7 +32,7 @@ aes_v8_set_encrypt_key:
tst r1,#0x3f
bne .Lenc_key_abort
adr r3,rcon
adr r3,.Lrcon
cmp r1,#192
veor q0,q0,q0
@ -49,7 +50,7 @@ aes_v8_set_encrypt_key:
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
@ -68,7 +69,7 @@ aes_v8_set_encrypt_key:
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q3,q3,q9
vext.8 q9,q0,q9,#12
@ -83,7 +84,7 @@ aes_v8_set_encrypt_key:
vtbl.8 d21,{q3},d5
vext.8 q9,q0,q3,#12
vst1.32 {q3},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q3,q3,q9
vext.8 q9,q0,q9,#12
@ -110,7 +111,7 @@ aes_v8_set_encrypt_key:
vtbl.8 d21,{q8},d5
vext.8 q9,q0,q3,#12
vst1.32 {d16},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
@ -146,7 +147,7 @@ aes_v8_set_encrypt_key:
vtbl.8 d21,{q8},d5
vext.8 q9,q0,q3,#12
vst1.32 {q8},[r2]!
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
subs r1,r1,#1
veor q3,q3,q9
@ -162,7 +163,7 @@ aes_v8_set_encrypt_key:
vdup.32 q10,d7[1]
vext.8 q9,q0,q8,#12
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
.byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
veor q8,q8,q9
vext.8 q9,q0,q9,#12
@ -205,15 +206,15 @@ aes_v8_set_decrypt_key:
.Loop_imc:
vld1.32 {q0},[r2]
vld1.32 {q1},[r0]
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
vst1.32 {q0},[r0],r4
vst1.32 {q1},[r2]!
cmp r0,r2
bhi .Loop_imc
vld1.32 {q0},[r2]
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
vst1.32 {q0},[r0]
eor r0,r0,r0 @ return value
@ -231,19 +232,19 @@ aes_v8_encrypt:
vld1.32 {q1},[r2]!
.Loop_enc:
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q0},[r2]!
subs r3,r3,#2
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q1},[r2]!
bgt .Loop_enc
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
.byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
.byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
vld1.32 {q0},[r2]
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
.byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
veor q2,q2,q0
vst1.8 {q2},[r1]
@ -260,19 +261,19 @@ aes_v8_decrypt:
vld1.32 {q1},[r2]!
.Loop_dec:
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q0},[r2]!
subs r3,r3,#2
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q1},[r2]!
bgt .Loop_dec
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
.byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
.byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
vld1.32 {q0},[r2]
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
.byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
veor q2,q2,q0
vst1.8 {q2},[r1]
@ -283,9 +284,9 @@ aes_v8_decrypt:
.align 5
aes_v8_cbc_encrypt:
mov ip,sp
stmdb sp!,{r4-r8,lr}
vstmdb sp!,{d8-d15} @ ABI specification says so
ldmia ip,{r4-r5} @ load remaining args
stmdb sp!,{r4,r5,r6,r7,r8,lr}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldmia ip,{r4,r5} @ load remaining args
subs r2,r2,#16
mov r8,#16
blo .Lcbc_abort
@ -297,13 +298,13 @@ aes_v8_cbc_encrypt:
vld1.8 {q6},[r4]
vld1.8 {q0},[r0],r8
vld1.32 {q8-q9},[r3] @ load key schedule...
vld1.32 {q8,q9},[r3] @ load key schedule...
sub r5,r5,#6
add r7,r3,r5,lsl#4 @ pointer to last 7 round keys
sub r5,r5,#2
vld1.32 {q10-q11},[r7]!
vld1.32 {q12-q13},[r7]!
vld1.32 {q14-q15},[r7]!
vld1.32 {q10,q11},[r7]!
vld1.32 {q12,q13},[r7]!
vld1.32 {q14,q15},[r7]!
vld1.32 {q7},[r7]
add r7,r3,#32
@ -315,62 +316,62 @@ aes_v8_cbc_encrypt:
veor q5,q8,q7
beq .Lcbc_enc128
vld1.32 {q2-q3},[r7]
vld1.32 {q2,q3},[r7]
add r7,r3,#16
add r6,r3,#16*4
add r12,r3,#16*5
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
add r14,r3,#16*6
add r3,r3,#16*7
b .Lenter_cbc_enc
.align 4
.Loop_cbc_enc:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vst1.8 {q6},[r1]!
.Lenter_cbc_enc:
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q8},[r6]
cmp r5,#4
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r12]
beq .Lcbc_enc192
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q8},[r14]
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r3]
nop
.Lcbc_enc192:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
subs r2,r2,#16
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
moveq r8,#0
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.8 {q8},[r0],r8
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
veor q8,q8,q5
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q9},[r7] @ re-pre-load rndkey[1]
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
veor q6,q0,q7
bhs .Loop_cbc_enc
@ -379,36 +380,36 @@ aes_v8_cbc_encrypt:
.align 5
.Lcbc_enc128:
vld1.32 {q2-q3},[r7]
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.32 {q2,q3},[r7]
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
b .Lenter_cbc_enc128
.Loop_cbc_enc128:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vst1.8 {q6},[r1]!
.Lenter_cbc_enc128:
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
subs r2,r2,#16
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
moveq r8,#0
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
vld1.8 {q8},[r0],r8
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
veor q8,q8,q5
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
veor q6,q0,q7
bhs .Loop_cbc_enc128
@ -431,69 +432,69 @@ aes_v8_cbc_encrypt:
vorr q11,q10,q10
.Loop3x_cbc_dec:
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q9},[r7]!
bgt .Loop3x_cbc_dec
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q4,q6,q7
subs r2,r2,#0x30
veor q5,q2,q7
movlo r6,r2 @ r6, r6, is zero at this point
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q9,q3,q7
add r0,r0,r6 @ r0 is adjusted in such way that
@ at exit from the loop q1-q10
@ are loaded with last "words"
vorr q6,q11,q11
mov r7,r3
.byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q2},[r0]!
.byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q3},[r0]!
.byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14
.byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.8 {q11},[r0]!
.byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
.byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
add r6,r5,#2
veor q4,q4,q0
@ -513,44 +514,44 @@ aes_v8_cbc_encrypt:
nop
.Lcbc_dec_tail:
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
vld1.32 {q9},[r7]!
bgt .Lcbc_dec_tail
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
cmn r2,#0x20
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q5,q6,q7
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
.byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
.byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
.byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
.byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
veor q9,q3,q7
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
.byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
.byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
beq .Lcbc_dec_one
veor q5,q5,q1
veor q9,q9,q10
@ -567,30 +568,30 @@ aes_v8_cbc_encrypt:
.Lcbc_done:
vst1.8 {q6},[r4]
.Lcbc_abort:
vldmia sp!,{d8-d15}
ldmia sp!,{r4-r8,pc}
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,pc}
.size aes_v8_cbc_encrypt,.-aes_v8_cbc_encrypt
.globl aes_v8_ctr32_encrypt_blocks
.type aes_v8_ctr32_encrypt_blocks,%function
.align 5
aes_v8_ctr32_encrypt_blocks:
mov ip,sp
stmdb sp!,{r4-r10,lr}
vstmdb sp!,{d8-d15} @ ABI specification says so
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldr r4, [ip] @ load remaining arg
ldr r5,[r3,#240]
ldr r8, [r4, #12]
vld1.32 {q0},[r4]
vld1.32 {q8-q9},[r3] @ load key schedule...
vld1.32 {q8,q9},[r3] @ load key schedule...
sub r5,r5,#4
mov r12,#16
cmp r2,#2
add r7,r3,r5,lsl#4 @ pointer to last 5 round keys
sub r5,r5,#2
vld1.32 {q12-q13},[r7]!
vld1.32 {q14-q15},[r7]!
vld1.32 {q12,q13},[r7]!
vld1.32 {q14,q15},[r7]!
vld1.32 {q7},[r7]
add r7,r3,#32
mov r6,r5
@ -613,76 +614,76 @@ aes_v8_ctr32_encrypt_blocks:
.align 4
.Loop3x_ctr32:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.32 {q9},[r7]!
bgt .Loop3x_ctr32
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
vld1.8 {q2},[r0]!
vorr q0,q6,q6
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
.byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
.byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
vld1.8 {q3},[r0]!
vorr q1,q6,q6
.byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
.byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
vld1.8 {q11},[r0]!
mov r7,r3
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
.byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10
.byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
.byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10
vorr q10,q6,q6
add r9,r8,#1
.byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
.byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
veor q2,q2,q7
add r10,r8,#2
.byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
.byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
veor q3,q3,q7
add r8,r8,#3
.byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
.byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
veor q11,q11,q7
rev r9,r9
.byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
.byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
vmov.32 d1[1], r9
rev r10,r10
.byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
.byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
.byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
.byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
.byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
vmov.32 d3[1], r10
rev r12,r8
.byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
.byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14
.byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
vmov.32 d21[1], r12
subs r2,r2,#3
.byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
.byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15
.byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15
.byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
.byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15
.byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15
veor q2,q2,q4
vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
@ -702,45 +703,45 @@ aes_v8_ctr32_encrypt_blocks:
moveq r12,#0
.Lctr32_tail:
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.32 {q8},[r7]!
subs r6,r6,#2
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.32 {q9},[r7]!
bgt .Lctr32_tail
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.8 {q2},[r0],r12
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
vld1.8 {q3},[r0]
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
veor q2,q2,q7
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
.byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
.byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
.byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14
.byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
veor q3,q3,q7
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
.byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15
.byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
.byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15
cmp r2,#1
veor q2,q2,q0
@ -750,7 +751,7 @@ aes_v8_ctr32_encrypt_blocks:
vst1.8 {q3},[r1]
.Lctr32_done:
vldmia sp!,{d8-d15}
ldmia sp!,{r4-r10,pc}
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc}
.size aes_v8_ctr32_encrypt_blocks,.-aes_v8_ctr32_encrypt_blocks
#endif

View File

@ -3,7 +3,12 @@
#include "arm_arch.h"
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.type mul_1x1_ialu,%function
.align 5
mul_1x1_ialu:
@ -71,11 +76,17 @@ mul_1x1_ialu:
eor r4,r4,r6,lsr#8
ldr r6,[sp,r8] @ tab[b >> 30 ]
#ifdef __thumb2__
itt ne
#endif
eorne r5,r5,r0,lsl#30
eorne r4,r4,r0,lsr#2
tst r1,#1<<31
eor r5,r5,r7,lsl#27
eor r4,r4,r7,lsr#5
#ifdef __thumb2__
itt ne
#endif
eorne r5,r5,r0,lsl#31
eorne r4,r4,r0,lsr#1
eor r5,r5,r6,lsl#30
@ -83,22 +94,35 @@ mul_1x1_ialu:
mov pc,lr
.size mul_1x1_ialu,.-mul_1x1_ialu
.global bn_GF2m_mul_2x2
.globl bn_GF2m_mul_2x2
.type bn_GF2m_mul_2x2,%function
.align 5
bn_GF2m_mul_2x2:
#if __ARM_MAX_ARCH__>=7
stmdb sp!,{r10,lr}
ldr r12,.LOPENSSL_armcap
.Lpic: ldr r12,[pc,r12]
tst r12,#1
bne .LNEON
adr r10,.LOPENSSL_armcap
ldr r12,[r12,r10]
#ifdef __APPLE__
ldr r12,[r12]
#endif
tst r12,#ARMV7_NEON
itt ne
ldrne r10,[sp],#8
bne .LNEON
stmdb sp!,{r4,r5,r6,r7,r8,r9}
#else
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr}
#endif
stmdb sp!,{r4-r10,lr}
mov r10,r0 @ reassign 1st argument
mov r0,r3 @ r0=b1
sub r7,sp,#36
mov r8,sp
and r7,r7,#-32
ldr r3,[sp,#32] @ load b0
mov r12,#7<<2
sub sp,sp,#32 @ allocate tab[8]
mov sp,r7 @ allocate tab[8]
str r8,[r7,#32]
bl mul_1x1_ialu @ a1·b1
str r5,[r10,#8]
@ -117,8 +141,9 @@ bn_GF2m_mul_2x2:
eor r1,r1,r2
eor r0,r0,r3
bl mul_1x1_ialu @ (a1+a0)·(b1+b0)
ldmia r10,{r6-r9}
ldmia r10,{r6,r7,r8,r9}
eor r5,r5,r4
ldr sp,[sp,#32] @ destroy tab[8]
eor r4,r4,r7
eor r5,r5,r6
eor r4,r4,r8
@ -126,16 +151,15 @@ bn_GF2m_mul_2x2:
eor r4,r4,r9
str r4,[r10,#8]
eor r5,r5,r4
add sp,sp,#32 @ destroy tab[8]
str r5,[r10,#4]
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r10,pc}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc}
#else
ldmia sp!,{r4-r10,lr}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
@ -195,9 +219,10 @@ bn_GF2m_mul_2x2:
#if __ARM_MAX_ARCH__>=7
.align 5
.LOPENSSL_armcap:
.word OPENSSL_armcap_P-(.Lpic+8)
.word OPENSSL_armcap_P-.
#endif
.asciz "GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.byte 71,70,40,50,94,109,41,32,77,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 5
#if __ARM_MAX_ARCH__>=7

View File

@ -3,28 +3,37 @@
#include "arm_arch.h"
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
#if __ARM_MAX_ARCH__>=7
.align 5
.LOPENSSL_armcap:
.word OPENSSL_armcap_P-bn_mul_mont
.word OPENSSL_armcap_P-.Lbn_mul_mont
#endif
.global bn_mul_mont
.globl bn_mul_mont
.type bn_mul_mont,%function
.align 5
bn_mul_mont:
.Lbn_mul_mont:
ldr ip,[sp,#4] @ load num
stmdb sp!,{r0,r2} @ sp points at argument block
#if __ARM_MAX_ARCH__>=7
tst ip,#7
bne .Lialu
adr r0,bn_mul_mont
adr r0,.Lbn_mul_mont
ldr r2,.LOPENSSL_armcap
ldr r0,[r0,r2]
tst r0,#1 @ NEON available?
#ifdef __APPLE__
ldr r0,[r0]
#endif
tst r0,#ARMV7_NEON @ NEON available?
ldmia sp, {r0,r2}
beq .Lialu
add sp,sp,#8
@ -34,11 +43,14 @@ bn_mul_mont:
#endif
cmp ip,#2
mov r0,ip @ load num
#ifdef __thumb2__
ittt lt
#endif
movlt r0,#0
addlt sp,sp,#2*4
blt .Labrt
stmdb sp!,{r4-r12,lr} @ save 10 registers
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ save 10 registers
mov r0,r0,lsl#2 @ rescale r0 for byte count
sub sp,sp,r0 @ alloca(4*num)
@ -81,10 +93,11 @@ bn_mul_mont:
ldr r8,[r0,#14*4] @ restore n0
adc r14,r14,#0
str r12,[r0] @ tp[num-1]=
mov r7,sp
str r14,[r0,#4] @ tp[num]=
.Louter:
sub r7,r0,sp @ "original" r0-1 value
sub r7,r0,r7 @ "original" r0-1 value
sub r1,r1,r7 @ "rewind" ap to &ap[1]
ldr r2,[r4,#4]! @ *(++bp)
sub r3,r3,r7 @ "rewind" np to &np[1]
@ -129,11 +142,16 @@ bn_mul_mont:
str r14,[r0,#4] @ tp[num]=
cmp r4,r7
#ifdef __thumb2__
itt ne
#endif
movne r7,sp
bne .Louter
ldr r2,[r0,#12*4] @ pull rp
mov r5,sp
add r0,r0,#4 @ r0 to point at &tp[num]
sub r5,r0,sp @ "original" num value
sub r5,r0,r5 @ "original" num value
mov r4,sp @ "rewind" r4
mov r1,r4 @ "borrow" r1
sub r3,r3,r5 @ "rewind" r3 to &np[0]
@ -160,17 +178,18 @@ bn_mul_mont:
teq r4,r0 @ preserve carry
bne .Lcopy
add sp,r0,#4 @ skip over tp[num+1]
ldmia sp!,{r4-r12,lr} @ restore registers
mov sp,r0
add sp,sp,#4 @ skip over tp[num+1]
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr} @ restore registers
add sp,sp,#2*4 @ skip over {r0,r2}
mov r0,#1
.Labrt:
#if __ARM_ARCH__>=5
bx lr @ .word 0xe12fff1e
bx lr @ bx lr
#else
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size bn_mul_mont,.-bn_mul_mont
#if __ARM_MAX_ARCH__>=7
@ -181,42 +200,42 @@ bn_mul_mont:
.align 5
bn_mul8x_mont_neon:
mov ip,sp
stmdb sp!,{r4-r11}
vstmdb sp!,{d8-d15} @ ABI specification says so
ldmia ip,{r4-r5} @ load rest of parameter block
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
ldmia ip,{r4,r5} @ load rest of parameter block
mov ip,sp
cmp r5,#8
bhi .LNEON_8n
@ special case for r5==8, everything is in register bank...
sub r7,sp,#16
vld1.32 {d28[0]}, [r2,:32]!
sub r7,r7,r5,lsl#4
vld1.32 {d0-d3}, [r1]! @ can't specify :32 :-(
veor d8,d8,d8
sub r7,sp,r5,lsl#4
vld1.32 {d0,d1,d2,d3}, [r1]! @ can't specify :32 :-(
and r7,r7,#-64
vld1.32 {d30[0]}, [r4,:32]
mov sp,r7 @ alloca
veor d8,d8,d8
subs r8,r5,#8
vzip.16 d28,d8
vmull.u32 q6,d28,d0[0]
vmull.u32 q7,d28,d0[1]
vmull.u32 q8,d28,d1[0]
vshl.i64 d10,d13,#16
vshl.i64 d29,d13,#16
vmull.u32 q9,d28,d1[1]
vadd.u64 d10,d10,d12
vadd.u64 d29,d29,d12
veor d8,d8,d8
vmul.u32 d29,d10,d30
vmul.u32 d29,d29,d30
vmull.u32 q10,d28,d2[0]
vld1.32 {d4-d7}, [r3]!
vld1.32 {d4,d5,d6,d7}, [r3]!
vmull.u32 q11,d28,d2[1]
vmull.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmull.u32 q13,d28,d3[1]
bne .LNEON_1st
@ special case for num=8, everything is in register bank...
vmlal.u32 q6,d29,d4[0]
sub r9,r5,#1
vmlal.u32 q7,d29,d4[1]
@ -252,13 +271,13 @@ bn_mul8x_mont_neon:
vmlal.u32 q6,d28,d0[0]
vmlal.u32 q7,d28,d0[1]
vmlal.u32 q8,d28,d1[0]
vshl.i64 d10,d13,#16
vshl.i64 d29,d13,#16
vmlal.u32 q9,d28,d1[1]
vadd.u64 d10,d10,d12
vadd.u64 d29,d29,d12
veor d8,d8,d8
subs r9,r9,#1
vmul.u32 d29,d10,d30
vmul.u32 d29,d29,d30
vmlal.u32 q10,d28,d2[0]
vmlal.u32 q11,d28,d2[1]
@ -295,231 +314,576 @@ bn_mul8x_mont_neon:
vshr.u64 d10,d12,#16
mov r8,r5
vadd.u64 d13,d13,d10
add r6,sp,#16
add r6,sp,#96
vshr.u64 d10,d13,#16
vzip.16 d12,d13
b .LNEON_tail2
b .LNEON_tail_entry
.align 4
.LNEON_1st:
vmlal.u32 q6,d29,d4[0]
vld1.32 {d0-d3}, [r1]!
vmlal.u32 q7,d29,d4[1]
subs r8,r8,#8
vmlal.u32 q8,d29,d5[0]
vmlal.u32 q9,d29,d5[1]
vmlal.u32 q10,d29,d6[0]
vld1.32 {d4-d5}, [r3]!
vmlal.u32 q11,d29,d6[1]
vst1.64 {q6-q7}, [r7,:256]!
vmlal.u32 q12,d29,d7[0]
vmlal.u32 q13,d29,d7[1]
vst1.64 {q8-q9}, [r7,:256]!
vmull.u32 q6,d28,d0[0]
vld1.32 {d6-d7}, [r3]!
vmull.u32 q7,d28,d0[1]
vst1.64 {q10-q11}, [r7,:256]!
vmull.u32 q8,d28,d1[0]
vmull.u32 q9,d28,d1[1]
vst1.64 {q12-q13}, [r7,:256]!
vmull.u32 q10,d28,d2[0]
vmull.u32 q11,d28,d2[1]
vmull.u32 q12,d28,d3[0]
vmull.u32 q13,d28,d3[1]
bne .LNEON_1st
vmlal.u32 q6,d29,d4[0]
add r6,sp,#16
vmlal.u32 q7,d29,d4[1]
sub r1,r1,r5,lsl#2 @ rewind r1
vmlal.u32 q8,d29,d5[0]
vld1.64 {q5}, [sp,:128]
vmlal.u32 q9,d29,d5[1]
sub r9,r5,#1
vmlal.u32 q10,d29,d6[0]
vst1.64 {q6-q7}, [r7,:256]!
vmlal.u32 q11,d29,d6[1]
vshr.u64 d10,d10,#16
vld1.64 {q6}, [r6, :128]!
vmlal.u32 q12,d29,d7[0]
vst1.64 {q8-q9}, [r7,:256]!
vmlal.u32 q13,d29,d7[1]
vst1.64 {q10-q11}, [r7,:256]!
vadd.u64 d10,d10,d11
veor q4,q4,q4
vst1.64 {q12-q13}, [r7,:256]!
vld1.64 {q7-q8}, [r6, :256]!
vst1.64 {q4}, [r7,:128]
vshr.u64 d10,d10,#16
b .LNEON_outer
.align 4
.LNEON_outer:
vld1.32 {d28[0]}, [r2,:32]!
sub r3,r3,r5,lsl#2 @ rewind r3
vld1.32 {d0-d3}, [r1]!
veor d8,d8,d8
mov r7,sp
vzip.16 d28,d8
.LNEON_8n:
veor q6,q6,q6
sub r7,sp,#128
veor q7,q7,q7
sub r7,r7,r5,lsl#4
veor q8,q8,q8
and r7,r7,#-64
veor q9,q9,q9
mov sp,r7 @ alloca
veor q10,q10,q10
add r7,r7,#256
veor q11,q11,q11
sub r8,r5,#8
vadd.u64 d12,d12,d10
veor q12,q12,q12
veor q13,q13,q13
.LNEON_8n_init:
vst1.64 {q6,q7},[r7,:256]!
subs r8,r8,#8
vst1.64 {q8,q9},[r7,:256]!
vst1.64 {q10,q11},[r7,:256]!
vst1.64 {q12,q13},[r7,:256]!
bne .LNEON_8n_init
add r6,sp,#256
vld1.32 {d0,d1,d2,d3},[r1]!
add r10,sp,#8
vld1.32 {d30[0]},[r4,:32]
mov r9,r5
b .LNEON_8n_outer
.align 4
.LNEON_8n_outer:
vld1.32 {d28[0]},[r2,:32]! @ *b++
veor d8,d8,d8
vzip.16 d28,d8
add r7,sp,#128
vld1.32 {d4,d5,d6,d7},[r3]!
vmlal.u32 q6,d28,d0[0]
vld1.64 {q9-q10},[r6,:256]!
vmlal.u32 q7,d28,d0[1]
vmlal.u32 q8,d28,d1[0]
vld1.64 {q11-q12},[r6,:256]!
vmlal.u32 q9,d28,d1[1]
vshl.i64 d10,d13,#16
veor d8,d8,d8
vadd.u64 d10,d10,d12
vld1.64 {q13},[r6,:128]!
vmul.u32 d29,d10,d30
vmlal.u32 q8,d28,d1[0]
vshl.i64 d29,d13,#16
vmlal.u32 q9,d28,d1[1]
vadd.u64 d29,d29,d12
vmlal.u32 q10,d28,d2[0]
vld1.32 {d4-d7}, [r3]!
vmul.u32 d29,d29,d30
vmlal.u32 q11,d28,d2[1]
vst1.32 {d28},[sp,:64] @ put aside smashed b[8*i+0]
vmlal.u32 q12,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q13,d28,d3[1]
.LNEON_inner:
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q6,d29,d4[0]
vld1.32 {d0-d3}, [r1]!
veor d10,d10,d10
vmlal.u32 q7,d29,d4[1]
subs r8,r8,#8
vzip.16 d28,d10
vmlal.u32 q8,d29,d5[0]
vshr.u64 d12,d12,#16
vmlal.u32 q9,d29,d5[1]
vst1.64 {q6-q7}, [r7,:256]!
vmlal.u32 q10,d29,d6[0]
vld1.64 {q6}, [r6, :128]!
vadd.u64 d12,d12,d13
vmlal.u32 q11,d29,d6[1]
vst1.64 {q8-q9}, [r7,:256]!
vshr.u64 d12,d12,#16
vmlal.u32 q12,d29,d7[0]
vld1.64 {q7-q8}, [r6, :256]!
vmlal.u32 q13,d29,d7[1]
vst1.64 {q10-q11}, [r7,:256]!
vadd.u64 d14,d14,d12
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+0]
vmlal.u32 q7,d28,d0[0]
vld1.64 {q6},[r6,:128]!
vmlal.u32 q8,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q9,d28,d1[0]
vshl.i64 d29,d15,#16
vmlal.u32 q10,d28,d1[1]
vadd.u64 d29,d29,d14
vmlal.u32 q11,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q12,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+1]
vmlal.u32 q13,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q6,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q7,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q8,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q9,d29,d5[0]
vshr.u64 d14,d14,#16
vmlal.u32 q10,d29,d5[1]
vmlal.u32 q11,d29,d6[0]
vadd.u64 d14,d14,d15
vmlal.u32 q12,d29,d6[1]
vshr.u64 d14,d14,#16
vmlal.u32 q13,d29,d7[0]
vmlal.u32 q6,d29,d7[1]
vadd.u64 d16,d16,d14
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+1]
vmlal.u32 q8,d28,d0[0]
vld1.64 {q7},[r6,:128]!
vmlal.u32 q9,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q10,d28,d1[0]
vshl.i64 d29,d17,#16
vmlal.u32 q11,d28,d1[1]
vadd.u64 d29,d29,d16
vmlal.u32 q12,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q13,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+2]
vmlal.u32 q6,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q7,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q8,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q9,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q10,d29,d5[0]
vshr.u64 d16,d16,#16
vmlal.u32 q11,d29,d5[1]
vmlal.u32 q12,d29,d6[0]
vadd.u64 d16,d16,d17
vmlal.u32 q13,d29,d6[1]
vshr.u64 d16,d16,#16
vmlal.u32 q6,d29,d7[0]
vmlal.u32 q7,d29,d7[1]
vadd.u64 d18,d18,d16
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+2]
vmlal.u32 q9,d28,d0[0]
vld1.64 {q8},[r6,:128]!
vmlal.u32 q10,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q11,d28,d1[0]
vshl.i64 d29,d19,#16
vmlal.u32 q12,d28,d1[1]
vadd.u64 d29,d29,d18
vmlal.u32 q13,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q6,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+3]
vmlal.u32 q7,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q8,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q9,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q10,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q11,d29,d5[0]
vshr.u64 d18,d18,#16
vmlal.u32 q12,d29,d5[1]
vmlal.u32 q13,d29,d6[0]
vadd.u64 d18,d18,d19
vmlal.u32 q6,d29,d6[1]
vshr.u64 d18,d18,#16
vmlal.u32 q7,d29,d7[0]
vmlal.u32 q8,d29,d7[1]
vadd.u64 d20,d20,d18
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+3]
vmlal.u32 q10,d28,d0[0]
vld1.64 {q9},[r6,:128]!
vmlal.u32 q11,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q12,d28,d1[0]
vshl.i64 d29,d21,#16
vmlal.u32 q13,d28,d1[1]
vadd.u64 d29,d29,d20
vmlal.u32 q6,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q7,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+4]
vmlal.u32 q8,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q9,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q10,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q11,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q12,d29,d5[0]
vshr.u64 d20,d20,#16
vmlal.u32 q13,d29,d5[1]
vmlal.u32 q6,d29,d6[0]
vadd.u64 d20,d20,d21
vmlal.u32 q7,d29,d6[1]
vshr.u64 d20,d20,#16
vmlal.u32 q8,d29,d7[0]
vmlal.u32 q9,d29,d7[1]
vadd.u64 d22,d22,d20
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+4]
vmlal.u32 q11,d28,d0[0]
vld1.64 {q10},[r6,:128]!
vmlal.u32 q12,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q13,d28,d1[0]
vshl.i64 d29,d23,#16
vmlal.u32 q6,d28,d1[1]
vadd.u64 d29,d29,d22
vmlal.u32 q7,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q8,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+5]
vmlal.u32 q9,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q10,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q11,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q12,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q13,d29,d5[0]
vshr.u64 d22,d22,#16
vmlal.u32 q6,d29,d5[1]
vmlal.u32 q7,d29,d6[0]
vadd.u64 d22,d22,d23
vmlal.u32 q8,d29,d6[1]
vshr.u64 d22,d22,#16
vmlal.u32 q9,d29,d7[0]
vmlal.u32 q10,d29,d7[1]
vadd.u64 d24,d24,d22
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+5]
vmlal.u32 q12,d28,d0[0]
vld1.64 {q11},[r6,:128]!
vmlal.u32 q13,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q6,d28,d1[0]
vshl.i64 d29,d25,#16
vmlal.u32 q7,d28,d1[1]
vadd.u64 d29,d29,d24
vmlal.u32 q8,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q9,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+6]
vmlal.u32 q10,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q11,d28,d3[1]
vld1.32 {d28[0]},[r2,:32]! @ *b++
vmlal.u32 q12,d29,d4[0]
veor d10,d10,d10
vmlal.u32 q13,d29,d4[1]
vzip.16 d28,d10
vmlal.u32 q6,d29,d5[0]
vshr.u64 d24,d24,#16
vmlal.u32 q7,d29,d5[1]
vmlal.u32 q8,d29,d6[0]
vadd.u64 d24,d24,d25
vmlal.u32 q9,d29,d6[1]
vshr.u64 d24,d24,#16
vmlal.u32 q10,d29,d7[0]
vmlal.u32 q11,d29,d7[1]
vadd.u64 d26,d26,d24
vst1.32 {d29},[r10,:64]! @ put aside smashed m[8*i+6]
vmlal.u32 q13,d28,d0[0]
vld1.64 {q12},[r6,:128]!
vmlal.u32 q6,d28,d0[1]
veor d8,d8,d8
vmlal.u32 q7,d28,d1[0]
vshl.i64 d29,d27,#16
vmlal.u32 q8,d28,d1[1]
vadd.u64 d29,d29,d26
vmlal.u32 q9,d28,d2[0]
vmul.u32 d29,d29,d30
vmlal.u32 q10,d28,d2[1]
vst1.32 {d28},[r10,:64]! @ put aside smashed b[8*i+7]
vmlal.u32 q11,d28,d3[0]
vzip.16 d29,d8
vmlal.u32 q12,d28,d3[1]
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
vmlal.u32 q13,d29,d4[0]
vld1.32 {d0,d1,d2,d3},[r1]!
vmlal.u32 q6,d29,d4[1]
vmlal.u32 q7,d29,d5[0]
vshr.u64 d26,d26,#16
vmlal.u32 q8,d29,d5[1]
vmlal.u32 q9,d29,d6[0]
vadd.u64 d26,d26,d27
vmlal.u32 q10,d29,d6[1]
vshr.u64 d26,d26,#16
vmlal.u32 q11,d29,d7[0]
vmlal.u32 q12,d29,d7[1]
vadd.u64 d12,d12,d26
vst1.32 {d29},[r10,:64] @ put aside smashed m[8*i+7]
add r10,sp,#8 @ rewind
sub r8,r5,#8
b .LNEON_8n_inner
.align 4
.LNEON_8n_inner:
subs r8,r8,#8
vmlal.u32 q6,d28,d0[0]
vld1.64 {q9-q10}, [r6, :256]!
vld1.64 {q13},[r6,:128]
vmlal.u32 q7,d28,d0[1]
vst1.64 {q12-q13}, [r7,:256]!
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+0]
vmlal.u32 q8,d28,d1[0]
vld1.64 {q11-q12}, [r6, :256]!
vld1.32 {d4,d5,d6,d7},[r3]!
vmlal.u32 q9,d28,d1[1]
vld1.32 {d4-d7}, [r3]!
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q10,d28,d2[0]
vld1.64 {q13}, [r6, :128]!
vmlal.u32 q11,d28,d2[1]
vmlal.u32 q12,d28,d3[0]
vmlal.u32 q13,d28,d3[1]
bne .LNEON_inner
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+1]
vmlal.u32 q6,d29,d4[0]
add r6,sp,#16
vmlal.u32 q7,d29,d4[1]
sub r1,r1,r5,lsl#2 @ rewind r1
vmlal.u32 q8,d29,d5[0]
vld1.64 {q5}, [sp,:128]
vmlal.u32 q9,d29,d5[1]
subs r9,r9,#1
vmlal.u32 q10,d29,d6[0]
vst1.64 {q6-q7}, [r7,:256]!
vmlal.u32 q11,d29,d6[1]
vld1.64 {q6}, [r6, :128]!
vshr.u64 d10,d10,#16
vst1.64 {q8-q9}, [r7,:256]!
vmlal.u32 q12,d29,d7[0]
vld1.64 {q7-q8}, [r6, :256]!
vmlal.u32 q13,d29,d7[1]
vst1.64 {q6},[r7,:128]!
vmlal.u32 q7,d28,d0[0]
vld1.64 {q6},[r6,:128]
vmlal.u32 q8,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+1]
vmlal.u32 q9,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q10,d28,d1[1]
vmlal.u32 q11,d28,d2[0]
vmlal.u32 q12,d28,d2[1]
vmlal.u32 q13,d28,d3[0]
vmlal.u32 q6,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+2]
vmlal.u32 q7,d29,d4[0]
vmlal.u32 q8,d29,d4[1]
vmlal.u32 q9,d29,d5[0]
vmlal.u32 q10,d29,d5[1]
vmlal.u32 q11,d29,d6[0]
vmlal.u32 q12,d29,d6[1]
vmlal.u32 q13,d29,d7[0]
vmlal.u32 q6,d29,d7[1]
vst1.64 {q7},[r7,:128]!
vmlal.u32 q8,d28,d0[0]
vld1.64 {q7},[r6,:128]
vmlal.u32 q9,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+2]
vmlal.u32 q10,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q11,d28,d1[1]
vmlal.u32 q12,d28,d2[0]
vmlal.u32 q13,d28,d2[1]
vmlal.u32 q6,d28,d3[0]
vmlal.u32 q7,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+3]
vmlal.u32 q8,d29,d4[0]
vmlal.u32 q9,d29,d4[1]
vmlal.u32 q10,d29,d5[0]
vmlal.u32 q11,d29,d5[1]
vmlal.u32 q12,d29,d6[0]
vmlal.u32 q13,d29,d6[1]
vmlal.u32 q6,d29,d7[0]
vmlal.u32 q7,d29,d7[1]
vst1.64 {q8},[r7,:128]!
vmlal.u32 q9,d28,d0[0]
vld1.64 {q8},[r6,:128]
vmlal.u32 q10,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+3]
vmlal.u32 q11,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q12,d28,d1[1]
vmlal.u32 q13,d28,d2[0]
vmlal.u32 q6,d28,d2[1]
vmlal.u32 q7,d28,d3[0]
vmlal.u32 q8,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+4]
vmlal.u32 q9,d29,d4[0]
vmlal.u32 q10,d29,d4[1]
vmlal.u32 q11,d29,d5[0]
vmlal.u32 q12,d29,d5[1]
vmlal.u32 q13,d29,d6[0]
vmlal.u32 q6,d29,d6[1]
vmlal.u32 q7,d29,d7[0]
vmlal.u32 q8,d29,d7[1]
vst1.64 {q9},[r7,:128]!
vmlal.u32 q10,d28,d0[0]
vld1.64 {q9},[r6,:128]
vmlal.u32 q11,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+4]
vmlal.u32 q12,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q13,d28,d1[1]
vmlal.u32 q6,d28,d2[0]
vmlal.u32 q7,d28,d2[1]
vmlal.u32 q8,d28,d3[0]
vmlal.u32 q9,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+5]
vmlal.u32 q10,d29,d4[0]
vmlal.u32 q11,d29,d4[1]
vmlal.u32 q12,d29,d5[0]
vmlal.u32 q13,d29,d5[1]
vmlal.u32 q6,d29,d6[0]
vmlal.u32 q7,d29,d6[1]
vmlal.u32 q8,d29,d7[0]
vmlal.u32 q9,d29,d7[1]
vst1.64 {q10},[r7,:128]!
vmlal.u32 q11,d28,d0[0]
vld1.64 {q10},[r6,:128]
vmlal.u32 q12,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+5]
vmlal.u32 q13,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q6,d28,d1[1]
vmlal.u32 q7,d28,d2[0]
vmlal.u32 q8,d28,d2[1]
vmlal.u32 q9,d28,d3[0]
vmlal.u32 q10,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+6]
vmlal.u32 q11,d29,d4[0]
vmlal.u32 q12,d29,d4[1]
vmlal.u32 q13,d29,d5[0]
vmlal.u32 q6,d29,d5[1]
vmlal.u32 q7,d29,d6[0]
vmlal.u32 q8,d29,d6[1]
vmlal.u32 q9,d29,d7[0]
vmlal.u32 q10,d29,d7[1]
vst1.64 {q11},[r7,:128]!
vmlal.u32 q12,d28,d0[0]
vld1.64 {q11},[r6,:128]
vmlal.u32 q13,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+6]
vmlal.u32 q6,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q7,d28,d1[1]
vmlal.u32 q8,d28,d2[0]
vmlal.u32 q9,d28,d2[1]
vmlal.u32 q10,d28,d3[0]
vmlal.u32 q11,d28,d3[1]
vld1.32 {d28},[r10,:64]! @ pull smashed b[8*i+7]
vmlal.u32 q12,d29,d4[0]
vmlal.u32 q13,d29,d4[1]
vmlal.u32 q6,d29,d5[0]
vmlal.u32 q7,d29,d5[1]
vmlal.u32 q8,d29,d6[0]
vmlal.u32 q9,d29,d6[1]
vmlal.u32 q10,d29,d7[0]
vmlal.u32 q11,d29,d7[1]
vst1.64 {q12},[r7,:128]!
vmlal.u32 q13,d28,d0[0]
vld1.64 {q12},[r6,:128]
vmlal.u32 q6,d28,d0[1]
vld1.32 {d29},[r10,:64]! @ pull smashed m[8*i+7]
vmlal.u32 q7,d28,d1[0]
it ne
addne r6,r6,#16 @ don't advance in last iteration
vmlal.u32 q8,d28,d1[1]
vmlal.u32 q9,d28,d2[0]
vmlal.u32 q10,d28,d2[1]
vmlal.u32 q11,d28,d3[0]
vmlal.u32 q12,d28,d3[1]
it eq
subeq r1,r1,r5,lsl#2 @ rewind
vmlal.u32 q13,d29,d4[0]
vld1.32 {d28},[sp,:64] @ pull smashed b[8*i+0]
vmlal.u32 q6,d29,d4[1]
vld1.32 {d0,d1,d2,d3},[r1]!
vmlal.u32 q7,d29,d5[0]
add r10,sp,#8 @ rewind
vmlal.u32 q8,d29,d5[1]
vmlal.u32 q9,d29,d6[0]
vmlal.u32 q10,d29,d6[1]
vmlal.u32 q11,d29,d7[0]
vst1.64 {q13},[r7,:128]!
vmlal.u32 q12,d29,d7[1]
vst1.64 {q10-q11}, [r7,:256]!
vadd.u64 d10,d10,d11
vst1.64 {q12-q13}, [r7,:256]!
vshr.u64 d10,d10,#16
bne .LNEON_8n_inner
add r6,sp,#128
vst1.64 {q6,q7},[r7,:256]!
veor q2,q2,q2 @ d4-d5
vst1.64 {q8,q9},[r7,:256]!
veor q3,q3,q3 @ d6-d7
vst1.64 {q10,q11},[r7,:256]!
vst1.64 {q12},[r7,:128]
bne .LNEON_outer
subs r9,r9,#8
vld1.64 {q6,q7},[r6,:256]!
vld1.64 {q8,q9},[r6,:256]!
vld1.64 {q10,q11},[r6,:256]!
vld1.64 {q12,q13},[r6,:256]!
mov r7,sp
mov r8,r5
itt ne
subne r3,r3,r5,lsl#2 @ rewind
bne .LNEON_8n_outer
.LNEON_tail:
vadd.u64 d12,d12,d10
vld1.64 {q9-q10}, [r6, :256]!
add r7,sp,#128
vst1.64 {q2,q3}, [sp,:256]! @ start wiping stack frame
vshr.u64 d10,d12,#16
vst1.64 {q2,q3},[sp,:256]!
vadd.u64 d13,d13,d10
vld1.64 {q11-q12}, [r6, :256]!
vst1.64 {q2,q3}, [sp,:256]!
vshr.u64 d10,d13,#16
vld1.64 {q13}, [r6, :128]!
vst1.64 {q2,q3}, [sp,:256]!
vzip.16 d12,d13
.LNEON_tail2:
mov r8,r5
b .LNEON_tail_entry
.align 4
.LNEON_tail:
vadd.u64 d12,d12,d10
vshr.u64 d10,d12,#16
vld1.64 {q8,q9}, [r6, :256]!
vadd.u64 d13,d13,d10
vld1.64 {q10,q11}, [r6, :256]!
vshr.u64 d10,d13,#16
vld1.64 {q12,q13}, [r6, :256]!
vzip.16 d12,d13
.LNEON_tail_entry:
vadd.u64 d14,d14,d10
vst1.32 {d12[0]}, [r7, :32]!
vshr.u64 d10,d14,#16
vadd.u64 d15,d15,d10
vshr.u64 d10,d15,#16
vzip.16 d14,d15
vadd.u64 d16,d16,d10
vst1.32 {d14[0]}, [r7, :32]!
vshr.u64 d10,d16,#16
vadd.u64 d17,d17,d10
vshr.u64 d10,d17,#16
vzip.16 d16,d17
vadd.u64 d18,d18,d10
vst1.32 {d16[0]}, [r7, :32]!
vshr.u64 d10,d18,#16
vadd.u64 d19,d19,d10
vshr.u64 d10,d19,#16
vzip.16 d18,d19
vadd.u64 d20,d20,d10
vst1.32 {d18[0]}, [r7, :32]!
vshr.u64 d10,d20,#16
vadd.u64 d21,d21,d10
vshr.u64 d10,d21,#16
vzip.16 d20,d21
vadd.u64 d22,d22,d10
vst1.32 {d20[0]}, [r7, :32]!
vshr.u64 d10,d22,#16
vadd.u64 d23,d23,d10
vshr.u64 d10,d23,#16
vzip.16 d22,d23
vadd.u64 d24,d24,d10
vst1.32 {d22[0]}, [r7, :32]!
vshr.u64 d10,d24,#16
vadd.u64 d25,d25,d10
vld1.64 {q6}, [r6, :128]!
vshr.u64 d10,d25,#16
vzip.16 d24,d25
vadd.u64 d26,d26,d10
vst1.32 {d24[0]}, [r7, :32]!
vshr.u64 d10,d26,#16
vadd.u64 d27,d27,d10
vld1.64 {q7-q8}, [r6, :256]!
vshr.u64 d10,d27,#16
vzip.16 d26,d27
vld1.64 {q6,q7}, [r6, :256]!
subs r8,r8,#8
vst1.32 {d26[0]}, [r7, :32]!
bne .LNEON_tail
vst1.32 {d10[0]}, [r7, :32] @ top-most bit
@ -528,19 +892,20 @@ bn_mul8x_mont_neon:
add r2,sp,r5,lsl#2
.LNEON_sub:
ldmia r1!, {r4-r7}
ldmia r3!, {r8-r11}
ldmia r1!, {r4,r5,r6,r7}
ldmia r3!, {r8,r9,r10,r11}
sbcs r8, r4,r8
sbcs r9, r5,r9
sbcs r10,r6,r10
sbcs r11,r7,r11
teq r1,r2 @ preserves carry
stmia r0!, {r8-r11}
stmia r0!, {r8,r9,r10,r11}
bne .LNEON_sub
ldr r10, [r1] @ load top-most bit
mov r11,sp
veor q0,q0,q0
sub r11,r2,sp @ this is num*4
sub r11,r2,r11 @ this is num*4
veor q1,q1,q1
mov r1,sp
sub r0,r0,r11 @ rewind r0
@ -548,35 +913,42 @@ bn_mul8x_mont_neon:
sbcs r10,r10,#0 @ result is carry flag
.LNEON_copy_n_zap:
ldmia r1!, {r4-r7}
ldmia r0, {r8-r11}
ldmia r1!, {r4,r5,r6,r7}
ldmia r0, {r8,r9,r10,r11}
it cc
movcc r8, r4
vst1.64 {q0-q1}, [r3,:256]! @ wipe
vst1.64 {q0,q1}, [r3,:256]! @ wipe
itt cc
movcc r9, r5
movcc r10,r6
vst1.64 {q0-q1}, [r3,:256]! @ wipe
vst1.64 {q0,q1}, [r3,:256]! @ wipe
it cc
movcc r11,r7
ldmia r1, {r4-r7}
stmia r0!, {r8-r11}
ldmia r1, {r4,r5,r6,r7}
stmia r0!, {r8,r9,r10,r11}
sub r1,r1,#16
ldmia r0, {r8-r11}
ldmia r0, {r8,r9,r10,r11}
it cc
movcc r8, r4
vst1.64 {q0-q1}, [r1,:256]! @ wipe
vst1.64 {q0,q1}, [r1,:256]! @ wipe
itt cc
movcc r9, r5
movcc r10,r6
vst1.64 {q0-q1}, [r3,:256]! @ wipe
vst1.64 {q0,q1}, [r3,:256]! @ wipe
it cc
movcc r11,r7
teq r1,r2 @ preserves carry
stmia r0!, {r8-r11}
stmia r0!, {r8,r9,r10,r11}
bne .LNEON_copy_n_zap
sub sp,ip,#96
vldmia sp!,{d8-d15}
ldmia sp!,{r4-r11}
bx lr @ .word 0xe12fff1e
mov sp,ip
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11}
bx lr @ bx lr
.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
.asciz "Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.byte 77,111,110,116,103,111,109,101,114,121,32,109,117,108,116,105,112,108,105,99,97,116,105,111,110,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#if __ARM_MAX_ARCH__>=7
.comm OPENSSL_armcap_P,4,4

View File

@ -1,5 +1,12 @@
/* $FreeBSD$ */
/* Do not modify. This file is auto-generated from bsaes-armv7.pl. */
@ Copyright 2012-2018 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the OpenSSL license (the "License"). You may not use
@ this file except in compliance with the License. You can obtain a copy
@ in the file LICENSE in the source distribution or at
@ https://www.openssl.org/source/license.html
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ -8,8 +15,7 @@
@ details see http://www.openssl.org/~appro/cryptogams/.
@
@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
@ <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
@ granted.
@ of Linaro. Permission to use under GPL terms is granted.
@ ====================================================================
@ Bit-sliced AES for ARM NEON
@ -43,10 +49,7 @@
@ <appro@openssl.org>
@ April-August 2013
@
@ Add CBC, CTR and XTS subroutines, adapt for kernel use.
@
@ <ard.biesheuvel@linaro.org>
@ Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard.
#ifndef __KERNEL__
# include "arm_arch.h"
@ -61,7 +64,7 @@
# define BSAES_ASM_EXTENDED_KEY
# define XTS_CHAIN_TWEAK
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ 7
#endif
#ifdef __thumb__
@ -74,10 +77,11 @@
.text
.syntax unified @ ARMv7-capable assembler is expected to handle this
#ifdef __thumb2__
#if defined(__thumb2__) && !defined(__APPLE__)
.thumb
#else
.code 32
# undef __thumb2__
#endif
.type _bsaes_decrypt8,%function
@ -85,7 +89,11 @@
_bsaes_decrypt8:
adr r6,.
vldmia r4!, {q9} @ round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0ISR
#else
add r6,r6,#.LM0ISR-_bsaes_decrypt8
#endif
vldmia r6!, {q8} @ .LM0ISR
veor q10, q0, q9 @ xor with round0 key
@ -191,7 +199,7 @@ _bsaes_decrypt8:
b .Ldec_sbox
.align 4
.Ldec_loop:
vldmia r4!, {q8-q11}
vldmia r4!, {q8,q9,q10,q11}
veor q8, q8, q0
veor q9, q9, q1
vtbl.8 d0, {q8}, d24
@ -546,23 +554,24 @@ _bsaes_decrypt8:
.type _bsaes_const,%object
.align 6
_bsaes_const:
.LM0ISR: @ InvShiftRows constants
.quad 0x0a0e0206070b0f03, 0x0004080c0d010509
.LM0ISR:@ InvShiftRows constants
.quad 0x0a0e0206070b0f03, 0x0004080c0d010509
.LISR:
.quad 0x0504070602010003, 0x0f0e0d0c080b0a09
.quad 0x0504070602010003, 0x0f0e0d0c080b0a09
.LISRM0:
.quad 0x01040b0e0205080f, 0x0306090c00070a0d
.LM0SR: @ ShiftRows constants
.quad 0x0a0e02060f03070b, 0x0004080c05090d01
.quad 0x01040b0e0205080f, 0x0306090c00070a0d
.LM0SR:@ ShiftRows constants
.quad 0x0a0e02060f03070b, 0x0004080c05090d01
.LSR:
.quad 0x0504070600030201, 0x0f0e0d0c0a09080b
.quad 0x0504070600030201, 0x0f0e0d0c0a09080b
.LSRM0:
.quad 0x0304090e00050a0f, 0x01060b0c0207080d
.quad 0x0304090e00050a0f, 0x01060b0c0207080d
.LM0:
.quad 0x02060a0e03070b0f, 0x0004080c0105090d
.quad 0x02060a0e03070b0f, 0x0004080c0105090d
.LREVM0SR:
.quad 0x090d01050c000408, 0x03070b0f060a0e02
.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro@openssl.org>"
.quad 0x090d01050c000408, 0x03070b0f060a0e02
.byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 6
.size _bsaes_const,.-_bsaes_const
@ -571,7 +580,11 @@ _bsaes_const:
_bsaes_encrypt8:
adr r6,.
vldmia r4!, {q9} @ round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0SR
#else
sub r6,r6,#_bsaes_encrypt8-.LM0SR
#endif
vldmia r6!, {q8} @ .LM0SR
_bsaes_encrypt8_alt:
@ -679,7 +692,7 @@ _bsaes_encrypt8_bitslice:
b .Lenc_sbox
.align 4
.Lenc_loop:
vldmia r4!, {q8-q11}
vldmia r4!, {q8,q9,q10,q11}
veor q8, q8, q0
veor q9, q9, q1
vtbl.8 d0, {q8}, d24
@ -1002,7 +1015,11 @@ _bsaes_encrypt8_bitslice:
_bsaes_key_convert:
adr r6,.
vld1.8 {q7}, [r4]! @ load round 0 key
#if defined(__thumb2__) || defined(__APPLE__)
adr r6,.LM0
#else
sub r6,r6,#_bsaes_key_convert-.LM0
#endif
vld1.8 {q15}, [r4]! @ load round 1 key
vmov.i8 q8, #0x01 @ bit masks
@ -1045,17 +1062,17 @@ _bsaes_key_convert:
vrev32.8 q15, q15
#endif
subs r5,r5,#1
vstmia r12!,{q0-q7} @ write bit-sliced round key
vstmia r12!,{q0,q1,q2,q3,q4,q5,q6,q7} @ write bit-sliced round key
bne .Lkey_loop
vmov.i8 q7,#0x63 @ compose .L63
@ don't save last round key
bx lr
.size _bsaes_key_convert,.-_bsaes_key_convert
.extern AES_cbc_encrypt
.extern AES_decrypt
.global bsaes_cbc_encrypt
.globl bsaes_cbc_encrypt
.type bsaes_cbc_encrypt,%function
.align 5
bsaes_cbc_encrypt:
@ -1073,7 +1090,7 @@ bsaes_cbc_encrypt:
@ it is up to the caller to make sure we are called with enc == 0
mov ip, sp
stmdb sp!, {r4-r10, lr}
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr}
VFP_ABI_PUSH
ldr r8, [ip] @ IV is 1st arg on the stack
mov r2, r2, lsr#4 @ len in 16 byte blocks
@ -1113,7 +1130,7 @@ bsaes_cbc_encrypt:
vstmia r4, {q7}
.align 2
0:
#endif
vld1.8 {q15}, [r8] @ load IV
@ -1124,33 +1141,33 @@ bsaes_cbc_encrypt:
subs r2, r2, #0x8
bmi .Lcbc_dec_loop_finish
vld1.8 {q0-q1}, [r0]! @ load input
vld1.8 {q2-q3}, [r0]!
vld1.8 {q0,q1}, [r0]! @ load input
vld1.8 {q2,q3}, [r0]!
#ifndef BSAES_ASM_EXTENDED_KEY
mov r4, sp @ pass the key
#else
add r4, r3, #248
#endif
vld1.8 {q4-q5}, [r0]!
vld1.8 {q4,q5}, [r0]!
mov r5, r10
vld1.8 {q6-q7}, [r0]
vld1.8 {q6,q7}, [r0]
sub r0, r0, #0x60
vstmia r9, {q15} @ put aside IV
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8-q9}, [r0]! @ reload input
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10-q11}, [r0]!
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q12-q13}, [r0]!
vld1.8 {q12,q13}, [r0]!
veor q4, q4, q10
veor q2, q2, q11
vld1.8 {q14-q15}, [r0]!
vld1.8 {q14,q15}, [r0]!
veor q7, q7, q12
vst1.8 {q0-q1}, [r1]! @ write output
vst1.8 {q0,q1}, [r1]! @ write output
veor q3, q3, q13
vst1.8 {q6}, [r1]!
veor q5, q5, q14
@ -1194,17 +1211,17 @@ bsaes_cbc_encrypt:
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8-q9}, [r0]! @ reload input
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10-q11}, [r0]!
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q12-q13}, [r0]!
vld1.8 {q12,q13}, [r0]!
veor q4, q4, q10
veor q2, q2, q11
vld1.8 {q15}, [r0]!
veor q7, q7, q12
vst1.8 {q0-q1}, [r1]! @ write output
vst1.8 {q0,q1}, [r1]! @ write output
veor q3, q3, q13
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
@ -1217,9 +1234,9 @@ bsaes_cbc_encrypt:
sub r0, r0, #0x60
bl _bsaes_decrypt8
vldmia r9,{q14} @ reload IV
vld1.8 {q8-q9}, [r0]! @ reload input
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10-q11}, [r0]!
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q12}, [r0]!
@ -1227,7 +1244,7 @@ bsaes_cbc_encrypt:
veor q2, q2, q11
vld1.8 {q15}, [r0]!
veor q7, q7, q12
vst1.8 {q0-q1}, [r1]! @ write output
vst1.8 {q0,q1}, [r1]! @ write output
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
vst1.8 {q2}, [r1]!
@ -1238,14 +1255,14 @@ bsaes_cbc_encrypt:
sub r0, r0, #0x50
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8-q9}, [r0]! @ reload input
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10-q11}, [r0]!
vld1.8 {q10,q11}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q15}, [r0]!
veor q4, q4, q10
vst1.8 {q0-q1}, [r1]! @ write output
vst1.8 {q0,q1}, [r1]! @ write output
veor q2, q2, q11
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
@ -1256,14 +1273,14 @@ bsaes_cbc_encrypt:
sub r0, r0, #0x40
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8-q9}, [r0]! @ reload input
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q10}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vld1.8 {q15}, [r0]!
veor q4, q4, q10
vst1.8 {q0-q1}, [r1]! @ write output
vst1.8 {q0,q1}, [r1]! @ write output
vst1.8 {q6}, [r1]!
vst1.8 {q4}, [r1]!
b .Lcbc_dec_done
@ -1272,12 +1289,12 @@ bsaes_cbc_encrypt:
sub r0, r0, #0x30
bl _bsaes_decrypt8
vldmia r9, {q14} @ reload IV
vld1.8 {q8-q9}, [r0]! @ reload input
vld1.8 {q8,q9}, [r0]! @ reload input
veor q0, q0, q14 @ ^= IV
vld1.8 {q15}, [r0]!
veor q1, q1, q8
veor q6, q6, q9
vst1.8 {q0-q1}, [r1]! @ write output
vst1.8 {q0,q1}, [r1]! @ write output
vst1.8 {q6}, [r1]!
b .Lcbc_dec_done
.align 4
@ -1289,7 +1306,7 @@ bsaes_cbc_encrypt:
veor q0, q0, q14 @ ^= IV
vld1.8 {q15}, [r0]! @ reload input
veor q1, q1, q8
vst1.8 {q0-q1}, [r1]! @ write output
vst1.8 {q0,q1}, [r1]! @ write output
b .Lcbc_dec_done
.align 4
.Lcbc_dec_one:
@ -1309,8 +1326,8 @@ bsaes_cbc_encrypt:
#ifndef BSAES_ASM_EXTENDED_KEY
vmov.i32 q0, #0
vmov.i32 q1, #0
.Lcbc_dec_bzero: @ wipe key schedule [if any]
vstmia sp!, {q0-q1}
.Lcbc_dec_bzero:@ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r9
bne .Lcbc_dec_bzero
#endif
@ -1319,10 +1336,10 @@ bsaes_cbc_encrypt:
add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
vst1.8 {q15}, [r8] @ return IV
VFP_ABI_POP
ldmia sp!, {r4-r10, pc}
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc}
.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
.extern AES_encrypt
.global bsaes_ctr32_encrypt_blocks
.globl bsaes_ctr32_encrypt_blocks
.type bsaes_ctr32_encrypt_blocks,%function
.align 5
bsaes_ctr32_encrypt_blocks:
@ -1330,7 +1347,7 @@ bsaes_ctr32_encrypt_blocks:
blo .Lctr_enc_short @ small sizes
mov ip, sp
stmdb sp!, {r4-r10, lr}
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr}
VFP_ABI_PUSH
ldr r8, [ip] @ ctr is 1st arg on the stack
sub sp, sp, #0x10 @ scratch space to carry over the ctr
@ -1351,7 +1368,12 @@ bsaes_ctr32_encrypt_blocks:
vstmia r12, {q7} @ save last round key
vld1.8 {q0}, [r8] @ load counter
#ifdef __APPLE__
mov r8, #:lower16:(.LREVM0SR-.LM0)
add r8, r6, r8
#else
add r8, r6, #.LREVM0SR-.LM0 @ borrow r8
#endif
vldmia sp, {q4} @ load round0 key
#else
ldr r12, [r3, #244]
@ -1368,7 +1390,7 @@ bsaes_ctr32_encrypt_blocks:
vstmia r12, {q7} @ save last round key
.align 2
0: add r12, r3, #248
add r12, r3, #248
vld1.8 {q0}, [r8] @ load counter
adrl r8, .LREVM0SR @ borrow r8
vldmia r12, {q4} @ load round0 key
@ -1408,23 +1430,28 @@ bsaes_ctr32_encrypt_blocks:
vldmia r8, {q8} @ .LREVM0SR
mov r5, r10 @ pass rounds
vstmia r9, {q10} @ save next counter
#ifdef __APPLE__
mov r6, #:lower16:(.LREVM0SR-.LSR)
sub r6, r8, r6
#else
sub r6, r8, #.LREVM0SR-.LSR @ pass constants
#endif
bl _bsaes_encrypt8_alt
subs r2, r2, #8
blo .Lctr_enc_loop_done
vld1.8 {q8-q9}, [r0]! @ load input
vld1.8 {q10-q11}, [r0]!
vld1.8 {q8,q9}, [r0]! @ load input
vld1.8 {q10,q11}, [r0]!
veor q0, q8
veor q1, q9
vld1.8 {q12-q13}, [r0]!
vld1.8 {q12,q13}, [r0]!
veor q4, q10
veor q6, q11
vld1.8 {q14-q15}, [r0]!
vld1.8 {q14,q15}, [r0]!
veor q3, q12
vst1.8 {q0-q1}, [r1]! @ write output
vst1.8 {q0,q1}, [r1]! @ write output
veor q7, q13
veor q2, q14
vst1.8 {q4}, [r1]!
@ -1481,35 +1508,35 @@ bsaes_ctr32_encrypt_blocks:
vmov.i32 q0, #0
vmov.i32 q1, #0
#ifndef BSAES_ASM_EXTENDED_KEY
.Lctr_enc_bzero: @ wipe key schedule [if any]
vstmia sp!, {q0-q1}
.Lctr_enc_bzero:@ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r9
bne .Lctr_enc_bzero
#else
vstmia sp, {q0-q1}
vstmia sp, {q0,q1}
#endif
mov sp, r9
add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
VFP_ABI_POP
ldmia sp!, {r4-r10, pc} @ return
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return
.align 4
.Lctr_enc_short:
ldr ip, [sp] @ ctr pointer is passed on stack
stmdb sp!, {r4-r8, lr}
stmdb sp!, {r4,r5,r6,r7,r8, lr}
mov r4, r0 @ copy arguments
mov r5, r1
mov r6, r2
mov r7, r3
ldr r8, [ip, #12] @ load counter LSW
ldr r8, [ip, #12] @ load counter .LSW
vld1.8 {q1}, [ip] @ load whole counter value
#ifdef __ARMEL__
rev r8, r8
#endif
sub sp, sp, #0x10
vst1.8 {q1}, [sp,:64] @ copy counter value
vst1.8 {q1}, [sp] @ copy counter value
sub sp, sp, #0x10
.Lctr_enc_short_loop:
@ -1520,7 +1547,7 @@ bsaes_ctr32_encrypt_blocks:
bl AES_encrypt
vld1.8 {q0}, [r4]! @ load input
vld1.8 {q1}, [sp,:64] @ load encrypted counter
vld1.8 {q1}, [sp] @ load encrypted counter
add r8, r8, #1
#ifdef __ARMEL__
rev r0, r8
@ -1535,16 +1562,16 @@ bsaes_ctr32_encrypt_blocks:
vmov.i32 q0, #0
vmov.i32 q1, #0
vstmia sp!, {q0-q1}
vstmia sp!, {q0,q1}
ldmia sp!, {r4-r8, pc}
ldmia sp!, {r4,r5,r6,r7,r8, pc}
.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
.globl bsaes_xts_encrypt
.type bsaes_xts_encrypt,%function
.align 4
bsaes_xts_encrypt:
mov ip, sp
stmdb sp!, {r4-r10, lr} @ 0x20
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr} @ 0x20
VFP_ABI_PUSH
mov r6, sp @ future r3
@ -1598,7 +1625,7 @@ bsaes_xts_encrypt:
vstmia r12, {q7}
.align 2
0: sub sp, #0x90 @ place for tweak[9]
sub sp, #0x90 @ place for tweak[9]
#endif
vld1.8 {q8}, [r0] @ initial tweak
@ -1673,7 +1700,7 @@ bsaes_xts_encrypt:
veor q8, q8, q7
vst1.64 {q8}, [r0,:128] @ next round tweak
vld1.8 {q6-q7}, [r7]!
vld1.8 {q6,q7}, [r7]!
veor q5, q5, q13
#ifndef BSAES_ASM_EXTENDED_KEY
add r4, sp, #0x90 @ pass key schedule
@ -1687,22 +1714,22 @@ bsaes_xts_encrypt:
bl _bsaes_encrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q10-q11}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10,q11}, [r0,:128]!
veor q0, q0, q8
vld1.64 {q12-q13}, [r0,:128]!
vld1.64 {q12,q13}, [r0,:128]!
veor q1, q1, q9
veor q8, q4, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
veor q9, q6, q11
vld1.64 {q14-q15}, [r0,:128]!
vld1.64 {q14,q15}, [r0,:128]!
veor q10, q3, q12
vst1.8 {q8-q9}, [r8]!
vst1.8 {q8,q9}, [r8]!
veor q11, q7, q13
veor q12, q2, q14
vst1.8 {q10-q11}, [r8]!
vst1.8 {q10,q11}, [r8]!
veor q13, q5, q15
vst1.8 {q12-q13}, [r8]!
vst1.8 {q12,q13}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
@ -1798,20 +1825,20 @@ bsaes_xts_encrypt:
bl _bsaes_encrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q10-q11}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10,q11}, [r0,:128]!
veor q0, q0, q8
vld1.64 {q12-q13}, [r0,:128]!
vld1.64 {q12,q13}, [r0,:128]!
veor q1, q1, q9
veor q8, q4, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
veor q9, q6, q11
vld1.64 {q14}, [r0,:128]!
veor q10, q3, q12
vst1.8 {q8-q9}, [r8]!
vst1.8 {q8,q9}, [r8]!
veor q11, q7, q13
veor q12, q2, q14
vst1.8 {q10-q11}, [r8]!
vst1.8 {q10,q11}, [r8]!
vst1.8 {q12}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
@ -1830,18 +1857,18 @@ bsaes_xts_encrypt:
bl _bsaes_encrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q10-q11}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10,q11}, [r0,:128]!
veor q0, q0, q8
vld1.64 {q12-q13}, [r0,:128]!
vld1.64 {q12,q13}, [r0,:128]!
veor q1, q1, q9
veor q8, q4, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
veor q9, q6, q11
veor q10, q3, q12
vst1.8 {q8-q9}, [r8]!
vst1.8 {q8,q9}, [r8]!
veor q11, q7, q13
vst1.8 {q10-q11}, [r8]!
vst1.8 {q10,q11}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
b .Lxts_enc_done
@ -1849,7 +1876,7 @@ bsaes_xts_encrypt:
@ put this in range for both ARM and Thumb mode adr instructions
.align 5
.Lxts_magic:
.quad 1, 0x87
.quad 1, 0x87
.align 5
.Lxts_enc_5:
@ -1865,16 +1892,16 @@ bsaes_xts_encrypt:
bl _bsaes_encrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q10-q11}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10,q11}, [r0,:128]!
veor q0, q0, q8
vld1.64 {q12}, [r0,:128]!
veor q1, q1, q9
veor q8, q4, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
veor q9, q6, q11
veor q10, q3, q12
vst1.8 {q8-q9}, [r8]!
vst1.8 {q8,q9}, [r8]!
vst1.8 {q10}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
@ -1893,14 +1920,14 @@ bsaes_xts_encrypt:
bl _bsaes_encrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q10-q11}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10,q11}, [r0,:128]!
veor q0, q0, q8
veor q1, q1, q9
veor q8, q4, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
veor q9, q6, q11
vst1.8 {q8-q9}, [r8]!
vst1.8 {q8,q9}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
b .Lxts_enc_done
@ -1918,12 +1945,12 @@ bsaes_xts_encrypt:
bl _bsaes_encrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10}, [r0,:128]!
veor q0, q0, q8
veor q1, q1, q9
veor q8, q4, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
vst1.8 {q8}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
@ -1942,10 +1969,10 @@ bsaes_xts_encrypt:
bl _bsaes_encrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
veor q0, q0, q8
veor q1, q1, q9
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
b .Lxts_enc_done
@ -2005,8 +2032,8 @@ bsaes_xts_encrypt:
#ifdef XTS_CHAIN_TWEAK
ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak
#endif
.Lxts_enc_bzero: @ wipe key schedule [if any]
vstmia sp!, {q0-q1}
.Lxts_enc_bzero:@ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r0
bne .Lxts_enc_bzero
@ -2015,7 +2042,7 @@ bsaes_xts_encrypt:
vst1.8 {q8}, [r1]
#endif
VFP_ABI_POP
ldmia sp!, {r4-r10, pc} @ return
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return
.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
@ -2024,7 +2051,7 @@ bsaes_xts_encrypt:
.align 4
bsaes_xts_decrypt:
mov ip, sp
stmdb sp!, {r4-r10, lr} @ 0x20
stmdb sp!, {r4,r5,r6,r7,r8,r9,r10, lr} @ 0x20
VFP_ABI_PUSH
mov r6, sp @ future r3
@ -2084,14 +2111,16 @@ bsaes_xts_decrypt:
vstmia r4, {q7}
.align 2
0: sub sp, #0x90 @ place for tweak[9]
sub sp, #0x90 @ place for tweak[9]
#endif
vld1.8 {q8}, [r0] @ initial tweak
adr r2, .Lxts_magic
#ifndef XTS_CHAIN_TWEAK
tst r9, #0xf @ if not multiple of 16
it ne @ Thumb2 thing, sanity check in ARM
subne r9, #0x10 @ subtract another 16 bytes
#endif
subs r9, #0x80
blo .Lxts_dec_short
@ -2162,7 +2191,7 @@ bsaes_xts_decrypt:
veor q8, q8, q7
vst1.64 {q8}, [r0,:128] @ next round tweak
vld1.8 {q6-q7}, [r7]!
vld1.8 {q6,q7}, [r7]!
veor q5, q5, q13
#ifndef BSAES_ASM_EXTENDED_KEY
add r4, sp, #0x90 @ pass key schedule
@ -2176,22 +2205,22 @@ bsaes_xts_decrypt:
bl _bsaes_decrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q10-q11}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10,q11}, [r0,:128]!
veor q0, q0, q8
vld1.64 {q12-q13}, [r0,:128]!
vld1.64 {q12,q13}, [r0,:128]!
veor q1, q1, q9
veor q8, q6, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
veor q9, q4, q11
vld1.64 {q14-q15}, [r0,:128]!
vld1.64 {q14,q15}, [r0,:128]!
veor q10, q2, q12
vst1.8 {q8-q9}, [r8]!
vst1.8 {q8,q9}, [r8]!
veor q11, q7, q13
veor q12, q3, q14
vst1.8 {q10-q11}, [r8]!
vst1.8 {q10,q11}, [r8]!
veor q13, q5, q15
vst1.8 {q12-q13}, [r8]!
vst1.8 {q12,q13}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
@ -2287,20 +2316,20 @@ bsaes_xts_decrypt:
bl _bsaes_decrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q10-q11}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10,q11}, [r0,:128]!
veor q0, q0, q8
vld1.64 {q12-q13}, [r0,:128]!
vld1.64 {q12,q13}, [r0,:128]!
veor q1, q1, q9
veor q8, q6, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
veor q9, q4, q11
vld1.64 {q14}, [r0,:128]!
veor q10, q2, q12
vst1.8 {q8-q9}, [r8]!
vst1.8 {q8,q9}, [r8]!
veor q11, q7, q13
veor q12, q3, q14
vst1.8 {q10-q11}, [r8]!
vst1.8 {q10,q11}, [r8]!
vst1.8 {q12}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
@ -2321,18 +2350,18 @@ bsaes_xts_decrypt:
bl _bsaes_decrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q10-q11}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10,q11}, [r0,:128]!
veor q0, q0, q8
vld1.64 {q12-q13}, [r0,:128]!
vld1.64 {q12,q13}, [r0,:128]!
veor q1, q1, q9
veor q8, q6, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
veor q9, q4, q11
veor q10, q2, q12
vst1.8 {q8-q9}, [r8]!
vst1.8 {q8,q9}, [r8]!
veor q11, q7, q13
vst1.8 {q10-q11}, [r8]!
vst1.8 {q10,q11}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
b .Lxts_dec_done
@ -2350,16 +2379,16 @@ bsaes_xts_decrypt:
bl _bsaes_decrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q10-q11}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10,q11}, [r0,:128]!
veor q0, q0, q8
vld1.64 {q12}, [r0,:128]!
veor q1, q1, q9
veor q8, q6, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
veor q9, q4, q11
veor q10, q2, q12
vst1.8 {q8-q9}, [r8]!
vst1.8 {q8,q9}, [r8]!
vst1.8 {q10}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
@ -2378,14 +2407,14 @@ bsaes_xts_decrypt:
bl _bsaes_decrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q10-q11}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10,q11}, [r0,:128]!
veor q0, q0, q8
veor q1, q1, q9
veor q8, q6, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
veor q9, q4, q11
vst1.8 {q8-q9}, [r8]!
vst1.8 {q8,q9}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
b .Lxts_dec_done
@ -2403,12 +2432,12 @@ bsaes_xts_decrypt:
bl _bsaes_decrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
vld1.64 {q10}, [r0,:128]!
veor q0, q0, q8
veor q1, q1, q9
veor q8, q6, q10
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
vst1.8 {q8}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
@ -2427,10 +2456,10 @@ bsaes_xts_decrypt:
bl _bsaes_decrypt8
vld1.64 {q8-q9}, [r0,:128]!
vld1.64 {q8,q9}, [r0,:128]!
veor q0, q0, q8
veor q1, q1, q9
vst1.8 {q0-q1}, [r8]!
vst1.8 {q0,q1}, [r8]!
vld1.64 {q8}, [r0,:128] @ next round tweak
b .Lxts_dec_done
@ -2514,8 +2543,8 @@ bsaes_xts_decrypt:
#ifdef XTS_CHAIN_TWEAK
ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak
#endif
.Lxts_dec_bzero: @ wipe key schedule [if any]
vstmia sp!, {q0-q1}
.Lxts_dec_bzero:@ wipe key schedule [if any]
vstmia sp!, {q0,q1}
cmp sp, r0
bne .Lxts_dec_bzero
@ -2524,7 +2553,7 @@ bsaes_xts_decrypt:
vst1.8 {q8}, [r1]
#endif
VFP_ABI_POP
ldmia sp!, {r4-r10, pc} @ return
ldmia sp!, {r4,r5,r6,r7,r8,r9,r10, pc} @ return
.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
#endif
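
The XTS paths above advance the tweak between blocks by multiplying it by x in GF(2^128); the two-word constant at .Lxts_magic (1, 0x87) supplies the reduction term. A minimal sketch of the same update, assuming the tweak is kept as two little-endian 64-bit words and using an illustrative function name:

#include <stdint.h>

/* Multiply the 128-bit XTS tweak by x in GF(2^128): shift left one bit and
 * fold the carried-out bit back in with 0x87, i.e. reduce modulo
 * x^128 + x^7 + x^2 + x + 1. */
static void xts_tweak_double(uint64_t t[2])      /* t[0] = low 64 bits */
{
    uint64_t carry = t[1] >> 63;                 /* bit about to fall off the top */

    t[1] = (t[1] << 1) | (t[0] >> 63);
    t[0] = (t[0] << 1) ^ (carry * 0x87);
}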

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -3,12 +3,16 @@
#include "arm_arch.h"
.text
.code 32
#ifdef __clang__
#if defined(__thumb2__) || defined(__clang__)
.syntax unified
#define ldrplb ldrbpl
#define ldrneb ldrbne
#endif
#if defined(__thumb2__)
.thumb
#else
.code 32
#endif
.type rem_4bit,%object
.align 5
@ -21,22 +25,30 @@ rem_4bit:
.type rem_4bit_get,%function
rem_4bit_get:
sub r2,pc,#8
sub r2,r2,#32 @ &rem_4bit
#if defined(__thumb2__)
adr r2,rem_4bit
#else
sub r2,pc,#8+32 @ &rem_4bit
#endif
b .Lrem_4bit_got
nop
nop
.size rem_4bit_get,.-rem_4bit_get
.global gcm_ghash_4bit
.globl gcm_ghash_4bit
.type gcm_ghash_4bit,%function
.align 4
gcm_ghash_4bit:
sub r12,pc,#8
#if defined(__thumb2__)
adr r12,rem_4bit
#else
sub r12,pc,#8+48 @ &rem_4bit
#endif
add r3,r2,r3 @ r3 to point at the end
stmdb sp!,{r3-r11,lr} @ save r3/end too
sub r12,r12,#48 @ &rem_4bit
stmdb sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr} @ save r3/end too
ldmia r12,{r4-r11} @ copy rem_4bit ...
stmdb sp!,{r4-r11} @ ... to stack
ldmia r12,{r4,r5,r6,r7,r8,r9,r10,r11} @ copy rem_4bit ...
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11} @ ... to stack
ldrb r12,[r2,#15]
ldrb r14,[r0,#15]
@ -47,12 +59,12 @@ gcm_ghash_4bit:
mov r3,#14
add r7,r1,r12,lsl#4
ldmia r7,{r4-r7} @ load Htbl[nlo]
ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo]
add r11,r1,r14
ldrb r12,[r2,#14]
and r14,r4,#0xf @ rem
ldmia r11,{r8-r11} @ load Htbl[nhi]
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
add r14,r14,r14
eor r4,r8,r4,lsr#4
ldrh r8,[sp,r14] @ rem_4bit[rem]
@ -73,13 +85,16 @@ gcm_ghash_4bit:
and r12,r4,#0xf @ rem
subs r3,r3,#1
add r12,r12,r12
ldmia r11,{r8-r11} @ load Htbl[nlo]
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo]
eor r4,r8,r4,lsr#4
eor r4,r4,r5,lsl#28
eor r5,r9,r5,lsr#4
eor r5,r5,r6,lsl#28
ldrh r8,[sp,r12] @ rem_4bit[rem]
eor r6,r10,r6,lsr#4
#ifdef __thumb2__
it pl
#endif
ldrplb r12,[r2,r3]
eor r6,r6,r7,lsl#28
eor r7,r11,r7,lsr#4
@ -88,8 +103,11 @@ gcm_ghash_4bit:
and r14,r4,#0xf @ rem
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
add r14,r14,r14
ldmia r11,{r8-r11} @ load Htbl[nhi]
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
eor r4,r8,r4,lsr#4
#ifdef __thumb2__
it pl
#endif
ldrplb r8,[r0,r3]
eor r4,r4,r5,lsl#28
eor r5,r9,r5,lsr#4
@ -97,8 +115,14 @@ gcm_ghash_4bit:
eor r5,r5,r6,lsl#28
eor r6,r10,r6,lsr#4
eor r6,r6,r7,lsl#28
#ifdef __thumb2__
it pl
#endif
eorpl r12,r12,r8
eor r7,r11,r7,lsr#4
#ifdef __thumb2__
itt pl
#endif
andpl r14,r12,#0xf0
andpl r12,r12,#0x0f
eor r7,r7,r9,lsl#16 @ ^= rem_4bit[rem]
@ -136,6 +160,10 @@ gcm_ghash_4bit:
strb r10,[r0,#8+1]
strb r11,[r0,#8]
#endif
#ifdef __thumb2__
it ne
#endif
ldrneb r12,[r2,#15]
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
rev r6,r6
@ -171,19 +199,19 @@ gcm_ghash_4bit:
add sp,sp,#36
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r11,pc}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
ldmia sp!,{r4-r11,lr}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size gcm_ghash_4bit,.-gcm_ghash_4bit
.global gcm_gmult_4bit
.globl gcm_gmult_4bit
.type gcm_gmult_4bit,%function
gcm_gmult_4bit:
stmdb sp!,{r4-r11,lr}
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
ldrb r12,[r0,#15]
b rem_4bit_get
.Lrem_4bit_got:
@ -192,12 +220,12 @@ gcm_gmult_4bit:
mov r3,#14
add r7,r1,r12,lsl#4
ldmia r7,{r4-r7} @ load Htbl[nlo]
ldmia r7,{r4,r5,r6,r7} @ load Htbl[nlo]
ldrb r12,[r0,#14]
add r11,r1,r14
and r14,r4,#0xf @ rem
ldmia r11,{r8-r11} @ load Htbl[nhi]
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
add r14,r14,r14
eor r4,r8,r4,lsr#4
ldrh r8,[r2,r14] @ rem_4bit[rem]
@ -216,13 +244,16 @@ gcm_gmult_4bit:
and r12,r4,#0xf @ rem
subs r3,r3,#1
add r12,r12,r12
ldmia r11,{r8-r11} @ load Htbl[nlo]
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nlo]
eor r4,r8,r4,lsr#4
eor r4,r4,r5,lsl#28
eor r5,r9,r5,lsr#4
eor r5,r5,r6,lsl#28
ldrh r8,[r2,r12] @ rem_4bit[rem]
eor r6,r10,r6,lsr#4
#ifdef __thumb2__
it pl
#endif
ldrplb r12,[r0,r3]
eor r6,r6,r7,lsl#28
eor r7,r11,r7,lsr#4
@ -231,7 +262,7 @@ gcm_gmult_4bit:
and r14,r4,#0xf @ rem
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
add r14,r14,r14
ldmia r11,{r8-r11} @ load Htbl[nhi]
ldmia r11,{r8,r9,r10,r11} @ load Htbl[nhi]
eor r4,r8,r4,lsr#4
eor r4,r4,r5,lsl#28
eor r5,r9,r5,lsr#4
@ -240,6 +271,9 @@ gcm_gmult_4bit:
eor r6,r10,r6,lsr#4
eor r6,r6,r7,lsl#28
eor r7,r11,r7,lsr#4
#ifdef __thumb2__
itt pl
#endif
andpl r14,r12,#0xf0
andpl r12,r12,#0x0f
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
@ -305,25 +339,25 @@ gcm_gmult_4bit:
#endif
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r11,pc}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
ldmia sp!,{r4-r11,lr}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size gcm_gmult_4bit,.-gcm_gmult_4bit
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.global gcm_init_neon
.globl gcm_init_neon
.type gcm_init_neon,%function
.align 4
gcm_init_neon:
vld1.64 d7,[r1,:64]! @ load H
vld1.64 d7,[r1]! @ load H
vmov.i8 q8,#0xe1
vld1.64 d6,[r1,:64]
vld1.64 d6,[r1]
vshl.i64 d17,#57
vshr.u64 d16,#63 @ t0=0xc2....01
vdup.8 q9,d7[7]
@ -338,14 +372,14 @@ gcm_init_neon:
bx lr @ bx lr
.size gcm_init_neon,.-gcm_init_neon
.global gcm_gmult_neon
.globl gcm_gmult_neon
.type gcm_gmult_neon,%function
.align 4
gcm_gmult_neon:
vld1.64 d7,[r0,:64]! @ load Xi
vld1.64 d6,[r0,:64]!
vld1.64 d7,[r0]! @ load Xi
vld1.64 d6,[r0]!
vmov.i64 d29,#0x0000ffffffffffff
vldmia r1,{d26-d27} @ load twisted H
vldmia r1,{d26,d27} @ load twisted H
vmov.i64 d30,#0x00000000ffffffff
#ifdef __ARMEL__
vrev64.8 q3,q3
@ -356,14 +390,14 @@ gcm_gmult_neon:
b .Lgmult_neon
.size gcm_gmult_neon,.-gcm_gmult_neon
.global gcm_ghash_neon
.globl gcm_ghash_neon
.type gcm_ghash_neon,%function
.align 4
gcm_ghash_neon:
vld1.64 d1,[r0,:64]! @ load Xi
vld1.64 d0,[r0,:64]!
vld1.64 d1,[r0]! @ load Xi
vld1.64 d0,[r0]!
vmov.i64 d29,#0x0000ffffffffffff
vldmia r1,{d26-d27} @ load twisted H
vldmia r1,{d26,d27} @ load twisted H
vmov.i64 d30,#0x00000000ffffffff
#ifdef __ARMEL__
vrev64.8 q0,q0
@ -520,11 +554,12 @@ gcm_ghash_neon:
vrev64.8 q0,q0
#endif
sub r0,#16
vst1.64 d1,[r0,:64]! @ write out Xi
vst1.64 d0,[r0,:64]
vst1.64 d1,[r0]! @ write out Xi
vst1.64 d0,[r0]
bx lr @ bx lr
.size gcm_ghash_neon,.-gcm_ghash_neon
#endif
.asciz "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
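
gcm_gmult_4bit and gcm_ghash_4bit above walk Xi a nibble at a time through a 16-entry Htable and fold the bits shifted off the bottom back in through the rem_4bit constants. The reference sketch below shows the same GF(2^128) multiplication one bit at a time (the nibble-table code computes the identical product, just four bits per step); the struct and function names are illustrative:

#include <stdint.h>

typedef struct { uint64_t hi, lo; } u128;        /* hi holds bits 0..63 in GCM bit order */

/* Z = X * Y in GF(2^128) with the GHASH bit ordering and the
 * reduction polynomial x^128 + x^7 + x^2 + x + 1 (0xE1 in the top byte). */
static u128 gf128_mul(u128 X, u128 Y)
{
    u128 Z = { 0, 0 }, V = X;

    for (int i = 0; i < 128; i++) {
        uint64_t bit = (i < 64) ? (Y.hi >> (63 - i)) & 1
                                : (Y.lo >> (127 - i)) & 1;
        if (bit) {                               /* Z ^= V when bit i of Y is set */
            Z.hi ^= V.hi;
            Z.lo ^= V.lo;
        }
        uint64_t lsb = V.lo & 1;                 /* V = V * x (a right shift in this order) */
        V.lo = (V.lo >> 1) | (V.hi << 63);
        V.hi >>= 1;
        if (lsb)
            V.hi ^= 0xE100000000000000ULL;       /* fold the reduction back in */
    }
    return Z;
}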


@ -2,10 +2,12 @@
/* Do not modify. This file is auto-generated from ghashv8-armx.pl. */
#include "arm_arch.h"
#if __ARM_MAX_ARCH__>=7
.text
.fpu neon
.code 32
.global gcm_init_v8
#undef __thumb2__
.globl gcm_init_v8
.type gcm_init_v8,%function
.align 4
gcm_init_v8:
@ -28,63 +30,62 @@ gcm_init_v8:
@ calculate H^2
vext.8 q8,q12,q12,#8 @ Karatsuba pre-processing
.byte 0xa8,0x0e,0xa8,0xf2 @ pmull q0,q12,q12
.byte 0xa8,0x0e,0xa8,0xf2 @ pmull q0,q12,q12
veor q8,q8,q12
.byte 0xa9,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q12
.byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8
.byte 0xa9,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q12
.byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
veor q1,q1,q9
veor q1,q1,q10
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
veor q0,q1,q10
vext.8 q10,q0,q0,#8 @ 2nd phase
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q10,q10,q2
veor q14,q0,q10
vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing
veor q9,q9,q14
vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed
vst1.64 {q13-q14},[r0] @ store Htable[1..2]
vst1.64 {q13,q14},[r0]! @ store Htable[1..2]
bx lr
.size gcm_init_v8,.-gcm_init_v8
.global gcm_gmult_v8
.globl gcm_gmult_v8
.type gcm_gmult_v8,%function
.align 4
gcm_gmult_v8:
vld1.64 {q9},[r0] @ load Xi
vmov.i8 q11,#0xe1
vld1.64 {q12-q13},[r1] @ load twisted H, ...
vld1.64 {q12,q13},[r1] @ load twisted H, ...
vshl.u64 q11,q11,#57
#ifndef __ARMEB__
vrev64.8 q9,q9
#endif
vext.8 q3,q9,q9,#8
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
veor q1,q1,q9
veor q1,q1,q10
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
veor q0,q1,q10
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q10,q10,q2
veor q0,q0,q10
@ -96,28 +97,28 @@ gcm_gmult_v8:
bx lr
.size gcm_gmult_v8,.-gcm_gmult_v8
.global gcm_ghash_v8
.globl gcm_ghash_v8
.type gcm_ghash_v8,%function
.align 4
gcm_ghash_v8:
vstmdb sp!,{d8-d15} @ 32-bit ABI says so
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
vld1.64 {q0},[r0] @ load [rotated] Xi
@ "[rotated]" means that
@ loaded value would have
@ to be rotated in order to
@ make it appear as in
@ alorithm specification
@ algorithm specification
subs r3,r3,#32 @ see if r3 is 32 or larger
mov r12,#16 @ r12 is used as post-
@ increment for input pointer;
@ as loop is modulo-scheduled
@ r12 is zeroed just in time
@ to preclude oversteping
@ to preclude overstepping
@ inp[len], which means that
@ last block[s] are actually
@ loaded twice, but last
@ copy is not processed
vld1.64 {q12-q13},[r1]! @ load twisted H, ..., H^2
vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2
vmov.i8 q11,#0xe1
vld1.64 {q14},[r1]
moveq r12,#0 @ is it time to zero r12?
@ -136,23 +137,23 @@ gcm_ghash_v8:
#endif
vext.8 q7,q9,q9,#8
veor q3,q3,q0 @ I[i]^=Xi
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q9,q9,q7 @ Karatsuba pre-processing
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
b .Loop_mod2x_v8
.align 4
.Loop_mod2x_v8:
vext.8 q10,q3,q3,#8
subs r3,r3,#32 @ is there more data?
.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
movlo r12,#0 @ is it time to zero r12?
.byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9
.byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9
veor q10,q10,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
veor q0,q0,q4 @ accumulate
.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2]
veor q2,q2,q6
@ -167,7 +168,7 @@ gcm_ghash_v8:
vrev64.8 q8,q8
#endif
veor q1,q1,q10
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
#ifndef __ARMEB__
vrev64.8 q9,q9
@ -177,15 +178,15 @@ gcm_ghash_v8:
vext.8 q7,q9,q9,#8
vext.8 q3,q8,q8,#8
veor q0,q1,q10
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q3,q3,q2 @ accumulate q3 early
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q3,q3,q10
veor q9,q9,q7 @ Karatsuba pre-processing
veor q3,q3,q0
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
bhs .Loop_mod2x_v8 @ there was at least 32 more bytes
veor q2,q2,q10
@ -198,23 +199,23 @@ gcm_ghash_v8:
veor q3,q3,q0 @ inp^=Xi
veor q9,q8,q10 @ q9 is rotated inp^Xi
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
veor q1,q1,q9
veor q1,q1,q10
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
.byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction
vmov d4,d3 @ Xh|Xm - 256-bit result
vmov d3,d0 @ Xm is rotated Xl
veor q0,q1,q10
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
.byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
veor q10,q10,q2
veor q0,q0,q10
@ -225,8 +226,10 @@ gcm_ghash_v8:
vext.8 q0,q0,q0,#8
vst1.64 {q0},[r0] @ write out Xi
vldmia sp!,{d8-d15} @ 32-bit ABI says so
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so
bx lr
.size gcm_ghash_v8,.-gcm_ghash_v8
.asciz "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
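
The .byte sequences above encode PMULL/PMULL2, and the "Karatsuba pre/post-processing" comments describe how gcm_ghash_v8 builds a 128x128-bit carry-less product from only three 64x64-bit ones. A sketch of that identity in C, with a slow portable clmul64() standing in for the pmull instruction; both helper names are illustrative:

#include <stdint.h>

typedef struct { uint64_t hi, lo; } u128;

static u128 clmul64(uint64_t a, uint64_t b)      /* slow stand-in for pmull */
{
    u128 r = { 0, 0 };
    for (int i = 0; i < 64; i++)
        if ((b >> i) & 1) {
            r.lo ^= a << i;
            r.hi ^= i ? a >> (64 - i) : 0;
        }
    return r;
}

/* r[0..3] = (ah:al) * (bh:bl) as a 256-bit carry-less product,
 * using three multiplies instead of four (Karatsuba over GF(2)). */
static void clmul128(uint64_t ah, uint64_t al,
                     uint64_t bh, uint64_t bl, uint64_t r[4])
{
    u128 lo  = clmul64(al, bl);                  /* H.lo * Xi.lo              */
    u128 hi  = clmul64(ah, bh);                  /* H.hi * Xi.hi              */
    u128 mid = clmul64(ah ^ al, bh ^ bl);        /* Karatsuba pre-processing  */

    mid.hi ^= hi.hi ^ lo.hi;                     /* Karatsuba post-processing */
    mid.lo ^= hi.lo ^ lo.lo;

    r[0] = lo.lo;                                /* bits   0..63  */
    r[1] = lo.hi ^ mid.lo;                       /* bits  64..127 */
    r[2] = hi.lo ^ mid.hi;                       /* bits 128..191 */
    r[3] = hi.hi;                                /* bits 192..255 */
}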

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -3,23 +3,32 @@
#include "arm_arch.h"
.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif
.global sha1_block_data_order
.globl sha1_block_data_order
.type sha1_block_data_order,%function
.align 5
sha1_block_data_order:
#if __ARM_MAX_ARCH__>=7
sub r3,pc,#8 @ sha1_block_data_order
.Lsha1_block:
adr r3,.Lsha1_block
ldr r12,.LOPENSSL_armcap
ldr r12,[r3,r12] @ OPENSSL_armcap_P
#ifdef __APPLE__
ldr r12,[r12]
#endif
tst r12,#ARMV8_SHA1
bne .LARMv8
tst r12,#ARMV7_NEON
bne .LNEON
#endif
stmdb sp!,{r4-r12,lr}
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
ldmia r0,{r3,r4,r5,r6,r7}
.Lloop:
@ -155,7 +164,12 @@ sha1_block_data_order:
eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
str r9,[r14,#-4]!
add r3,r3,r10 @ E+=F_00_19(B,C,D)
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp
#endif
bne .L_00_15 @ [((11+4)*5+2)*3]
sub sp,sp,#25*4
#if __ARM_ARCH__<7
@ -335,7 +349,12 @@ sha1_block_data_order:
@ F_xx_xx
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_20_39(B,C,D)
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp @ preserve carry
#endif
bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
@ -427,7 +446,12 @@ sha1_block_data_order:
add r3,r3,r9 @ E+=X[i]
add r3,r3,r10 @ E+=F_40_59(B,C,D)
add r3,r3,r11,ror#2
#if defined(__thumb2__)
mov r12,sp
teq r14,r12
#else
teq r14,sp
#endif
bne .L_40_59 @ [+((12+5)*5+2)*4]
ldr r8,.LK_60_79
@ -447,25 +471,26 @@ sha1_block_data_order:
bne .Lloop @ [+18], total 1307
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4-r12,lr}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha1_block_data_order,.-sha1_block_data_order
.align 5
.LK_00_19: .word 0x5a827999
.LK_20_39: .word 0x6ed9eba1
.LK_40_59: .word 0x8f1bbcdc
.LK_60_79: .word 0xca62c1d6
.LK_00_19:.word 0x5a827999
.LK_20_39:.word 0x6ed9eba1
.LK_40_59:.word 0x8f1bbcdc
.LK_60_79:.word 0xca62c1d6
#if __ARM_MAX_ARCH__>=7
.LOPENSSL_armcap:
.word OPENSSL_armcap_P-sha1_block_data_order
.word OPENSSL_armcap_P-.Lsha1_block
#endif
.asciz "SHA1 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 5
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
@ -475,21 +500,21 @@ sha1_block_data_order:
.align 4
sha1_block_data_order_neon:
.LNEON:
stmdb sp!,{r4-r12,lr}
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
@ dmb @ errata #451034 on early Cortex A8
@ vstmdb sp!,{d8-d15} @ ABI specification says so
mov r14,sp
sub sp,sp,#64 @ alloca
sub r12,sp,#64
adr r8,.LK_00_19
bic sp,sp,#15 @ align for 128-bit stores
bic r12,r12,#15 @ align for 128-bit stores
ldmia r0,{r3,r4,r5,r6,r7} @ load context
mov r12,sp
mov sp,r12 @ alloca
vld1.8 {q0-q1},[r1]! @ handles unaligned
vld1.8 {q0,q1},[r1]! @ handles unaligned
veor q15,q15,q15
vld1.8 {q2-q3},[r1]!
vld1.8 {q2,q3},[r1]!
vld1.32 {d28[],d29[]},[r8,:32]! @ load K_00_19
vrev32.8 q0,q0 @ yes, even on
vrev32.8 q1,q1 @ big-endian...
@ -1178,11 +1203,12 @@ sha1_block_data_order_neon:
sub r12,r12,#64
teq r1,r2
sub r8,r8,#16
it eq
subeq r1,r1,#64
vld1.8 {q0-q1},[r1]!
vld1.8 {q0,q1},[r1]!
ldr r9,[sp,#4]
eor r11,r10,r6
vld1.8 {q2-q3},[r1]!
vld1.8 {q2,q3},[r1]!
add r3,r3,r4,ror#27
mov r5,r5,ror#2
vld1.32 {d28[],d29[]},[r8,:32]!
@ -1307,23 +1333,33 @@ sha1_block_data_order_neon:
add r4,r4,r10
add r5,r5,r11
add r6,r6,r12
it eq
moveq sp,r14
add r7,r7,r9
it ne
ldrne r9,[sp]
stmia r0,{r3,r4,r5,r6,r7}
itt ne
addne r12,sp,#3*16
bne .Loop_neon
@ vldmia sp!,{d8-d15}
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
.size sha1_block_data_order_neon,.-sha1_block_data_order_neon
#endif
#if __ARM_MAX_ARCH__>=7
# if defined(__thumb2__)
# define INST(a,b,c,d) .byte c,d|0xf,a,b
# else
# define INST(a,b,c,d) .byte a,b,c,d|0x10
# endif
.type sha1_block_data_order_armv8,%function
.align 5
sha1_block_data_order_armv8:
.LARMv8:
vstmdb sp!,{d8-d15} @ ABI specification says so
vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
veor q1,q1,q1
adr r3,.LK_00_19
@ -1336,8 +1372,8 @@ sha1_block_data_order_armv8:
vld1.32 {d22[],d23[]},[r3,:32]
.Loop_v8:
vld1.8 {q4-q5},[r1]!
vld1.8 {q6-q7},[r1]!
vld1.8 {q4,q5},[r1]!
vld1.8 {q6,q7},[r1]!
vrev32.8 q4,q4
vrev32.8 q5,q5
@ -1348,98 +1384,98 @@ sha1_block_data_order_armv8:
vadd.i32 q13,q8,q5
vrev32.8 q7,q7
.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 0
.byte 0x68,0x0c,0x02,0xf2 @ sha1c q0,q1,q12
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 0
INST(0x68,0x0c,0x02,0xe2) @ sha1c q0,q1,q12
vadd.i32 q12,q8,q6
.byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 1
.byte 0x6a,0x0c,0x06,0xf2 @ sha1c q0,q3,q13
INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 1
INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13
vadd.i32 q13,q8,q7
.byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
.byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 2
.byte 0x68,0x0c,0x04,0xf2 @ sha1c q0,q2,q12
INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7
INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 2
INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12
vadd.i32 q12,q8,q4
.byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
.byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 3
.byte 0x6a,0x0c,0x06,0xf2 @ sha1c q0,q3,q13
INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4
INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 3
INST(0x6a,0x0c,0x06,0xe2) @ sha1c q0,q3,q13
vadd.i32 q13,q9,q5
.byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
.byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 4
.byte 0x68,0x0c,0x04,0xf2 @ sha1c q0,q2,q12
INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5
INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 4
INST(0x68,0x0c,0x04,0xe2) @ sha1c q0,q2,q12
vadd.i32 q12,q9,q6
.byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
.byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 5
.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6
INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 5
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q13,q9,q7
.byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
.byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 6
.byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7
INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 6
INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12
vadd.i32 q12,q9,q4
.byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
.byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 7
.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4
INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 7
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q13,q9,q5
.byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
.byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 8
.byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5
INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 8
INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12
vadd.i32 q12,q10,q6
.byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
.byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 9
.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6
INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 9
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q13,q10,q7
.byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
.byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 10
.byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7
INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 10
INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12
vadd.i32 q12,q10,q4
.byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
.byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 11
.byte 0x6a,0x0c,0x26,0xf2 @ sha1m q0,q3,q13
INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4
INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 11
INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13
vadd.i32 q13,q10,q5
.byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
.byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 12
.byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5
INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 12
INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12
vadd.i32 q12,q10,q6
.byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
.byte 0x4c,0x8c,0x3a,0xf2 @ sha1su0 q4,q5,q6
.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 13
.byte 0x6a,0x0c,0x26,0xf2 @ sha1m q0,q3,q13
INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6
INST(0x4c,0x8c,0x3a,0xe2) @ sha1su0 q4,q5,q6
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 13
INST(0x6a,0x0c,0x26,0xe2) @ sha1m q0,q3,q13
vadd.i32 q13,q11,q7
.byte 0x8e,0x83,0xba,0xf3 @ sha1su1 q4,q7
.byte 0x4e,0xac,0x3c,0xf2 @ sha1su0 q5,q6,q7
.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 14
.byte 0x68,0x0c,0x24,0xf2 @ sha1m q0,q2,q12
INST(0x8e,0x83,0xba,0xf3) @ sha1su1 q4,q7
INST(0x4e,0xac,0x3c,0xe2) @ sha1su0 q5,q6,q7
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 14
INST(0x68,0x0c,0x24,0xe2) @ sha1m q0,q2,q12
vadd.i32 q12,q11,q4
.byte 0x88,0xa3,0xba,0xf3 @ sha1su1 q5,q4
.byte 0x48,0xcc,0x3e,0xf2 @ sha1su0 q6,q7,q4
.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 15
.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
INST(0x88,0xa3,0xba,0xf3) @ sha1su1 q5,q4
INST(0x48,0xcc,0x3e,0xe2) @ sha1su0 q6,q7,q4
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 15
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q13,q11,q5
.byte 0x8a,0xc3,0xba,0xf3 @ sha1su1 q6,q5
.byte 0x4a,0xec,0x38,0xf2 @ sha1su0 q7,q4,q5
.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 16
.byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
INST(0x8a,0xc3,0xba,0xf3) @ sha1su1 q6,q5
INST(0x4a,0xec,0x38,0xe2) @ sha1su0 q7,q4,q5
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 16
INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12
vadd.i32 q12,q11,q6
.byte 0x8c,0xe3,0xba,0xf3 @ sha1su1 q7,q6
.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 17
.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
INST(0x8c,0xe3,0xba,0xf3) @ sha1su1 q7,q6
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 17
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q13,q11,q7
.byte 0xc0,0x62,0xb9,0xf3 @ sha1h q3,q0 @ 18
.byte 0x68,0x0c,0x14,0xf2 @ sha1p q0,q2,q12
INST(0xc0,0x62,0xb9,0xf3) @ sha1h q3,q0 @ 18
INST(0x68,0x0c,0x14,0xe2) @ sha1p q0,q2,q12
.byte 0xc0,0x42,0xb9,0xf3 @ sha1h q2,q0 @ 19
.byte 0x6a,0x0c,0x16,0xf2 @ sha1p q0,q3,q13
INST(0xc0,0x42,0xb9,0xf3) @ sha1h q2,q0 @ 19
INST(0x6a,0x0c,0x16,0xe2) @ sha1p q0,q3,q13
vadd.i32 q1,q1,q2
vadd.i32 q0,q0,q14
@ -1448,7 +1484,7 @@ sha1_block_data_order_armv8:
vst1.32 {q0},[r0]!
vst1.32 {d2[0]},[r0]
vldmia sp!,{d8-d15}
vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
bx lr @ bx lr
.size sha1_block_data_order_armv8,.-sha1_block_data_order_armv8
#endif
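
The regenerated sha1_block_data_order prologue above now locates OPENSSL_armcap_P through the local .Lsha1_block label (with an extra indirection on __APPLE__) before testing the ARMV8_SHA1 and ARMV7_NEON bits. Roughly, the dispatch it implements looks like the C sketch below; the _armv8 and _neon entry points exist in this file, while the scalar fallback name is only a placeholder:

#include <stddef.h>
#include <stdint.h>
#include "arm_arch.h"                    /* ARMV7_NEON, ARMV8_SHA1 bit masks */

extern unsigned int OPENSSL_armcap_P;

void sha1_block_data_order_armv8(uint32_t *state, const void *inp, size_t num);
void sha1_block_data_order_neon(uint32_t *state, const void *inp, size_t num);
void sha1_block_data_order_scalar(uint32_t *state, const void *inp, size_t num); /* placeholder */

void sha1_block_data_order(uint32_t *state, const void *inp, size_t num)
{
    if (OPENSSL_armcap_P & ARMV8_SHA1)           /* SHA-1 instructions present */
        sha1_block_data_order_armv8(state, inp, num);
    else if (OPENSSL_armcap_P & ARMV7_NEON)      /* NEON-assisted path */
        sha1_block_data_order_neon(state, inp, num);
    else
        sha1_block_data_order_scalar(state, inp, num);
}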


@ -1,5 +1,12 @@
/* $FreeBSD$ */
/* Do not modify. This file is auto-generated from sha256-armv4.pl. */
@ Copyright 2007-2018 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the OpenSSL license (the "License"). You may not use
@ this file except in compliance with the License. You can obtain a copy
@ in the file LICENSE in the source distribution or at
@ https://www.openssl.org/source/license.html
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ -46,15 +53,11 @@
#endif
.text
#if __ARM_ARCH__<7
.code 32
#else
#if defined(__thumb2__)
.syntax unified
# ifdef __thumb2__
.thumb
# else
#else
.code 32
# endif
#endif
.type K256,%object
@ -80,21 +83,25 @@ K256:
.word 0 @ terminator
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.LOPENSSL_armcap:
.word OPENSSL_armcap_P-sha256_block_data_order
.word OPENSSL_armcap_P-.Lsha256_block_data_order
#endif
.align 5
.global sha256_block_data_order
.globl sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
#if __ARM_ARCH__<7
.Lsha256_block_data_order:
#if __ARM_ARCH__<7 && !defined(__thumb2__)
sub r3,pc,#8 @ sha256_block_data_order
#else
adr r3,.
adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
ldr r12,[r3,r12] @ OPENSSL_armcap_P
#ifdef __APPLE__
ldr r12,[r12]
#endif
tst r12,#ARMV8_SHA256
bne .LARMv8
tst r12,#ARMV7_NEON
@ -121,7 +128,9 @@ sha256_block_data_order:
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 0
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
@ -177,7 +186,9 @@ sha256_block_data_order:
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 1
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
@ -233,7 +244,9 @@ sha256_block_data_order:
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 2
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
@ -289,7 +302,9 @@ sha256_block_data_order:
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 3
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
@ -345,7 +360,9 @@ sha256_block_data_order:
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 4
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
@ -401,7 +418,9 @@ sha256_block_data_order:
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
@ -457,7 +476,9 @@ sha256_block_data_order:
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 6
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
@ -513,7 +534,9 @@ sha256_block_data_order:
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 7
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
@ -569,7 +592,9 @@ sha256_block_data_order:
eor r0,r8,r8,ror#5
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r8,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 8
add r4,r4,r12 @ h+=Maj(a,b,c) from the past
@ -625,7 +650,9 @@ sha256_block_data_order:
eor r0,r7,r7,ror#5
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r7,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 9
add r11,r11,r3 @ h+=Maj(a,b,c) from the past
@ -681,7 +708,9 @@ sha256_block_data_order:
eor r0,r6,r6,ror#5
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r6,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 10
add r10,r10,r12 @ h+=Maj(a,b,c) from the past
@ -737,7 +766,9 @@ sha256_block_data_order:
eor r0,r5,r5,ror#5
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r5,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 11
add r9,r9,r3 @ h+=Maj(a,b,c) from the past
@ -793,7 +824,9 @@ sha256_block_data_order:
eor r0,r4,r4,ror#5
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r4,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 12
add r8,r8,r12 @ h+=Maj(a,b,c) from the past
@ -849,7 +882,9 @@ sha256_block_data_order:
eor r0,r11,r11,ror#5
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r11,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 13
add r7,r7,r3 @ h+=Maj(a,b,c) from the past
@ -905,7 +940,9 @@ sha256_block_data_order:
eor r0,r10,r10,ror#5
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
eor r0,r0,r10,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 14
add r6,r6,r12 @ h+=Maj(a,b,c) from the past
@ -961,7 +998,9 @@ sha256_block_data_order:
eor r0,r9,r9,ror#5
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
eor r0,r0,r9,ror#19 @ Sigma1(e)
# ifndef __ARMEB__
rev r2,r2
# endif
#else
@ ldrb r2,[r1,#3] @ 15
add r5,r5,r3 @ h+=Maj(a,b,c) from the past
@ -1794,7 +1833,7 @@ sha256_block_data_order:
eor r12,r12,r6 @ Maj(a,b,c)
add r4,r4,r0,ror#2 @ h+=Sigma0(a)
@ add r4,r4,r12 @ h+=Maj(a,b,c)
#if __ARM_ARCH__>=7
#ifdef __thumb2__
ite eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r3,[sp,#16*4] @ pull ctx
@ -1826,24 +1865,25 @@ sha256_block_data_order:
add sp,sp,#19*4 @ destroy frame
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r11,pc}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
ldmia sp!,{r4-r11,lr}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha256_block_data_order,.-sha256_block_data_order
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.global sha256_block_data_order_neon
.globl sha256_block_data_order_neon
.type sha256_block_data_order_neon,%function
.align 4
.align 5
.skip 16
sha256_block_data_order_neon:
.LNEON:
stmdb sp!,{r4-r12,lr}
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
sub r11,sp,#16*4+16
adr r14,K256
@ -1878,7 +1918,7 @@ sha256_block_data_order_neon:
vst1.32 {q10},[r1,:128]!
vst1.32 {q11},[r1,:128]!
ldmia r0,{r4-r11}
ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
sub r1,r1,#64
ldr r2,[sp,#0]
eor r12,r12,r12
@ -2606,7 +2646,7 @@ sha256_block_data_order_neon:
str r6,[r2],#4
add r11,r11,r1
str r7,[r2],#4
stmia r2,{r8-r11}
stmia r2,{r8,r9,r10,r11}
ittte ne
movne r1,sp
@ -2617,12 +2657,12 @@ sha256_block_data_order_neon:
eorne r3,r5,r6
bne .L_00_48
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
# ifdef __thumb2__
# if defined(__thumb2__)
# define INST(a,b,c,d) .byte c,d|0xc,a,b
# else
# define INST(a,b,c,d) .byte a,b,c,d
@ -2633,17 +2673,14 @@ sha256_block_data_order_neon:
sha256_block_data_order_armv8:
.LARMv8:
vld1.32 {q0,q1},[r0]
# ifdef __thumb2__
adr r3,.LARMv8
sub r3,r3,#.LARMv8-K256
# else
sub r3,r3,#256+32
# endif
add r2,r1,r2,lsl#6 @ len to point at the end of inp
b .Loop_v8
.align 4
.Loop_v8:
vld1.8 {q8-q9},[r1]!
vld1.8 {q10-q11},[r1]!
vld1.8 {q8,q9},[r1]!
vld1.8 {q10,q11},[r1]!
vld1.32 {q12},[r3]!
vrev32.8 q8,q8
vrev32.8 q9,q9
@ -2770,7 +2807,8 @@ sha256_block_data_order_armv8:
bx lr @ bx lr
.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
#endif
.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,47,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.comm OPENSSL_armcap_P,4,4


@ -1,6 +1,70 @@
/* $FreeBSD$ */
/* Do not modify. This file is auto-generated from sha512-armv4.pl. */
#include "arm_arch.h"
@ Copyright 2007-2018 The OpenSSL Project Authors. All Rights Reserved.
@
@ Licensed under the OpenSSL license (the "License"). You may not use
@ this file except in compliance with the License. You can obtain a copy
@ in the file LICENSE in the source distribution or at
@ https://www.openssl.org/source/license.html
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@ project. The module is, however, dual licensed under OpenSSL and
@ CRYPTOGAMS licenses depending on where you obtain it. For further
@ details see http://www.openssl.org/~appro/cryptogams/.
@
@ Permission to use under GPL terms is granted.
@ ====================================================================
@ SHA512 block procedure for ARMv4. September 2007.
@ This code is ~4.5 (four and a half) times faster than code generated
@ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
@ Xscale PXA250 core].
@
@ July 2010.
@
@ Rescheduling for dual-issue pipeline resulted in 6% improvement on
@ Cortex A8 core and ~40 cycles per processed byte.
@ February 2011.
@
@ Profiler-assisted and platform-specific optimization resulted in 7%
@ improvement on Coxtex A8 core and ~38 cycles per byte.
@ March 2011.
@
@ Add NEON implementation. On Cortex A8 it was measured to process
@ one byte in 23.3 cycles or ~60% faster than integer-only code.
@ August 2012.
@
@ Improve NEON performance by 12% on Snapdragon S4. In absolute
@ terms it's 22.6 cycles per byte, which is disappointing result.
@ Technical writers asserted that 3-way S4 pipeline can sustain
@ multiple NEON instructions per cycle, but dual NEON issue could
@ not be observed, see http://www.openssl.org/~appro/Snapdragon-S4.html
@ for further details. On side note Cortex-A15 processes one byte in
@ 16 cycles.
@ Byte order [in]dependence. =========================================
@
@ Originally caller was expected to maintain specific *dword* order in
@ h[0-7], namely with most significant dword at *lower* address, which
@ was reflected in below two parameters as 0 and 4. Now caller is
@ expected to maintain native byte order for whole 64-bit values.
#ifndef __KERNEL__
# include "arm_arch.h"
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ 7
# define VFP_ABI_PUSH
# define VFP_ABI_POP
#endif
#ifdef __ARMEL__
# define LO 0
# define HI 4
@ -12,71 +76,86 @@
#endif
.text
#if defined(__thumb2__)
.syntax unified
.thumb
# define adrl adr
#else
.code 32
#endif
.type K512,%object
.align 5
K512:
WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.size K512,.-K512
#if __ARM_MAX_ARCH__>=7
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.LOPENSSL_armcap:
.word OPENSSL_armcap_P-sha512_block_data_order
.word OPENSSL_armcap_P-.Lsha512_block_data_order
.skip 32-4
#else
.skip 32
#endif
.global sha512_block_data_order
.globl sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
.Lsha512_block_data_order:
#if __ARM_ARCH__<7 && !defined(__thumb2__)
sub r3,pc,#8 @ sha512_block_data_order
add r2,r1,r2,lsl#7 @ len to point at the end of inp
#if __ARM_MAX_ARCH__>=7
#else
adr r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
ldr r12,[r3,r12] @ OPENSSL_armcap_P
tst r12,#1
#ifdef __APPLE__
ldr r12,[r12]
#endif
tst r12,#ARMV7_NEON
bne .LNEON
#endif
stmdb sp!,{r4-r12,lr}
add r2,r1,r2,lsl#7 @ len to point at the end of inp
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
sub r14,r3,#672 @ K512
sub sp,sp,#9*8
@ -188,6 +267,9 @@ sha512_block_data_order:
teq r9,#148
ldr r12,[sp,#16+0] @ c.lo
#ifdef __thumb2__
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ -325,6 +407,9 @@ sha512_block_data_order:
teq r9,#23
ldr r12,[sp,#16+0] @ c.lo
#ifdef __thumb2__
it eq @ Thumb2 thing, sanity check in ARM
#endif
orreq r14,r14,#1
@ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
@ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
@ -359,6 +444,9 @@ sha512_block_data_order:
adc r6,r6,r4 @ h += T
tst r14,#1
add r14,r14,#8
#ifdef __thumb2__
ittt eq @ Thumb2 thing, sanity check in ARM
#endif
ldreq r9,[sp,#184+0]
ldreq r10,[sp,#184+4]
beq .L16_79
@ -436,23 +524,28 @@ sha512_block_data_order:
add sp,sp,#8*9 @ destroy frame
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
#else
ldmia sp!,{r4-r12,lr}
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size sha512_block_data_order,.-sha512_block_data_order
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon
.globl sha512_block_data_order_neon
.type sha512_block_data_order_neon,%function
.align 4
sha512_block_data_order_neon:
.LNEON:
dmb @ errata #451034 on early Cortex A8
vstmdb sp!,{d8-d15} @ ABI specification says so
sub r3,r3,#672 @ K512
vldmia r0,{d16-d23} @ load context
add r2,r1,r2,lsl#7 @ len to point at the end of inp
adr r3,K512
VFP_ABI_PUSH
vldmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ load context
.Loop_neon:
vshr.u64 d24,d20,#14 @ 0
#if 0<16
@ -1756,22 +1849,23 @@ sha512_block_data_order:
bne .L16_79_neon
vadd.i64 d16,d30 @ h+=Maj from the past
vldmia r0,{d24-d31} @ load context to temp
vldmia r0,{d24,d25,d26,d27,d28,d29,d30,d31} @ load context to temp
vadd.i64 q8,q12 @ vectorized accumulate
vadd.i64 q9,q13
vadd.i64 q10,q14
vadd.i64 q11,q15
vstmia r0,{d16-d23} @ save context
vstmia r0,{d16,d17,d18,d19,d20,d21,d22,d23} @ save context
teq r1,r2
sub r3,#640 @ rewind K512
bne .Loop_neon
vldmia sp!,{d8-d15} @ epilogue
VFP_ABI_POP
bx lr @ .word 0xe12fff1e
.size sha512_block_data_order_neon,.-sha512_block_data_order_neon
#endif
.size sha512_block_data_order,.-sha512_block_data_order
.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.byte 83,72,65,53,49,50,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
#if __ARM_MAX_ARCH__>=7
.align 2
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.comm OPENSSL_armcap_P,4,4
#endif