[PATCH 2/3] New ChaCha implementations
Jussi Kivilinna
jussi.kivilinna at iki.fi
Sat Jan 6 19:03:01 CET 2018
* cipher/Makefile.am: Remove 'chacha20-sse2-amd64.S',
'chacha20-ssse3-amd64.S', 'chacha20-avx2-amd64.S'; Add
'chacha20-amd64-ssse3.S', 'chacha20-amd64-avx2.S'.
* cipher/chacha20-amd64-avx2.S: New.
* cipher/chacha20-amd64-ssse3.S: New.
* cipher/chacha20-armv7-neon.S: Rewrite.
* cipher/chacha20-avx2-amd64.S: Remove.
* cipher/chacha20-sse2-amd64.S: Remove.
* cipher/chacha20-ssse3-amd64.S: Remove.
* cipher/chacha20.c (CHACHA20_INPUT_LENGTH, USE_SSE2, USE_NEON)
(ASM_EXTRA_STACK, chacha20_blocks_t, _gcry_chacha20_amd64_sse2_blocks)
(_gcry_chacha20_amd64_ssse3_blocks, _gcry_chacha20_amd64_avx2_blocks)
(_gcry_chacha20_armv7_neon_blocks, QROUND, QOUT, chacha20_core)
(chacha20_do_encrypt_stream): Remove.
(_gcry_chacha20_amd64_ssse3_blocks4, _gcry_chacha20_amd64_avx2_blocks8)
(_gcry_chacha20_armv7_neon_blocks4, ROTATE, XOR, PLUS, PLUSONE)
(QUARTERROUND, BUF_XOR_LE32): New.
(CHACHA20_context_s, chacha20_blocks, chacha20_keysetup)
(chacha20_encrypt_stream): Rewrite.
(chacha20_do_setkey): Adjust for new CHACHA20_context_s.
* configure.ac: Remove 'chacha20-sse2-amd64.lo',
'chacha20-ssse3-amd64.lo', 'chacha20-avx2-amd64.lo'; Add
'chacha20-amd64-ssse3.lo', 'chacha20-amd64-avx2.lo'.
--
Intel Core i7-4790K CPU @ 4.00GHz (x86_64/AVX2):
CHACHA20 | nanosecs/byte mebibytes/sec cycles/byte
STREAM enc | 0.319 ns/B 2988.5 MiB/s 1.28 c/B
STREAM dec | 0.318 ns/B 2995.4 MiB/s 1.27 c/B
Intel Core i7-4790K CPU @ 4.00GHz (x86_64/SSSE3):
CHACHA20 | nanosecs/byte mebibytes/sec cycles/byte
STREAM enc | 0.633 ns/B 1507.4 MiB/s 2.53 c/B
STREAM dec | 0.633 ns/B 1506.6 MiB/s 2.53 c/B
Intel Core i7-4790K CPU @ 4.00GHz (i386):
CHACHA20 | nanosecs/byte mebibytes/sec cycles/byte
STREAM enc | 2.05 ns/B 465.2 MiB/s 8.20 c/B
STREAM dec | 2.04 ns/B 467.5 MiB/s 8.16 c/B
Cortex-A53 @ 1152 MHz (armv7/neon):
CHACHA20 | nanosecs/byte mebibytes/sec cycles/byte
STREAM enc | 5.29 ns/B 180.3 MiB/s 6.09 c/B
STREAM dec | 5.29 ns/B 180.1 MiB/s 6.10 c/B
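
As a quick orientation for review: the cycles/byte column above is just
nanosecs/byte multiplied by the clock (e.g. 0.319 ns/B * 4.0 GHz = 1.28 c/B),
and the new 4-way/8-way block functions all vectorize the plain ChaCha20
quarter round across several 64-byte blocks at once. A minimal C sketch of
that round, plus the rough shape of the new block-function prototype as
implied by the register comments in the .S files (input state, dst, src,
nblks), is included below for reference; the helper names (ROTL32,
QUARTERROUND, chacha20_column_round) and the exact parameter/return types
are illustrative assumptions, not the declarations used in chacha20.c.

  #include <stddef.h>
  #include <stdint.h>

  /* One ChaCha20 quarter round; the QUARTERROUND2 macros in the new .S
   * files perform two of these at a time on whole vectors of 32-bit
   * lanes, one lane per block. */
  #define ROTL32(v, c)  (((v) << (c)) | ((v) >> (32 - (c))))
  #define QUARTERROUND(a, b, c, d) do {                \
      (a) += (b); (d) ^= (a); (d) = ROTL32((d), 16);   \
      (c) += (d); (b) ^= (c); (b) = ROTL32((b), 12);   \
      (a) += (b); (d) ^= (a); (d) = ROTL32((d),  8);   \
      (c) += (d); (b) ^= (c); (b) = ROTL32((b),  7);   \
    } while (0)

  /* One column round over the 4x4 state; a diagonal round uses the
   * indices (0,5,10,15), (1,6,11,12), (2,7,8,13), (3,4,9,14). */
  void chacha20_column_round (uint32_t x[16])
  {
    QUARTERROUND(x[0], x[4], x[ 8], x[12]);
    QUARTERROUND(x[1], x[5], x[ 9], x[13]);
    QUARTERROUND(x[2], x[6], x[10], x[14]);
    QUARTERROUND(x[3], x[7], x[11], x[15]);
  }

  /* Assumed C prototype for the new block functions (argument order taken
   * from the register comments in the assembly). */
  unsigned int _gcry_chacha20_amd64_ssse3_blocks4 (uint32_t *state,
                                                   unsigned char *dst,
                                                   const unsigned char *src,
                                                   size_t nblks);
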
Signed-off-by: Jussi Kivilinna <jussi.kivilinna at iki.fi>
---
0 files changed
diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index 08baa7c44..a24b117c2 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -64,8 +64,7 @@ EXTRA_libcipher_la_SOURCES = \
arcfour.c arcfour-amd64.S \
blowfish.c blowfish-amd64.S blowfish-arm.S \
cast5.c cast5-amd64.S cast5-arm.S \
-chacha20.c chacha20-sse2-amd64.S chacha20-ssse3-amd64.S chacha20-avx2-amd64.S \
- chacha20-armv7-neon.S \
+chacha20.c chacha20-amd64-ssse3.S chacha20-amd64-avx2.S chacha20-armv7-neon.S \
crc.c \
crc-intel-pclmul.c \
des.c des-amd64.S \
diff --git a/cipher/chacha20-amd64-avx2.S b/cipher/chacha20-amd64-avx2.S
new file mode 100644
index 000000000..f4f290dbb
--- /dev/null
+++ b/cipher/chacha20-amd64-avx2.S
@@ -0,0 +1,322 @@
+/* chacha20-amd64-avx2.S - AVX2 implementation of ChaCha20 cipher
+ *
+ * Copyright (C) 2017,2018 Jussi Kivilinna <jussi.kivilinna at iki.fi>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Based on D. J. Bernstein reference implementation at
+ * http://cr.yp.to/chacha.html:
+ *
+ * chacha-regs.c version 20080118
+ * D. J. Bernstein
+ * Public domain.
+ */
+
+#ifdef __x86_64
+#include <config.h>
+#if defined(HAVE_GCC_INLINE_ASM_AVX2) && \
+ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
+ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
+
+.text
+
+#ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS
+# define ELF(...) __VA_ARGS__
+#else
+# define ELF(...) /*_*/
+#endif
+
+#ifdef __PIC__
+# define RIP (%rip)
+#else
+# define RIP
+#endif
+
+/* register macros */
+#define INPUT %rdi
+#define DST %rsi
+#define SRC %rdx
+#define NBLKS %rcx
+#define ROUND %eax
+
+/* stack structure */
+#define STACK_VEC_X12 (32)
+#define STACK_VEC_X13 (32 + STACK_VEC_X12)
+#define STACK_TMP (32 + STACK_VEC_X13)
+#define STACK_TMP1 (32 + STACK_TMP)
+#define STACK_TMP2 (32 + STACK_TMP1)
+
+#define STACK_MAX (32 + STACK_TMP2)
+
+/* vector registers */
+#define X0 %ymm0
+#define X1 %ymm1
+#define X2 %ymm2
+#define X3 %ymm3
+#define X4 %ymm4
+#define X5 %ymm5
+#define X6 %ymm6
+#define X7 %ymm7
+#define X8 %ymm8
+#define X9 %ymm9
+#define X10 %ymm10
+#define X11 %ymm11
+#define X12 %ymm12
+#define X13 %ymm13
+#define X14 %ymm14
+#define X15 %ymm15
+
+#define X0h %xmm0
+#define X1h %xmm1
+#define X2h %xmm2
+#define X3h %xmm3
+#define X4h %xmm4
+#define X5h %xmm5
+#define X6h %xmm6
+#define X7h %xmm7
+#define X8h %xmm8
+#define X9h %xmm9
+#define X10h %xmm10
+#define X11h %xmm11
+#define X12h %xmm12
+#define X13h %xmm13
+#define X14h %xmm14
+#define X15h %xmm15
+
+/**********************************************************************
+ helper macros
+ **********************************************************************/
+
+/* 4x4 32-bit integer matrix transpose */
+#define transpose_4x4(x0,x1,x2,x3,t1,t2) \
+ vpunpckhdq x1, x0, t2; \
+ vpunpckldq x1, x0, x0; \
+ \
+ vpunpckldq x3, x2, t1; \
+ vpunpckhdq x3, x2, x2; \
+ \
+ vpunpckhqdq t1, x0, x1; \
+ vpunpcklqdq t1, x0, x0; \
+ \
+ vpunpckhqdq x2, t2, x3; \
+ vpunpcklqdq x2, t2, x2;
+
+/**********************************************************************
+ 8-way chacha20
+ **********************************************************************/
+
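+/* Rotate left by c: the two shift halves are combined with vpaddb; since
+ * the shifted bit ranges within each 32-bit lane never overlap, byte-wise
+ * addition produces no carries and is equivalent to OR here. */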
+#define ROTATE2(v1,v2,c,tmp) \
+ vpsrld $(32 - (c)), v1, tmp; \
+ vpslld $(c), v1, v1; \
+ vpaddb tmp, v1, v1; \
+ vpsrld $(32 - (c)), v2, tmp; \
+ vpslld $(c), v2, v2; \
+ vpaddb tmp, v2, v2;
+
+#define ROTATE_SHUF_2(v1,v2,shuf) \
+ vpshufb shuf, v1, v1; \
+ vpshufb shuf, v2, v2;
+
+#define XOR(ds,s) \
+ vpxor s, ds, ds;
+
+#define PLUS(ds,s) \
+ vpaddd s, ds, ds;
+
+#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1) \
+ vbroadcasti128 .Lshuf_rol16 RIP, tmp1; \
+ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \
+ ROTATE_SHUF_2(d1, d2, tmp1); \
+ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \
+ ROTATE2(b1, b2, 12, tmp1); \
+ vbroadcasti128 .Lshuf_rol8 RIP, tmp1; \
+ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \
+ ROTATE_SHUF_2(d1, d2, tmp1); \
+ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \
+ ROTATE2(b1, b2, 7, tmp1);
+
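+/* XOR one ymm register against src and write it to dst: the low and high
+ * 128-bit lanes belong to blocks four apart, so they go to two different
+ * 16-byte offsets. */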
+#define BUF_XOR_256_TO_128(dst, src, offset_lo, offset_hi, yreg, tmp1) \
+ vextracti128 $1, yreg, tmp1##h; \
+ vpxor offset_lo(src), yreg##h, yreg##h; \
+ vpxor offset_hi(src), tmp1##h, tmp1##h; \
+ vmovdqu yreg##h, offset_lo(dst); \
+ vmovdqu tmp1##h, offset_hi(dst);
+
+.align 32
+chacha20_data:
+.Lshuf_rol16:
+ .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
+.Lshuf_rol8:
+ .byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
+.Linc_counter:
+ .byte 0,1,2,3,4,5,6,7
+.Lunsigned_cmp:
+ .long 0x80000000
+
+.align 8
+.globl _gcry_chacha20_amd64_avx2_blocks8
+ELF(.type _gcry_chacha20_amd64_avx2_blocks8,@function;)
+
+_gcry_chacha20_amd64_avx2_blocks8:
+ /* input:
+ * %rdi: input
+ * %rsi: dst
+ * %rdx: src
+ * %rcx: nblks (multiple of 8)
+ */
+
+ vzeroupper;
+
+ pushq %rbp;
+ movq %rsp, %rbp;
+
+ subq $STACK_MAX, %rsp;
+ andq $~31, %rsp;
+
+.Loop4:
+ mov $20, ROUND;
+
+ /* Construct counter vectors X12 and X13 */
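+ /* The low counter lanes may wrap when the per-lane offsets 0..7 are
+  * added; as there is no unsigned SIMD compare, both values are biased
+  * by 0x80000000 and compared signed, which yields an all-ones mask for
+  * exactly the lanes that overflowed, and subtracting that mask (-1)
+  * from X13 propagates the carry into the upper counter word. */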
+ vpmovzxbd .Linc_counter RIP, X0;
+ vpbroadcastd .Lunsigned_cmp RIP, X2;
+ vpbroadcastd (12 * 4)(INPUT), X12;
+ vpbroadcastd (13 * 4)(INPUT), X13;
+ vpaddd X0, X12, X12;
+ vpxor X2, X0, X0;
+ vpxor X2, X12, X1;
+ vpcmpgtd X1, X0, X0;
+ vpsubd X0, X13, X13;
+ vmovdqa X12, (STACK_VEC_X12)(%rsp);
+ vmovdqa X13, (STACK_VEC_X13)(%rsp);
+
+ /* Load vectors */
+ vpbroadcastd (0 * 4)(INPUT), X0;
+ vpbroadcastd (1 * 4)(INPUT), X1;
+ vpbroadcastd (2 * 4)(INPUT), X2;
+ vpbroadcastd (3 * 4)(INPUT), X3;
+ vpbroadcastd (4 * 4)(INPUT), X4;
+ vpbroadcastd (5 * 4)(INPUT), X5;
+ vpbroadcastd (6 * 4)(INPUT), X6;
+ vpbroadcastd (7 * 4)(INPUT), X7;
+ vpbroadcastd (8 * 4)(INPUT), X8;
+ vpbroadcastd (9 * 4)(INPUT), X9;
+ vpbroadcastd (10 * 4)(INPUT), X10;
+ vpbroadcastd (11 * 4)(INPUT), X11;
+ vpbroadcastd (14 * 4)(INPUT), X14;
+ vpbroadcastd (15 * 4)(INPUT), X15;
+ vmovdqa X15, (STACK_TMP)(%rsp);
+
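+ /* Each pass through .Lround2 performs one column round and one diagonal
+  * round (hence 'sub $2, ROUND'). All 16 ymm registers hold state words,
+  * so X8 and X15 take turns being spilled to STACK_TMP to free a
+  * temporary for QUARTERROUND2. */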
+.Lround2:
+ QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X15)
+ vmovdqa (STACK_TMP)(%rsp), X15;
+ vmovdqa X8, (STACK_TMP)(%rsp);
+ QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8)
+ QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8)
+ vmovdqa (STACK_TMP)(%rsp), X8;
+ vmovdqa X15, (STACK_TMP)(%rsp);
+ QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X15)
+ sub $2, ROUND;
+ jnz .Lround2;
+
+ /* Add the input state back into the vectors; X15 serves as the temporary from here on */
+ vpbroadcastd (0 * 4)(INPUT), X15;
+ PLUS(X0, X15);
+ vpbroadcastd (1 * 4)(INPUT), X15;
+ PLUS(X1, X15);
+ vpbroadcastd (2 * 4)(INPUT), X15;
+ PLUS(X2, X15);
+ vpbroadcastd (3 * 4)(INPUT), X15;
+ PLUS(X3, X15);
+ vpbroadcastd (4 * 4)(INPUT), X15;
+ PLUS(X4, X15);
+ vpbroadcastd (5 * 4)(INPUT), X15;
+ PLUS(X5, X15);
+ vpbroadcastd (6 * 4)(INPUT), X15;
+ PLUS(X6, X15);
+ vpbroadcastd (7 * 4)(INPUT), X15;
+ PLUS(X7, X15);
+ vpbroadcastd (8 * 4)(INPUT), X15;
+ PLUS(X8, X15);
+ vpbroadcastd (9 * 4)(INPUT), X15;
+ PLUS(X9, X15);
+ vpbroadcastd (10 * 4)(INPUT), X15;
+ PLUS(X10, X15);
+ vpbroadcastd (11 * 4)(INPUT), X15;
+ PLUS(X11, X15);
+ vmovdqa (STACK_VEC_X12)(%rsp), X15;
+ PLUS(X12, X15);
+ vmovdqa (STACK_VEC_X13)(%rsp), X15;
+ PLUS(X13, X15);
+ vmovdqa (STACK_TMP)(%rsp), X15;
+ vmovdqa X13, (STACK_TMP)(%rsp);
+ vpbroadcastd (14 * 4)(INPUT), X13;
+ PLUS(X14, X13);
+ vmovdqa X14, (STACK_TMP1)(%rsp);
+ vpbroadcastd (15 * 4)(INPUT), X13;
+ PLUS(X15, X13);
+ vmovdqa X15, (STACK_TMP2)(%rsp);
+
+ /* Update counter */
+ addq $8, (12 * 4)(INPUT);
+
+ transpose_4x4(X0, X1, X2, X3, X13, X14);
+ transpose_4x4(X4, X5, X6, X7, X13, X14);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 0 + 16 * 0), (64 * 4 + 16 * 0), X0, X15);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 1 + 16 * 0), (64 * 5 + 16 * 0), X1, X15);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 2 + 16 * 0), (64 * 6 + 16 * 0), X2, X15);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 3 + 16 * 0), (64 * 7 + 16 * 0), X3, X15);
+ vmovdqa (STACK_TMP)(%rsp), X13;
+ vmovdqa (STACK_TMP1)(%rsp), X14;
+ vmovdqa (STACK_TMP2)(%rsp), X15;
+ transpose_4x4(X8, X9, X10, X11, X0, X1);
+ transpose_4x4(X12, X13, X14, X15, X0, X1);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 0 + 16 * 1), (64 * 4 + 16 * 1), X4, X0);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 1 + 16 * 1), (64 * 5 + 16 * 1), X5, X0);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 2 + 16 * 1), (64 * 6 + 16 * 1), X6, X0);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 3 + 16 * 1), (64 * 7 + 16 * 1), X7, X0);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 0 + 16 * 2), (64 * 4 + 16 * 2), X8, X0);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 1 + 16 * 2), (64 * 5 + 16 * 2), X9, X0);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 2 + 16 * 2), (64 * 6 + 16 * 2), X10, X0);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 3 + 16 * 2), (64 * 7 + 16 * 2), X11, X0);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 0 + 16 * 3), (64 * 4 + 16 * 3), X12, X0);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 1 + 16 * 3), (64 * 5 + 16 * 3), X13, X0);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 2 + 16 * 3), (64 * 6 + 16 * 3), X14, X0);
+ BUF_XOR_256_TO_128(DST, SRC, (64 * 3 + 16 * 3), (64 * 7 + 16 * 3), X15, X0);
+
+ sub $8, NBLKS;
+ lea (8 * 64)(DST), DST;
+ lea (8 * 64)(SRC), SRC;
+ jnz .Loop4;
+
+ /* clear the used vector registers and stack */
+ vpxor X0, X0, X0;
+ vmovdqa X0, (STACK_VEC_X12)(%rsp);
+ vmovdqa X0, (STACK_VEC_X13)(%rsp);
+ vmovdqa X0, (STACK_TMP)(%rsp);
+ vmovdqa X0, (STACK_TMP1)(%rsp);
+ vmovdqa X0, (STACK_TMP2)(%rsp);
+ vzeroall;
+
+ /* eax zeroed by round loop. */
+ leave;
+ ret;
+ELF(.size _gcry_chacha20_amd64_avx2_blocks8,
+ .-_gcry_chacha20_amd64_avx2_blocks8;)
+
+#endif /*defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS)*/
+#endif /*__x86_64*/
diff --git a/cipher/chacha20-amd64-ssse3.S b/cipher/chacha20-amd64-ssse3.S
new file mode 100644
index 000000000..7ad1c0ae3
--- /dev/null
+++ b/cipher/chacha20-amd64-ssse3.S
@@ -0,0 +1,341 @@
+/* chacha20-amd64-ssse3.S - SSSE3 implementation of ChaCha20 cipher
+ *
+ * Copyright (C) 2017,2018 Jussi Kivilinna <jussi.kivilinna at iki.fi>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Based on D. J. Bernstein reference implementation at
+ * http://cr.yp.to/chacha.html:
+ *
+ * chacha-regs.c version 20080118
+ * D. J. Bernstein
+ * Public domain.
+ */
+
+#ifdef __x86_64
+#include <config.h>
+#if defined(HAVE_GCC_INLINE_ASM_SSSE3) && \
+ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
+ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
+
+.text
+
+#ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS
+# define ELF(...) __VA_ARGS__
+#else
+# define ELF(...) /*_*/
+#endif
+
+#ifdef __PIC__
+# define RIP (%rip)
+#else
+# define RIP
+#endif
+
+/* register macros */
+#define INPUT %rdi
+#define DST %rsi
+#define SRC %rdx
+#define NBLKS %rcx
+#define ROUND %eax
+
+/* stack structure */
+#define STACK_VEC_X12 (16)
+#define STACK_VEC_X13 (16 + STACK_VEC_X12)
+#define STACK_TMP (16 + STACK_VEC_X13)
+#define STACK_TMP1 (16 + STACK_TMP)
+#define STACK_TMP2 (16 + STACK_TMP1)
+
+#define STACK_MAX (16 + STACK_TMP2)
+
+/* vector registers */
+#define X0 %xmm0
+#define X1 %xmm1
+#define X2 %xmm2
+#define X3 %xmm3
+#define X4 %xmm4
+#define X5 %xmm5
+#define X6 %xmm6
+#define X7 %xmm7
+#define X8 %xmm8
+#define X9 %xmm9
+#define X10 %xmm10
+#define X11 %xmm11
+#define X12 %xmm12
+#define X13 %xmm13
+#define X14 %xmm14
+#define X15 %xmm15
+
+/**********************************************************************
+ helper macros
+ **********************************************************************/
+
+/* 4x4 32-bit integer matrix transpose */
+#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
+ movdqa x0, t2; \
+ punpckhdq x1, t2; \
+ punpckldq x1, x0; \
+ \
+ movdqa x2, t1; \
+ punpckldq x3, t1; \
+ punpckhdq x3, x2; \
+ \
+ movdqa x0, x1; \
+ punpckhqdq t1, x1; \
+ punpcklqdq t1, x0; \
+ \
+ movdqa t2, x3; \
+ punpckhqdq x2, x3; \
+ punpcklqdq x2, t2; \
+ movdqa t2, x2;
+
+/* fill xmm register with 32-bit value from memory */
+#define pbroadcastd(mem32, xreg) \
+ movd mem32, xreg; \
+ pshufd $0, xreg, xreg;
+
+/* xor with unaligned memory operand */
+#define pxor_u(umem128, xreg, t) \
+ movdqu umem128, t; \
+ pxor t, xreg;
+
+/* xor register with unaligned src and save to unaligned dst */
+#define xor_src_dst(dst, src, offset, xreg, t) \
+ pxor_u(offset(src), xreg, t); \
+ movdqu xreg, offset(dst);
+
+#define clear(x) pxor x,x;
+
+/**********************************************************************
+ 4-way chacha20
+ **********************************************************************/
+
+#define ROTATE2(v1,v2,c,tmp1,tmp2) \
+ movdqa v1, tmp1; \
+ movdqa v2, tmp2; \
+ psrld $(32 - (c)), v1; \
+ pslld $(c), tmp1; \
+ paddb tmp1, v1; \
+ psrld $(32 - (c)), v2; \
+ pslld $(c), tmp2; \
+ paddb tmp2, v2;
+
+#define ROTATE_SHUF_2(v1,v2,shuf) \
+ pshufb shuf, v1; \
+ pshufb shuf, v2;
+
+#define XOR(ds,s) \
+ pxor s, ds;
+
+#define PLUS(ds,s) \
+ paddd s, ds;
+
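+/* As in the AVX2 version, the 16-bit and 8-bit rotations are done with
+ * pshufb byte shuffles (.Lshuf_rol16/.Lshuf_rol8), while the 12-bit and
+ * 7-bit rotations use the shift pair in ROTATE2, where paddb stands in
+ * for por because the shifted halves never overlap. */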
+#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1,tmp2) \
+ movdqa .Lshuf_rol16 RIP, tmp1; \
+ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \
+ ROTATE_SHUF_2(d1, d2, tmp1); \
+ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \
+ ROTATE2(b1, b2, 12, tmp1, tmp2); \
+ movdqa .Lshuf_rol8 RIP, tmp1; \
+ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \
+ ROTATE_SHUF_2(d1, d2, tmp1); \
+ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \
+ ROTATE2(b1, b2, 7, tmp1, tmp2);
+
+chacha20_data:
+.align 16
+.Lshuf_rol16:
+ .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13
+.Lshuf_rol8:
+ .byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14
+.Linc_counter:
+ .long 0,1,2,3
+.Lunsigned_cmp:
+ .long 0x80000000,0x80000000,0x80000000,0x80000000
+
+.align 8
+.globl _gcry_chacha20_amd64_ssse3_blocks4
+ELF(.type _gcry_chacha20_amd64_ssse3_blocks4,@function;)
+
+_gcry_chacha20_amd64_ssse3_blocks4:
+ /* input:
+ * %rdi: input
+ * %rsi: dst
+ * %rdx: src
+ * %rcx: nblks (multiple of 4)
+ */
+
+ pushq %rbp;
+ movq %rsp, %rbp;
+
+ subq $STACK_MAX, %rsp;
+ andq $~15, %rsp;
+
+.Loop4:
+ mov $20, ROUND;
+
+ /* Construct counter vectors X12 and X13 */
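+ /* Same sign-bias trick as in the AVX2 version: pcmpgtd on values XORed
+  * with 0x80000000 acts as an unsigned compare and gives the carry mask
+  * for the counter lanes that wrapped. */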
+ vmovdqa .Linc_counter RIP, X0;
+ vmovdqa .Lunsigned_cmp RIP, X2;
+ pbroadcastd((12 * 4)(INPUT), X12);
+ pbroadcastd((13 * 4)(INPUT), X13);
+ paddd X0, X12;
+ movdqa X12, X1;
+ pxor X2, X0;
+ pxor X2, X1;
+ pcmpgtd X1, X0;
+ psubd X0, X13;
+ movdqa X12, (STACK_VEC_X12)(%rsp);
+ movdqa X13, (STACK_VEC_X13)(%rsp);
+
+ /* Load vectors */
+ pbroadcastd((0 * 4)(INPUT), X0);
+ pbroadcastd((1 * 4)(INPUT), X1);
+ pbroadcastd((2 * 4)(INPUT), X2);
+ pbroadcastd((3 * 4)(INPUT), X3);
+ pbroadcastd((4 * 4)(INPUT), X4);
+ pbroadcastd((5 * 4)(INPUT), X5);
+ pbroadcastd((6 * 4)(INPUT), X6);
+ pbroadcastd((7 * 4)(INPUT), X7);
+ pbroadcastd((8 * 4)(INPUT), X8);
+ pbroadcastd((9 * 4)(INPUT), X9);
+ pbroadcastd((10 * 4)(INPUT), X10);
+ pbroadcastd((11 * 4)(INPUT), X11);
+ pbroadcastd((14 * 4)(INPUT), X14);
+ pbroadcastd((15 * 4)(INPUT), X15);
+ movdqa X11, (STACK_TMP)(%rsp);
+ movdqa X15, (STACK_TMP1)(%rsp);
+
+.Lround2:
+ QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X11,X15)
+ movdqa (STACK_TMP)(%rsp), X11;
+ movdqa (STACK_TMP1)(%rsp), X15;
+ movdqa X8, (STACK_TMP)(%rsp);
+ movdqa X9, (STACK_TMP1)(%rsp);
+ QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8,X9)
+ QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8,X9)
+ movdqa (STACK_TMP)(%rsp), X8;
+ movdqa (STACK_TMP1)(%rsp), X9;
+ movdqa X11, (STACK_TMP)(%rsp);
+ movdqa X15, (STACK_TMP1)(%rsp);
+ QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X11,X15)
+ sub $2, ROUND;
+ jnz .Lround2;
+
+ /* Add the input state back into the vectors; X15 serves as the temporary from here on */
+ movdqa (STACK_TMP)(%rsp), X11;
+ pbroadcastd((0 * 4)(INPUT), X15);
+ PLUS(X0, X15);
+ pbroadcastd((1 * 4)(INPUT), X15);
+ PLUS(X1, X15);
+ pbroadcastd((2 * 4)(INPUT), X15);
+ PLUS(X2, X15);
+ pbroadcastd((3 * 4)(INPUT), X15);
+ PLUS(X3, X15);
+ pbroadcastd((4 * 4)(INPUT), X15);
+ PLUS(X4, X15);
+ pbroadcastd((5 * 4)(INPUT), X15);
+ PLUS(X5, X15);
+ pbroadcastd((6 * 4)(INPUT), X15);
+ PLUS(X6, X15);
+ pbroadcastd((7 * 4)(INPUT), X15);
+ PLUS(X7, X15);
+ pbroadcastd((8 * 4)(INPUT), X15);
+ PLUS(X8, X15);
+ pbroadcastd((9 * 4)(INPUT), X15);
+ PLUS(X9, X15);
+ pbroadcastd((10 * 4)(INPUT), X15);
+ PLUS(X10, X15);
+ pbroadcastd((11 * 4)(INPUT), X15);
+ PLUS(X11, X15);
+ movdqa (STACK_VEC_X12)(%rsp), X15;
+ PLUS(X12, X15);
+ movdqa (STACK_VEC_X13)(%rsp), X15;
+ PLUS(X13, X15);
+ movdqa X13, (STACK_TMP)(%rsp);
+ pbroadcastd((14 * 4)(INPUT), X15);
+ PLUS(X14, X15);
+ movdqa (STACK_TMP1)(%rsp), X15;
+ movdqa X14, (STACK_TMP1)(%rsp);
+ pbroadcastd((15 * 4)(INPUT), X13);
+ PLUS(X15, X13);
+ movdqa X15, (STACK_TMP2)(%rsp);
+
+ /* Update counter */
+ addq $4, (12 * 4)(INPUT);
+
+ transpose_4x4(X0, X1, X2, X3, X13, X14, X15);
+ xor_src_dst(DST, SRC, (64 * 0 + 16 * 0), X0, X15);
+ xor_src_dst(DST, SRC, (64 * 1 + 16 * 0), X1, X15);
+ xor_src_dst(DST, SRC, (64 * 2 + 16 * 0), X2, X15);
+ xor_src_dst(DST, SRC, (64 * 3 + 16 * 0), X3, X15);
+ transpose_4x4(X4, X5, X6, X7, X0, X1, X2);
+ movdqa (STACK_TMP)(%rsp), X13;
+ movdqa (STACK_TMP1)(%rsp), X14;
+ movdqa (STACK_TMP2)(%rsp), X15;
+ xor_src_dst(DST, SRC, (64 * 0 + 16 * 1), X4, X0);
+ xor_src_dst(DST, SRC, (64 * 1 + 16 * 1), X5, X0);
+ xor_src_dst(DST, SRC, (64 * 2 + 16 * 1), X6, X0);
+ xor_src_dst(DST, SRC, (64 * 3 + 16 * 1), X7, X0);
+ transpose_4x4(X8, X9, X10, X11, X0, X1, X2);
+ xor_src_dst(DST, SRC, (64 * 0 + 16 * 2), X8, X0);
+ xor_src_dst(DST, SRC, (64 * 1 + 16 * 2), X9, X0);
+ xor_src_dst(DST, SRC, (64 * 2 + 16 * 2), X10, X0);
+ xor_src_dst(DST, SRC, (64 * 3 + 16 * 2), X11, X0);
+ transpose_4x4(X12, X13, X14, X15, X0, X1, X2);
+ xor_src_dst(DST, SRC, (64 * 0 + 16 * 3), X12, X0);
+ xor_src_dst(DST, SRC, (64 * 1 + 16 * 3), X13, X0);
+ xor_src_dst(DST, SRC, (64 * 2 + 16 * 3), X14, X0);
+ xor_src_dst(DST, SRC, (64 * 3 + 16 * 3), X15, X0);
+
+ sub $4, NBLKS;
+ lea (4 * 64)(DST), DST;
+ lea (4 * 64)(SRC), SRC;
+ jnz .Loop4;
+
+ /* clear the used vector registers and stack */
+ clear(X0);
+ movdqa X0, (STACK_VEC_X12)(%rsp);
+ movdqa X0, (STACK_VEC_X13)(%rsp);
+ movdqa X0, (STACK_TMP)(%rsp);
+ movdqa X0, (STACK_TMP1)(%rsp);
+ movdqa X0, (STACK_TMP2)(%rsp);
+ clear(X1);
+ clear(X2);
+ clear(X3);
+ clear(X4);
+ clear(X5);
+ clear(X6);
+ clear(X7);
+ clear(X8);
+ clear(X9);
+ clear(X10);
+ clear(X11);
+ clear(X12);
+ clear(X13);
+ clear(X14);
+ clear(X15);
+
+ /* eax zeroed by round loop. */
+ leave;
+ ret;
+ELF(.size _gcry_chacha20_amd64_ssse3_blocks4,
+ .-_gcry_chacha20_amd64_ssse3_blocks4;)
+
+#endif /*defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS)*/
+#endif /*__x86_64*/
diff --git a/cipher/chacha20-armv7-neon.S b/cipher/chacha20-armv7-neon.S
index c1971fc7f..33a43df1f 100644
--- a/cipher/chacha20-armv7-neon.S
+++ b/cipher/chacha20-armv7-neon.S
@@ -1,6 +1,6 @@
-/* chacha20-armv7-neon.S - ARM/NEON accelerated chacha20 blocks function
+/* chacha20-armv7-neon.S - ARMv7 NEON implementation of ChaCha20 cipher
*
- * Copyright (C) 2014 Jussi Kivilinna <jussi.kivilinna at iki.fi>
+ * Copyright (C) 2017,2018 Jussi Kivilinna <jussi.kivilinna at iki.fi>
*
* This file is part of Libgcrypt.
*
@@ -19,732 +19,375 @@
*/
/*
- * Based on public domain implementation by Andrew Moon at
- * https://github.com/floodyberry/chacha-opt
+ * Based on D. J. Bernstein reference implementation at
+ * http://cr.yp.to/chacha.html:
+ *
+ * chacha-regs.c version 20080118
+ * D. J. Bernstein
+ * Public domain.
*/
#include <config.h>
#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \
defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \
- defined(HAVE_GCC_INLINE_ASM_NEON) && defined(USE_CHACHA20)
+ defined(HAVE_GCC_INLINE_ASM_NEON)
.syntax unified
.fpu neon
.arm
-#define UNALIGNED_STMIA8(ptr, l0, l1, l2, l3, l4, l5, l6, l7) \
- tst ptr, #3; \
- beq 1f; \
- vpush {d0-d3}; \
- vmov s0, l0; \
- vmov s1, l1; \
- vmov s2, l2; \
- vmov s3, l3; \
- vmov s4, l4; \
- vmov s5, l5; \
- vmov s6, l6; \
- vmov s7, l7; \
- vst1.32 {d0-d3}, [ptr]; \
- add ptr, #32; \
- vpop {d0-d3}; \
- b 2f; \
- 1: stmia ptr!, {l0-l7}; \
- 2: ;
-
-#define UNALIGNED_LDMIA4(ptr, l0, l1, l2, l3) \
- tst ptr, #3; \
- beq 1f; \
- vpush {d0-d1}; \
- vld1.32 {d0-d1}, [ptr]; \
- add ptr, #16; \
- vmov l0, s0; \
- vmov l1, s1; \
- vmov l2, s2; \
- vmov l3, s3; \
- vpop {d0-d1}; \
- b 2f; \
- 1: ldmia ptr!, {l0-l3}; \
- 2: ;
-
.text
-.globl _gcry_chacha20_armv7_neon_blocks
-.type _gcry_chacha20_armv7_neon_blocks,%function;
-_gcry_chacha20_armv7_neon_blocks:
-.Lchacha_blocks_neon_local:
- tst r3, r3
- beq .Lchacha_blocks_neon_nobytes
- vstmdb sp!, {q4,q5,q6,q7}
- stmfd sp!, {r4-r12, r14}
- mov r8, sp
- sub sp, sp, #196
- and sp, sp, #0xffffffe0
- str r0, [sp, #60]
- str r1, [sp, #48]
- str r2, [sp, #40]
- str r3, [sp, #52]
- str r8, [sp, #192]
- add r1, sp, #64
- ldmia r0!, {r4-r11}
- stmia r1!, {r4-r11}
- ldmia r0!, {r4-r11}
- stmia r1!, {r4-r11}
- mov r4, #20
- str r4, [sp, #44]
- cmp r3, #256
- blo .Lchacha_blocks_neon_mainloop2
-.Lchacha_blocks_neon_mainloop1:
- ldr r0, [sp, #44]
- str r0, [sp, #0]
- add r1, sp, #(64)
- mov r2, #1
- veor q12, q12
- vld1.32 {q0,q1}, [r1,:128]!
- vld1.32 {q2,q3}, [r1,:128]
- vmov.32 d24[0], r2
- vadd.u64 q3, q3, q12
- vmov q4, q0
- vmov q5, q1
- vmov q6, q2
- vadd.u64 q7, q3, q12
- vmov q8, q0
- vmov q9, q1
- vmov q10, q2
- vadd.u64 q11, q7, q12
- add r0, sp, #64
- ldm r0, {r0-r12}
- ldr r14, [sp, #(64 +60)]
- str r6, [sp, #8]
- str r11, [sp, #12]
- str r14, [sp, #28]
- ldr r11, [sp, #(64 +52)]
- ldr r14, [sp, #(64 +56)]
-.Lchacha_blocks_neon_rounds1:
- ldr r6, [sp, #0]
- vadd.i32 q0, q0, q1
- add r0, r0, r4
- vadd.i32 q4, q4, q5
- add r1, r1, r5
- vadd.i32 q8, q8, q9
- eor r12, r12, r0
- veor q12, q3, q0
- eor r11, r11, r1
- veor q13, q7, q4
- ror r12, r12, #16
- veor q14, q11, q8
- ror r11, r11, #16
- vrev32.16 q3, q12
- subs r6, r6, #2
- vrev32.16 q7, q13
- add r8, r8, r12
- vrev32.16 q11, q14
- add r9, r9, r11
- vadd.i32 q2, q2, q3
- eor r4, r4, r8
- vadd.i32 q6, q6, q7
- eor r5, r5, r9
- vadd.i32 q10, q10, q11
- str r6, [sp, #0]
- veor q12, q1, q2
- ror r4, r4, #20
- veor q13, q5, q6
- ror r5, r5, #20
- veor q14, q9, q10
- add r0, r0, r4
- vshl.i32 q1, q12, #12
- add r1, r1, r5
- vshl.i32 q5, q13, #12
- ldr r6, [sp, #8]
- vshl.i32 q9, q14, #12
- eor r12, r12, r0
- vsri.u32 q1, q12, #20
- eor r11, r11, r1
- vsri.u32 q5, q13, #20
- ror r12, r12, #24
- vsri.u32 q9, q14, #20
- ror r11, r11, #24
- vadd.i32 q0, q0, q1
- add r8, r8, r12
- vadd.i32 q4, q4, q5
- add r9, r9, r11
- vadd.i32 q8, q8, q9
- eor r4, r4, r8
- veor q12, q3, q0
- eor r5, r5, r9
- veor q13, q7, q4
- str r11, [sp, #20]
- veor q14, q11, q8
- ror r4, r4, #25
- vshl.i32 q3, q12, #8
- ror r5, r5, #25
- vshl.i32 q7, q13, #8
- str r4, [sp, #4]
- vshl.i32 q11, q14, #8
- ldr r4, [sp, #28]
- vsri.u32 q3, q12, #24
- add r2, r2, r6
- vsri.u32 q7, q13, #24
- add r3, r3, r7
- vsri.u32 q11, q14, #24
- ldr r11, [sp, #12]
- vadd.i32 q2, q2, q3
- eor r14, r14, r2
- vadd.i32 q6, q6, q7
- eor r4, r4, r3
- vadd.i32 q10, q10, q11
- ror r14, r14, #16
- veor q12, q1, q2
- ror r4, r4, #16
- veor q13, q5, q6
- add r10, r10, r14
- veor q14, q9, q10
- add r11, r11, r4
- vshl.i32 q1, q12, #7
- eor r6, r6, r10
- vshl.i32 q5, q13, #7
- eor r7, r7, r11
- vshl.i32 q9, q14, #7
- ror r6, r6, #20
- vsri.u32 q1, q12, #25
- ror r7, r7, #20
- vsri.u32 q5, q13, #25
- add r2, r2, r6
- vsri.u32 q9, q14, #25
- add r3, r3, r7
- vext.32 q3, q3, q3, #3
- eor r14, r14, r2
- vext.32 q7, q7, q7, #3
- eor r4, r4, r3
- vext.32 q11, q11, q11, #3
- ror r14, r14, #24
- vext.32 q1, q1, q1, #1
- ror r4, r4, #24
- vext.32 q5, q5, q5, #1
- add r10, r10, r14
- vext.32 q9, q9, q9, #1
- add r11, r11, r4
- vext.32 q2, q2, q2, #2
- eor r6, r6, r10
- vext.32 q6, q6, q6, #2
- eor r7, r7, r11
- vext.32 q10, q10, q10, #2
- ror r6, r6, #25
- vadd.i32 q0, q0, q1
- ror r7, r7, #25
- vadd.i32 q4, q4, q5
- add r0, r0, r5
- vadd.i32 q8, q8, q9
- add r1, r1, r6
- veor q12, q3, q0
- eor r4, r4, r0
- veor q13, q7, q4
- eor r12, r12, r1
- veor q14, q11, q8
- ror r4, r4, #16
- vrev32.16 q3, q12
- ror r12, r12, #16
- vrev32.16 q7, q13
- add r10, r10, r4
- vrev32.16 q11, q14
- add r11, r11, r12
- vadd.i32 q2, q2, q3
- eor r5, r5, r10
- vadd.i32 q6, q6, q7
- eor r6, r6, r11
- vadd.i32 q10, q10, q11
- ror r5, r5, #20
- veor q12, q1, q2
- ror r6, r6, #20
- veor q13, q5, q6
- add r0, r0, r5
- veor q14, q9, q10
- add r1, r1, r6
- vshl.i32 q1, q12, #12
- eor r4, r4, r0
- vshl.i32 q5, q13, #12
- eor r12, r12, r1
- vshl.i32 q9, q14, #12
- ror r4, r4, #24
- vsri.u32 q1, q12, #20
- ror r12, r12, #24
- vsri.u32 q5, q13, #20
- add r10, r10, r4
- vsri.u32 q9, q14, #20
- add r11, r11, r12
- vadd.i32 q0, q0, q1
- eor r5, r5, r10
- vadd.i32 q4, q4, q5
- eor r6, r6, r11
- vadd.i32 q8, q8, q9
- str r11, [sp, #12]
- veor q12, q3, q0
- ror r5, r5, #25
- veor q13, q7, q4
- ror r6, r6, #25
- veor q14, q11, q8
- str r4, [sp, #28]
- vshl.i32 q3, q12, #8
- ldr r4, [sp, #4]
- vshl.i32 q7, q13, #8
- add r2, r2, r7
- vshl.i32 q11, q14, #8
- add r3, r3, r4
- vsri.u32 q3, q12, #24
- ldr r11, [sp, #20]
- vsri.u32 q7, q13, #24
- eor r11, r11, r2
- vsri.u32 q11, q14, #24
- eor r14, r14, r3
- vadd.i32 q2, q2, q3
- ror r11, r11, #16
- vadd.i32 q6, q6, q7
- ror r14, r14, #16
- vadd.i32 q10, q10, q11
- add r8, r8, r11
- veor q12, q1, q2
- add r9, r9, r14
- veor q13, q5, q6
- eor r7, r7, r8
- veor q14, q9, q10
- eor r4, r4, r9
- vshl.i32 q1, q12, #7
- ror r7, r7, #20
- vshl.i32 q5, q13, #7
- ror r4, r4, #20
- vshl.i32 q9, q14, #7
- str r6, [sp, #8]
- vsri.u32 q1, q12, #25
- add r2, r2, r7
- vsri.u32 q5, q13, #25
- add r3, r3, r4
- vsri.u32 q9, q14, #25
- eor r11, r11, r2
- vext.32 q3, q3, q3, #1
- eor r14, r14, r3
- vext.32 q7, q7, q7, #1
- ror r11, r11, #24
- vext.32 q11, q11, q11, #1
- ror r14, r14, #24
- vext.32 q1, q1, q1, #3
- add r8, r8, r11
- vext.32 q5, q5, q5, #3
- add r9, r9, r14
- vext.32 q9, q9, q9, #3
- eor r7, r7, r8
- vext.32 q2, q2, q2, #2
- eor r4, r4, r9
- vext.32 q6, q6, q6, #2
- ror r7, r7, #25
- vext.32 q10, q10, q10, #2
- ror r4, r4, #25
- bne .Lchacha_blocks_neon_rounds1
- str r8, [sp, #0]
- str r9, [sp, #4]
- str r10, [sp, #8]
- str r12, [sp, #16]
- str r11, [sp, #20]
- str r14, [sp, #24]
- add r9, sp, #64
- vld1.32 {q12,q13}, [r9,:128]!
- ldr r12, [sp, #48]
- vld1.32 {q14,q15}, [r9,:128]
- ldr r14, [sp, #40]
- vadd.i32 q0, q0, q12
- ldr r8, [sp, #(64 +0)]
- vadd.i32 q4, q4, q12
- ldr r9, [sp, #(64 +4)]
- vadd.i32 q8, q8, q12
- ldr r10, [sp, #(64 +8)]
- vadd.i32 q1, q1, q13
- ldr r11, [sp, #(64 +12)]
- vadd.i32 q5, q5, q13
- add r0, r0, r8
- vadd.i32 q9, q9, q13
- add r1, r1, r9
- vadd.i32 q2, q2, q14
- add r2, r2, r10
- vadd.i32 q6, q6, q14
- ldr r8, [sp, #(64 +16)]
- vadd.i32 q10, q10, q14
- add r3, r3, r11
- veor q14, q14, q14
- ldr r9, [sp, #(64 +20)]
- mov r11, #1
- add r4, r4, r8
- vmov.32 d28[0], r11
- ldr r10, [sp, #(64 +24)]
- vadd.u64 q12, q14, q15
- add r5, r5, r9
- vadd.u64 q13, q14, q12
- ldr r11, [sp, #(64 +28)]
- vadd.u64 q14, q14, q13
- add r6, r6, r10
- vadd.i32 q3, q3, q12
- tst r12, r12
- vadd.i32 q7, q7, q13
- add r7, r7, r11
- vadd.i32 q11, q11, q14
- beq .Lchacha_blocks_neon_nomessage11
- UNALIGNED_LDMIA4(r12, r8, r9, r10, r11)
- tst r12, r12
- eor r0, r0, r8
- eor r1, r1, r9
- eor r2, r2, r10
- ldr r8, [r12, #0]
- eor r3, r3, r11
- ldr r9, [r12, #4]
- eor r4, r4, r8
- ldr r10, [r12, #8]
- eor r5, r5, r9
- ldr r11, [r12, #12]
- eor r6, r6, r10
- add r12, r12, #16
- eor r7, r7, r11
-.Lchacha_blocks_neon_nomessage11:
- UNALIGNED_STMIA8(r14, r0, r1, r2, r3, r4, r5, r6, r7)
- tst r12, r12
- ldm sp, {r0-r7}
- ldr r8, [sp, #(64 +32)]
- ldr r9, [sp, #(64 +36)]
- ldr r10, [sp, #(64 +40)]
- ldr r11, [sp, #(64 +44)]
- add r0, r0, r8
- add r1, r1, r9
- add r2, r2, r10
- ldr r8, [sp, #(64 +48)]
- add r3, r3, r11
- ldr r9, [sp, #(64 +52)]
- add r4, r4, r8
- ldr r10, [sp, #(64 +56)]
- add r5, r5, r9
- ldr r11, [sp, #(64 +60)]
- add r6, r6, r10
- adds r8, r8, #4
- add r7, r7, r11
- adc r9, r9, #0
- str r8, [sp, #(64 +48)]
- tst r12, r12
- str r9, [sp, #(64 +52)]
- beq .Lchacha_blocks_neon_nomessage12
- UNALIGNED_LDMIA4(r12, r8, r9, r10, r11)
- tst r12, r12
- eor r0, r0, r8
- eor r1, r1, r9
- eor r2, r2, r10
- ldr r8, [r12, #0]
- eor r3, r3, r11
- ldr r9, [r12, #4]
- eor r4, r4, r8
- ldr r10, [r12, #8]
- eor r5, r5, r9
- ldr r11, [r12, #12]
- eor r6, r6, r10
- add r12, r12, #16
- eor r7, r7, r11
-.Lchacha_blocks_neon_nomessage12:
- UNALIGNED_STMIA8(r14, r0, r1, r2, r3, r4, r5, r6, r7)
- tst r12, r12
- beq .Lchacha_blocks_neon_nomessage13
- vld1.32 {q12,q13}, [r12]!
- vld1.32 {q14,q15}, [r12]!
- veor q0, q0, q12
- veor q1, q1, q13
- veor q2, q2, q14
- veor q3, q3, q15
-.Lchacha_blocks_neon_nomessage13:
- vst1.32 {q0,q1}, [r14]!
- vst1.32 {q2,q3}, [r14]!
- beq .Lchacha_blocks_neon_nomessage14
- vld1.32 {q12,q13}, [r12]!
- vld1.32 {q14,q15}, [r12]!
- veor q4, q4, q12
- veor q5, q5, q13
- veor q6, q6, q14
- veor q7, q7, q15
-.Lchacha_blocks_neon_nomessage14:
- vst1.32 {q4,q5}, [r14]!
- vst1.32 {q6,q7}, [r14]!
- beq .Lchacha_blocks_neon_nomessage15
- vld1.32 {q12,q13}, [r12]!
- vld1.32 {q14,q15}, [r12]!
- veor q8, q8, q12
- veor q9, q9, q13
- veor q10, q10, q14
- veor q11, q11, q15
-.Lchacha_blocks_neon_nomessage15:
- vst1.32 {q8,q9}, [r14]!
- vst1.32 {q10,q11}, [r14]!
- str r12, [sp, #48]
- str r14, [sp, #40]
- ldr r3, [sp, #52]
- sub r3, r3, #256
- cmp r3, #256
- str r3, [sp, #52]
- bhs .Lchacha_blocks_neon_mainloop1
- tst r3, r3
- beq .Lchacha_blocks_neon_done
-.Lchacha_blocks_neon_mainloop2:
- ldr r3, [sp, #52]
- ldr r1, [sp, #48]
- cmp r3, #64
- bhs .Lchacha_blocks_neon_noswap1
- add r4, sp, #128
- mov r5, r4
- tst r1, r1
- beq .Lchacha_blocks_neon_nocopy1
-.Lchacha_blocks_neon_copyinput1:
- subs r3, r3, #1
- ldrb r0, [r1], #1
- strb r0, [r4], #1
- bne .Lchacha_blocks_neon_copyinput1
- str r5, [sp, #48]
-.Lchacha_blocks_neon_nocopy1:
- ldr r4, [sp, #40]
- str r5, [sp, #40]
- str r4, [sp, #56]
-.Lchacha_blocks_neon_noswap1:
- ldr r0, [sp, #44]
- str r0, [sp, #0]
- add r0, sp, #64
- ldm r0, {r0-r12}
- ldr r14, [sp, #(64 +60)]
- str r6, [sp, #8]
- str r11, [sp, #12]
- str r14, [sp, #28]
- ldr r11, [sp, #(64 +52)]
- ldr r14, [sp, #(64 +56)]
-.Lchacha_blocks_neon_rounds2:
- ldr r6, [sp, #0]
- add r0, r0, r4
- add r1, r1, r5
- eor r12, r12, r0
- eor r11, r11, r1
- ror r12, r12, #16
- ror r11, r11, #16
- subs r6, r6, #2
- add r8, r8, r12
- add r9, r9, r11
- eor r4, r4, r8
- eor r5, r5, r9
- str r6, [sp, #0]
- ror r4, r4, #20
- ror r5, r5, #20
- add r0, r0, r4
- add r1, r1, r5
- ldr r6, [sp, #8]
- eor r12, r12, r0
- eor r11, r11, r1
- ror r12, r12, #24
- ror r11, r11, #24
- add r8, r8, r12
- add r9, r9, r11
- eor r4, r4, r8
- eor r5, r5, r9
- str r11, [sp, #20]
- ror r4, r4, #25
- ror r5, r5, #25
- str r4, [sp, #4]
- ldr r4, [sp, #28]
- add r2, r2, r6
- add r3, r3, r7
- ldr r11, [sp, #12]
- eor r14, r14, r2
- eor r4, r4, r3
- ror r14, r14, #16
- ror r4, r4, #16
- add r10, r10, r14
- add r11, r11, r4
- eor r6, r6, r10
- eor r7, r7, r11
- ror r6, r6, #20
- ror r7, r7, #20
- add r2, r2, r6
- add r3, r3, r7
- eor r14, r14, r2
- eor r4, r4, r3
- ror r14, r14, #24
- ror r4, r4, #24
- add r10, r10, r14
- add r11, r11, r4
- eor r6, r6, r10
- eor r7, r7, r11
- ror r6, r6, #25
- ror r7, r7, #25
- add r0, r0, r5
- add r1, r1, r6
- eor r4, r4, r0
- eor r12, r12, r1
- ror r4, r4, #16
- ror r12, r12, #16
- add r10, r10, r4
- add r11, r11, r12
- eor r5, r5, r10
- eor r6, r6, r11
- ror r5, r5, #20
- ror r6, r6, #20
- add r0, r0, r5
- add r1, r1, r6
- eor r4, r4, r0
- eor r12, r12, r1
- ror r4, r4, #24
- ror r12, r12, #24
- add r10, r10, r4
- add r11, r11, r12
- eor r5, r5, r10
- eor r6, r6, r11
- str r11, [sp, #12]
- ror r5, r5, #25
- ror r6, r6, #25
- str r4, [sp, #28]
- ldr r4, [sp, #4]
- add r2, r2, r7
- add r3, r3, r4
- ldr r11, [sp, #20]
- eor r11, r11, r2
- eor r14, r14, r3
- ror r11, r11, #16
- ror r14, r14, #16
- add r8, r8, r11
- add r9, r9, r14
- eor r7, r7, r8
- eor r4, r4, r9
- ror r7, r7, #20
- ror r4, r4, #20
- str r6, [sp, #8]
- add r2, r2, r7
- add r3, r3, r4
- eor r11, r11, r2
- eor r14, r14, r3
- ror r11, r11, #24
- ror r14, r14, #24
- add r8, r8, r11
- add r9, r9, r14
- eor r7, r7, r8
- eor r4, r4, r9
- ror r7, r7, #25
- ror r4, r4, #25
- bne .Lchacha_blocks_neon_rounds2
- str r8, [sp, #0]
- str r9, [sp, #4]
- str r10, [sp, #8]
- str r12, [sp, #16]
- str r11, [sp, #20]
- str r14, [sp, #24]
- ldr r12, [sp, #48]
- ldr r14, [sp, #40]
- ldr r8, [sp, #(64 +0)]
- ldr r9, [sp, #(64 +4)]
- ldr r10, [sp, #(64 +8)]
- ldr r11, [sp, #(64 +12)]
- add r0, r0, r8
- add r1, r1, r9
- add r2, r2, r10
- ldr r8, [sp, #(64 +16)]
- add r3, r3, r11
- ldr r9, [sp, #(64 +20)]
- add r4, r4, r8
- ldr r10, [sp, #(64 +24)]
- add r5, r5, r9
- ldr r11, [sp, #(64 +28)]
- add r6, r6, r10
- tst r12, r12
- add r7, r7, r11
- beq .Lchacha_blocks_neon_nomessage21
- UNALIGNED_LDMIA4(r12, r8, r9, r10, r11)
- tst r12, r12
- eor r0, r0, r8
- eor r1, r1, r9
- eor r2, r2, r10
- ldr r8, [r12, #0]
- eor r3, r3, r11
- ldr r9, [r12, #4]
- eor r4, r4, r8
- ldr r10, [r12, #8]
- eor r5, r5, r9
- ldr r11, [r12, #12]
- eor r6, r6, r10
- add r12, r12, #16
- eor r7, r7, r11
-.Lchacha_blocks_neon_nomessage21:
- UNALIGNED_STMIA8(r14, r0, r1, r2, r3, r4, r5, r6, r7)
- ldm sp, {r0-r7}
- ldr r8, [sp, #(64 +32)]
- ldr r9, [sp, #(64 +36)]
- ldr r10, [sp, #(64 +40)]
- ldr r11, [sp, #(64 +44)]
- add r0, r0, r8
- add r1, r1, r9
- add r2, r2, r10
- ldr r8, [sp, #(64 +48)]
- add r3, r3, r11
- ldr r9, [sp, #(64 +52)]
- add r4, r4, r8
- ldr r10, [sp, #(64 +56)]
- add r5, r5, r9
- ldr r11, [sp, #(64 +60)]
- add r6, r6, r10
- adds r8, r8, #1
- add r7, r7, r11
- adc r9, r9, #0
- str r8, [sp, #(64 +48)]
- tst r12, r12
- str r9, [sp, #(64 +52)]
- beq .Lchacha_blocks_neon_nomessage22
- UNALIGNED_LDMIA4(r12, r8, r9, r10, r11)
- tst r12, r12
- eor r0, r0, r8
- eor r1, r1, r9
- eor r2, r2, r10
- ldr r8, [r12, #0]
- eor r3, r3, r11
- ldr r9, [r12, #4]
- eor r4, r4, r8
- ldr r10, [r12, #8]
- eor r5, r5, r9
- ldr r11, [r12, #12]
- eor r6, r6, r10
- add r12, r12, #16
- eor r7, r7, r11
-.Lchacha_blocks_neon_nomessage22:
- UNALIGNED_STMIA8(r14, r0, r1, r2, r3, r4, r5, r6, r7)
- str r12, [sp, #48]
- str r14, [sp, #40]
- ldr r3, [sp, #52]
- cmp r3, #64
- sub r4, r3, #64
- str r4, [sp, #52]
- bhi .Lchacha_blocks_neon_mainloop2
- cmp r3, #64
- beq .Lchacha_blocks_neon_nocopy2
- ldr r1, [sp, #56]
- sub r14, r14, #64
-.Lchacha_blocks_neon_copyinput2:
- subs r3, r3, #1
- ldrb r0, [r14], #1
- strb r0, [r1], #1
- bne .Lchacha_blocks_neon_copyinput2
-.Lchacha_blocks_neon_nocopy2:
-.Lchacha_blocks_neon_done:
- ldr r7, [sp, #60]
- ldr r8, [sp, #(64 +48)]
- ldr r9, [sp, #(64 +52)]
- str r8, [r7, #(48 + 0)]
- str r9, [r7, #(48 + 4)]
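+/* Load the address of a local data object into 'reg'; when built as PIC
+ * this goes through the GOT so the code stays position-independent,
+ * otherwise a plain literal-pool load is used. */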
+#ifdef __PIC__
+# define GET_DATA_POINTER(reg, name, rtmp) \
+ ldr reg, 1f; \
+ ldr rtmp, 2f; \
+ b 3f; \
+ 1: .word _GLOBAL_OFFSET_TABLE_-(3f+8); \
+ 2: .word name(GOT); \
+ 3: add reg, pc, reg; \
+ ldr reg, [reg, rtmp];
+#else
+# define GET_DATA_POINTER(reg, name, rtmp) ldr reg, =name
+#endif
+
+/* register macros */
+#define INPUT r0
+#define DST r1
+#define SRC r2
+#define NBLKS r3
+#define ROUND r4
+
+/* stack structure */
+#define STACK_VEC_X12 (16)
+#define STACK_VEC_X13 (STACK_VEC_X12 + 16)
+#define STACK_TMP (STACK_VEC_X13 + 16)
+#define STACK_TMP1 (16 + STACK_TMP)
+#define STACK_TMP2 (16 + STACK_TMP1)
+
+#define STACK_MAX (16 + STACK_TMP2)
+
+/* vector registers */
+#define X0 q0
+#define X1 q1
+#define X2 q2
+#define X3 q3
+#define X4 q4
+#define X5 q5
+#define X6 q6
+#define X7 q7
+#define X8 q8
+#define X9 q9
+#define X10 q10
+#define X11 q11
+#define X12 q12
+#define X13 q13
+#define X14 q14
+#define X15 q15
+
+#define X0l d0
+#define X1l d2
+#define X2l d4
+#define X3l d6
+#define X4l d8
+#define X5l d10
+#define X6l d12
+#define X7l d14
+#define X8l d16
+#define X9l d18
+#define X10l d20
+#define X11l d22
+#define X12l d24
+#define X13l d26
+#define X14l d28
+#define X15l d30
+
+#define X0h d1
+#define X1h d3
+#define X2h d5
+#define X3h d7
+#define X4h d9
+#define X5h d11
+#define X6h d13
+#define X7h d15
+#define X8h d17
+#define X9h d19
+#define X10h d21
+#define X11h d23
+#define X12h d25
+#define X13h d27
+#define X14h d29
+#define X15h d31
+
+/**********************************************************************
+ helper macros
+ **********************************************************************/
+
+/* 4x4 32-bit integer matrix transpose */
+#define transpose_4x4_part1(_q0, _q1, _q2, _q3) \
+ vtrn.32 _q0, _q1; \
+ vtrn.32 _q2, _q3;
+#define transpose_4x4_part2(_q0, _q1, _q2, _q3) \
+ vswp _q0##h, _q2##l; \
+ vswp _q1##h, _q3##l;
+
+#define clear(x) veor x,x,x;
+
+/**********************************************************************
+ 4-way chacha20
+ **********************************************************************/
+
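+/* Rotate left by c: vshl puts the shifted-up bits in place and vsri
+ * inserts the wrapped-around low bits; the 16-bit rotation below is
+ * simply a vrev32.16 halfword swap. */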
+#define ROTATE2(dst1,dst2,c,src1,src2) \
+ vshl.u32 dst1, src1, #(c); \
+ vshl.u32 dst2, src2, #(c); \
+ vsri.u32 dst1, src1, #(32 - (c)); \
+ vsri.u32 dst2, src2, #(32 - (c));
+
+#define ROTATE2_16(dst1,dst2,src1,src2) \
+ vrev32.16 dst1, src1; \
+ vrev32.16 dst2, src2;
+
+#define XOR(d,s1,s2) \
+ veor d, s2, s1;
+
+#define PLUS(ds,s) \
+ vadd.u32 ds, ds, s;
+
+#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1,tmp2) \
+ PLUS(a1,b1); PLUS(a2,b2); XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); \
+ ROTATE2_16(d1, d2, tmp1, tmp2); \
+ PLUS(c1,d1); PLUS(c2,d2); XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); \
+ ROTATE2(b1, b2, 12, tmp1, tmp2); \
+ PLUS(a1,b1); PLUS(a2,b2); XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); \
+ ROTATE2(d1, d2, 8, tmp1, tmp2); \
+ PLUS(c1,d1); PLUS(c2,d2); XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); \
+ ROTATE2(b1, b2, 7, tmp1, tmp2);
+
+chacha20_data:
+.align 4
+.Linc_counter:
+ .long 0,1,2,3
+
+.align 3
+.globl _gcry_chacha20_armv7_neon_blocks4
+.type _gcry_chacha20_armv7_neon_blocks4,%function;
+
+_gcry_chacha20_armv7_neon_blocks4:
+ /* input:
+ * r0: input
+ * r1: dst
+ * r2: src
+ * r3: nblks (multiple of 4)
+ */
+
+ vpush {q4-q7};
+ push {r4-r12,lr};
+
mov r12, sp
- stmia r12!, {r0-r7}
- add r12, r12, #48
- stmia r12!, {r0-r7}
- sub r0, sp, #8
- ldr sp, [sp, #192]
- ldmfd sp!, {r4-r12, r14}
- vldm sp!, {q4-q7}
- sub r0, sp, r0
- bx lr
-.Lchacha_blocks_neon_nobytes:
- mov r0, #0;
+
+ mov r6, sp;
+ sub r6, r6, #(STACK_MAX);
+ and r6, r6, #(~15);
+ mov sp, r6;
+ GET_DATA_POINTER(r9, .Linc_counter, lr);
+ add lr, INPUT, #(12*4);
+ add r8, sp, #STACK_VEC_X12;
+
+.Loop4:
+ mov ROUND, #20;
+
+ /* Construct counter vectors X12 and X13 */
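+ /* NEON has an unsigned compare, so vcgt.u32 directly yields an all-ones
+  * mask for the lanes whose low counter word wrapped; subtracting that
+  * mask from X13 adds the carry to the high word. */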
+
+ vld1.8 {X15}, [lr];
+ mov lr, INPUT;
+ vld1.8 {X8}, [r9];
+ vdup.32 X12, X15l[0];
+ vdup.32 X13, X15l[1];
+ vld1.8 {X3}, [lr]!;
+ vadd.u32 X12, X12, X8;
+ vdup.32 X0, X3l[0];
+ vdup.32 X1, X3l[1];
+ vdup.32 X2, X3h[0];
+ vcgt.u32 X8, X8, X12;
+ vdup.32 X3, X3h[1];
+ vdup.32 X14, X15h[0];
+ vdup.32 X15, X15h[1];
+ vsub.u32 X13, X13, X8;
+ vld1.8 {X7}, [lr]!;
+ vld1.8 {X11}, [lr];
+ vst1.8 {X12, X13}, [r8];
+ vdup.32 X4, X7l[0];
+ vdup.32 X5, X7l[1];
+ vdup.32 X6, X7h[0];
+ vdup.32 X7, X7h[1];
+ vdup.32 X8, X11l[0];
+ vdup.32 X9, X11l[1];
+ vdup.32 X10, X11h[0];
+ vdup.32 X11, X11h[1];
+
+ add r7, sp, #STACK_TMP2;
+ add r6, sp, #STACK_TMP1;
+ add r5, sp, #STACK_TMP;
+ vst1.8 {X15}, [r6];
+ vst1.8 {X11}, [r5];
+
+ mov lr, INPUT;
+.Lround2:
+ subs ROUND, ROUND, #2
+ QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X11,X15)
+ vld1.8 {X11}, [r5];
+ vld1.8 {X15}, [r6];
+ vst1.8 {X8}, [r5];
+ vst1.8 {X9}, [r6];
+ QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8,X9)
+ QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8,X9)
+ vld1.8 {X8}, [r5];
+ vld1.8 {X9}, [r6];
+ vst1.8 {X11}, [r5];
+ vst1.8 {X15}, [r6];
+ QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X11,X15)
+ bne .Lround2;
+
+ vld1.8 {X11}, [lr]!;
+ vst1.8 {X14}, [r7];
+
+ vdup.32 X14, X11l[0]; /* INPUT + 0 * 4 */
+ vdup.32 X15, X11l[1]; /* INPUT + 1 * 4 */
+ PLUS(X0, X14);
+ PLUS(X1, X15);
+ vdup.32 X14, X11h[0]; /* INPUT + 2 * 4 */
+ vdup.32 X15, X11h[1]; /* INPUT + 3 * 4 */
+ PLUS(X2, X14);
+ PLUS(X3, X15);
+
+ vld1.8 {X11}, [r5];
+ vld1.8 {X15}, [r6];
+ vst1.8 {X0}, [r5];
+ vld1.8 {X0}, [lr]!;
+ vst1.8 {X1}, [r6];
+
+ vdup.32 X14, X0l[0]; /* INPUT + 4 * 4 */
+ vdup.32 X1, X0l[1]; /* INPUT + 5 * 4 */
+ PLUS(X4, X14);
+ PLUS(X5, X1);
+ vdup.32 X14, X0h[0]; /* INPUT + 6 * 4 */
+ vdup.32 X1, X0h[1]; /* INPUT + 7 * 4 */
+ PLUS(X6, X14);
+ PLUS(X7, X1);
+
+ vld1.8 {X0}, [lr]!;
+
+ vdup.32 X14, X0l[0]; /* INPUT + 8 * 4 */
+ vdup.32 X1, X0l[1]; /* INPUT + 9 * 4 */
+ PLUS(X8, X14);
+ PLUS(X9, X1);
+ vdup.32 X14, X0h[0]; /* INPUT + 10 * 4 */
+ vdup.32 X1, X0h[1]; /* INPUT + 11 * 4 */
+ PLUS(X10, X14);
+ PLUS(X11, X1);
+
+ vld1.8 {X0}, [lr];
+ add lr, INPUT, #(12*4)
+ vld1.8 {X14}, [r7];
+
+ vdup.32 X1, X0h[0]; /* INPUT + 14 * 4 */
+ ldm lr, {r10, r11}; /* Update counter */
+ vdup.32 X0, X0h[1]; /* INPUT + 15 * 4 */
+ PLUS(X14, X1);
+ PLUS(X15, X0);
+ adds r10, r10, #4; /* Update counter */
+ vld1.8 {X0, X1}, [r8];
+
+ PLUS(X12, X0);
+ vld1.8 {X0}, [r5];
+ PLUS(X13, X1);
+ adc r11, r11, #0; /* Update counter */
+
+ vld1.8 {X1}, [r6];
+ stm lr, {r10, r11}; /* Update counter */
+ transpose_4x4_part1(X0, X1, X2, X3);
+ transpose_4x4_part1(X4, X5, X6, X7);
+ transpose_4x4_part1(X8, X9, X10, X11);
+ transpose_4x4_part1(X12, X13, X14, X15);
+ transpose_4x4_part2(X0, X1, X2, X3);
+ transpose_4x4_part2(X4, X5, X6, X7);
+ transpose_4x4_part2(X8, X9, X10, X11);
+ transpose_4x4_part2(X12, X13, X14, X15);
+
+ subs NBLKS, NBLKS, #4;
+
+ vst1.8 {X10}, [r5];
+ add lr, INPUT, #(12*4)
+ vst1.8 {X11}, [r6];
+ vld1.8 {X10, X11}, [SRC]!;
+ veor X10, X0, X10;
+ vld1.8 {X0}, [SRC]!;
+ veor X11, X4, X11;
+ vld1.8 {X4}, [SRC]!;
+ vst1.8 {X10, X11}, [DST]!;
+ vld1.8 {X10, X11}, [SRC]!;
+ veor X0, X8, X0;
+ veor X4, X12, X4;
+ veor X10, X1, X10;
+ veor X11, X5, X11;
+ vst1.8 {X0}, [DST]!;
+ vld1.8 {X0, X1}, [SRC]!;
+ vst1.8 {X4}, [DST]!;
+ vld1.8 {X4, X5}, [SRC]!;
+ vst1.8 {X10, X11}, [DST]!;
+ vld1.8 {X10}, [r5];
+ vld1.8 {X11}, [r6];
+ veor X0, X9, X0;
+ vld1.8 {X8, X9}, [SRC]!;
+ veor X1, X13, X1;
+ vld1.8 {X12, X13}, [SRC]!;
+ veor X4, X2, X4;
+ veor X5, X6, X5;
+ vst1.8 {X0, X1}, [DST]!;
+ vld1.8 {X0, X1}, [SRC]!;
+ vst1.8 {X4, X5}, [DST]!;
+ veor X8, X10, X8;
+ veor X9, X14, X9;
+ veor X12, X3, X12;
+ veor X13, X7, X13;
+ veor X0, X11, X0;
+ veor X1, X15, X1;
+ vst1.8 {X8, X9}, [DST]!;
+ vst1.8 {X12, X13}, [DST]!;
+ vst1.8 {X0, X1}, [DST]!;
+
+ bne .Loop4;
+
+ /* clear the used vector registers and stack */
+ clear(X0);
+ vst1.8 {X0}, [r5];
+ vst1.8 {X0}, [r6];
+ vst1.8 {X0}, [r7];
+ vst1.8 {X0}, [r8]!;
+ vst1.8 {X0}, [r8];
+
+ mov sp, r12
+ clear(X1);
+ clear(X2);
+ clear(X3);
+ clear(X4);
+ clear(X5);
+ clear(X6);
+ clear(X7);
+ clear(X8);
+ clear(X9);
+ clear(X10);
+ clear(X11);
+ clear(X12);
+ clear(X13);
+ clear(X14);
+ clear(X15);
+
+ pop {r4-r12,lr}
+ vpop {q4-q7}
+ eor r0, r0, r0
bx lr
-.ltorg
-.size _gcry_chacha20_armv7_neon_blocks,.-_gcry_chacha20_armv7_neon_blocks;
+.size _gcry_chacha20_armv7_neon_blocks4, .-_gcry_chacha20_armv7_neon_blocks4;
#endif
diff --git a/cipher/chacha20-avx2-amd64.S b/cipher/chacha20-avx2-amd64.S
deleted file mode 100644
index 8c085bad6..000000000
--- a/cipher/chacha20-avx2-amd64.S
+++ /dev/null
@@ -1,956 +0,0 @@
-/* chacha20-avx2-amd64.S - AMD64/AVX2 implementation of ChaCha20
- *
- * Copyright (C) 2014 Jussi Kivilinna <jussi.kivilinna at iki.fi>
- *
- * This file is part of Libgcrypt.
- *
- * Libgcrypt is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * Libgcrypt is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*
- * Based on public domain implementation by Andrew Moon at
- * https://github.com/floodyberry/chacha-opt
- */
-
-#ifdef __x86_64__
-#include <config.h>
-
-#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
- defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
- defined(ENABLE_AVX2_SUPPORT) && USE_CHACHA20
-
-#ifdef __PIC__
-# define RIP (%rip)
-#else
-# define RIP
-#endif
-
-#ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS
-# define ELF(...) __VA_ARGS__
-#else
-# define ELF(...) /*_*/
-#endif
-
-.text
-
-.align 8
-.globl _gcry_chacha20_amd64_avx2_blocks
-ELF(.type _gcry_chacha20_amd64_avx2_blocks,@function;)
-_gcry_chacha20_amd64_avx2_blocks:
-.Lchacha_blocks_avx2_local:
- vzeroupper
- pushq %rbx
- pushq %rbp
- pushq %r12
- pushq %r13
- pushq %r14
- movq %rsp, %rbp
- andq $~63, %rsp
- subq $512, %rsp
- leaq .LC RIP, %rax
- vmovdqu 0(%rax), %xmm6
- vmovdqu 16(%rax), %xmm7
- vmovdqu 0(%rdi), %xmm8
- vmovdqu 16(%rdi), %xmm9
- vmovdqu 32(%rdi), %xmm10
- vmovdqu 48(%rdi), %xmm11
- movl $20, %eax
- movq $1, %r9
- vmovdqa %xmm8, 0(%rsp)
- vmovdqa %xmm9, 16(%rsp)
- vmovdqa %xmm10, 32(%rsp)
- vmovdqa %xmm11, 48(%rsp)
- movq %rax, 64(%rsp)
- vmovdqa %xmm6, 448(%rsp)
- vmovdqa %xmm6, 464(%rsp)
- vmovdqa %xmm7, 480(%rsp)
- vmovdqa %xmm7, 496(%rsp)
- cmpq $512, %rcx
- jae .Lchacha_blocks_avx2_atleast512
- cmp $256, %rcx
- jae .Lchacha_blocks_avx2_atleast256
- jmp .Lchacha_blocks_avx2_below256
- .p2align 6,,63
-.Lchacha_blocks_avx2_atleast512:
- movq 48(%rsp), %rax
- leaq 1(%rax), %r8
- leaq 2(%rax), %r9
- leaq 3(%rax), %r10
- leaq 4(%rax), %rbx
- leaq 5(%rax), %r11
- leaq 6(%rax), %r12
- leaq 7(%rax), %r13
- leaq 8(%rax), %r14
- movl %eax, 128(%rsp)
- movl %r8d, 4+128(%rsp)
- movl %r9d, 8+128(%rsp)
- movl %r10d, 12+128(%rsp)
- movl %ebx, 16+128(%rsp)
- movl %r11d, 20+128(%rsp)
- movl %r12d, 24+128(%rsp)
- movl %r13d, 28+128(%rsp)
- shrq $32, %rax
- shrq $32, %r8
- shrq $32, %r9
- shrq $32, %r10
- shrq $32, %rbx
- shrq $32, %r11
- shrq $32, %r12
- shrq $32, %r13
- movl %eax, 160(%rsp)
- movl %r8d, 4+160(%rsp)
- movl %r9d, 8+160(%rsp)
- movl %r10d, 12+160(%rsp)
- movl %ebx, 16+160(%rsp)
- movl %r11d, 20+160(%rsp)
- movl %r12d, 24+160(%rsp)
- movl %r13d, 28+160(%rsp)
- movq %r14, 48(%rsp)
- movq 64(%rsp), %rax
- vpbroadcastd 0(%rsp), %ymm0
- vpbroadcastd 4+0(%rsp), %ymm1
- vpbroadcastd 8+0(%rsp), %ymm2
- vpbroadcastd 12+0(%rsp), %ymm3
- vpbroadcastd 16(%rsp), %ymm4
- vpbroadcastd 4+16(%rsp), %ymm5
- vpbroadcastd 8+16(%rsp), %ymm6
- vpbroadcastd 12+16(%rsp), %ymm7
- vpbroadcastd 32(%rsp), %ymm8
- vpbroadcastd 4+32(%rsp), %ymm9
- vpbroadcastd 8+32(%rsp), %ymm10
- vpbroadcastd 12+32(%rsp), %ymm11
- vpbroadcastd 8+48(%rsp), %ymm14
- vpbroadcastd 12+48(%rsp), %ymm15
- vmovdqa 128(%rsp), %ymm12
- vmovdqa 160(%rsp), %ymm13
-.Lchacha_blocks_avx2_mainloop1:
- vpaddd %ymm0, %ymm4, %ymm0
- vpaddd %ymm1, %ymm5, %ymm1
- vpxor %ymm12, %ymm0, %ymm12
- vpxor %ymm13, %ymm1, %ymm13
- vpaddd %ymm2, %ymm6, %ymm2
- vpaddd %ymm3, %ymm7, %ymm3
- vpxor %ymm14, %ymm2, %ymm14
- vpxor %ymm15, %ymm3, %ymm15
- vpshufb 448(%rsp), %ymm12, %ymm12
- vpshufb 448(%rsp), %ymm13, %ymm13
- vpaddd %ymm8, %ymm12, %ymm8
- vpaddd %ymm9, %ymm13, %ymm9
- vpshufb 448(%rsp), %ymm14, %ymm14
- vpshufb 448(%rsp), %ymm15, %ymm15
- vpaddd %ymm10, %ymm14, %ymm10
- vpaddd %ymm11, %ymm15, %ymm11
- vmovdqa %ymm12, 96(%rsp)
- vpxor %ymm4, %ymm8, %ymm4
- vpxor %ymm5, %ymm9, %ymm5
- vpslld $ 12, %ymm4, %ymm12
- vpsrld $20, %ymm4, %ymm4
- vpxor %ymm4, %ymm12, %ymm4
- vpslld $ 12, %ymm5, %ymm12
- vpsrld $20, %ymm5, %ymm5
- vpxor %ymm5, %ymm12, %ymm5
- vpxor %ymm6, %ymm10, %ymm6
- vpxor %ymm7, %ymm11, %ymm7
- vpslld $ 12, %ymm6, %ymm12
- vpsrld $20, %ymm6, %ymm6
- vpxor %ymm6, %ymm12, %ymm6
- vpslld $ 12, %ymm7, %ymm12
- vpsrld $20, %ymm7, %ymm7
- vpxor %ymm7, %ymm12, %ymm7
- vpaddd %ymm0, %ymm4, %ymm0
- vpaddd %ymm1, %ymm5, %ymm1
- vpxor 96(%rsp), %ymm0, %ymm12
- vpxor %ymm13, %ymm1, %ymm13
- vpaddd %ymm2, %ymm6, %ymm2
- vpaddd %ymm3, %ymm7, %ymm3
- vpxor %ymm14, %ymm2, %ymm14
- vpxor %ymm15, %ymm3, %ymm15
- vpshufb 480(%rsp), %ymm12, %ymm12
- vpshufb 480(%rsp), %ymm13, %ymm13
- vpaddd %ymm8, %ymm12, %ymm8
- vpaddd %ymm9, %ymm13, %ymm9
- vpshufb 480(%rsp), %ymm14, %ymm14
- vpshufb 480(%rsp), %ymm15, %ymm15
- vpaddd %ymm10, %ymm14, %ymm10
- vpaddd %ymm11, %ymm15, %ymm11
- vmovdqa %ymm12, 96(%rsp)
- vpxor %ymm4, %ymm8, %ymm4
- vpxor %ymm5, %ymm9, %ymm5
- vpslld $ 7, %ymm4, %ymm12
- vpsrld $25, %ymm4, %ymm4
- vpxor %ymm4, %ymm12, %ymm4
- vpslld $ 7, %ymm5, %ymm12
- vpsrld $25, %ymm5, %ymm5
- vpxor %ymm5, %ymm12, %ymm5
- vpxor %ymm6, %ymm10, %ymm6
- vpxor %ymm7, %ymm11, %ymm7
- vpslld $ 7, %ymm6, %ymm12
- vpsrld $25, %ymm6, %ymm6
- vpxor %ymm6, %ymm12, %ymm6
- vpslld $ 7, %ymm7, %ymm12
- vpsrld $25, %ymm7, %ymm7
- vpxor %ymm7, %ymm12, %ymm7
- vpaddd %ymm0, %ymm5, %ymm0
- vpaddd %ymm1, %ymm6, %ymm1
- vpxor %ymm15, %ymm0, %ymm15
- vpxor 96(%rsp), %ymm1, %ymm12
- vpaddd %ymm2, %ymm7, %ymm2
- vpaddd %ymm3, %ymm4, %ymm3
- vpxor %ymm13, %ymm2, %ymm13
- vpxor %ymm14, %ymm3, %ymm14
- vpshufb 448(%rsp), %ymm15, %ymm15
- vpshufb 448(%rsp), %ymm12, %ymm12
- vpaddd %ymm10, %ymm15, %ymm10
- vpaddd %ymm11, %ymm12, %ymm11
- vpshufb 448(%rsp), %ymm13, %ymm13
- vpshufb 448(%rsp), %ymm14, %ymm14
- vpaddd %ymm8, %ymm13, %ymm8
- vpaddd %ymm9, %ymm14, %ymm9
- vmovdqa %ymm15, 96(%rsp)
- vpxor %ymm5, %ymm10, %ymm5
- vpxor %ymm6, %ymm11, %ymm6
- vpslld $ 12, %ymm5, %ymm15
- vpsrld $20, %ymm5, %ymm5
- vpxor %ymm5, %ymm15, %ymm5
- vpslld $ 12, %ymm6, %ymm15
- vpsrld $20, %ymm6, %ymm6
- vpxor %ymm6, %ymm15, %ymm6
- vpxor %ymm7, %ymm8, %ymm7
- vpxor %ymm4, %ymm9, %ymm4
- vpslld $ 12, %ymm7, %ymm15
- vpsrld $20, %ymm7, %ymm7
- vpxor %ymm7, %ymm15, %ymm7
- vpslld $ 12, %ymm4, %ymm15
- vpsrld $20, %ymm4, %ymm4
- vpxor %ymm4, %ymm15, %ymm4
- vpaddd %ymm0, %ymm5, %ymm0
- vpaddd %ymm1, %ymm6, %ymm1
- vpxor 96(%rsp), %ymm0, %ymm15
- vpxor %ymm12, %ymm1, %ymm12
- vpaddd %ymm2, %ymm7, %ymm2
- vpaddd %ymm3, %ymm4, %ymm3
- vpxor %ymm13, %ymm2, %ymm13
- vpxor %ymm14, %ymm3, %ymm14
- vpshufb 480(%rsp), %ymm15, %ymm15
- vpshufb 480(%rsp), %ymm12, %ymm12
- vpaddd %ymm10, %ymm15, %ymm10
- vpaddd %ymm11, %ymm12, %ymm11
- vpshufb 480(%rsp), %ymm13, %ymm13
- vpshufb 480(%rsp), %ymm14, %ymm14
- vpaddd %ymm8, %ymm13, %ymm8
- vpaddd %ymm9, %ymm14, %ymm9
- vmovdqa %ymm15, 96(%rsp)
- vpxor %ymm5, %ymm10, %ymm5
- vpxor %ymm6, %ymm11, %ymm6
- vpslld $ 7, %ymm5, %ymm15
- vpsrld $25, %ymm5, %ymm5
- vpxor %ymm5, %ymm15, %ymm5
- vpslld $ 7, %ymm6, %ymm15
- vpsrld $25, %ymm6, %ymm6
- vpxor %ymm6, %ymm15, %ymm6
- vpxor %ymm7, %ymm8, %ymm7
- vpxor %ymm4, %ymm9, %ymm4
- vpslld $ 7, %ymm7, %ymm15
- vpsrld $25, %ymm7, %ymm7
- vpxor %ymm7, %ymm15, %ymm7
- vpslld $ 7, %ymm4, %ymm15
- vpsrld $25, %ymm4, %ymm4
- vpxor %ymm4, %ymm15, %ymm4
- vmovdqa 96(%rsp), %ymm15
- subq $2, %rax
- jnz .Lchacha_blocks_avx2_mainloop1
- vmovdqa %ymm8, 192(%rsp)
- vmovdqa %ymm9, 224(%rsp)
- vmovdqa %ymm10, 256(%rsp)
- vmovdqa %ymm11, 288(%rsp)
- vmovdqa %ymm12, 320(%rsp)
- vmovdqa %ymm13, 352(%rsp)
- vmovdqa %ymm14, 384(%rsp)
- vmovdqa %ymm15, 416(%rsp)
- vpbroadcastd 0(%rsp), %ymm8
- vpbroadcastd 4+0(%rsp), %ymm9
- vpbroadcastd 8+0(%rsp), %ymm10
- vpbroadcastd 12+0(%rsp), %ymm11
- vpbroadcastd 16(%rsp), %ymm12
- vpbroadcastd 4+16(%rsp), %ymm13
- vpbroadcastd 8+16(%rsp), %ymm14
- vpbroadcastd 12+16(%rsp), %ymm15
- vpaddd %ymm8, %ymm0, %ymm0
- vpaddd %ymm9, %ymm1, %ymm1
- vpaddd %ymm10, %ymm2, %ymm2
- vpaddd %ymm11, %ymm3, %ymm3
- vpaddd %ymm12, %ymm4, %ymm4
- vpaddd %ymm13, %ymm5, %ymm5
- vpaddd %ymm14, %ymm6, %ymm6
- vpaddd %ymm15, %ymm7, %ymm7
- vpunpckldq %ymm1, %ymm0, %ymm8
- vpunpckldq %ymm3, %ymm2, %ymm9
- vpunpckhdq %ymm1, %ymm0, %ymm12
- vpunpckhdq %ymm3, %ymm2, %ymm13
- vpunpckldq %ymm5, %ymm4, %ymm10
- vpunpckldq %ymm7, %ymm6, %ymm11
- vpunpckhdq %ymm5, %ymm4, %ymm14
- vpunpckhdq %ymm7, %ymm6, %ymm15
- vpunpcklqdq %ymm9, %ymm8, %ymm0
- vpunpcklqdq %ymm11, %ymm10, %ymm1
- vpunpckhqdq %ymm9, %ymm8, %ymm2
- vpunpckhqdq %ymm11, %ymm10, %ymm3
- vpunpcklqdq %ymm13, %ymm12, %ymm4
- vpunpcklqdq %ymm15, %ymm14, %ymm5
- vpunpckhqdq %ymm13, %ymm12, %ymm6
- vpunpckhqdq %ymm15, %ymm14, %ymm7
- vperm2i128 $0x20, %ymm1, %ymm0, %ymm8
- vperm2i128 $0x20, %ymm3, %ymm2, %ymm9
- vperm2i128 $0x31, %ymm1, %ymm0, %ymm12
- vperm2i128 $0x31, %ymm3, %ymm2, %ymm13
- vperm2i128 $0x20, %ymm5, %ymm4, %ymm10
- vperm2i128 $0x20, %ymm7, %ymm6, %ymm11
- vperm2i128 $0x31, %ymm5, %ymm4, %ymm14
- vperm2i128 $0x31, %ymm7, %ymm6, %ymm15
- andq %rsi, %rsi
- jz .Lchacha_blocks_avx2_noinput1
- vpxor 0(%rsi), %ymm8, %ymm8
- vpxor 64(%rsi), %ymm9, %ymm9
- vpxor 128(%rsi), %ymm10, %ymm10
- vpxor 192(%rsi), %ymm11, %ymm11
- vpxor 256(%rsi), %ymm12, %ymm12
- vpxor 320(%rsi), %ymm13, %ymm13
- vpxor 384(%rsi), %ymm14, %ymm14
- vpxor 448(%rsi), %ymm15, %ymm15
- vmovdqu %ymm8, 0(%rdx)
- vmovdqu %ymm9, 64(%rdx)
- vmovdqu %ymm10, 128(%rdx)
- vmovdqu %ymm11, 192(%rdx)
- vmovdqu %ymm12, 256(%rdx)
- vmovdqu %ymm13, 320(%rdx)
- vmovdqu %ymm14, 384(%rdx)
- vmovdqu %ymm15, 448(%rdx)
- vmovdqa 192(%rsp), %ymm0
- vmovdqa 224(%rsp), %ymm1
- vmovdqa 256(%rsp), %ymm2
- vmovdqa 288(%rsp), %ymm3
- vmovdqa 320(%rsp), %ymm4
- vmovdqa 352(%rsp), %ymm5
- vmovdqa 384(%rsp), %ymm6
- vmovdqa 416(%rsp), %ymm7
- vpbroadcastd 32(%rsp), %ymm8
- vpbroadcastd 4+32(%rsp), %ymm9
- vpbroadcastd 8+32(%rsp), %ymm10
- vpbroadcastd 12+32(%rsp), %ymm11
- vmovdqa 128(%rsp), %ymm12
- vmovdqa 160(%rsp), %ymm13
- vpbroadcastd 8+48(%rsp), %ymm14
- vpbroadcastd 12+48(%rsp), %ymm15
- vpaddd %ymm8, %ymm0, %ymm0
- vpaddd %ymm9, %ymm1, %ymm1
- vpaddd %ymm10, %ymm2, %ymm2
- vpaddd %ymm11, %ymm3, %ymm3
- vpaddd %ymm12, %ymm4, %ymm4
- vpaddd %ymm13, %ymm5, %ymm5
- vpaddd %ymm14, %ymm6, %ymm6
- vpaddd %ymm15, %ymm7, %ymm7
- vpunpckldq %ymm1, %ymm0, %ymm8
- vpunpckldq %ymm3, %ymm2, %ymm9
- vpunpckhdq %ymm1, %ymm0, %ymm12
- vpunpckhdq %ymm3, %ymm2, %ymm13
- vpunpckldq %ymm5, %ymm4, %ymm10
- vpunpckldq %ymm7, %ymm6, %ymm11
- vpunpckhdq %ymm5, %ymm4, %ymm14
- vpunpckhdq %ymm7, %ymm6, %ymm15
- vpunpcklqdq %ymm9, %ymm8, %ymm0
- vpunpcklqdq %ymm11, %ymm10, %ymm1
- vpunpckhqdq %ymm9, %ymm8, %ymm2
- vpunpckhqdq %ymm11, %ymm10, %ymm3
- vpunpcklqdq %ymm13, %ymm12, %ymm4
- vpunpcklqdq %ymm15, %ymm14, %ymm5
- vpunpckhqdq %ymm13, %ymm12, %ymm6
- vpunpckhqdq %ymm15, %ymm14, %ymm7
- vperm2i128 $0x20, %ymm1, %ymm0, %ymm8
- vperm2i128 $0x20, %ymm3, %ymm2, %ymm9
- vperm2i128 $0x31, %ymm1, %ymm0, %ymm12
- vperm2i128 $0x31, %ymm3, %ymm2, %ymm13
- vperm2i128 $0x20, %ymm5, %ymm4, %ymm10
- vperm2i128 $0x20, %ymm7, %ymm6, %ymm11
- vperm2i128 $0x31, %ymm5, %ymm4, %ymm14
- vperm2i128 $0x31, %ymm7, %ymm6, %ymm15
- vpxor 32(%rsi), %ymm8, %ymm8
- vpxor 96(%rsi), %ymm9, %ymm9
- vpxor 160(%rsi), %ymm10, %ymm10
- vpxor 224(%rsi), %ymm11, %ymm11
- vpxor 288(%rsi), %ymm12, %ymm12
- vpxor 352(%rsi), %ymm13, %ymm13
- vpxor 416(%rsi), %ymm14, %ymm14
- vpxor 480(%rsi), %ymm15, %ymm15
- vmovdqu %ymm8, 32(%rdx)
- vmovdqu %ymm9, 96(%rdx)
- vmovdqu %ymm10, 160(%rdx)
- vmovdqu %ymm11, 224(%rdx)
- vmovdqu %ymm12, 288(%rdx)
- vmovdqu %ymm13, 352(%rdx)
- vmovdqu %ymm14, 416(%rdx)
- vmovdqu %ymm15, 480(%rdx)
- addq $512, %rsi
- jmp .Lchacha_blocks_avx2_mainloop1_cont
-.Lchacha_blocks_avx2_noinput1:
- vmovdqu %ymm8, 0(%rdx)
- vmovdqu %ymm9, 64(%rdx)
- vmovdqu %ymm10, 128(%rdx)
- vmovdqu %ymm11, 192(%rdx)
- vmovdqu %ymm12, 256(%rdx)
- vmovdqu %ymm13, 320(%rdx)
- vmovdqu %ymm14, 384(%rdx)
- vmovdqu %ymm15, 448(%rdx)
- vmovdqa 192(%rsp), %ymm0
- vmovdqa 224(%rsp), %ymm1
- vmovdqa 256(%rsp), %ymm2
- vmovdqa 288(%rsp), %ymm3
- vmovdqa 320(%rsp), %ymm4
- vmovdqa 352(%rsp), %ymm5
- vmovdqa 384(%rsp), %ymm6
- vmovdqa 416(%rsp), %ymm7
- vpbroadcastd 32(%rsp), %ymm8
- vpbroadcastd 4+32(%rsp), %ymm9
- vpbroadcastd 8+32(%rsp), %ymm10
- vpbroadcastd 12+32(%rsp), %ymm11
- vmovdqa 128(%rsp), %ymm12
- vmovdqa 160(%rsp), %ymm13
- vpbroadcastd 8+48(%rsp), %ymm14
- vpbroadcastd 12+48(%rsp), %ymm15
- vpaddd %ymm8, %ymm0, %ymm0
- vpaddd %ymm9, %ymm1, %ymm1
- vpaddd %ymm10, %ymm2, %ymm2
- vpaddd %ymm11, %ymm3, %ymm3
- vpaddd %ymm12, %ymm4, %ymm4
- vpaddd %ymm13, %ymm5, %ymm5
- vpaddd %ymm14, %ymm6, %ymm6
- vpaddd %ymm15, %ymm7, %ymm7
- vpunpckldq %ymm1, %ymm0, %ymm8
- vpunpckldq %ymm3, %ymm2, %ymm9
- vpunpckhdq %ymm1, %ymm0, %ymm12
- vpunpckhdq %ymm3, %ymm2, %ymm13
- vpunpckldq %ymm5, %ymm4, %ymm10
- vpunpckldq %ymm7, %ymm6, %ymm11
- vpunpckhdq %ymm5, %ymm4, %ymm14
- vpunpckhdq %ymm7, %ymm6, %ymm15
- vpunpcklqdq %ymm9, %ymm8, %ymm0
- vpunpcklqdq %ymm11, %ymm10, %ymm1
- vpunpckhqdq %ymm9, %ymm8, %ymm2
- vpunpckhqdq %ymm11, %ymm10, %ymm3
- vpunpcklqdq %ymm13, %ymm12, %ymm4
- vpunpcklqdq %ymm15, %ymm14, %ymm5
- vpunpckhqdq %ymm13, %ymm12, %ymm6
- vpunpckhqdq %ymm15, %ymm14, %ymm7
- vperm2i128 $0x20, %ymm1, %ymm0, %ymm8
- vperm2i128 $0x20, %ymm3, %ymm2, %ymm9
- vperm2i128 $0x31, %ymm1, %ymm0, %ymm12
- vperm2i128 $0x31, %ymm3, %ymm2, %ymm13
- vperm2i128 $0x20, %ymm5, %ymm4, %ymm10
- vperm2i128 $0x20, %ymm7, %ymm6, %ymm11
- vperm2i128 $0x31, %ymm5, %ymm4, %ymm14
- vperm2i128 $0x31, %ymm7, %ymm6, %ymm15
- vmovdqu %ymm8, 32(%rdx)
- vmovdqu %ymm9, 96(%rdx)
- vmovdqu %ymm10, 160(%rdx)
- vmovdqu %ymm11, 224(%rdx)
- vmovdqu %ymm12, 288(%rdx)
- vmovdqu %ymm13, 352(%rdx)
- vmovdqu %ymm14, 416(%rdx)
- vmovdqu %ymm15, 480(%rdx)
-.Lchacha_blocks_avx2_mainloop1_cont:
- addq $512, %rdx
- subq $512, %rcx
- cmp $512, %rcx
- jae .Lchacha_blocks_avx2_atleast512
- cmp $256, %rcx
- jb .Lchacha_blocks_avx2_below256_fixup
-.Lchacha_blocks_avx2_atleast256:
- movq 48(%rsp), %rax
- leaq 1(%rax), %r8
- leaq 2(%rax), %r9
- leaq 3(%rax), %r10
- leaq 4(%rax), %rbx
- movl %eax, 128(%rsp)
- movl %r8d, 4+128(%rsp)
- movl %r9d, 8+128(%rsp)
- movl %r10d, 12+128(%rsp)
- shrq $32, %rax
- shrq $32, %r8
- shrq $32, %r9
- shrq $32, %r10
- movl %eax, 160(%rsp)
- movl %r8d, 4+160(%rsp)
- movl %r9d, 8+160(%rsp)
- movl %r10d, 12+160(%rsp)
- movq %rbx, 48(%rsp)
- movq 64(%rsp), %rax
- vpbroadcastd 0(%rsp), %xmm0
- vpbroadcastd 4+0(%rsp), %xmm1
- vpbroadcastd 8+0(%rsp), %xmm2
- vpbroadcastd 12+0(%rsp), %xmm3
- vpbroadcastd 16(%rsp), %xmm4
- vpbroadcastd 4+16(%rsp), %xmm5
- vpbroadcastd 8+16(%rsp), %xmm6
- vpbroadcastd 12+16(%rsp), %xmm7
- vpbroadcastd 32(%rsp), %xmm8
- vpbroadcastd 4+32(%rsp), %xmm9
- vpbroadcastd 8+32(%rsp), %xmm10
- vpbroadcastd 12+32(%rsp), %xmm11
- vmovdqa 128(%rsp), %xmm12
- vmovdqa 160(%rsp), %xmm13
- vpbroadcastd 8+48(%rsp), %xmm14
- vpbroadcastd 12+48(%rsp), %xmm15
-.Lchacha_blocks_avx2_mainloop2:
- vpaddd %xmm0, %xmm4, %xmm0
- vpaddd %xmm1, %xmm5, %xmm1
- vpxor %xmm12, %xmm0, %xmm12
- vpxor %xmm13, %xmm1, %xmm13
- vpaddd %xmm2, %xmm6, %xmm2
- vpaddd %xmm3, %xmm7, %xmm3
- vpxor %xmm14, %xmm2, %xmm14
- vpxor %xmm15, %xmm3, %xmm15
- vpshufb 448(%rsp), %xmm12, %xmm12
- vpshufb 448(%rsp), %xmm13, %xmm13
- vpaddd %xmm8, %xmm12, %xmm8
- vpaddd %xmm9, %xmm13, %xmm9
- vpshufb 448(%rsp), %xmm14, %xmm14
- vpshufb 448(%rsp), %xmm15, %xmm15
- vpaddd %xmm10, %xmm14, %xmm10
- vpaddd %xmm11, %xmm15, %xmm11
- vmovdqa %xmm12, 96(%rsp)
- vpxor %xmm4, %xmm8, %xmm4
- vpxor %xmm5, %xmm9, %xmm5
- vpslld $ 12, %xmm4, %xmm12
- vpsrld $20, %xmm4, %xmm4
- vpxor %xmm4, %xmm12, %xmm4
- vpslld $ 12, %xmm5, %xmm12
- vpsrld $20, %xmm5, %xmm5
- vpxor %xmm5, %xmm12, %xmm5
- vpxor %xmm6, %xmm10, %xmm6
- vpxor %xmm7, %xmm11, %xmm7
- vpslld $ 12, %xmm6, %xmm12
- vpsrld $20, %xmm6, %xmm6
- vpxor %xmm6, %xmm12, %xmm6
- vpslld $ 12, %xmm7, %xmm12
- vpsrld $20, %xmm7, %xmm7
- vpxor %xmm7, %xmm12, %xmm7
- vpaddd %xmm0, %xmm4, %xmm0
- vpaddd %xmm1, %xmm5, %xmm1
- vpxor 96(%rsp), %xmm0, %xmm12
- vpxor %xmm13, %xmm1, %xmm13
- vpaddd %xmm2, %xmm6, %xmm2
- vpaddd %xmm3, %xmm7, %xmm3
- vpxor %xmm14, %xmm2, %xmm14
- vpxor %xmm15, %xmm3, %xmm15
- vpshufb 480(%rsp), %xmm12, %xmm12
- vpshufb 480(%rsp), %xmm13, %xmm13
- vpaddd %xmm8, %xmm12, %xmm8
- vpaddd %xmm9, %xmm13, %xmm9
- vpshufb 480(%rsp), %xmm14, %xmm14
- vpshufb 480(%rsp), %xmm15, %xmm15
- vpaddd %xmm10, %xmm14, %xmm10
- vpaddd %xmm11, %xmm15, %xmm11
- vmovdqa %xmm12, 96(%rsp)
- vpxor %xmm4, %xmm8, %xmm4
- vpxor %xmm5, %xmm9, %xmm5
- vpslld $ 7, %xmm4, %xmm12
- vpsrld $25, %xmm4, %xmm4
- vpxor %xmm4, %xmm12, %xmm4
- vpslld $ 7, %xmm5, %xmm12
- vpsrld $25, %xmm5, %xmm5
- vpxor %xmm5, %xmm12, %xmm5
- vpxor %xmm6, %xmm10, %xmm6
- vpxor %xmm7, %xmm11, %xmm7
- vpslld $ 7, %xmm6, %xmm12
- vpsrld $25, %xmm6, %xmm6
- vpxor %xmm6, %xmm12, %xmm6
- vpslld $ 7, %xmm7, %xmm12
- vpsrld $25, %xmm7, %xmm7
- vpxor %xmm7, %xmm12, %xmm7
- vpaddd %xmm0, %xmm5, %xmm0
- vpaddd %xmm1, %xmm6, %xmm1
- vpxor %xmm15, %xmm0, %xmm15
- vpxor 96(%rsp), %xmm1, %xmm12
- vpaddd %xmm2, %xmm7, %xmm2
- vpaddd %xmm3, %xmm4, %xmm3
- vpxor %xmm13, %xmm2, %xmm13
- vpxor %xmm14, %xmm3, %xmm14
- vpshufb 448(%rsp), %xmm15, %xmm15
- vpshufb 448(%rsp), %xmm12, %xmm12
- vpaddd %xmm10, %xmm15, %xmm10
- vpaddd %xmm11, %xmm12, %xmm11
- vpshufb 448(%rsp), %xmm13, %xmm13
- vpshufb 448(%rsp), %xmm14, %xmm14
- vpaddd %xmm8, %xmm13, %xmm8
- vpaddd %xmm9, %xmm14, %xmm9
- vmovdqa %xmm15, 96(%rsp)
- vpxor %xmm5, %xmm10, %xmm5
- vpxor %xmm6, %xmm11, %xmm6
- vpslld $ 12, %xmm5, %xmm15
- vpsrld $20, %xmm5, %xmm5
- vpxor %xmm5, %xmm15, %xmm5
- vpslld $ 12, %xmm6, %xmm15
- vpsrld $20, %xmm6, %xmm6
- vpxor %xmm6, %xmm15, %xmm6
- vpxor %xmm7, %xmm8, %xmm7
- vpxor %xmm4, %xmm9, %xmm4
- vpslld $ 12, %xmm7, %xmm15
- vpsrld $20, %xmm7, %xmm7
- vpxor %xmm7, %xmm15, %xmm7
- vpslld $ 12, %xmm4, %xmm15
- vpsrld $20, %xmm4, %xmm4
- vpxor %xmm4, %xmm15, %xmm4
- vpaddd %xmm0, %xmm5, %xmm0
- vpaddd %xmm1, %xmm6, %xmm1
- vpxor 96(%rsp), %xmm0, %xmm15
- vpxor %xmm12, %xmm1, %xmm12
- vpaddd %xmm2, %xmm7, %xmm2
- vpaddd %xmm3, %xmm4, %xmm3
- vpxor %xmm13, %xmm2, %xmm13
- vpxor %xmm14, %xmm3, %xmm14
- vpshufb 480(%rsp), %xmm15, %xmm15
- vpshufb 480(%rsp), %xmm12, %xmm12
- vpaddd %xmm10, %xmm15, %xmm10
- vpaddd %xmm11, %xmm12, %xmm11
- vpshufb 480(%rsp), %xmm13, %xmm13
- vpshufb 480(%rsp), %xmm14, %xmm14
- vpaddd %xmm8, %xmm13, %xmm8
- vpaddd %xmm9, %xmm14, %xmm9
- vmovdqa %xmm15, 96(%rsp)
- vpxor %xmm5, %xmm10, %xmm5
- vpxor %xmm6, %xmm11, %xmm6
- vpslld $ 7, %xmm5, %xmm15
- vpsrld $25, %xmm5, %xmm5
- vpxor %xmm5, %xmm15, %xmm5
- vpslld $ 7, %xmm6, %xmm15
- vpsrld $25, %xmm6, %xmm6
- vpxor %xmm6, %xmm15, %xmm6
- vpxor %xmm7, %xmm8, %xmm7
- vpxor %xmm4, %xmm9, %xmm4
- vpslld $ 7, %xmm7, %xmm15
- vpsrld $25, %xmm7, %xmm7
- vpxor %xmm7, %xmm15, %xmm7
- vpslld $ 7, %xmm4, %xmm15
- vpsrld $25, %xmm4, %xmm4
- vpxor %xmm4, %xmm15, %xmm4
- vmovdqa 96(%rsp), %xmm15
- subq $2, %rax
- jnz .Lchacha_blocks_avx2_mainloop2
- vmovdqa %xmm8, 192(%rsp)
- vmovdqa %xmm9, 208(%rsp)
- vmovdqa %xmm10, 224(%rsp)
- vmovdqa %xmm11, 240(%rsp)
- vmovdqa %xmm12, 256(%rsp)
- vmovdqa %xmm13, 272(%rsp)
- vmovdqa %xmm14, 288(%rsp)
- vmovdqa %xmm15, 304(%rsp)
- vpbroadcastd 0(%rsp), %xmm8
- vpbroadcastd 4+0(%rsp), %xmm9
- vpbroadcastd 8+0(%rsp), %xmm10
- vpbroadcastd 12+0(%rsp), %xmm11
- vpbroadcastd 16(%rsp), %xmm12
- vpbroadcastd 4+16(%rsp), %xmm13
- vpbroadcastd 8+16(%rsp), %xmm14
- vpbroadcastd 12+16(%rsp), %xmm15
- vpaddd %xmm8, %xmm0, %xmm0
- vpaddd %xmm9, %xmm1, %xmm1
- vpaddd %xmm10, %xmm2, %xmm2
- vpaddd %xmm11, %xmm3, %xmm3
- vpaddd %xmm12, %xmm4, %xmm4
- vpaddd %xmm13, %xmm5, %xmm5
- vpaddd %xmm14, %xmm6, %xmm6
- vpaddd %xmm15, %xmm7, %xmm7
- vpunpckldq %xmm1, %xmm0, %xmm8
- vpunpckldq %xmm3, %xmm2, %xmm9
- vpunpckhdq %xmm1, %xmm0, %xmm12
- vpunpckhdq %xmm3, %xmm2, %xmm13
- vpunpckldq %xmm5, %xmm4, %xmm10
- vpunpckldq %xmm7, %xmm6, %xmm11
- vpunpckhdq %xmm5, %xmm4, %xmm14
- vpunpckhdq %xmm7, %xmm6, %xmm15
- vpunpcklqdq %xmm9, %xmm8, %xmm0
- vpunpcklqdq %xmm11, %xmm10, %xmm1
- vpunpckhqdq %xmm9, %xmm8, %xmm2
- vpunpckhqdq %xmm11, %xmm10, %xmm3
- vpunpcklqdq %xmm13, %xmm12, %xmm4
- vpunpcklqdq %xmm15, %xmm14, %xmm5
- vpunpckhqdq %xmm13, %xmm12, %xmm6
- vpunpckhqdq %xmm15, %xmm14, %xmm7
- andq %rsi, %rsi
- jz .Lchacha_blocks_avx2_noinput2
- vpxor 0(%rsi), %xmm0, %xmm0
- vpxor 16(%rsi), %xmm1, %xmm1
- vpxor 64(%rsi), %xmm2, %xmm2
- vpxor 80(%rsi), %xmm3, %xmm3
- vpxor 128(%rsi), %xmm4, %xmm4
- vpxor 144(%rsi), %xmm5, %xmm5
- vpxor 192(%rsi), %xmm6, %xmm6
- vpxor 208(%rsi), %xmm7, %xmm7
- vmovdqu %xmm0, 0(%rdx)
- vmovdqu %xmm1, 16(%rdx)
- vmovdqu %xmm2, 64(%rdx)
- vmovdqu %xmm3, 80(%rdx)
- vmovdqu %xmm4, 128(%rdx)
- vmovdqu %xmm5, 144(%rdx)
- vmovdqu %xmm6, 192(%rdx)
- vmovdqu %xmm7, 208(%rdx)
- vmovdqa 192(%rsp), %xmm0
- vmovdqa 208(%rsp), %xmm1
- vmovdqa 224(%rsp), %xmm2
- vmovdqa 240(%rsp), %xmm3
- vmovdqa 256(%rsp), %xmm4
- vmovdqa 272(%rsp), %xmm5
- vmovdqa 288(%rsp), %xmm6
- vmovdqa 304(%rsp), %xmm7
- vpbroadcastd 32(%rsp), %xmm8
- vpbroadcastd 4+32(%rsp), %xmm9
- vpbroadcastd 8+32(%rsp), %xmm10
- vpbroadcastd 12+32(%rsp), %xmm11
- vmovdqa 128(%rsp), %xmm12
- vmovdqa 160(%rsp), %xmm13
- vpbroadcastd 8+48(%rsp), %xmm14
- vpbroadcastd 12+48(%rsp), %xmm15
- vpaddd %xmm8, %xmm0, %xmm0
- vpaddd %xmm9, %xmm1, %xmm1
- vpaddd %xmm10, %xmm2, %xmm2
- vpaddd %xmm11, %xmm3, %xmm3
- vpaddd %xmm12, %xmm4, %xmm4
- vpaddd %xmm13, %xmm5, %xmm5
- vpaddd %xmm14, %xmm6, %xmm6
- vpaddd %xmm15, %xmm7, %xmm7
- vpunpckldq %xmm1, %xmm0, %xmm8
- vpunpckldq %xmm3, %xmm2, %xmm9
- vpunpckhdq %xmm1, %xmm0, %xmm12
- vpunpckhdq %xmm3, %xmm2, %xmm13
- vpunpckldq %xmm5, %xmm4, %xmm10
- vpunpckldq %xmm7, %xmm6, %xmm11
- vpunpckhdq %xmm5, %xmm4, %xmm14
- vpunpckhdq %xmm7, %xmm6, %xmm15
- vpunpcklqdq %xmm9, %xmm8, %xmm0
- vpunpcklqdq %xmm11, %xmm10, %xmm1
- vpunpckhqdq %xmm9, %xmm8, %xmm2
- vpunpckhqdq %xmm11, %xmm10, %xmm3
- vpunpcklqdq %xmm13, %xmm12, %xmm4
- vpunpcklqdq %xmm15, %xmm14, %xmm5
- vpunpckhqdq %xmm13, %xmm12, %xmm6
- vpunpckhqdq %xmm15, %xmm14, %xmm7
- vpxor 32(%rsi), %xmm0, %xmm0
- vpxor 48(%rsi), %xmm1, %xmm1
- vpxor 96(%rsi), %xmm2, %xmm2
- vpxor 112(%rsi), %xmm3, %xmm3
- vpxor 160(%rsi), %xmm4, %xmm4
- vpxor 176(%rsi), %xmm5, %xmm5
- vpxor 224(%rsi), %xmm6, %xmm6
- vpxor 240(%rsi), %xmm7, %xmm7
- vmovdqu %xmm0, 32(%rdx)
- vmovdqu %xmm1, 48(%rdx)
- vmovdqu %xmm2, 96(%rdx)
- vmovdqu %xmm3, 112(%rdx)
- vmovdqu %xmm4, 160(%rdx)
- vmovdqu %xmm5, 176(%rdx)
- vmovdqu %xmm6, 224(%rdx)
- vmovdqu %xmm7, 240(%rdx)
- addq $256, %rsi
- jmp .Lchacha_blocks_avx2_mainloop2_cont
-.Lchacha_blocks_avx2_noinput2:
- vmovdqu %xmm0, 0(%rdx)
- vmovdqu %xmm1, 16(%rdx)
- vmovdqu %xmm2, 64(%rdx)
- vmovdqu %xmm3, 80(%rdx)
- vmovdqu %xmm4, 128(%rdx)
- vmovdqu %xmm5, 144(%rdx)
- vmovdqu %xmm6, 192(%rdx)
- vmovdqu %xmm7, 208(%rdx)
- vmovdqa 192(%rsp), %xmm0
- vmovdqa 208(%rsp), %xmm1
- vmovdqa 224(%rsp), %xmm2
- vmovdqa 240(%rsp), %xmm3
- vmovdqa 256(%rsp), %xmm4
- vmovdqa 272(%rsp), %xmm5
- vmovdqa 288(%rsp), %xmm6
- vmovdqa 304(%rsp), %xmm7
- vpbroadcastd 32(%rsp), %xmm8
- vpbroadcastd 4+32(%rsp), %xmm9
- vpbroadcastd 8+32(%rsp), %xmm10
- vpbroadcastd 12+32(%rsp), %xmm11
- vmovdqa 128(%rsp), %xmm12
- vmovdqa 160(%rsp), %xmm13
- vpbroadcastd 8+48(%rsp), %xmm14
- vpbroadcastd 12+48(%rsp), %xmm15
- vpaddd %xmm8, %xmm0, %xmm0
- vpaddd %xmm9, %xmm1, %xmm1
- vpaddd %xmm10, %xmm2, %xmm2
- vpaddd %xmm11, %xmm3, %xmm3
- vpaddd %xmm12, %xmm4, %xmm4
- vpaddd %xmm13, %xmm5, %xmm5
- vpaddd %xmm14, %xmm6, %xmm6
- vpaddd %xmm15, %xmm7, %xmm7
- vpunpckldq %xmm1, %xmm0, %xmm8
- vpunpckldq %xmm3, %xmm2, %xmm9
- vpunpckhdq %xmm1, %xmm0, %xmm12
- vpunpckhdq %xmm3, %xmm2, %xmm13
- vpunpckldq %xmm5, %xmm4, %xmm10
- vpunpckldq %xmm7, %xmm6, %xmm11
- vpunpckhdq %xmm5, %xmm4, %xmm14
- vpunpckhdq %xmm7, %xmm6, %xmm15
- vpunpcklqdq %xmm9, %xmm8, %xmm0
- vpunpcklqdq %xmm11, %xmm10, %xmm1
- vpunpckhqdq %xmm9, %xmm8, %xmm2
- vpunpckhqdq %xmm11, %xmm10, %xmm3
- vpunpcklqdq %xmm13, %xmm12, %xmm4
- vpunpcklqdq %xmm15, %xmm14, %xmm5
- vpunpckhqdq %xmm13, %xmm12, %xmm6
- vpunpckhqdq %xmm15, %xmm14, %xmm7
- vmovdqu %xmm0, 32(%rdx)
- vmovdqu %xmm1, 48(%rdx)
- vmovdqu %xmm2, 96(%rdx)
- vmovdqu %xmm3, 112(%rdx)
- vmovdqu %xmm4, 160(%rdx)
- vmovdqu %xmm5, 176(%rdx)
- vmovdqu %xmm6, 224(%rdx)
- vmovdqu %xmm7, 240(%rdx)
-.Lchacha_blocks_avx2_mainloop2_cont:
- addq $256, %rdx
- subq $256, %rcx
- cmp $256, %rcx
- jae .Lchacha_blocks_avx2_atleast256
-.Lchacha_blocks_avx2_below256_fixup:
- vmovdqa 448(%rsp), %xmm6
- vmovdqa 480(%rsp), %xmm7
- vmovdqa 0(%rsp), %xmm8
- vmovdqa 16(%rsp), %xmm9
- vmovdqa 32(%rsp), %xmm10
- vmovdqa 48(%rsp), %xmm11
- movq $1, %r9
-.Lchacha_blocks_avx2_below256:
- vmovq %r9, %xmm5
- andq %rcx, %rcx
- jz .Lchacha_blocks_avx2_done
- cmpq $64, %rcx
- jae .Lchacha_blocks_avx2_above63
- movq %rdx, %r9
- andq %rsi, %rsi
- jz .Lchacha_blocks_avx2_noinput3
- movq %rcx, %r10
- movq %rsp, %rdx
- addq %r10, %rsi
- addq %r10, %rdx
- negq %r10
-.Lchacha_blocks_avx2_copyinput:
- movb (%rsi, %r10), %al
- movb %al, (%rdx, %r10)
- incq %r10
- jnz .Lchacha_blocks_avx2_copyinput
- movq %rsp, %rsi
-.Lchacha_blocks_avx2_noinput3:
- movq %rsp, %rdx
-.Lchacha_blocks_avx2_above63:
- vmovdqa %xmm8, %xmm0
- vmovdqa %xmm9, %xmm1
- vmovdqa %xmm10, %xmm2
- vmovdqa %xmm11, %xmm3
- movq 64(%rsp), %rax
-.Lchacha_blocks_avx2_mainloop3:
- vpaddd %xmm0, %xmm1, %xmm0
- vpxor %xmm3, %xmm0, %xmm3
- vpshufb %xmm6, %xmm3, %xmm3
- vpaddd %xmm2, %xmm3, %xmm2
- vpxor %xmm1, %xmm2, %xmm1
- vpslld $12, %xmm1, %xmm4
- vpsrld $20, %xmm1, %xmm1
- vpxor %xmm1, %xmm4, %xmm1
- vpaddd %xmm0, %xmm1, %xmm0
- vpxor %xmm3, %xmm0, %xmm3
- vpshufb %xmm7, %xmm3, %xmm3
- vpshufd $0x93, %xmm0, %xmm0
- vpaddd %xmm2, %xmm3, %xmm2
- vpshufd $0x4e, %xmm3, %xmm3
- vpxor %xmm1, %xmm2, %xmm1
- vpshufd $0x39, %xmm2, %xmm2
- vpslld $7, %xmm1, %xmm4
- vpsrld $25, %xmm1, %xmm1
- vpxor %xmm1, %xmm4, %xmm1
- vpaddd %xmm0, %xmm1, %xmm0
- vpxor %xmm3, %xmm0, %xmm3
- vpshufb %xmm6, %xmm3, %xmm3
- vpaddd %xmm2, %xmm3, %xmm2
- vpxor %xmm1, %xmm2, %xmm1
- vpslld $12, %xmm1, %xmm4
- vpsrld $20, %xmm1, %xmm1
- vpxor %xmm1, %xmm4, %xmm1
- vpaddd %xmm0, %xmm1, %xmm0
- vpxor %xmm3, %xmm0, %xmm3
- vpshufb %xmm7, %xmm3, %xmm3
- vpshufd $0x39, %xmm0, %xmm0
- vpaddd %xmm2, %xmm3, %xmm2
- vpshufd $0x4e, %xmm3, %xmm3
- vpxor %xmm1, %xmm2, %xmm1
- vpshufd $0x93, %xmm2, %xmm2
- vpslld $7, %xmm1, %xmm4
- vpsrld $25, %xmm1, %xmm1
- vpxor %xmm1, %xmm4, %xmm1
- subq $2, %rax
- jnz .Lchacha_blocks_avx2_mainloop3
- vpaddd %xmm0, %xmm8, %xmm0
- vpaddd %xmm1, %xmm9, %xmm1
- vpaddd %xmm2, %xmm10, %xmm2
- vpaddd %xmm3, %xmm11, %xmm3
- andq %rsi, %rsi
- jz .Lchacha_blocks_avx2_noinput4
- vpxor 0(%rsi), %xmm0, %xmm0
- vpxor 16(%rsi), %xmm1, %xmm1
- vpxor 32(%rsi), %xmm2, %xmm2
- vpxor 48(%rsi), %xmm3, %xmm3
- addq $64, %rsi
-.Lchacha_blocks_avx2_noinput4:
- vmovdqu %xmm0, 0(%rdx)
- vmovdqu %xmm1, 16(%rdx)
- vmovdqu %xmm2, 32(%rdx)
- vmovdqu %xmm3, 48(%rdx)
- vpaddq %xmm11, %xmm5, %xmm11
- cmpq $64, %rcx
- jbe .Lchacha_blocks_avx2_mainloop3_finishup
- addq $64, %rdx
- subq $64, %rcx
- jmp .Lchacha_blocks_avx2_below256
-.Lchacha_blocks_avx2_mainloop3_finishup:
- cmpq $64, %rcx
- je .Lchacha_blocks_avx2_done
- addq %rcx, %r9
- addq %rcx, %rdx
- negq %rcx
-.Lchacha_blocks_avx2_copyoutput:
- movb (%rdx, %rcx), %al
- movb %al, (%r9, %rcx)
- incq %rcx
- jnz .Lchacha_blocks_avx2_copyoutput
-.Lchacha_blocks_avx2_done:
- vmovdqu %xmm11, 48(%rdi)
- movq %rbp, %rsp
- popq %r14
- popq %r13
- popq %r12
- popq %rbp
- popq %rbx
- vzeroall
- movl $(63 + 512), %eax
- ret
-ELF(.size _gcry_chacha20_amd64_avx2_blocks,.-_gcry_chacha20_amd64_avx2_blocks;)
-
-.align 16
-.LC:
-.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 /* pshufb rotate by 16 */
-.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 /* pshufb rotate by 8 */
-
-#endif /*defined(USE_CHACHA20)*/
-#endif /*__x86_64*/
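
For reference: the removed AVX2 routine keeps one ChaCha20 state word per ymm register, broadcast across eight blocks, and implements the 12- and 7-bit rotations as shift-left/shift-right/xor triples, while the 16- and 8-bit rotations go through vpshufb with the .LC masks. A plain-C sketch of the quarter-round those sequences compute (helper names are illustrative only, not part of the patch or of libgcrypt):

    #include <stdint.h>

    /* vpslld $c + vpsrld $(32-c) + vpxor is a 32-bit rotate left by c. */
    static inline uint32_t rol32(uint32_t x, int c)
    {
      return (x << c) ^ (x >> (32 - c));
    }

    /* One ChaCha20 quarter-round over four state words. */
    static inline void quarterround(uint32_t *a, uint32_t *b,
                                    uint32_t *c, uint32_t *d)
    {
      *a += *b; *d = rol32(*d ^ *a, 16);
      *c += *d; *b = rol32(*b ^ *c, 12);
      *a += *b; *d = rol32(*d ^ *a, 8);
      *c += *d; *b = rol32(*b ^ *c, 7);
    }

This is the same quarter-round the new generic chacha20_blocks() expresses with the QUARTERROUND macro in the chacha20.c changes further below.
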
diff --git a/cipher/chacha20-sse2-amd64.S b/cipher/chacha20-sse2-amd64.S
deleted file mode 100644
index 2b9842c13..000000000
--- a/cipher/chacha20-sse2-amd64.S
+++ /dev/null
@@ -1,659 +0,0 @@
-/* chacha20-sse2-amd64.S - AMD64/SSE2 implementation of ChaCha20
- *
- * Copyright (C) 2014 Jussi Kivilinna <jussi.kivilinna at iki.fi>
- *
- * This file is part of Libgcrypt.
- *
- * Libgcrypt is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * Libgcrypt is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*
- * Based on public domain implementation by Andrew Moon at
- * https://github.com/floodyberry/chacha-opt
- */
-
-#ifdef __x86_64__
-#include <config.h>
-
-#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
- defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && USE_CHACHA20
-
-#ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS
-# define ELF(...) __VA_ARGS__
-#else
-# define ELF(...) /*_*/
-#endif
-
-.text
-
-.align 8
-.globl _gcry_chacha20_amd64_sse2_blocks
-ELF(.type _gcry_chacha20_amd64_sse2_blocks,@function;)
-_gcry_chacha20_amd64_sse2_blocks:
-.Lchacha_blocks_sse2_local:
- pushq %rbx
- pushq %rbp
- movq %rsp, %rbp
- andq $~63, %rsp
- subq $512, %rsp
- movdqu (%rdi), %xmm8
- movdqu 16(%rdi), %xmm9
- movdqu 32(%rdi), %xmm10
- movdqu 48(%rdi), %xmm11
- movq $20, %rax
- movq $1, %r9
- movdqa %xmm8, 0(%rsp)
- movdqa %xmm9, 16(%rsp)
- movdqa %xmm10, 32(%rsp)
- movdqa %xmm11, 48(%rsp)
- movq %rax, 64(%rsp)
- cmpq $256, %rcx
- jb .Lchacha_blocks_sse2_below256
- pshufd $0x00, %xmm8, %xmm0
- pshufd $0x55, %xmm8, %xmm1
- pshufd $0xaa, %xmm8, %xmm2
- pshufd $0xff, %xmm8, %xmm3
- movdqa %xmm0, 128(%rsp)
- movdqa %xmm1, 144(%rsp)
- movdqa %xmm2, 160(%rsp)
- movdqa %xmm3, 176(%rsp)
- pshufd $0x00, %xmm9, %xmm0
- pshufd $0x55, %xmm9, %xmm1
- pshufd $0xaa, %xmm9, %xmm2
- pshufd $0xff, %xmm9, %xmm3
- movdqa %xmm0, 192(%rsp)
- movdqa %xmm1, 208(%rsp)
- movdqa %xmm2, 224(%rsp)
- movdqa %xmm3, 240(%rsp)
- pshufd $0x00, %xmm10, %xmm0
- pshufd $0x55, %xmm10, %xmm1
- pshufd $0xaa, %xmm10, %xmm2
- pshufd $0xff, %xmm10, %xmm3
- movdqa %xmm0, 256(%rsp)
- movdqa %xmm1, 272(%rsp)
- movdqa %xmm2, 288(%rsp)
- movdqa %xmm3, 304(%rsp)
- pshufd $0xaa, %xmm11, %xmm0
- pshufd $0xff, %xmm11, %xmm1
- movdqa %xmm0, 352(%rsp)
- movdqa %xmm1, 368(%rsp)
- jmp .Lchacha_blocks_sse2_atleast256
-.p2align 6,,63
-.Lchacha_blocks_sse2_atleast256:
- movq 48(%rsp), %rax
- leaq 1(%rax), %r8
- leaq 2(%rax), %r9
- leaq 3(%rax), %r10
- leaq 4(%rax), %rbx
- movl %eax, 320(%rsp)
- movl %r8d, 4+320(%rsp)
- movl %r9d, 8+320(%rsp)
- movl %r10d, 12+320(%rsp)
- shrq $32, %rax
- shrq $32, %r8
- shrq $32, %r9
- shrq $32, %r10
- movl %eax, 336(%rsp)
- movl %r8d, 4+336(%rsp)
- movl %r9d, 8+336(%rsp)
- movl %r10d, 12+336(%rsp)
- movq %rbx, 48(%rsp)
- movq 64(%rsp), %rax
- movdqa 128(%rsp), %xmm0
- movdqa 144(%rsp), %xmm1
- movdqa 160(%rsp), %xmm2
- movdqa 176(%rsp), %xmm3
- movdqa 192(%rsp), %xmm4
- movdqa 208(%rsp), %xmm5
- movdqa 224(%rsp), %xmm6
- movdqa 240(%rsp), %xmm7
- movdqa 256(%rsp), %xmm8
- movdqa 272(%rsp), %xmm9
- movdqa 288(%rsp), %xmm10
- movdqa 304(%rsp), %xmm11
- movdqa 320(%rsp), %xmm12
- movdqa 336(%rsp), %xmm13
- movdqa 352(%rsp), %xmm14
- movdqa 368(%rsp), %xmm15
-.Lchacha_blocks_sse2_mainloop1:
- paddd %xmm4, %xmm0
- paddd %xmm5, %xmm1
- pxor %xmm0, %xmm12
- pxor %xmm1, %xmm13
- paddd %xmm6, %xmm2
- paddd %xmm7, %xmm3
- movdqa %xmm6, 96(%rsp)
- pxor %xmm2, %xmm14
- pxor %xmm3, %xmm15
- pshuflw $0xb1,%xmm12,%xmm12
- pshufhw $0xb1,%xmm12,%xmm12
- pshuflw $0xb1,%xmm13,%xmm13
- pshufhw $0xb1,%xmm13,%xmm13
- pshuflw $0xb1,%xmm14,%xmm14
- pshufhw $0xb1,%xmm14,%xmm14
- pshuflw $0xb1,%xmm15,%xmm15
- pshufhw $0xb1,%xmm15,%xmm15
- paddd %xmm12, %xmm8
- paddd %xmm13, %xmm9
- paddd %xmm14, %xmm10
- paddd %xmm15, %xmm11
- movdqa %xmm12, 112(%rsp)
- pxor %xmm8, %xmm4
- pxor %xmm9, %xmm5
- movdqa 96(%rsp), %xmm6
- movdqa %xmm4, %xmm12
- pslld $ 12, %xmm4
- psrld $20, %xmm12
- pxor %xmm12, %xmm4
- movdqa %xmm5, %xmm12
- pslld $ 12, %xmm5
- psrld $20, %xmm12
- pxor %xmm12, %xmm5
- pxor %xmm10, %xmm6
- pxor %xmm11, %xmm7
- movdqa %xmm6, %xmm12
- pslld $ 12, %xmm6
- psrld $20, %xmm12
- pxor %xmm12, %xmm6
- movdqa %xmm7, %xmm12
- pslld $ 12, %xmm7
- psrld $20, %xmm12
- pxor %xmm12, %xmm7
- movdqa 112(%rsp), %xmm12
- paddd %xmm4, %xmm0
- paddd %xmm5, %xmm1
- pxor %xmm0, %xmm12
- pxor %xmm1, %xmm13
- paddd %xmm6, %xmm2
- paddd %xmm7, %xmm3
- movdqa %xmm6, 96(%rsp)
- pxor %xmm2, %xmm14
- pxor %xmm3, %xmm15
- movdqa %xmm12, %xmm6
- pslld $ 8, %xmm12
- psrld $24, %xmm6
- pxor %xmm6, %xmm12
- movdqa %xmm13, %xmm6
- pslld $ 8, %xmm13
- psrld $24, %xmm6
- pxor %xmm6, %xmm13
- paddd %xmm12, %xmm8
- paddd %xmm13, %xmm9
- movdqa %xmm14, %xmm6
- pslld $ 8, %xmm14
- psrld $24, %xmm6
- pxor %xmm6, %xmm14
- movdqa %xmm15, %xmm6
- pslld $ 8, %xmm15
- psrld $24, %xmm6
- pxor %xmm6, %xmm15
- paddd %xmm14, %xmm10
- paddd %xmm15, %xmm11
- movdqa %xmm12, 112(%rsp)
- pxor %xmm8, %xmm4
- pxor %xmm9, %xmm5
- movdqa 96(%rsp), %xmm6
- movdqa %xmm4, %xmm12
- pslld $ 7, %xmm4
- psrld $25, %xmm12
- pxor %xmm12, %xmm4
- movdqa %xmm5, %xmm12
- pslld $ 7, %xmm5
- psrld $25, %xmm12
- pxor %xmm12, %xmm5
- pxor %xmm10, %xmm6
- pxor %xmm11, %xmm7
- movdqa %xmm6, %xmm12
- pslld $ 7, %xmm6
- psrld $25, %xmm12
- pxor %xmm12, %xmm6
- movdqa %xmm7, %xmm12
- pslld $ 7, %xmm7
- psrld $25, %xmm12
- pxor %xmm12, %xmm7
- movdqa 112(%rsp), %xmm12
- paddd %xmm5, %xmm0
- paddd %xmm6, %xmm1
- pxor %xmm0, %xmm15
- pxor %xmm1, %xmm12
- paddd %xmm7, %xmm2
- paddd %xmm4, %xmm3
- movdqa %xmm7, 96(%rsp)
- pxor %xmm2, %xmm13
- pxor %xmm3, %xmm14
- pshuflw $0xb1,%xmm15,%xmm15
- pshufhw $0xb1,%xmm15,%xmm15
- pshuflw $0xb1,%xmm12,%xmm12
- pshufhw $0xb1,%xmm12,%xmm12
- pshuflw $0xb1,%xmm13,%xmm13
- pshufhw $0xb1,%xmm13,%xmm13
- pshuflw $0xb1,%xmm14,%xmm14
- pshufhw $0xb1,%xmm14,%xmm14
- paddd %xmm15, %xmm10
- paddd %xmm12, %xmm11
- paddd %xmm13, %xmm8
- paddd %xmm14, %xmm9
- movdqa %xmm15, 112(%rsp)
- pxor %xmm10, %xmm5
- pxor %xmm11, %xmm6
- movdqa 96(%rsp), %xmm7
- movdqa %xmm5, %xmm15
- pslld $ 12, %xmm5
- psrld $20, %xmm15
- pxor %xmm15, %xmm5
- movdqa %xmm6, %xmm15
- pslld $ 12, %xmm6
- psrld $20, %xmm15
- pxor %xmm15, %xmm6
- pxor %xmm8, %xmm7
- pxor %xmm9, %xmm4
- movdqa %xmm7, %xmm15
- pslld $ 12, %xmm7
- psrld $20, %xmm15
- pxor %xmm15, %xmm7
- movdqa %xmm4, %xmm15
- pslld $ 12, %xmm4
- psrld $20, %xmm15
- pxor %xmm15, %xmm4
- movdqa 112(%rsp), %xmm15
- paddd %xmm5, %xmm0
- paddd %xmm6, %xmm1
- pxor %xmm0, %xmm15
- pxor %xmm1, %xmm12
- paddd %xmm7, %xmm2
- paddd %xmm4, %xmm3
- movdqa %xmm7, 96(%rsp)
- pxor %xmm2, %xmm13
- pxor %xmm3, %xmm14
- movdqa %xmm15, %xmm7
- pslld $ 8, %xmm15
- psrld $24, %xmm7
- pxor %xmm7, %xmm15
- movdqa %xmm12, %xmm7
- pslld $ 8, %xmm12
- psrld $24, %xmm7
- pxor %xmm7, %xmm12
- paddd %xmm15, %xmm10
- paddd %xmm12, %xmm11
- movdqa %xmm13, %xmm7
- pslld $ 8, %xmm13
- psrld $24, %xmm7
- pxor %xmm7, %xmm13
- movdqa %xmm14, %xmm7
- pslld $ 8, %xmm14
- psrld $24, %xmm7
- pxor %xmm7, %xmm14
- paddd %xmm13, %xmm8
- paddd %xmm14, %xmm9
- movdqa %xmm15, 112(%rsp)
- pxor %xmm10, %xmm5
- pxor %xmm11, %xmm6
- movdqa 96(%rsp), %xmm7
- movdqa %xmm5, %xmm15
- pslld $ 7, %xmm5
- psrld $25, %xmm15
- pxor %xmm15, %xmm5
- movdqa %xmm6, %xmm15
- pslld $ 7, %xmm6
- psrld $25, %xmm15
- pxor %xmm15, %xmm6
- pxor %xmm8, %xmm7
- pxor %xmm9, %xmm4
- movdqa %xmm7, %xmm15
- pslld $ 7, %xmm7
- psrld $25, %xmm15
- pxor %xmm15, %xmm7
- movdqa %xmm4, %xmm15
- pslld $ 7, %xmm4
- psrld $25, %xmm15
- pxor %xmm15, %xmm4
- movdqa 112(%rsp), %xmm15
- subq $2, %rax
- jnz .Lchacha_blocks_sse2_mainloop1
- paddd 128(%rsp), %xmm0
- paddd 144(%rsp), %xmm1
- paddd 160(%rsp), %xmm2
- paddd 176(%rsp), %xmm3
- paddd 192(%rsp), %xmm4
- paddd 208(%rsp), %xmm5
- paddd 224(%rsp), %xmm6
- paddd 240(%rsp), %xmm7
- paddd 256(%rsp), %xmm8
- paddd 272(%rsp), %xmm9
- paddd 288(%rsp), %xmm10
- paddd 304(%rsp), %xmm11
- paddd 320(%rsp), %xmm12
- paddd 336(%rsp), %xmm13
- paddd 352(%rsp), %xmm14
- paddd 368(%rsp), %xmm15
- movdqa %xmm8, 384(%rsp)
- movdqa %xmm9, 400(%rsp)
- movdqa %xmm10, 416(%rsp)
- movdqa %xmm11, 432(%rsp)
- movdqa %xmm12, 448(%rsp)
- movdqa %xmm13, 464(%rsp)
- movdqa %xmm14, 480(%rsp)
- movdqa %xmm15, 496(%rsp)
- movdqa %xmm0, %xmm8
- movdqa %xmm2, %xmm9
- movdqa %xmm4, %xmm10
- movdqa %xmm6, %xmm11
- punpckhdq %xmm1, %xmm0
- punpckhdq %xmm3, %xmm2
- punpckhdq %xmm5, %xmm4
- punpckhdq %xmm7, %xmm6
- punpckldq %xmm1, %xmm8
- punpckldq %xmm3, %xmm9
- punpckldq %xmm5, %xmm10
- punpckldq %xmm7, %xmm11
- movdqa %xmm0, %xmm1
- movdqa %xmm4, %xmm3
- movdqa %xmm8, %xmm5
- movdqa %xmm10, %xmm7
- punpckhqdq %xmm2, %xmm0
- punpckhqdq %xmm6, %xmm4
- punpckhqdq %xmm9, %xmm8
- punpckhqdq %xmm11, %xmm10
- punpcklqdq %xmm2, %xmm1
- punpcklqdq %xmm6, %xmm3
- punpcklqdq %xmm9, %xmm5
- punpcklqdq %xmm11, %xmm7
- andq %rsi, %rsi
- jz .Lchacha_blocks_sse2_noinput1
- movdqu 0(%rsi), %xmm2
- movdqu 16(%rsi), %xmm6
- movdqu 64(%rsi), %xmm9
- movdqu 80(%rsi), %xmm11
- movdqu 128(%rsi), %xmm12
- movdqu 144(%rsi), %xmm13
- movdqu 192(%rsi), %xmm14
- movdqu 208(%rsi), %xmm15
- pxor %xmm2, %xmm5
- pxor %xmm6, %xmm7
- pxor %xmm9, %xmm8
- pxor %xmm11, %xmm10
- pxor %xmm12, %xmm1
- pxor %xmm13, %xmm3
- pxor %xmm14, %xmm0
- pxor %xmm15, %xmm4
- movdqu %xmm5, 0(%rdx)
- movdqu %xmm7, 16(%rdx)
- movdqu %xmm8, 64(%rdx)
- movdqu %xmm10, 80(%rdx)
- movdqu %xmm1, 128(%rdx)
- movdqu %xmm3, 144(%rdx)
- movdqu %xmm0, 192(%rdx)
- movdqu %xmm4, 208(%rdx)
- movdqa 384(%rsp), %xmm0
- movdqa 400(%rsp), %xmm1
- movdqa 416(%rsp), %xmm2
- movdqa 432(%rsp), %xmm3
- movdqa 448(%rsp), %xmm4
- movdqa 464(%rsp), %xmm5
- movdqa 480(%rsp), %xmm6
- movdqa 496(%rsp), %xmm7
- movdqa %xmm0, %xmm8
- movdqa %xmm2, %xmm9
- movdqa %xmm4, %xmm10
- movdqa %xmm6, %xmm11
- punpckldq %xmm1, %xmm8
- punpckldq %xmm3, %xmm9
- punpckhdq %xmm1, %xmm0
- punpckhdq %xmm3, %xmm2
- punpckldq %xmm5, %xmm10
- punpckldq %xmm7, %xmm11
- punpckhdq %xmm5, %xmm4
- punpckhdq %xmm7, %xmm6
- movdqa %xmm8, %xmm1
- movdqa %xmm0, %xmm3
- movdqa %xmm10, %xmm5
- movdqa %xmm4, %xmm7
- punpcklqdq %xmm9, %xmm1
- punpcklqdq %xmm11, %xmm5
- punpckhqdq %xmm9, %xmm8
- punpckhqdq %xmm11, %xmm10
- punpcklqdq %xmm2, %xmm3
- punpcklqdq %xmm6, %xmm7
- punpckhqdq %xmm2, %xmm0
- punpckhqdq %xmm6, %xmm4
- movdqu 32(%rsi), %xmm2
- movdqu 48(%rsi), %xmm6
- movdqu 96(%rsi), %xmm9
- movdqu 112(%rsi), %xmm11
- movdqu 160(%rsi), %xmm12
- movdqu 176(%rsi), %xmm13
- movdqu 224(%rsi), %xmm14
- movdqu 240(%rsi), %xmm15
- pxor %xmm2, %xmm1
- pxor %xmm6, %xmm5
- pxor %xmm9, %xmm8
- pxor %xmm11, %xmm10
- pxor %xmm12, %xmm3
- pxor %xmm13, %xmm7
- pxor %xmm14, %xmm0
- pxor %xmm15, %xmm4
- movdqu %xmm1, 32(%rdx)
- movdqu %xmm5, 48(%rdx)
- movdqu %xmm8, 96(%rdx)
- movdqu %xmm10, 112(%rdx)
- movdqu %xmm3, 160(%rdx)
- movdqu %xmm7, 176(%rdx)
- movdqu %xmm0, 224(%rdx)
- movdqu %xmm4, 240(%rdx)
- addq $256, %rsi
- jmp .Lchacha_blocks_sse2_mainloop_cont
-.Lchacha_blocks_sse2_noinput1:
- movdqu %xmm5, 0(%rdx)
- movdqu %xmm7, 16(%rdx)
- movdqu %xmm8, 64(%rdx)
- movdqu %xmm10, 80(%rdx)
- movdqu %xmm1, 128(%rdx)
- movdqu %xmm3, 144(%rdx)
- movdqu %xmm0, 192(%rdx)
- movdqu %xmm4, 208(%rdx)
- movdqa 384(%rsp), %xmm0
- movdqa 400(%rsp), %xmm1
- movdqa 416(%rsp), %xmm2
- movdqa 432(%rsp), %xmm3
- movdqa 448(%rsp), %xmm4
- movdqa 464(%rsp), %xmm5
- movdqa 480(%rsp), %xmm6
- movdqa 496(%rsp), %xmm7
- movdqa %xmm0, %xmm8
- movdqa %xmm2, %xmm9
- movdqa %xmm4, %xmm10
- movdqa %xmm6, %xmm11
- punpckldq %xmm1, %xmm8
- punpckldq %xmm3, %xmm9
- punpckhdq %xmm1, %xmm0
- punpckhdq %xmm3, %xmm2
- punpckldq %xmm5, %xmm10
- punpckldq %xmm7, %xmm11
- punpckhdq %xmm5, %xmm4
- punpckhdq %xmm7, %xmm6
- movdqa %xmm8, %xmm1
- movdqa %xmm0, %xmm3
- movdqa %xmm10, %xmm5
- movdqa %xmm4, %xmm7
- punpcklqdq %xmm9, %xmm1
- punpcklqdq %xmm11, %xmm5
- punpckhqdq %xmm9, %xmm8
- punpckhqdq %xmm11, %xmm10
- punpcklqdq %xmm2, %xmm3
- punpcklqdq %xmm6, %xmm7
- punpckhqdq %xmm2, %xmm0
- punpckhqdq %xmm6, %xmm4
- movdqu %xmm1, 32(%rdx)
- movdqu %xmm5, 48(%rdx)
- movdqu %xmm8, 96(%rdx)
- movdqu %xmm10, 112(%rdx)
- movdqu %xmm3, 160(%rdx)
- movdqu %xmm7, 176(%rdx)
- movdqu %xmm0, 224(%rdx)
- movdqu %xmm4, 240(%rdx)
-.Lchacha_blocks_sse2_mainloop_cont:
- addq $256, %rdx
- subq $256, %rcx
- cmp $256, %rcx
- jae .Lchacha_blocks_sse2_atleast256
- movdqa 0(%rsp), %xmm8
- movdqa 16(%rsp), %xmm9
- movdqa 32(%rsp), %xmm10
- movdqa 48(%rsp), %xmm11
- movq $1, %r9
-.Lchacha_blocks_sse2_below256:
- movq %r9, %xmm5
- andq %rcx, %rcx
- jz .Lchacha_blocks_sse2_done
- cmpq $64, %rcx
- jae .Lchacha_blocks_sse2_above63
- movq %rdx, %r9
- andq %rsi, %rsi
- jz .Lchacha_blocks_sse2_noinput2
- movq %rcx, %r10
- movq %rsp, %rdx
- addq %r10, %rsi
- addq %r10, %rdx
- negq %r10
-.Lchacha_blocks_sse2_copyinput:
- movb (%rsi, %r10), %al
- movb %al, (%rdx, %r10)
- incq %r10
- jnz .Lchacha_blocks_sse2_copyinput
- movq %rsp, %rsi
-.Lchacha_blocks_sse2_noinput2:
- movq %rsp, %rdx
-.Lchacha_blocks_sse2_above63:
- movdqa %xmm8, %xmm0
- movdqa %xmm9, %xmm1
- movdqa %xmm10, %xmm2
- movdqa %xmm11, %xmm3
- movq 64(%rsp), %rax
-.Lchacha_blocks_sse2_mainloop2:
- paddd %xmm1, %xmm0
- pxor %xmm0, %xmm3
- pshuflw $0xb1,%xmm3,%xmm3
- pshufhw $0xb1,%xmm3,%xmm3
- paddd %xmm3, %xmm2
- pxor %xmm2, %xmm1
- movdqa %xmm1,%xmm4
- pslld $12, %xmm1
- psrld $20, %xmm4
- pxor %xmm4, %xmm1
- paddd %xmm1, %xmm0
- pxor %xmm0, %xmm3
- movdqa %xmm3,%xmm4
- pslld $8, %xmm3
- psrld $24, %xmm4
- pshufd $0x93,%xmm0,%xmm0
- pxor %xmm4, %xmm3
- paddd %xmm3, %xmm2
- pshufd $0x4e,%xmm3,%xmm3
- pxor %xmm2, %xmm1
- pshufd $0x39,%xmm2,%xmm2
- movdqa %xmm1,%xmm4
- pslld $7, %xmm1
- psrld $25, %xmm4
- pxor %xmm4, %xmm1
- subq $2, %rax
- paddd %xmm1, %xmm0
- pxor %xmm0, %xmm3
- pshuflw $0xb1,%xmm3,%xmm3
- pshufhw $0xb1,%xmm3,%xmm3
- paddd %xmm3, %xmm2
- pxor %xmm2, %xmm1
- movdqa %xmm1,%xmm4
- pslld $12, %xmm1
- psrld $20, %xmm4
- pxor %xmm4, %xmm1
- paddd %xmm1, %xmm0
- pxor %xmm0, %xmm3
- movdqa %xmm3,%xmm4
- pslld $8, %xmm3
- psrld $24, %xmm4
- pshufd $0x39,%xmm0,%xmm0
- pxor %xmm4, %xmm3
- paddd %xmm3, %xmm2
- pshufd $0x4e,%xmm3,%xmm3
- pxor %xmm2, %xmm1
- pshufd $0x93,%xmm2,%xmm2
- movdqa %xmm1,%xmm4
- pslld $7, %xmm1
- psrld $25, %xmm4
- pxor %xmm4, %xmm1
- jnz .Lchacha_blocks_sse2_mainloop2
- paddd %xmm8, %xmm0
- paddd %xmm9, %xmm1
- paddd %xmm10, %xmm2
- paddd %xmm11, %xmm3
- andq %rsi, %rsi
- jz .Lchacha_blocks_sse2_noinput3
- movdqu 0(%rsi), %xmm12
- movdqu 16(%rsi), %xmm13
- movdqu 32(%rsi), %xmm14
- movdqu 48(%rsi), %xmm15
- pxor %xmm12, %xmm0
- pxor %xmm13, %xmm1
- pxor %xmm14, %xmm2
- pxor %xmm15, %xmm3
- addq $64, %rsi
-.Lchacha_blocks_sse2_noinput3:
- movdqu %xmm0, 0(%rdx)
- movdqu %xmm1, 16(%rdx)
- movdqu %xmm2, 32(%rdx)
- movdqu %xmm3, 48(%rdx)
- paddq %xmm5, %xmm11
- cmpq $64, %rcx
- jbe .Lchacha_blocks_sse2_mainloop2_finishup
- addq $64, %rdx
- subq $64, %rcx
- jmp .Lchacha_blocks_sse2_below256
-.Lchacha_blocks_sse2_mainloop2_finishup:
- cmpq $64, %rcx
- je .Lchacha_blocks_sse2_done
- addq %rcx, %r9
- addq %rcx, %rdx
- negq %rcx
-.Lchacha_blocks_sse2_copyoutput:
- movb (%rdx, %rcx), %al
- movb %al, (%r9, %rcx)
- incq %rcx
- jnz .Lchacha_blocks_sse2_copyoutput
-.Lchacha_blocks_sse2_done:
- movdqu %xmm11, 48(%rdi)
- movq %rbp, %rsp
- pxor %xmm15, %xmm15
- pxor %xmm7, %xmm7
- pxor %xmm14, %xmm14
- pxor %xmm6, %xmm6
- pxor %xmm13, %xmm13
- pxor %xmm5, %xmm5
- pxor %xmm12, %xmm12
- pxor %xmm4, %xmm4
- popq %rbp
- popq %rbx
- movl $(63 + 512 + 16), %eax
- pxor %xmm11, %xmm11
- pxor %xmm3, %xmm3
- pxor %xmm10, %xmm10
- pxor %xmm2, %xmm2
- pxor %xmm9, %xmm9
- pxor %xmm1, %xmm1
- pxor %xmm8, %xmm8
- pxor %xmm0, %xmm0
- ret
-ELF(.size _gcry_chacha20_amd64_sse2_blocks,.-_gcry_chacha20_amd64_sse2_blocks;)
-
-#endif /*defined(USE_CHACHA20)*/
-#endif /*__x86_64*/
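
The SSE2 routine removed above cannot use pshufb, so it does the rotate-by-16 by swapping the 16-bit halves of every lane with pshuflw/pshufhw ($0xb1) and builds the other rotations from pslld/psrld/pxor pairs. A sketch of those two idioms with SSE2 intrinsics (illustrative only, not part of the patch):

    #include <emmintrin.h>  /* SSE2 */

    /* Rotate each 32-bit lane left by 16: swap the 16-bit halves of every
     * dword, which is what pshuflw + pshufhw with imm8 0xb1 do. */
    static inline __m128i rol32x4_16(__m128i x)
    {
      x = _mm_shufflelo_epi16(x, 0xb1);
      return _mm_shufflehi_epi16(x, 0xb1);
    }

    /* Rotate each 32-bit lane left by c (7, 8 or 12 in the main loops),
     * matching the pslld/psrld/pxor triples in the removed code. */
    static inline __m128i rol32x4(__m128i x, int c)
    {
      return _mm_xor_si128(_mm_slli_epi32(x, c), _mm_srli_epi32(x, 32 - c));
    }

The removed code handles the 8-bit case the same shift/xor way (pslld $8 / psrld $24), whereas the SSSE3 variant below uses a byte shuffle for it.
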
diff --git a/cipher/chacha20-ssse3-amd64.S b/cipher/chacha20-ssse3-amd64.S
deleted file mode 100644
index c04010e7b..000000000
--- a/cipher/chacha20-ssse3-amd64.S
+++ /dev/null
@@ -1,632 +0,0 @@
-/* chacha20-ssse3-amd64.S - AMD64/SSSE3 implementation of ChaCha20
- *
- * Copyright (C) 2014 Jussi Kivilinna <jussi.kivilinna at iki.fi>
- *
- * This file is part of Libgcrypt.
- *
- * Libgcrypt is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * Libgcrypt is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*
- * Based on public domain implementation by Andrew Moon at
- * https://github.com/floodyberry/chacha-opt
- */
-
-#ifdef __x86_64__
-#include <config.h>
-
-#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
- defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
- defined(HAVE_GCC_INLINE_ASM_SSSE3) && USE_CHACHA20
-
-#ifdef __PIC__
-# define RIP (%rip)
-#else
-# define RIP
-#endif
-
-#ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS
-# define ELF(...) __VA_ARGS__
-#else
-# define ELF(...) /*_*/
-#endif
-
-.text
-
-.align 8
-.globl _gcry_chacha20_amd64_ssse3_blocks
-ELF(.type _gcry_chacha20_amd64_ssse3_blocks,@function;)
-_gcry_chacha20_amd64_ssse3_blocks:
-.Lchacha_blocks_ssse3_local:
- pushq %rbx
- pushq %rbp
- movq %rsp, %rbp
- andq $~63, %rsp
- subq $512, %rsp
- leaq .LC RIP, %rax
- movdqa 0(%rax), %xmm6
- movdqa 16(%rax), %xmm7
- movdqu 0(%rdi), %xmm8
- movdqu 16(%rdi), %xmm9
- movdqu 32(%rdi), %xmm10
- movdqu 48(%rdi), %xmm11
- movl $20, %eax
- movq $1, %r9
- movdqa %xmm8, 0(%rsp)
- movdqa %xmm9, 16(%rsp)
- movdqa %xmm10, 32(%rsp)
- movdqa %xmm11, 48(%rsp)
- movdqa %xmm6, 80(%rsp)
- movdqa %xmm7, 96(%rsp)
- movq %rax, 64(%rsp)
- cmpq $256, %rcx
- jb .Lchacha_blocks_ssse3_below256
- pshufd $0x00, %xmm8, %xmm0
- pshufd $0x55, %xmm8, %xmm1
- pshufd $0xaa, %xmm8, %xmm2
- pshufd $0xff, %xmm8, %xmm3
- movdqa %xmm0, 128(%rsp)
- movdqa %xmm1, 144(%rsp)
- movdqa %xmm2, 160(%rsp)
- movdqa %xmm3, 176(%rsp)
- pshufd $0x00, %xmm9, %xmm0
- pshufd $0x55, %xmm9, %xmm1
- pshufd $0xaa, %xmm9, %xmm2
- pshufd $0xff, %xmm9, %xmm3
- movdqa %xmm0, 192(%rsp)
- movdqa %xmm1, 208(%rsp)
- movdqa %xmm2, 224(%rsp)
- movdqa %xmm3, 240(%rsp)
- pshufd $0x00, %xmm10, %xmm0
- pshufd $0x55, %xmm10, %xmm1
- pshufd $0xaa, %xmm10, %xmm2
- pshufd $0xff, %xmm10, %xmm3
- movdqa %xmm0, 256(%rsp)
- movdqa %xmm1, 272(%rsp)
- movdqa %xmm2, 288(%rsp)
- movdqa %xmm3, 304(%rsp)
- pshufd $0xaa, %xmm11, %xmm0
- pshufd $0xff, %xmm11, %xmm1
- movdqa %xmm0, 352(%rsp)
- movdqa %xmm1, 368(%rsp)
- jmp .Lchacha_blocks_ssse3_atleast256
-.p2align 6,,63
- # align to 4 mod 64
- nop;nop;nop;nop;
-.Lchacha_blocks_ssse3_atleast256:
- movq 48(%rsp), %rax
- leaq 1(%rax), %r8
- leaq 2(%rax), %r9
- leaq 3(%rax), %r10
- leaq 4(%rax), %rbx
- movl %eax, 320(%rsp)
- movl %r8d, 4+320(%rsp)
- movl %r9d, 8+320(%rsp)
- movl %r10d, 12+320(%rsp)
- shrq $32, %rax
- shrq $32, %r8
- shrq $32, %r9
- shrq $32, %r10
- movl %eax, 336(%rsp)
- movl %r8d, 4+336(%rsp)
- movl %r9d, 8+336(%rsp)
- movl %r10d, 12+336(%rsp)
- movq %rbx, 48(%rsp)
- movq 64(%rsp), %rax
- movdqa 128(%rsp), %xmm0
- movdqa 144(%rsp), %xmm1
- movdqa 160(%rsp), %xmm2
- movdqa 176(%rsp), %xmm3
- movdqa 192(%rsp), %xmm4
- movdqa 208(%rsp), %xmm5
- movdqa 224(%rsp), %xmm6
- movdqa 240(%rsp), %xmm7
- movdqa 256(%rsp), %xmm8
- movdqa 272(%rsp), %xmm9
- movdqa 288(%rsp), %xmm10
- movdqa 304(%rsp), %xmm11
- movdqa 320(%rsp), %xmm12
- movdqa 336(%rsp), %xmm13
- movdqa 352(%rsp), %xmm14
- movdqa 368(%rsp), %xmm15
-.Lchacha_blocks_ssse3_mainloop1:
- paddd %xmm4, %xmm0
- paddd %xmm5, %xmm1
- pxor %xmm0, %xmm12
- pxor %xmm1, %xmm13
- paddd %xmm6, %xmm2
- paddd %xmm7, %xmm3
- pxor %xmm2, %xmm14
- pxor %xmm3, %xmm15
- pshufb 80(%rsp), %xmm12
- pshufb 80(%rsp), %xmm13
- paddd %xmm12, %xmm8
- paddd %xmm13, %xmm9
- pshufb 80(%rsp), %xmm14
- pshufb 80(%rsp), %xmm15
- paddd %xmm14, %xmm10
- paddd %xmm15, %xmm11
- movdqa %xmm12, 112(%rsp)
- pxor %xmm8, %xmm4
- pxor %xmm9, %xmm5
- movdqa %xmm4, %xmm12
- pslld $ 12, %xmm4
- psrld $20, %xmm12
- pxor %xmm12, %xmm4
- movdqa %xmm5, %xmm12
- pslld $ 12, %xmm5
- psrld $20, %xmm12
- pxor %xmm12, %xmm5
- pxor %xmm10, %xmm6
- pxor %xmm11, %xmm7
- movdqa %xmm6, %xmm12
- pslld $ 12, %xmm6
- psrld $20, %xmm12
- pxor %xmm12, %xmm6
- movdqa %xmm7, %xmm12
- pslld $ 12, %xmm7
- psrld $20, %xmm12
- pxor %xmm12, %xmm7
- movdqa 112(%rsp), %xmm12
- paddd %xmm4, %xmm0
- paddd %xmm5, %xmm1
- pxor %xmm0, %xmm12
- pxor %xmm1, %xmm13
- paddd %xmm6, %xmm2
- paddd %xmm7, %xmm3
- pxor %xmm2, %xmm14
- pxor %xmm3, %xmm15
- pshufb 96(%rsp), %xmm12
- pshufb 96(%rsp), %xmm13
- paddd %xmm12, %xmm8
- paddd %xmm13, %xmm9
- pshufb 96(%rsp), %xmm14
- pshufb 96(%rsp), %xmm15
- paddd %xmm14, %xmm10
- paddd %xmm15, %xmm11
- movdqa %xmm12, 112(%rsp)
- pxor %xmm8, %xmm4
- pxor %xmm9, %xmm5
- movdqa %xmm4, %xmm12
- pslld $ 7, %xmm4
- psrld $25, %xmm12
- pxor %xmm12, %xmm4
- movdqa %xmm5, %xmm12
- pslld $ 7, %xmm5
- psrld $25, %xmm12
- pxor %xmm12, %xmm5
- pxor %xmm10, %xmm6
- pxor %xmm11, %xmm7
- movdqa %xmm6, %xmm12
- pslld $ 7, %xmm6
- psrld $25, %xmm12
- pxor %xmm12, %xmm6
- movdqa %xmm7, %xmm12
- pslld $ 7, %xmm7
- psrld $25, %xmm12
- pxor %xmm12, %xmm7
- movdqa 112(%rsp), %xmm12
- paddd %xmm5, %xmm0
- paddd %xmm6, %xmm1
- pxor %xmm0, %xmm15
- pxor %xmm1, %xmm12
- paddd %xmm7, %xmm2
- paddd %xmm4, %xmm3
- pxor %xmm2, %xmm13
- pxor %xmm3, %xmm14
- pshufb 80(%rsp), %xmm15
- pshufb 80(%rsp), %xmm12
- paddd %xmm15, %xmm10
- paddd %xmm12, %xmm11
- pshufb 80(%rsp), %xmm13
- pshufb 80(%rsp), %xmm14
- paddd %xmm13, %xmm8
- paddd %xmm14, %xmm9
- movdqa %xmm15, 112(%rsp)
- pxor %xmm10, %xmm5
- pxor %xmm11, %xmm6
- movdqa %xmm5, %xmm15
- pslld $ 12, %xmm5
- psrld $20, %xmm15
- pxor %xmm15, %xmm5
- movdqa %xmm6, %xmm15
- pslld $ 12, %xmm6
- psrld $20, %xmm15
- pxor %xmm15, %xmm6
- pxor %xmm8, %xmm7
- pxor %xmm9, %xmm4
- movdqa %xmm7, %xmm15
- pslld $ 12, %xmm7
- psrld $20, %xmm15
- pxor %xmm15, %xmm7
- movdqa %xmm4, %xmm15
- pslld $ 12, %xmm4
- psrld $20, %xmm15
- pxor %xmm15, %xmm4
- movdqa 112(%rsp), %xmm15
- paddd %xmm5, %xmm0
- paddd %xmm6, %xmm1
- pxor %xmm0, %xmm15
- pxor %xmm1, %xmm12
- paddd %xmm7, %xmm2
- paddd %xmm4, %xmm3
- pxor %xmm2, %xmm13
- pxor %xmm3, %xmm14
- pshufb 96(%rsp), %xmm15
- pshufb 96(%rsp), %xmm12
- paddd %xmm15, %xmm10
- paddd %xmm12, %xmm11
- pshufb 96(%rsp), %xmm13
- pshufb 96(%rsp), %xmm14
- paddd %xmm13, %xmm8
- paddd %xmm14, %xmm9
- movdqa %xmm15, 112(%rsp)
- pxor %xmm10, %xmm5
- pxor %xmm11, %xmm6
- movdqa %xmm5, %xmm15
- pslld $ 7, %xmm5
- psrld $25, %xmm15
- pxor %xmm15, %xmm5
- movdqa %xmm6, %xmm15
- pslld $ 7, %xmm6
- psrld $25, %xmm15
- pxor %xmm15, %xmm6
- pxor %xmm8, %xmm7
- pxor %xmm9, %xmm4
- movdqa %xmm7, %xmm15
- pslld $ 7, %xmm7
- psrld $25, %xmm15
- pxor %xmm15, %xmm7
- movdqa %xmm4, %xmm15
- pslld $ 7, %xmm4
- psrld $25, %xmm15
- pxor %xmm15, %xmm4
- subq $2, %rax
- movdqa 112(%rsp), %xmm15
- jnz .Lchacha_blocks_ssse3_mainloop1
- paddd 128(%rsp), %xmm0
- paddd 144(%rsp), %xmm1
- paddd 160(%rsp), %xmm2
- paddd 176(%rsp), %xmm3
- paddd 192(%rsp), %xmm4
- paddd 208(%rsp), %xmm5
- paddd 224(%rsp), %xmm6
- paddd 240(%rsp), %xmm7
- paddd 256(%rsp), %xmm8
- paddd 272(%rsp), %xmm9
- paddd 288(%rsp), %xmm10
- paddd 304(%rsp), %xmm11
- paddd 320(%rsp), %xmm12
- paddd 336(%rsp), %xmm13
- paddd 352(%rsp), %xmm14
- paddd 368(%rsp), %xmm15
- movdqa %xmm8, 384(%rsp)
- movdqa %xmm9, 400(%rsp)
- movdqa %xmm10, 416(%rsp)
- movdqa %xmm11, 432(%rsp)
- movdqa %xmm12, 448(%rsp)
- movdqa %xmm13, 464(%rsp)
- movdqa %xmm14, 480(%rsp)
- movdqa %xmm15, 496(%rsp)
- movdqa %xmm0, %xmm8
- movdqa %xmm2, %xmm9
- movdqa %xmm4, %xmm10
- movdqa %xmm6, %xmm11
- punpckhdq %xmm1, %xmm0
- punpckhdq %xmm3, %xmm2
- punpckhdq %xmm5, %xmm4
- punpckhdq %xmm7, %xmm6
- punpckldq %xmm1, %xmm8
- punpckldq %xmm3, %xmm9
- punpckldq %xmm5, %xmm10
- punpckldq %xmm7, %xmm11
- movdqa %xmm0, %xmm1
- movdqa %xmm4, %xmm3
- movdqa %xmm8, %xmm5
- movdqa %xmm10, %xmm7
- punpckhqdq %xmm2, %xmm0
- punpckhqdq %xmm6, %xmm4
- punpckhqdq %xmm9, %xmm8
- punpckhqdq %xmm11, %xmm10
- punpcklqdq %xmm2, %xmm1
- punpcklqdq %xmm6, %xmm3
- punpcklqdq %xmm9, %xmm5
- punpcklqdq %xmm11, %xmm7
- andq %rsi, %rsi
- jz .Lchacha_blocks_ssse3_noinput1
- movdqu 0(%rsi), %xmm2
- movdqu 16(%rsi), %xmm6
- movdqu 64(%rsi), %xmm9
- movdqu 80(%rsi), %xmm11
- movdqu 128(%rsi), %xmm12
- movdqu 144(%rsi), %xmm13
- movdqu 192(%rsi), %xmm14
- movdqu 208(%rsi), %xmm15
- pxor %xmm2, %xmm5
- pxor %xmm6, %xmm7
- pxor %xmm9, %xmm8
- pxor %xmm11, %xmm10
- pxor %xmm12, %xmm1
- pxor %xmm13, %xmm3
- pxor %xmm14, %xmm0
- pxor %xmm15, %xmm4
- movdqu %xmm5, 0(%rdx)
- movdqu %xmm7, 16(%rdx)
- movdqu %xmm8, 64(%rdx)
- movdqu %xmm10, 80(%rdx)
- movdqu %xmm1, 128(%rdx)
- movdqu %xmm3, 144(%rdx)
- movdqu %xmm0, 192(%rdx)
- movdqu %xmm4, 208(%rdx)
- movdqa 384(%rsp), %xmm0
- movdqa 400(%rsp), %xmm1
- movdqa 416(%rsp), %xmm2
- movdqa 432(%rsp), %xmm3
- movdqa 448(%rsp), %xmm4
- movdqa 464(%rsp), %xmm5
- movdqa 480(%rsp), %xmm6
- movdqa 496(%rsp), %xmm7
- movdqa %xmm0, %xmm8
- movdqa %xmm2, %xmm9
- movdqa %xmm4, %xmm10
- movdqa %xmm6, %xmm11
- punpckldq %xmm1, %xmm8
- punpckldq %xmm3, %xmm9
- punpckhdq %xmm1, %xmm0
- punpckhdq %xmm3, %xmm2
- punpckldq %xmm5, %xmm10
- punpckldq %xmm7, %xmm11
- punpckhdq %xmm5, %xmm4
- punpckhdq %xmm7, %xmm6
- movdqa %xmm8, %xmm1
- movdqa %xmm0, %xmm3
- movdqa %xmm10, %xmm5
- movdqa %xmm4, %xmm7
- punpcklqdq %xmm9, %xmm1
- punpcklqdq %xmm11, %xmm5
- punpckhqdq %xmm9, %xmm8
- punpckhqdq %xmm11, %xmm10
- punpcklqdq %xmm2, %xmm3
- punpcklqdq %xmm6, %xmm7
- punpckhqdq %xmm2, %xmm0
- punpckhqdq %xmm6, %xmm4
- movdqu 32(%rsi), %xmm2
- movdqu 48(%rsi), %xmm6
- movdqu 96(%rsi), %xmm9
- movdqu 112(%rsi), %xmm11
- movdqu 160(%rsi), %xmm12
- movdqu 176(%rsi), %xmm13
- movdqu 224(%rsi), %xmm14
- movdqu 240(%rsi), %xmm15
- pxor %xmm2, %xmm1
- pxor %xmm6, %xmm5
- pxor %xmm9, %xmm8
- pxor %xmm11, %xmm10
- pxor %xmm12, %xmm3
- pxor %xmm13, %xmm7
- pxor %xmm14, %xmm0
- pxor %xmm15, %xmm4
- movdqu %xmm1, 32(%rdx)
- movdqu %xmm5, 48(%rdx)
- movdqu %xmm8, 96(%rdx)
- movdqu %xmm10, 112(%rdx)
- movdqu %xmm3, 160(%rdx)
- movdqu %xmm7, 176(%rdx)
- movdqu %xmm0, 224(%rdx)
- movdqu %xmm4, 240(%rdx)
- addq $256, %rsi
- jmp .Lchacha_blocks_ssse3_mainloop_cont
-.Lchacha_blocks_ssse3_noinput1:
- movdqu %xmm5, 0(%rdx)
- movdqu %xmm7, 16(%rdx)
- movdqu %xmm8, 64(%rdx)
- movdqu %xmm10, 80(%rdx)
- movdqu %xmm1, 128(%rdx)
- movdqu %xmm3, 144(%rdx)
- movdqu %xmm0, 192(%rdx)
- movdqu %xmm4, 208(%rdx)
- movdqa 384(%rsp), %xmm0
- movdqa 400(%rsp), %xmm1
- movdqa 416(%rsp), %xmm2
- movdqa 432(%rsp), %xmm3
- movdqa 448(%rsp), %xmm4
- movdqa 464(%rsp), %xmm5
- movdqa 480(%rsp), %xmm6
- movdqa 496(%rsp), %xmm7
- movdqa %xmm0, %xmm8
- movdqa %xmm2, %xmm9
- movdqa %xmm4, %xmm10
- movdqa %xmm6, %xmm11
- punpckldq %xmm1, %xmm8
- punpckldq %xmm3, %xmm9
- punpckhdq %xmm1, %xmm0
- punpckhdq %xmm3, %xmm2
- punpckldq %xmm5, %xmm10
- punpckldq %xmm7, %xmm11
- punpckhdq %xmm5, %xmm4
- punpckhdq %xmm7, %xmm6
- movdqa %xmm8, %xmm1
- movdqa %xmm0, %xmm3
- movdqa %xmm10, %xmm5
- movdqa %xmm4, %xmm7
- punpcklqdq %xmm9, %xmm1
- punpcklqdq %xmm11, %xmm5
- punpckhqdq %xmm9, %xmm8
- punpckhqdq %xmm11, %xmm10
- punpcklqdq %xmm2, %xmm3
- punpcklqdq %xmm6, %xmm7
- punpckhqdq %xmm2, %xmm0
- punpckhqdq %xmm6, %xmm4
- movdqu %xmm1, 32(%rdx)
- movdqu %xmm5, 48(%rdx)
- movdqu %xmm8, 96(%rdx)
- movdqu %xmm10, 112(%rdx)
- movdqu %xmm3, 160(%rdx)
- movdqu %xmm7, 176(%rdx)
- movdqu %xmm0, 224(%rdx)
- movdqu %xmm4, 240(%rdx)
-.Lchacha_blocks_ssse3_mainloop_cont:
- addq $256, %rdx
- subq $256, %rcx
- cmp $256, %rcx
- jae .Lchacha_blocks_ssse3_atleast256
- movdqa 80(%rsp), %xmm6
- movdqa 96(%rsp), %xmm7
- movdqa 0(%rsp), %xmm8
- movdqa 16(%rsp), %xmm9
- movdqa 32(%rsp), %xmm10
- movdqa 48(%rsp), %xmm11
- movq $1, %r9
-.Lchacha_blocks_ssse3_below256:
- movq %r9, %xmm5
- andq %rcx, %rcx
- jz .Lchacha_blocks_ssse3_done
- cmpq $64, %rcx
- jae .Lchacha_blocks_ssse3_above63
- movq %rdx, %r9
- andq %rsi, %rsi
- jz .Lchacha_blocks_ssse3_noinput2
- movq %rcx, %r10
- movq %rsp, %rdx
- addq %r10, %rsi
- addq %r10, %rdx
- negq %r10
-.Lchacha_blocks_ssse3_copyinput:
- movb (%rsi, %r10), %al
- movb %al, (%rdx, %r10)
- incq %r10
- jnz .Lchacha_blocks_ssse3_copyinput
- movq %rsp, %rsi
-.Lchacha_blocks_ssse3_noinput2:
- movq %rsp, %rdx
-.Lchacha_blocks_ssse3_above63:
- movdqa %xmm8, %xmm0
- movdqa %xmm9, %xmm1
- movdqa %xmm10, %xmm2
- movdqa %xmm11, %xmm3
- movq 64(%rsp), %rax
-.Lchacha_blocks_ssse3_mainloop2:
- paddd %xmm1, %xmm0
- pxor %xmm0, %xmm3
- pshufb %xmm6, %xmm3
- paddd %xmm3, %xmm2
- pxor %xmm2, %xmm1
- movdqa %xmm1, %xmm4
- pslld $12, %xmm4
- psrld $20, %xmm1
- pxor %xmm4, %xmm1
- paddd %xmm1, %xmm0
- pxor %xmm0, %xmm3
- pshufb %xmm7, %xmm3
- pshufd $0x93, %xmm0, %xmm0
- paddd %xmm3, %xmm2
- pshufd $0x4e, %xmm3, %xmm3
- pxor %xmm2, %xmm1
- pshufd $0x39, %xmm2, %xmm2
- movdqa %xmm1, %xmm4
- pslld $7, %xmm4
- psrld $25, %xmm1
- pxor %xmm4, %xmm1
- paddd %xmm1, %xmm0
- pxor %xmm0, %xmm3
- pshufb %xmm6, %xmm3
- paddd %xmm3, %xmm2
- pxor %xmm2, %xmm1
- movdqa %xmm1, %xmm4
- pslld $12, %xmm4
- psrld $20, %xmm1
- pxor %xmm4, %xmm1
- paddd %xmm1, %xmm0
- pxor %xmm0, %xmm3
- pshufb %xmm7, %xmm3
- pshufd $0x39, %xmm0, %xmm0
- paddd %xmm3, %xmm2
- pshufd $0x4e, %xmm3, %xmm3
- pxor %xmm2, %xmm1
- pshufd $0x93, %xmm2, %xmm2
- movdqa %xmm1, %xmm4
- pslld $7, %xmm4
- psrld $25, %xmm1
- pxor %xmm4, %xmm1
- subq $2, %rax
- jnz .Lchacha_blocks_ssse3_mainloop2
- paddd %xmm8, %xmm0
- paddd %xmm9, %xmm1
- paddd %xmm10, %xmm2
- paddd %xmm11, %xmm3
- andq %rsi, %rsi
- jz .Lchacha_blocks_ssse3_noinput3
- movdqu 0(%rsi), %xmm12
- movdqu 16(%rsi), %xmm13
- movdqu 32(%rsi), %xmm14
- movdqu 48(%rsi), %xmm15
- pxor %xmm12, %xmm0
- pxor %xmm13, %xmm1
- pxor %xmm14, %xmm2
- pxor %xmm15, %xmm3
- addq $64, %rsi
-.Lchacha_blocks_ssse3_noinput3:
- movdqu %xmm0, 0(%rdx)
- movdqu %xmm1, 16(%rdx)
- movdqu %xmm2, 32(%rdx)
- movdqu %xmm3, 48(%rdx)
- paddq %xmm5, %xmm11
- cmpq $64, %rcx
- jbe .Lchacha_blocks_ssse3_mainloop2_finishup
- addq $64, %rdx
- subq $64, %rcx
- jmp .Lchacha_blocks_ssse3_below256
-.Lchacha_blocks_ssse3_mainloop2_finishup:
- cmpq $64, %rcx
- je .Lchacha_blocks_ssse3_done
- addq %rcx, %r9
- addq %rcx, %rdx
- negq %rcx
-.Lchacha_blocks_ssse3_copyoutput:
- movb (%rdx, %rcx), %al
- movb %al, (%r9, %rcx)
- incq %rcx
- jnz .Lchacha_blocks_ssse3_copyoutput
-.Lchacha_blocks_ssse3_done:
- movdqu %xmm11, 48(%rdi)
- movq %rbp, %rsp
- pxor %xmm15, %xmm15
- pxor %xmm7, %xmm7
- pxor %xmm14, %xmm14
- pxor %xmm6, %xmm6
- pxor %xmm13, %xmm13
- pxor %xmm5, %xmm5
- pxor %xmm12, %xmm12
- pxor %xmm4, %xmm4
- popq %rbp
- popq %rbx
- movl $(63 + 512 + 16), %eax
- pxor %xmm11, %xmm11
- pxor %xmm3, %xmm3
- pxor %xmm10, %xmm10
- pxor %xmm2, %xmm2
- pxor %xmm9, %xmm9
- pxor %xmm1, %xmm1
- pxor %xmm8, %xmm8
- pxor %xmm0, %xmm0
- ret
-ELF(.size _gcry_chacha20_amd64_ssse3_blocks,.-_gcry_chacha20_amd64_ssse3_blocks;)
-
-.align 16;
-.LC:
-.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 /* pshufb rotate by 16 */
-.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 /* pshufb rotate by 8 */
-
-#endif /*defined(USE_CHACHA20)*/
-#endif /*__x86_64*/
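
The SSSE3 routine removed above is structurally the same as the SSE2 one but performs the 16- and 8-bit rotations with a single pshufb against the .LC byte tables; its replacement exposes a 4-block _gcry_chacha20_amd64_ssse3_blocks4 entry point instead of the byte-count interface deleted here (see the chacha20.c changes below). Expressed with intrinsics, the two shuffle masks look like this (illustrative only, not part of the patch):

    #include <tmmintrin.h>  /* SSSE3 */

    /* Rotate each 32-bit lane left by 16, same mask as the first .LC row. */
    static inline __m128i rol32x4_16_ssse3(__m128i x)
    {
      const __m128i m = _mm_setr_epi8(2,3,0,1, 6,7,4,5, 10,11,8,9, 14,15,12,13);
      return _mm_shuffle_epi8(x, m);
    }

    /* Rotate each 32-bit lane left by 8, same mask as the second .LC row. */
    static inline __m128i rol32x4_8_ssse3(__m128i x)
    {
      const __m128i m = _mm_setr_epi8(3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14);
      return _mm_shuffle_epi8(x, m);
    }
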
diff --git a/cipher/chacha20.c b/cipher/chacha20.c
index 613fa82a9..ac6cc29e8 100644
--- a/cipher/chacha20.c
+++ b/cipher/chacha20.c
@@ -1,5 +1,5 @@
/* chacha20.c - Bernstein's ChaCha20 cipher
- * Copyright (C) 2014 Jussi Kivilinna <jussi.kivilinna at iki.fi>
+ * Copyright (C) 2014,2017,2018 Jussi Kivilinna <jussi.kivilinna at iki.fi>
*
* This file is part of Libgcrypt.
*
@@ -20,16 +20,15 @@
* http://cr.yp.to/chacha.html
*/
-/* The code is based on salsa20.c and public-domain ChaCha implementations:
- * chacha-ref.c version 20080118
- * D. J. Bernstein
- * Public domain.
- * and
- * Andrew Moon
- * https://github.com/floodyberry/chacha-opt
+/*
+ * Based on D. J. Bernstein reference implementation at
+ * http://cr.yp.to/chacha.html:
+ *
+ * chacha-regs.c version 20080118
+ * D. J. Bernstein
+ * Public domain.
*/
-
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
@@ -46,295 +45,216 @@
#define CHACHA20_MIN_IV_SIZE 8 /* Bytes. */
#define CHACHA20_MAX_IV_SIZE 12 /* Bytes. */
#define CHACHA20_CTR_SIZE 16 /* Bytes. */
-#define CHACHA20_INPUT_LENGTH (CHACHA20_BLOCK_SIZE / 4)
-/* USE_SSE2 indicates whether to compile with Intel SSE2 code. */
-#undef USE_SSE2
-#if defined(__x86_64__) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
- defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
-# define USE_SSE2 1
-#endif
/* USE_SSSE3 indicates whether to compile with Intel SSSE3 code. */
#undef USE_SSSE3
-#if defined(__x86_64__) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
- defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
- defined(HAVE_GCC_INLINE_ASM_SSSE3)
+#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_SSSE3) && \
+ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
+ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# define USE_SSSE3 1
#endif
/* USE_AVX2 indicates whether to compile with Intel AVX2 code. */
#undef USE_AVX2
-#if defined(__x86_64__) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
- defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
- defined(ENABLE_AVX2_SUPPORT)
+#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX2) && \
+ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
+ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# define USE_AVX2 1
#endif
-/* USE_NEON indicates whether to enable ARM NEON assembly code. */
-#undef USE_NEON
+/* USE_ARMV7_NEON indicates whether to enable ARMv7 NEON assembly code. */
+#undef USE_ARMV7_NEON
#ifdef ENABLE_NEON_SUPPORT
# if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \
&& defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \
&& defined(HAVE_GCC_INLINE_ASM_NEON)
-# define USE_NEON 1
+# define USE_ARMV7_NEON 1
# endif
-#endif /*ENABLE_NEON_SUPPORT*/
-
-
-struct CHACHA20_context_s;
-
+#endif
/* Assembly implementations use SystemV ABI, ABI conversion and additional
* stack to store XMM6-XMM15 needed on Win64. */
#undef ASM_FUNC_ABI
#undef ASM_EXTRA_STACK
-#if (defined(USE_SSE2) || defined(USE_SSSE3) || defined(USE_AVX2)) && \
- defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)
+#if defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)
# define ASM_FUNC_ABI __attribute__((sysv_abi))
-# define ASM_EXTRA_STACK (10 * 16)
#else
# define ASM_FUNC_ABI
-# define ASM_EXTRA_STACK 0
#endif
-typedef unsigned int (* chacha20_blocks_t)(u32 *state, const byte *src,
- byte *dst,
- size_t bytes) ASM_FUNC_ABI;
-
typedef struct CHACHA20_context_s
{
- u32 input[CHACHA20_INPUT_LENGTH];
- u32 pad[CHACHA20_INPUT_LENGTH];
- chacha20_blocks_t blocks;
+ u32 input[16];
+ unsigned char pad[CHACHA20_BLOCK_SIZE];
unsigned int unused; /* bytes in the pad. */
+ int use_ssse3:1;
+ int use_avx2:1;
+ int use_neon:1;
} CHACHA20_context_t;
-#ifdef USE_SSE2
-
-unsigned int _gcry_chacha20_amd64_sse2_blocks(u32 *state, const byte *in,
- byte *out,
- size_t bytes) ASM_FUNC_ABI;
-
-#endif /* USE_SSE2 */
-
#ifdef USE_SSSE3
-unsigned int _gcry_chacha20_amd64_ssse3_blocks(u32 *state, const byte *in,
- byte *out,
- size_t bytes) ASM_FUNC_ABI;
+unsigned int _gcry_chacha20_amd64_ssse3_blocks4(u32 *state, byte *dst,
+ const byte *src,
+ size_t nblks) ASM_FUNC_ABI;
#endif /* USE_SSSE3 */
#ifdef USE_AVX2
-unsigned int _gcry_chacha20_amd64_avx2_blocks(u32 *state, const byte *in,
- byte *out,
- size_t bytes) ASM_FUNC_ABI;
+unsigned int _gcry_chacha20_amd64_avx2_blocks8(u32 *state, byte *dst,
+ const byte *src,
+ size_t nblks) ASM_FUNC_ABI;
#endif /* USE_AVX2 */
-#ifdef USE_NEON
+#ifdef USE_ARMV7_NEON
-unsigned int _gcry_chacha20_armv7_neon_blocks(u32 *state, const byte *in,
- byte *out,
- size_t bytes) ASM_FUNC_ABI;
+unsigned int _gcry_chacha20_armv7_neon_blocks4(u32 *state, byte *dst,
+ const byte *src,
+ size_t nblks);
-#endif /* USE_NEON */
+#endif /* USE_ARMV7_NEON */
-static void chacha20_setiv (void *context, const byte * iv, size_t ivlen);
static const char *selftest (void);
+#define ROTATE(v,c) (rol(v,c))
+#define XOR(v,w) ((v) ^ (w))
+#define PLUS(v,w) ((u32)((v) + (w)))
+#define PLUSONE(v) (PLUS((v),1))
-#define QROUND(a,b,c,d) \
- do { \
- a += b; d = rol(d ^ a, 16); \
- c += d; b = rol(b ^ c, 12); \
- a += b; d = rol(d ^ a, 8); \
- c += d; b = rol(b ^ c, 7); \
- } while (0)
+#define QUARTERROUND(a,b,c,d) \
+ a = PLUS(a,b); d = ROTATE(XOR(d,a),16); \
+ c = PLUS(c,d); b = ROTATE(XOR(b,c),12); \
+ a = PLUS(a,b); d = ROTATE(XOR(d,a), 8); \
+ c = PLUS(c,d); b = ROTATE(XOR(b,c), 7);
-#define QOUT(ai, bi, ci, di) \
- DO_OUT(ai); DO_OUT(bi); DO_OUT(ci); DO_OUT(di)
+#define BUF_XOR_LE32(dst, src, offset, x) \
+ buf_put_le32((dst) + (offset), buf_get_le32((src) + (offset)) ^ (x))
-
-#ifndef USE_SSE2
-ASM_FUNC_ABI static unsigned int
-chacha20_blocks (u32 *state, const byte *src, byte *dst, size_t bytes)
+static unsigned int
+chacha20_blocks (u32 *input, byte *dst, const byte *src, size_t nblks)
{
- u32 pad[CHACHA20_INPUT_LENGTH];
- u32 inp[CHACHA20_INPUT_LENGTH];
+ u32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
unsigned int i;
- /* Note: 'bytes' must be multiple of 64 and not zero. */
-
- inp[0] = state[0];
- inp[1] = state[1];
- inp[2] = state[2];
- inp[3] = state[3];
- inp[4] = state[4];
- inp[5] = state[5];
- inp[6] = state[6];
- inp[7] = state[7];
- inp[8] = state[8];
- inp[9] = state[9];
- inp[10] = state[10];
- inp[11] = state[11];
- inp[12] = state[12];
- inp[13] = state[13];
- inp[14] = state[14];
- inp[15] = state[15];
-
- do
+ while (nblks)
{
- /* First round. */
- pad[0] = inp[0];
- pad[4] = inp[4];
- pad[8] = inp[8];
- pad[12] = inp[12];
- QROUND (pad[0], pad[4], pad[8], pad[12]);
- pad[1] = inp[1];
- pad[5] = inp[5];
- pad[9] = inp[9];
- pad[13] = inp[13];
- QROUND (pad[1], pad[5], pad[9], pad[13]);
- pad[2] = inp[2];
- pad[6] = inp[6];
- pad[10] = inp[10];
- pad[14] = inp[14];
- QROUND (pad[2], pad[6], pad[10], pad[14]);
- pad[3] = inp[3];
- pad[7] = inp[7];
- pad[11] = inp[11];
- pad[15] = inp[15];
- QROUND (pad[3], pad[7], pad[11], pad[15]);
-
- QROUND (pad[0], pad[5], pad[10], pad[15]);
- QROUND (pad[1], pad[6], pad[11], pad[12]);
- QROUND (pad[2], pad[7], pad[8], pad[13]);
- QROUND (pad[3], pad[4], pad[9], pad[14]);
-
- for (i = 2; i < 20 - 2; i += 2)
- {
- QROUND (pad[0], pad[4], pad[8], pad[12]);
- QROUND (pad[1], pad[5], pad[9], pad[13]);
- QROUND (pad[2], pad[6], pad[10], pad[14]);
- QROUND (pad[3], pad[7], pad[11], pad[15]);
-
- QROUND (pad[0], pad[5], pad[10], pad[15]);
- QROUND (pad[1], pad[6], pad[11], pad[12]);
- QROUND (pad[2], pad[7], pad[8], pad[13]);
- QROUND (pad[3], pad[4], pad[9], pad[14]);
- }
-
- QROUND (pad[0], pad[4], pad[8], pad[12]);
- QROUND (pad[1], pad[5], pad[9], pad[13]);
- QROUND (pad[2], pad[6], pad[10], pad[14]);
- QROUND (pad[3], pad[7], pad[11], pad[15]);
-
- if (src)
- {
-#define DO_OUT(idx) buf_put_le32(dst + (idx) * 4, \
- (pad[idx] + inp[idx]) ^ \
- buf_get_le32(src + (idx) * 4))
- /* Last round. */
- QROUND (pad[0], pad[5], pad[10], pad[15]);
- QOUT(0, 5, 10, 15);
- QROUND (pad[1], pad[6], pad[11], pad[12]);
- QOUT(1, 6, 11, 12);
- QROUND (pad[2], pad[7], pad[8], pad[13]);
- QOUT(2, 7, 8, 13);
- QROUND (pad[3], pad[4], pad[9], pad[14]);
- QOUT(3, 4, 9, 14);
-#undef DO_OUT
- }
- else
- {
-#define DO_OUT(idx) buf_put_le32(dst + (idx) * 4, pad[idx] + inp[idx])
- /* Last round. */
- QROUND (pad[0], pad[5], pad[10], pad[15]);
- QOUT(0, 5, 10, 15);
- QROUND (pad[1], pad[6], pad[11], pad[12]);
- QOUT(1, 6, 11, 12);
- QROUND (pad[2], pad[7], pad[8], pad[13]);
- QOUT(2, 7, 8, 13);
- QROUND (pad[3], pad[4], pad[9], pad[14]);
- QOUT(3, 4, 9, 14);
-#undef DO_OUT
- }
-
- /* Update counter. */
- inp[13] += (!++inp[12]);
-
- bytes -= CHACHA20_BLOCK_SIZE;
+ x0 = input[0];
+ x1 = input[1];
+ x2 = input[2];
+ x3 = input[3];
+ x4 = input[4];
+ x5 = input[5];
+ x6 = input[6];
+ x7 = input[7];
+ x8 = input[8];
+ x9 = input[9];
+ x10 = input[10];
+ x11 = input[11];
+ x12 = input[12];
+ x13 = input[13];
+ x14 = input[14];
+ x15 = input[15];
+
+ for (i = 20; i > 0; i -= 2)
+ {
+ QUARTERROUND(x0, x4, x8, x12)
+ QUARTERROUND(x1, x5, x9, x13)
+ QUARTERROUND(x2, x6, x10, x14)
+ QUARTERROUND(x3, x7, x11, x15)
+ QUARTERROUND(x0, x5, x10, x15)
+ QUARTERROUND(x1, x6, x11, x12)
+ QUARTERROUND(x2, x7, x8, x13)
+ QUARTERROUND(x3, x4, x9, x14)
+ }
+
+ x0 = PLUS(x0, input[0]);
+ x1 = PLUS(x1, input[1]);
+ x2 = PLUS(x2, input[2]);
+ x3 = PLUS(x3, input[3]);
+ x4 = PLUS(x4, input[4]);
+ x5 = PLUS(x5, input[5]);
+ x6 = PLUS(x6, input[6]);
+ x7 = PLUS(x7, input[7]);
+ x8 = PLUS(x8, input[8]);
+ x9 = PLUS(x9, input[9]);
+ x10 = PLUS(x10, input[10]);
+ x11 = PLUS(x11, input[11]);
+ x12 = PLUS(x12, input[12]);
+ x13 = PLUS(x13, input[13]);
+ x14 = PLUS(x14, input[14]);
+ x15 = PLUS(x15, input[15]);
+
+ input[12] = PLUSONE(input[12]);
+ input[13] = PLUS(input[13], !input[12]);
+
+ BUF_XOR_LE32(dst, src, 0, x0);
+ BUF_XOR_LE32(dst, src, 4, x1);
+ BUF_XOR_LE32(dst, src, 8, x2);
+ BUF_XOR_LE32(dst, src, 12, x3);
+ BUF_XOR_LE32(dst, src, 16, x4);
+ BUF_XOR_LE32(dst, src, 20, x5);
+ BUF_XOR_LE32(dst, src, 24, x6);
+ BUF_XOR_LE32(dst, src, 28, x7);
+ BUF_XOR_LE32(dst, src, 32, x8);
+ BUF_XOR_LE32(dst, src, 36, x9);
+ BUF_XOR_LE32(dst, src, 40, x10);
+ BUF_XOR_LE32(dst, src, 44, x11);
+ BUF_XOR_LE32(dst, src, 48, x12);
+ BUF_XOR_LE32(dst, src, 52, x13);
+ BUF_XOR_LE32(dst, src, 56, x14);
+ BUF_XOR_LE32(dst, src, 60, x15);
+
+ src += CHACHA20_BLOCK_SIZE;
dst += CHACHA20_BLOCK_SIZE;
- src += (src) ? CHACHA20_BLOCK_SIZE : 0;
+ nblks--;
}
- while (bytes >= CHACHA20_BLOCK_SIZE);
-
- state[12] = inp[12];
- state[13] = inp[13];
/* burn_stack */
- return (2 * CHACHA20_INPUT_LENGTH * sizeof(u32) + 6 * sizeof(void *));
-}
-#endif /*!USE_SSE2*/
-
-#undef QROUND
-#undef QOUT
-
-
-static unsigned int
-chacha20_core(u32 *dst, struct CHACHA20_context_s *ctx)
-{
- return ctx->blocks(ctx->input, NULL, (byte *)dst, CHACHA20_BLOCK_SIZE)
- + ASM_EXTRA_STACK;
+ return (17 * sizeof(u32) + 6 * sizeof(void *));
}
static void
-chacha20_keysetup (CHACHA20_context_t * ctx, const byte * key,
+chacha20_keysetup (CHACHA20_context_t *ctx, const byte *key,
unsigned int keylen)
{
- /* These constants are the little endian encoding of the string
- "expand 32-byte k". For the 128 bit variant, the "32" in that
- string will be fixed up to "16". */
- ctx->input[0] = 0x61707865; /* "apxe" */
- ctx->input[1] = 0x3320646e; /* "3 dn" */
- ctx->input[2] = 0x79622d32; /* "yb-2" */
- ctx->input[3] = 0x6b206574; /* "k et" */
-
- ctx->input[4] = buf_get_le32 (key + 0);
- ctx->input[5] = buf_get_le32 (key + 4);
- ctx->input[6] = buf_get_le32 (key + 8);
- ctx->input[7] = buf_get_le32 (key + 12);
-
+ static const char sigma[16] = "expand 32-byte k";
+ static const char tau[16] = "expand 16-byte k";
+ const char *constants;
+
+ ctx->input[4] = buf_get_le32(key + 0);
+ ctx->input[5] = buf_get_le32(key + 4);
+ ctx->input[6] = buf_get_le32(key + 8);
+ ctx->input[7] = buf_get_le32(key + 12);
if (keylen == CHACHA20_MAX_KEY_SIZE) /* 256 bits */
{
- ctx->input[8] = buf_get_le32 (key + 16);
- ctx->input[9] = buf_get_le32 (key + 20);
- ctx->input[10] = buf_get_le32 (key + 24);
- ctx->input[11] = buf_get_le32 (key + 28);
+ key += 16;
+ constants = sigma;
}
else /* 128 bits */
{
- ctx->input[8] = ctx->input[4];
- ctx->input[9] = ctx->input[5];
- ctx->input[10] = ctx->input[6];
- ctx->input[11] = ctx->input[7];
-
- ctx->input[1] -= 0x02000000; /* Change to "1 dn". */
- ctx->input[2] += 0x00000004; /* Change to "yb-6". */
+ constants = tau;
}
+ ctx->input[8] = buf_get_le32(key + 0);
+ ctx->input[9] = buf_get_le32(key + 4);
+ ctx->input[10] = buf_get_le32(key + 8);
+ ctx->input[11] = buf_get_le32(key + 12);
+ ctx->input[0] = buf_get_le32(constants + 0);
+ ctx->input[1] = buf_get_le32(constants + 4);
+ ctx->input[2] = buf_get_le32(constants + 8);
+ ctx->input[3] = buf_get_le32(constants + 12);
}
static void
-chacha20_ivsetup (CHACHA20_context_t * ctx, const byte * iv, size_t ivlen)
+chacha20_ivsetup (CHACHA20_context_t * ctx, const byte *iv, size_t ivlen)
{
if (ivlen == CHACHA20_CTR_SIZE)
{
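
As background, the double-round structure that the rewritten generic chacha20_blocks() spells out above with the individual x0..x15 variables condenses to the following standalone sketch. It is illustrative only and not part of the patch; rotl32() and chacha20_block() are hypothetical names:

/* Illustrative sketch of one ChaCha20 block using the same
 * QUARTERROUND/double-round structure as the generic path above.
 * Not part of the patch; rotl32() is a hypothetical helper. */
#include <stdint.h>
#include <string.h>

static uint32_t rotl32(uint32_t v, int c) { return (v << c) | (v >> (32 - c)); }

#define QR(a, b, c, d) \
  do { a += b; d = rotl32(d ^ a, 16); \
       c += d; b = rotl32(b ^ c, 12); \
       a += b; d = rotl32(d ^ a,  8); \
       c += d; b = rotl32(b ^ c,  7); } while (0)

/* Produce one 64-byte keystream block from a 16-word state and
 * advance the 64-bit block counter held in words 12 and 13. */
static void chacha20_block(uint32_t state[16], uint32_t out[16])
{
  uint32_t x[16];
  int i;

  memcpy(x, state, sizeof(x));
  for (i = 0; i < 20; i += 2)
    {
      /* Column round. */
      QR(x[0], x[4], x[8],  x[12]);
      QR(x[1], x[5], x[9],  x[13]);
      QR(x[2], x[6], x[10], x[14]);
      QR(x[3], x[7], x[11], x[15]);
      /* Diagonal round. */
      QR(x[0], x[5], x[10], x[15]);
      QR(x[1], x[6], x[11], x[12]);
      QR(x[2], x[7], x[8],  x[13]);
      QR(x[3], x[4], x[9],  x[14]);
    }
  for (i = 0; i < 16; i++)
    out[i] = x[i] + state[i];

  /* 64-bit counter carry, as in the patch: word 13 is bumped
   * when word 12 wraps to zero. */
  if (++state[12] == 0)
    state[13]++;
}
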
@@ -367,9 +287,30 @@ chacha20_ivsetup (CHACHA20_context_t * ctx, const byte * iv, size_t ivlen)
}
+static void
+chacha20_setiv (void *context, const byte *iv, size_t ivlen)
+{
+ CHACHA20_context_t *ctx = (CHACHA20_context_t *) context;
+
+ /* draft-nir-cfrg-chacha20-poly1305-02 defines 96-bit and 64-bit nonce. */
+ if (iv && ivlen != CHACHA20_MAX_IV_SIZE && ivlen != CHACHA20_MIN_IV_SIZE
+ && ivlen != CHACHA20_CTR_SIZE)
+ log_info ("WARNING: chacha20_setiv: bad ivlen=%u\n", (u32) ivlen);
+
+ if (iv && (ivlen == CHACHA20_MAX_IV_SIZE || ivlen == CHACHA20_MIN_IV_SIZE
+ || ivlen == CHACHA20_CTR_SIZE))
+ chacha20_ivsetup (ctx, iv, ivlen);
+ else
+ chacha20_ivsetup (ctx, NULL, 0);
+
+ /* Reset the unused pad bytes counter. */
+ ctx->unused = 0;
+}
+
+
static gcry_err_code_t
-chacha20_do_setkey (CHACHA20_context_t * ctx,
- const byte * key, unsigned int keylen)
+chacha20_do_setkey (CHACHA20_context_t *ctx,
+ const byte *key, unsigned int keylen)
{
static int initialized;
static const char *selftest_failed;
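
For the IV-length check above: the 96-bit (IETF) nonce variant places the block counter in state word 12 and the nonce in words 13..15, while the original 64-bit nonce variant keeps a 64-bit counter in words 12..13. A minimal sketch of the IETF layout, purely for orientation; this is not the patch's chacha20_ivsetup, whose body is elided above:

#include <stdint.h>

/* Illustrative: RFC 7539 state layout for the 96-bit nonce variant.
 * Words 0..3 are the constants, 4..11 the key, 12 the block counter,
 * 13..15 the nonce. */
static void chacha20_state_ietf(uint32_t st[16],
                                const uint32_t key[8],
                                uint32_t counter,
                                const uint32_t nonce[3])
{
  static const uint32_t sigma[4] =
    { 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 };
  int i;

  for (i = 0; i < 4; i++)
    st[i] = sigma[i];
  for (i = 0; i < 8; i++)
    st[4 + i] = key[i];
  st[12] = counter;
  st[13] = nonce[0];
  st[14] = nonce[1];
  st[15] = nonce[2];
}
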
@@ -388,25 +329,15 @@ chacha20_do_setkey (CHACHA20_context_t * ctx,
if (keylen != CHACHA20_MAX_KEY_SIZE && keylen != CHACHA20_MIN_KEY_SIZE)
return GPG_ERR_INV_KEYLEN;
-#ifdef USE_SSE2
- ctx->blocks = _gcry_chacha20_amd64_sse2_blocks;
-#else
- ctx->blocks = chacha20_blocks;
-#endif
-
#ifdef USE_SSSE3
- if (features & HWF_INTEL_SSSE3)
- ctx->blocks = _gcry_chacha20_amd64_ssse3_blocks;
+ ctx->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
#endif
#ifdef USE_AVX2
- if (features & HWF_INTEL_AVX2)
- ctx->blocks = _gcry_chacha20_amd64_avx2_blocks;
+ ctx->use_avx2 = (features & HWF_INTEL_AVX2) != 0;
#endif
-#ifdef USE_NEON
- if (features & HWF_ARM_NEON)
- ctx->blocks = _gcry_chacha20_armv7_neon_blocks;
+#ifdef USE_ARMV7_NEON
+ ctx->use_neon = (features & HWF_ARM_NEON) != 0;
#endif
-
(void)features;
chacha20_keysetup (ctx, key, keylen);
@@ -419,7 +350,7 @@ chacha20_do_setkey (CHACHA20_context_t * ctx,
static gcry_err_code_t
-chacha20_setkey (void *context, const byte * key, unsigned int keylen)
+chacha20_setkey (void *context, const byte *key, unsigned int keylen)
{
CHACHA20_context_t *ctx = (CHACHA20_context_t *) context;
gcry_err_code_t rc = chacha20_do_setkey (ctx, key, keylen);
@@ -429,37 +360,19 @@ chacha20_setkey (void *context, const byte * key, unsigned int keylen)
static void
-chacha20_setiv (void *context, const byte * iv, size_t ivlen)
+chacha20_encrypt_stream (void *context, byte *outbuf, const byte *inbuf,
+ size_t length)
{
+ static const unsigned char zero_pad[CHACHA20_BLOCK_SIZE] = { 0, };
CHACHA20_context_t *ctx = (CHACHA20_context_t *) context;
-
- /* draft-nir-cfrg-chacha20-poly1305-02 defines 96-bit and 64-bit nonce. */
- if (iv && ivlen != CHACHA20_MAX_IV_SIZE && ivlen != CHACHA20_MIN_IV_SIZE
- && ivlen != CHACHA20_CTR_SIZE)
- log_info ("WARNING: chacha20_setiv: bad ivlen=%u\n", (u32) ivlen);
-
- if (iv && (ivlen == CHACHA20_MAX_IV_SIZE || ivlen == CHACHA20_MIN_IV_SIZE
- || ivlen == CHACHA20_CTR_SIZE))
- chacha20_ivsetup (ctx, iv, ivlen);
- else
- chacha20_ivsetup (ctx, NULL, 0);
-
- /* Reset the unused pad bytes counter. */
- ctx->unused = 0;
-}
-
-
-
-/* Note: This function requires LENGTH > 0. */
-static void
-chacha20_do_encrypt_stream (CHACHA20_context_t * ctx,
- byte * outbuf, const byte * inbuf, size_t length)
-{
unsigned int nburn, burn = 0;
+ if (!length)
+ return;
+
if (ctx->unused)
{
- unsigned char *p = (void *) ctx->pad;
+ unsigned char *p = ctx->pad;
size_t n;
gcry_assert (ctx->unused < CHACHA20_BLOCK_SIZE);
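
The branch above first drains keystream left over in ctx->pad from an earlier partial block before any whole blocks are processed. Condensed into a standalone helper it looks like this; the sketch is illustrative only, and xor_bytes()/drain_pad() are hypothetical names:

#include <stddef.h>
#include <stdint.h>

#define BLOCK_SIZE 64

/* Illustrative only: XOR n bytes of keystream into the output. */
static void xor_bytes(uint8_t *dst, const uint8_t *src,
                      const uint8_t *ks, size_t n)
{
  size_t i;
  for (i = 0; i < n; i++)
    dst[i] = src[i] ^ ks[i];
}

/* Consume up to 'len' bytes of keystream previously generated into
 * pad[]; '*unused' counts how many tail bytes of pad[] are still
 * fresh.  Returns the number of bytes handled. */
static size_t drain_pad(uint8_t *out, const uint8_t *in, size_t len,
                        const uint8_t pad[BLOCK_SIZE], unsigned int *unused)
{
  size_t n = *unused;

  if (n == 0)
    return 0;
  if (n > len)
    n = len;

  /* Unused keystream lives at the end of pad[], as in the patch. */
  xor_bytes(out, in, pad + BLOCK_SIZE - *unused, n);
  *unused -= n;
  return n;
}
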
@@ -467,29 +380,73 @@ chacha20_do_encrypt_stream (CHACHA20_context_t * ctx,
n = ctx->unused;
if (n > length)
n = length;
+
buf_xor (outbuf, inbuf, p + CHACHA20_BLOCK_SIZE - ctx->unused, n);
length -= n;
outbuf += n;
inbuf += n;
ctx->unused -= n;
+
if (!length)
return;
gcry_assert (!ctx->unused);
}
+#ifdef USE_AVX2
+ if (ctx->use_avx2 && length >= CHACHA20_BLOCK_SIZE * 8)
+ {
+ size_t nblocks = length / CHACHA20_BLOCK_SIZE;
+ nblocks -= nblocks % 8;
+ nburn = _gcry_chacha20_amd64_avx2_blocks8(ctx->input, outbuf, inbuf,
+ nblocks);
+ burn = nburn > burn ? nburn : burn;
+ length -= nblocks * CHACHA20_BLOCK_SIZE;
+ outbuf += nblocks * CHACHA20_BLOCK_SIZE;
+ inbuf += nblocks * CHACHA20_BLOCK_SIZE;
+ }
+#endif
+
+#ifdef USE_SSSE3
+ if (ctx->use_ssse3 && length >= CHACHA20_BLOCK_SIZE * 4)
+ {
+ size_t nblocks = length / CHACHA20_BLOCK_SIZE;
+ nblocks -= nblocks % 4;
+ nburn = _gcry_chacha20_amd64_ssse3_blocks4(ctx->input, outbuf, inbuf,
+ nblocks);
+ burn = nburn > burn ? nburn : burn;
+ length -= nblocks * CHACHA20_BLOCK_SIZE;
+ outbuf += nblocks * CHACHA20_BLOCK_SIZE;
+ inbuf += nblocks * CHACHA20_BLOCK_SIZE;
+ }
+#endif
+
+#ifdef USE_ARMV7_NEON
+ if (ctx->use_neon && length >= CHACHA20_BLOCK_SIZE * 4)
+ {
+ size_t nblocks = length / CHACHA20_BLOCK_SIZE;
+ nblocks -= nblocks % 4;
+ nburn = _gcry_chacha20_armv7_neon_blocks4(ctx->input, outbuf, inbuf,
+ nblocks);
+ burn = nburn > burn ? nburn : burn;
+ length -= nblocks * CHACHA20_BLOCK_SIZE;
+ outbuf += nblocks * CHACHA20_BLOCK_SIZE;
+ inbuf += nblocks * CHACHA20_BLOCK_SIZE;
+ }
+#endif
+
if (length >= CHACHA20_BLOCK_SIZE)
{
size_t nblocks = length / CHACHA20_BLOCK_SIZE;
- size_t bytes = nblocks * CHACHA20_BLOCK_SIZE;
- burn = ctx->blocks(ctx->input, inbuf, outbuf, bytes);
- length -= bytes;
- outbuf += bytes;
- inbuf += bytes;
+ nburn = chacha20_blocks(ctx->input, outbuf, inbuf, nblocks);
+ burn = nburn > burn ? nburn : burn;
+ length -= nblocks * CHACHA20_BLOCK_SIZE;
+ outbuf += nblocks * CHACHA20_BLOCK_SIZE;
+ inbuf += nblocks * CHACHA20_BLOCK_SIZE;
}
if (length > 0)
{
- nburn = chacha20_core (ctx->pad, ctx);
+ nburn = chacha20_blocks(ctx->input, ctx->pad, zero_pad, 1);
burn = nburn > burn ? nburn : burn;
buf_xor (outbuf, inbuf, ctx->pad, length);
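
The bulk path above peels off the widest available multiple first (8 blocks for AVX2, then 4 for SSSE3 or NEON), hands any remaining whole blocks to the generic routine, and leaves at most one partial block for the pad-based tail. A small self-contained program showing how a given length splits across those paths under that order (illustrative arithmetic only, not part of the patch):

#include <stdio.h>
#include <stddef.h>

#define BLOCK_SIZE 64

/* Illustrative arithmetic only: given an input length, show how many
 * bytes the AVX2 8-block path, the SSSE3/NEON 4-block path, the
 * generic 1-block path and the final partial block would each get
 * under the dispatch order used in the patch. */
int main(void)
{
  size_t length = 1000;          /* example input length in bytes */
  size_t nblocks, avx2, ssse3, generic;

  nblocks = length / BLOCK_SIZE;
  nblocks -= nblocks % 8;        /* widest path first */
  avx2 = nblocks * BLOCK_SIZE;
  length -= avx2;

  nblocks = length / BLOCK_SIZE;
  nblocks -= nblocks % 4;
  ssse3 = nblocks * BLOCK_SIZE;
  length -= ssse3;

  generic = (length / BLOCK_SIZE) * BLOCK_SIZE;
  length -= generic;

  printf("avx2=%zu ssse3=%zu generic=%zu tail=%zu\n",
         avx2, ssse3, generic, length);
  /* 1000 bytes -> avx2=512, ssse3=256, generic=192, tail=40 */
  return 0;
}
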
@@ -500,17 +457,6 @@ chacha20_do_encrypt_stream (CHACHA20_context_t * ctx,
}
-static void
-chacha20_encrypt_stream (void *context, byte * outbuf, const byte * inbuf,
- size_t length)
-{
- CHACHA20_context_t *ctx = (CHACHA20_context_t *) context;
-
- if (length)
- chacha20_do_encrypt_stream (ctx, outbuf, inbuf, length);
-}
-
-
static const char *
selftest (void)
{
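
For reference, the rewritten stream path is reached through the normal libgcrypt cipher interface; a minimal caller might look like this (illustrative only; the key and nonce are placeholders and error handling is mostly omitted):

#include <stdio.h>
#include <string.h>
#include <gcrypt.h>

int main(void)
{
  gcry_cipher_hd_t hd;
  unsigned char key[32] = { 0 };   /* placeholder 256-bit key  */
  unsigned char nonce[12] = { 0 }; /* placeholder 96-bit nonce */
  unsigned char buf[100] = "hello, chacha20";
  size_t i;

  if (!gcry_check_version (GCRYPT_VERSION))
    return 1;
  gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);

  if (gcry_cipher_open (&hd, GCRY_CIPHER_CHACHA20,
                        GCRY_CIPHER_MODE_STREAM, 0))
    return 1;
  gcry_cipher_setkey (hd, key, sizeof key);
  gcry_cipher_setiv (hd, nonce, sizeof nonce);

  /* In-place encryption; the stream mode XORs the keystream in. */
  gcry_cipher_encrypt (hd, buf, sizeof buf, NULL, 0);

  for (i = 0; i < 16; i++)
    printf ("%02x", buf[i]);
  printf ("\n");

  gcry_cipher_close (hd);
  return 0;
}

Such a test program can be built against the installed library with, e.g., gcc test.c $(libgcrypt-config --cflags --libs).
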
diff --git a/configure.ac b/configure.ac
index c4b59f4dd..a5aba144c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2227,9 +2227,8 @@ if test "$found" = "1" ; then
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
- GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20-sse2-amd64.lo"
- GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20-ssse3-amd64.lo"
- GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20-avx2-amd64.lo"
+ GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20-amd64-ssse3.lo"
+ GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20-amd64-avx2.lo"
;;
esac