From 5c2736f1a9a3cf5ecec0423e42fd877b85688b53 Mon Sep 17 00:00:00 2001 From: Sean Parkinson Date: Tue, 25 Jul 2017 15:45:43 +1000 Subject: [PATCH] Poly1305 Intel Assembly code - AVX and AVX2 --- wolfcrypt/src/chacha.c | 37 +- wolfcrypt/src/poly1305.c | 1218 ++++++++++++++++++++++++++++++++-- wolfcrypt/test/test.c | 61 +- wolfssl/wolfcrypt/poly1305.h | 49 +- 4 files changed, 1252 insertions(+), 113 deletions(-) diff --git a/wolfcrypt/src/chacha.c b/wolfcrypt/src/chacha.c index a4654bec0..ca3573b2d 100644 --- a/wolfcrypt/src/chacha.c +++ b/wolfcrypt/src/chacha.c @@ -404,10 +404,12 @@ static void chacha_encrypt_avx(ChaCha* ctx, const byte* m, byte* c, byte* output; word32 i; word32 cnt = 0; - static const __m128i add = { 0x0000000100000000UL,0x0000000300000002UL }; - static const __m128i four = { 0x0000000400000004UL,0x0000000400000004UL }; - static const __m128i rotl8 = { 0x0605040702010003UL,0x0e0d0c0f0a09080bUL }; - static const __m128i rotl16 = { 0x0504070601000302UL,0x0d0c0f0e09080b0aUL }; + static const word64 add[2] = { 0x0000000100000000UL,0x0000000300000002UL }; + static const word64 four[2] = { 0x0000000400000004UL,0x0000000400000004UL }; + static const word64 rotl8[2] = + { 0x0605040702010003UL,0x0e0d0c0f0a09080bUL }; + static const word64 rotl16[2] = + { 0x0504070601000302UL,0x0d0c0f0e09080b0aUL }; if (bytes == 0) return; @@ -632,8 +634,8 @@ static void chacha_encrypt_avx(ChaCha* ctx, const byte* m, byte* c, : [bytes] "+r" (bytes), [cnt] "+r" (cnt), [in] "+r" (m), [out] "+r" (c) : [X] "r" (X), [x] "r" (x), [key] "r" (ctx->X), - [add] "xrm" (add), [four] "xrm" (four), - [rotl8] "xrm" (rotl8), [rotl16] "xrm" (rotl16) + [add] "m" (add), [four] "m" (four), + [rotl8] "m" (rotl8), [rotl16] "m" (rotl16) : "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", @@ -669,14 +671,17 @@ static void chacha_encrypt_avx2(ChaCha* ctx, const byte* m, byte* c, byte* output; word32 i; word32 cnt = 0; - static const __m256i add = { 0x0000000100000000UL,0x0000000300000002UL, - 0x0000000500000004UL,0x0000000700000006UL }; - static const __m256i eight = { 0x0000000800000008UL,0x0000000800000008UL, - 0x0000000800000008UL,0x0000000800000008UL }; - static const __m256i rotl8 = { 0x0605040702010003UL,0x0e0d0c0f0a09080bUL, - 0x0605040702010003UL,0x0e0d0c0f0a09080bUL }; - static const __m256i rotl16 = { 0x0504070601000302UL,0x0d0c0f0e09080b0aUL, - 0x0504070601000302UL,0x0d0c0f0e09080b0aUL }; + static const word64 add[4] = { 0x0000000100000000UL, 0x0000000300000002UL, + 0x0000000500000004UL, 0x0000000700000006UL }; + static const word64 eight[4] = + { 0x0000000800000008UL, 0x0000000800000008UL, + 0x0000000800000008UL, 0x0000000800000008UL }; + static const word64 rotl8[4] = + { 0x0605040702010003UL, 0x0e0d0c0f0a09080bUL, + 0x0605040702010003UL, 0x0e0d0c0f0a09080bUL }; + static const word64 rotl16[4] = + { 0x0504070601000302UL, 0x0d0c0f0e09080b0aUL, + 0x0504070601000302UL, 0x0d0c0f0e09080b0aUL }; if (bytes == 0) return; @@ -917,8 +922,8 @@ static void chacha_encrypt_avx2(ChaCha* ctx, const byte* m, byte* c, : [bytes] "+r" (bytes), [cnt] "+r" (cnt), [in] "+r" (m), [out] "+r" (c) : [X] "r" (X), [x] "r" (x), [key] "r" (ctx->X), - [add] "rm" (add), [eight] "rm" (eight), - [rotl8] "rm" (rotl8), [rotl16] "rm" (rotl16) + [add] "m" (add), [eight] "m" (eight), + [rotl8] "m" (rotl8), [rotl16] "m" (rotl16) : "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "ymm8", "ymm9", "ymm10", "ymm11", diff --git a/wolfcrypt/src/poly1305.c b/wolfcrypt/src/poly1305.c index 
4badc05b7..096305c36 100644 --- a/wolfcrypt/src/poly1305.c +++ b/wolfcrypt/src/poly1305.c @@ -34,6 +34,7 @@ #include #include #include +#include #ifdef NO_INLINE #include #else @@ -49,8 +50,20 @@ #pragma warning(disable: 4127) #endif -#if defined(POLY130564) +#ifdef USE_INTEL_SPEEDUP + #include + #include + #define HAVE_INTEL_AVX1 + #define HAVE_INTEL_AVX2 +#endif + +#ifdef USE_INTEL_SPEEDUP +static word32 intel_flags = 0; +static word32 cpu_flags_set = 0; +#endif + +#if defined(USE_INTEL_SPEEDUP) || defined(POLY130564) #if defined(_MSC_VER) #define POLY1305_NOINLINE __declspec(noinline) #elif defined(__GNUC__) @@ -88,8 +101,1012 @@ #define SHR(in, shift) (word64)(in >> (shift)) #define LO(in) (word64)(in) #endif +#endif - static word64 U8TO64(const byte* p) { +#ifdef USE_INTEL_SPEEDUP +#ifdef HAVE_INTEL_AVX1 +static void poly1305_block_avx(Poly1305* ctx, const unsigned char *m) +{ + __asm__ __volatile__ ( + "movq (%[ctx]), %%r15\n\t" + "movq 24(%[ctx]), %%r8\n\t" + "movq 32(%[ctx]), %%r9\n\t" + "movq 40(%[ctx]), %%r10\n\t" + "xorq %%rbx, %%rbx\n\t" + "movb %[nfin], %%bl\n\t" + "# h += m\n\t" + "movq (%[m]), %%r11\n\t" + "movq 8(%[m]), %%r12\n\t" + "addq %%r11, %%r8\n\t" + "adcq %%r12, %%r9\n\t" + "movq 8(%[ctx]), %%rax\n\t" + "adcq %%rbx, %%r10\n\t" + "# r[1] * h[0] => rdx, rax ==> t2, t1\n\t" + "mulq %%r8\n\t" + "movq %%rax, %%r12\n\t" + "movq %%rdx, %%r13\n\t" + "# r[0] * h[1] => rdx, rax ++> t2, t1\n\t" + "movq %%r15, %%rax\n\t" + "mulq %%r9\n\t" + "addq %%rax, %%r12\n\t" + "movq %%r15, %%rax\n\t" + "adcq %%rdx, %%r13\n\t" + "# r[0] * h[0] => rdx, rax +=> t1, t0\n\t" + "mulq %%r8\n\t" + "movq %%rdx, %%r8\n\t" + "movq %%rax, %%r11\n\t" + "# r[1] * h[1] => rdx, rax =+> t3, t2\n\t" + "movq 8(%[ctx]), %%rax\n\t" + "mulq %%r9\n\t" + "# r[0] * h[2] +> t2\n\t" + "addq 64(%[ctx],%%r10,8), %%r13\n\t" + "movq %%rdx, %%r14\n\t" + "addq %%r8, %%r12\n\t" + "adcq %%rax, %%r13\n\t" + "# r[1] * h[2] +> t3\n\t" + "adcq 112(%[ctx],%%r10,8), %%r14\n\t" + "# r * h in r14, r13, r12, r11 \n\t" + "# h = (r * h) mod 2^130 - 5\n\t" + "movq %%r13, %%r10\n\t" + "andq $-4, %%r13\n\t" + "andq $3, %%r10\n\t" + "addq %%r13, %%r11\n\t" + "movq %%r13, %%r8\n\t" + "adcq %%r14, %%r12\n\t" + "adcq $0, %%r10\n\t" + "shrdq $2, %%r14, %%r8\n\t" + "shrq $2, %%r14\n\t" + "addq %%r11, %%r8\n\t" + "adcq %%r14, %%r12\n\t" + "movq %%r12, %%r9\n\t" + "adcq $0, %%r10\n\t" + "# h in r10, r9, r8 \n\t" + "# Store h to ctx\n\t" + "movq %%r8, 24(%[ctx])\n\t" + "movq %%r9, 32(%[ctx])\n\t" + "movq %%r10, 40(%[ctx])\n\t" + : + : [m] "r" (m), [ctx] "r" (ctx), [nfin] "m" (ctx->finished) + : "rax", "rdx", "r11", "r12", "r13", "r14", "r15", "rbx", + "r8", "r9", "r10", "memory" + ); +} + +static void poly1305_blocks_avx(Poly1305* ctx, const unsigned char *m, + size_t bytes) +{ + __asm__ __volatile__ ( + "movq (%[ctx]), %%r15\n\t" + "movq 24(%[ctx]), %%r8\n\t" + "movq 32(%[ctx]), %%r9\n\t" + "movq 40(%[ctx]), %%r10\n\t" + "L_avx_start:\n\t" + "# h += m\n\t" + "movq (%[m]), %%r11\n\t" + "movq 8(%[m]), %%r12\n\t" + "addq %%r11, %%r8\n\t" + "adcq %%r12, %%r9\n\t" + "movq 8(%[ctx]), %%rax\n\t" + "adcq $0, %%r10\n\t" + "# r[1] * h[0] => rdx, rax ==> t2, t1\n\t" + "mulq %%r8\n\t" + "movq %%rax, %%r12\n\t" + "movq %%rdx, %%r13\n\t" + "# r[0] * h[1] => rdx, rax ++> t2, t1\n\t" + "movq %%r15, %%rax\n\t" + "mulq %%r9\n\t" + "addq %%rax, %%r12\n\t" + "movq %%r15, %%rax\n\t" + "adcq %%rdx, %%r13\n\t" + "# r[0] * h[0] => rdx, rax +=> t1, t0\n\t" + "mulq %%r8\n\t" + "movq %%rdx, %%r8\n\t" + "movq %%rax, %%r11\n\t" + "# r[1] * h[1] => rdx, rax =+> t3, t2\n\t" 
+ "movq 8(%[ctx]), %%rax\n\t" + "mulq %%r9\n\t" + "# r[0] * h[2] +> t2\n\t" + "addq 72(%[ctx],%%r10,8), %%r13\n\t" + "movq %%rdx, %%r14\n\t" + "addq %%r8, %%r12\n\t" + "adcq %%rax, %%r13\n\t" + "# r[1] * h[2] +> t3\n\t" + "adcq 120(%[ctx],%%r10,8), %%r14\n\t" + "# r * h in r14, r13, r12, r11 \n\t" + "# h = (r * h) mod 2^130 - 5\n\t" + "movq %%r13, %%r10\n\t" + "andq $-4, %%r13\n\t" + "andq $3, %%r10\n\t" + "addq %%r13, %%r11\n\t" + "movq %%r13, %%r8\n\t" + "adcq %%r14, %%r12\n\t" + "adcq $0, %%r10\n\t" + "shrdq $2, %%r14, %%r8\n\t" + "shrq $2, %%r14\n\t" + "addq %%r11, %%r8\n\t" + "adcq %%r14, %%r12\n\t" + "movq %%r12, %%r9\n\t" + "adcq $0, %%r10\n\t" + "# h in r10, r9, r8 \n\t" + "# Next block from message\n\t" + "addq $16, %[m]\n\t" + "subq $16, %[bytes]\n\t" + "cmp $16, %[bytes]\n\t" + "jge L_avx_start\n\t" + "# Store h to ctx\n\t" + "movq %%r8, 24(%[ctx])\n\t" + "movq %%r9, 32(%[ctx])\n\t" + "movq %%r10, 40(%[ctx])\n\t" + : [m] "+r" (m), [bytes] "+r" (bytes) + : [ctx] "r" (ctx) + : "rax", "rdx", "r11", "r12", "r13", "r14", "r15", + "r8", "r9", "r10", "memory" + ); +} + +static void poly1305_setkey_avx(Poly1305* ctx, const byte* key) +{ + int i; + + ctx->r[0] = *(word64*)(key + 0) & 0x0ffffffc0fffffffL; + ctx->r[1] = *(word64*)(key + 8) & 0x0ffffffc0ffffffcL; + + for (i=0; i<6; i++) { + ctx->t0[i] = ctx->r[0] * i; + ctx->t1[i] = ctx->r[1] * i; + } + + /* h (accumulator) = 0 */ + ctx->h[0] = 0; + ctx->h[1] = 0; + ctx->h[2] = 0; + + /* save pad for later */ + ctx->pad[0] = *(word64*)(key + 16); + ctx->pad[1] = *(word64*)(key + 24); + + ctx->leftover = 0; + ctx->finished = 1; +} + +static void poly1305_final_avx(Poly1305* ctx, byte* mac) +{ + word64 h0, h1, h2; + + /* process the remaining block */ + if (ctx->leftover) { + size_t i = ctx->leftover; + ctx->buffer[i] = 1; + for (i = i + 1; i < POLY1305_BLOCK_SIZE; i++) + ctx->buffer[i] = 0; + ctx->finished = 0; + poly1305_block_avx(ctx, ctx->buffer); + } + + h0 = ctx->h[0]; + h1 = ctx->h[1]; + h2 = ctx->h[2]; + + /* h %= p */ + /* h = (h + pad) */ + __asm__ __volatile__ ( + "# mod 2^130 - 5\n\t" + "movq %[h2], %%r13\n\t" + "andq $0x3, %[h2]\n\t" + "shrq $0x2, %%r13\n\t" + "leaq (%%r13, %%r13, 4), %%r13\n\t" + "add %%r13, %[h0]\n\t" + "adc $0, %[h1]\n\t" + "adc $0, %[h2]\n\t" + "# Fixup when between (1 << 130) - 1 and (1 << 130) - 5\n\t" + "movq %[h0], %%r13\n\t" + "movq %[h1], %%r14\n\t" + "movq %[h2], %%r15\n\t" + "addq $5, %%r13\n\t" + "adcq $0, %%r14\n\t" + "adcq $0, %%r15\n\t" + "movq %%r15, %%r12\n\t" + "andq $3, %%r15\n\t" + "cmpq $4, %%r12\n\t" + "cmove %%r13, %[h0]\n\t" + "cmove %%r14, %[h1]\n\t" + "cmove %%r15, %[h2]\n\t" + "# h += pad\n\t" + "add %[p0], %[h0]\n\t" + "adc %[p1], %[h1]\n\t" + "movq %[h0], (%[m])\n\t" + "movq %[h1], 8(%[m])\n\t" + : [h0] "+r" (h0), [h1] "+r" (h1), [h2] "+r" (h2), + [p0] "+r" (ctx->pad[0]), [p1] "+r" (ctx->pad[1]) + : [m] "r" (mac) + : "memory", "r15", "r14", "r13", "r12" + ); + + /* zero out the state */ + ctx->h[0] = 0; + ctx->h[1] = 0; + ctx->h[2] = 0; + ctx->r[0] = 0; + ctx->r[1] = 0; + ctx->pad[0] = 0; + ctx->pad[1] = 0; +} +#endif + +#ifdef HAVE_INTEL_AVX2 +#if defined(_MSC_VER) + #define POLY1305_NOINLINE __declspec(noinline) +#elif defined(__GNUC__) + #define POLY1305_NOINLINE __attribute__((noinline)) +#else + #define POLY1305_NOINLINE +#endif + +/* Load H into five 256-bit registers. + * + * h is the memory location of the data - 26 bits in 32. + * h0-h4 the 4 H values with 26 bits stored in 64 for multiply. + * z is zero. 
+ */ +#define LOAD_H(h, h0, h1, h2, h3, h4, z) \ + "vmovdqu ("#h"), "#h1"\n\t" \ + "vmovdqu 32("#h"), "#h3"\n\t" \ + "vmovdqu 64("#h"), "#h4"\n\t" \ + "vpermq $0xd8, "#h1", "#h1"\n\t" \ + "vpermq $0xd8, "#h3", "#h3"\n\t" \ + "vpermq $0xd8, "#h4", "#h4"\n\t" \ + "vpunpckldq "#z", "#h1", "#h0"\n\t" \ + "vpunpckhdq "#z", "#h1", "#h1"\n\t" \ + "vpunpckldq "#z", "#h3", "#h2"\n\t" \ + "vpunpckhdq "#z", "#h3", "#h3"\n\t" \ + "vpunpckldq "#z", "#h4", "#h4"\n\t" + +/* Store H, five 256-bit registers, packed. + * + * h is the memory location of the data - 26 bits in 32. + * h0-h4 the 4 H values with 26 bits stored in 64. + * x4 is the xmm register of h4. + */ +#define STORE_H(h, h0, h1, h2, h3, h4, x4) \ + "vpshufd $0x08, "#h0", "#h0"\n\t" \ + "vpshufd $0x08, "#h1", "#h1"\n\t" \ + "vpshufd $0x08, "#h2", "#h2"\n\t" \ + "vpshufd $0x08, "#h3", "#h3"\n\t" \ + "vpshufd $0x08, "#h4", "#h4"\n\t" \ + "vpermq $0x08, "#h0", "#h0"\n\t" \ + "vpermq $0x08, "#h1", "#h1"\n\t" \ + "vpermq $0x08, "#h2", "#h2"\n\t" \ + "vpermq $0x08, "#h3", "#h3"\n\t" \ + "vpermq $0x08, "#h4", "#h4"\n\t" \ + "vperm2i128 $0x20, "#h1", "#h0", "#h0"\n\t" \ + "vperm2i128 $0x20, "#h3", "#h2", "#h2"\n\t" \ + "vmovdqu "#h0", ("#h")\n\t" \ + "vmovdqu "#h2", 32("#h")\n\t" \ + "vmovdqu "#x4", 64("#h")\n\t" + +/* Load four powers of r into position to be multiplied by the 4 H values. + * + * rp0-rp3 are the register holding pointers to the values of the powers of r. + * r0-r4 holds the loaded values with 26 bits store in 64 for multiply. + * t0-t3 are temporary registers. + */ +#define LOAD_Rx4(rp0, rp1, rp2, rp3, \ + r0, r1, r2, r3, r4, \ + t0, t1, t2, t3) \ + "vmovdqu ("#rp0"), "#r0"\n\t" \ + "vmovdqu ("#rp1"), "#r1"\n\t" \ + "vmovdqu ("#rp2"), "#r2"\n\t" \ + "vmovdqu ("#rp3"), "#r3"\n\t" \ + "vpermq $0xd8, "#r0", "#r0"\n\t" \ + "vpermq $0xd8, "#r1", "#r1"\n\t" \ + "vpermq $0xd8, "#r2", "#r2"\n\t" \ + "vpermq $0xd8, "#r3", "#r3"\n\t" \ + "vpunpcklqdq "#r1", "#r0", "#t0"\n\t" \ + "vpunpckhqdq "#r1", "#r0", "#t1"\n\t" \ + "vpunpcklqdq "#r3", "#r2", "#t2"\n\t" \ + "vpunpckhqdq "#r3", "#r2", "#t3"\n\t" \ + "vperm2i128 $0x20, "#t2", "#t0", "#r0"\n\t" \ + "vperm2i128 $0x31, "#t2", "#t0", "#r2"\n\t" \ + "vperm2i128 $0x20, "#t3", "#t1", "#r4"\n\t" \ + "vpsrlq $32, "#r0", "#r1"\n\t" \ + "vpsrlq $32, "#r2", "#r3"\n\t" + +/* Load the r^4 value into position to be multiplied by all 4 H values. + * + * r4 holds r^4 as five 26 bits each in 32. + * r0-r4 holds the loaded values with 26 bits store in 64 for multiply. + * t0-t1 are temporary registers. + */ +#define LOAD_R4(r4, r40, r41, r42, r43, r44, \ + t0, t1) \ + "vmovdqu "#r4", "#t0"\n\t" \ + "vpsrlq $32, "#t0", "#t1"\n\t" \ + "vpermq $0x0, "#t0", "#r40"\n\t" \ + "vpermq $0x0, "#t1", "#r41"\n\t" \ + "vpermq $0x55, "#t0", "#r42"\n\t" \ + "vpermq $0x55, "#t1", "#r43"\n\t" \ + "vpermq $0xaa, "#t0", "#r44"\n\t" + +/* Multiply the top 4 26-bit values in 64 bits of each H by 5 for reduction in + * multiply. + * + * s1-s4 are each 64 bit value in r1-r4 multiplied by 5. + * r1-r4 are the top 4 + */ +#define MUL5(s1, s2, s3, s4, r1, r2, r3, r4) \ + "vpslld $2, "#r1", "#s1"\n\t" \ + "vpslld $2, "#r2", "#s2"\n\t" \ + "vpslld $2, "#r3", "#s3"\n\t" \ + "vpslld $2, "#r4", "#s4"\n\t" \ + "vpaddq "#s1", "#r1", "#s1"\n\t" \ + "vpaddq "#s2", "#r2", "#s2"\n\t" \ + "vpaddq "#s3", "#r3", "#s3"\n\t" \ + "vpaddq "#s4", "#r4", "#s4"\n\t" + +/* Add the 4 H values together. + * Each 64 bits in a register is 26 bits of one of the H values. + * + * h0-h4 contains the 4 H values. + * t1-t4 are temporary registers. 
+ */ +#define FINALIZE_H(h0, h1, h2, h3, h4, \ + t0, t1, t2, t3, t4) \ + "vpermq $0xf5, "#h0", "#t0"\n\t" \ + "vpermq $0xf5, "#h1", "#t1"\n\t" \ + "vpermq $0xf5, "#h2", "#t2"\n\t" \ + "vpermq $0xf5, "#h3", "#t3"\n\t" \ + "vpermq $0xf5, "#h4", "#t4"\n\t" \ + "vpaddq "#h0", "#t0", "#h0"\n\t" \ + "vpaddq "#h1", "#t1", "#h1"\n\t" \ + "vpaddq "#h2", "#t2", "#h2"\n\t" \ + "vpaddq "#h3", "#t3", "#h3"\n\t" \ + "vpaddq "#h4", "#t4", "#h4"\n\t" \ + "vpermq $0xaa, "#h0", "#t0"\n\t" \ + "vpermq $0xaa, "#h1", "#t1"\n\t" \ + "vpermq $0xaa, "#h2", "#t2"\n\t" \ + "vpermq $0xaa, "#h3", "#t3"\n\t" \ + "vpermq $0xaa, "#h4", "#t4"\n\t" \ + "vpaddq "#h0", "#t0", "#h0"\n\t" \ + "vpaddq "#h1", "#t1", "#h1"\n\t" \ + "vpaddq "#h2", "#t2", "#h2"\n\t" \ + "vpaddq "#h3", "#t3", "#h3"\n\t" \ + "vpaddq "#h4", "#t4", "#h4"\n\t" + +/* Move 32 bits from each xmm register to a 32 bit register. + * + * x0-x4 are the xmm version of the ymm registers used. + * t0-t4 are the 32-bit registers to store data in. + */ +#define MOVE_TO_32(x0, x1, x2, x3, x4, \ + t0, t1, t2, t3, t4) \ + "vmovd "#x0", "#t0"\n\t" \ + "vmovd "#x1", "#t1"\n\t" \ + "vmovd "#x2", "#t2"\n\t" \ + "vmovd "#x3", "#t3"\n\t" \ + "vmovd "#x4", "#t4"\n\t" + +/* Multiply using AVX2 instructions. + * Each register contains up to 32 bits of data in 64 bits. + * This is a 4 way parallel multiply. + * + * h0-h4 contain 4 H values with the 32 bits of each per register. + * r0-r4 contain the 4 powers of r. + * s1-s4 contain r1-r4 times 5. + * t0-t4 and v0-v3 are temporary registers. + */ +#define MUL_AVX2(h0, h1, h2, h3, h4, \ + r0, r1, r2, r3, r4, \ + s1, s2, s3, s4, \ + t0, t1, t2, t3, t4, \ + v0, v1, v2, v3) \ + "vpmuludq "#s1", "#h4", "#t0"\n\t" \ + "vpmuludq "#s2", "#h3", "#v0"\n\t" \ + "vpmuludq "#s2", "#h4", "#t1"\n\t" \ + "vpmuludq "#s3", "#h3", "#v1"\n\t" \ + "vpmuludq "#s3", "#h4", "#t2"\n\t" \ + "vpaddq "#t0", "#v0", "#t0"\n\t" \ + "vpmuludq "#s3", "#h2", "#v2"\n\t" \ + "vpmuludq "#s4", "#h4", "#t3"\n\t" \ + "vpaddq "#t1", "#v1", "#t1"\n\t" \ + "vpmuludq "#s4", "#h1", "#v3"\n\t" \ + "vpmuludq "#s4", "#h2", "#v0"\n\t" \ + "vpaddq "#t0", "#v2", "#t0"\n\t" \ + "vpmuludq "#s4", "#h3", "#v1"\n\t" \ + "vpmuludq "#r0", "#h3", "#v2"\n\t" \ + "vpaddq "#t0", "#v3", "#t0"\n\t" \ + "vpmuludq "#r0", "#h4", "#t4"\n\t" \ + "vpaddq "#t1", "#v0", "#t1"\n\t" \ + "vpmuludq "#r0", "#h0", "#v3"\n\t" \ + "vpaddq "#t2", "#v1", "#t2"\n\t" \ + "vpmuludq "#r0", "#h1", "#v0"\n\t" \ + "vpaddq "#t3", "#v2", "#t3"\n\t" \ + "vpmuludq "#r0", "#h2", "#v1"\n\t" \ + "vpmuludq "#r1", "#h2", "#v2"\n\t" \ + "vpaddq "#t0", "#v3", "#t0"\n\t" \ + "vpmuludq "#r1", "#h3", "#v3"\n\t" \ + "vpaddq "#t1", "#v0", "#t1"\n\t" \ + "vpmuludq "#r1", "#h0", "#v0"\n\t" \ + "vpaddq "#t2", "#v1", "#t2"\n\t" \ + "vpmuludq "#r1", "#h1", "#v1"\n\t" \ + "vpaddq "#t3", "#v2", "#t3"\n\t" \ + "vpmuludq "#r2", "#h1", "#v2"\n\t" \ + "vpaddq "#t4", "#v3", "#t4"\n\t" \ + "vpmuludq "#r2", "#h2", "#v3"\n\t" \ + "vpaddq "#t1", "#v0", "#t1"\n\t" \ + "vpmuludq "#r2", "#h0", "#v0"\n\t" \ + "vpaddq "#t2", "#v1", "#t2"\n\t" \ + "vpmuludq "#r3", "#h0", "#v1"\n\t" \ + "vpaddq "#t3", "#v2", "#t3"\n\t" \ + "vpmuludq "#r3", "#h1", "#v2"\n\t" \ + "vpaddq "#t4", "#v3", "#t4"\n\t" \ + "vpmuludq "#r4", "#h0", "#v3"\n\t" \ + "vpaddq "#t2", "#v0", "#t2"\n\t" \ + "vpaddq "#t3", "#v1", "#t3"\n\t" \ + "vpaddq "#t4", "#v2", "#t4"\n\t" \ + "vpaddq "#t4", "#v3", "#t4"\n\t" + +/* Load the 4 blocks of the message. + * + * m the address of the message to load. + * m0-m4 is the loaded message with 32 bits in 64. Loaded so data is parallel. 
+ * hi is the high bits of the 4 m (1<< 128 if not final block). + * z is zero. + */ +#define LOAD_M(m, m0, m1, m2, m3, m4, hi, z) \ + "vmovdqu (%[m]), "#m0"\n\t" \ + "vmovdqu 32(%[m]), "#m1"\n\t" \ + "vperm2i128 $0x20, "#m1", "#m0", "#m2"\n\t" \ + "vperm2i128 $0x31, "#m1", "#m0", "#m0"\n\t" \ + "vpunpckldq "#m0", "#m2", "#m1"\n\t" \ + "vpunpckhdq "#m0", "#m2", "#m3"\n\t" \ + "vpunpckldq "#z", "#m1", "#m0"\n\t" \ + "vpunpckhdq "#z", "#m1", "#m1"\n\t" \ + "vpunpckldq "#z", "#m3", "#m2"\n\t" \ + "vpunpckhdq "#z", "#m3", "#m3"\n\t" \ + "vmovdqu "#hi", "#m4"\n\t" \ + "vpsllq $6, "#m1", "#m1"\n\t" \ + "vpsllq $12, "#m2", "#m2"\n\t" \ + "vpsllq $18, "#m3", "#m3"\n\t" + + +/* Multiply using AVX2 instructions - adding with message. + * Each register contains up to 32 bits of data in 64 bits. + * This is a 4 way parallel multiply. + * The message data is loaded first and the multiplication adds into it. + * + * h0-h4 contain 4 H values with the 32 bits of each per register. + * r0-r4 contain the 4 powers of r. + * s1-s4 contain r1-r4 times 5. + * t0-t4 and v0-v3 are temporary registers. + * hi is the high bits of the 4 m (1<< 128 if not final block). + * z is zero. + */ +#define MUL_ADD_AVX2(h0, h1, h2, h3, h4, \ + r0, r1, r2, r3, r4, \ + s1, s2, s3, s4, \ + t0, t1, t2, t3, t4, \ + v0, v1, v2, v3, \ + hi, z) \ + "vmovdqu (%[m]), "#t0"\n\t" \ + "vmovdqu 32(%[m]), "#t1"\n\t" \ + "vperm2i128 $0x20, "#t1", "#t0", "#t2"\n\t" \ + "vperm2i128 $0x31, "#t1", "#t0", "#t0"\n\t" \ + "vpunpckldq "#t0", "#t2", "#t1"\n\t" \ + "vpunpckhdq "#t0", "#t2", "#t3"\n\t" \ + "vpunpckldq "#z", "#t1", "#t0"\n\t" \ + "vpunpckhdq "#z", "#t1", "#t1"\n\t" \ + "vpunpckldq "#z", "#t3", "#t2"\n\t" \ + "vpunpckhdq "#z", "#t3", "#t3"\n\t" \ + "vmovdqu "#hi", "#t4"\n\t" \ + "vpsllq $6, "#t1", "#t1"\n\t" \ + "vpsllq $12, "#t2", "#t2"\n\t" \ + "vpsllq $18, "#t3", "#t3"\n\t" \ + "vpmuludq "#s1", "#h4", "#v0"\n\t" \ + "vpaddq "#t0", "#v0", "#t0"\n\t" \ + "vpmuludq "#s2", "#h3", "#v0"\n\t" \ + "vpmuludq "#s2", "#h4", "#v1"\n\t" \ + "vpaddq "#t1", "#v1", "#t1"\n\t" \ + "vpmuludq "#s3", "#h3", "#v1"\n\t" \ + "vpmuludq "#s3", "#h4", "#v2"\n\t" \ + "vpaddq "#t2", "#v2", "#t2"\n\t" \ + "vpaddq "#t0", "#v0", "#t0"\n\t" \ + "vpmuludq "#s3", "#h2", "#v2"\n\t" \ + "vpmuludq "#s4", "#h4", "#v3"\n\t" \ + "vpaddq "#t3", "#v3", "#t3"\n\t" \ + "vpaddq "#t1", "#v1", "#t1"\n\t" \ + "vpmuludq "#s4", "#h1", "#v3"\n\t" \ + "vpmuludq "#s4", "#h2", "#v0"\n\t" \ + "vpaddq "#t0", "#v2", "#t0"\n\t" \ + "vpmuludq "#s4", "#h3", "#v1"\n\t" \ + "vpmuludq "#r0", "#h3", "#v2"\n\t" \ + "vpaddq "#t0", "#v3", "#t0"\n\t" \ + "vpmuludq "#r0", "#h4", "#v3"\n\t" \ + "vpaddq "#t4", "#v3", "#t4"\n\t" \ + "vpaddq "#t1", "#v0", "#t1"\n\t" \ + "vpmuludq "#r0", "#h0", "#v3"\n\t" \ + "vpaddq "#t2", "#v1", "#t2"\n\t" \ + "vpmuludq "#r0", "#h1", "#v0"\n\t" \ + "vpaddq "#t3", "#v2", "#t3"\n\t" \ + "vpmuludq "#r0", "#h2", "#v1"\n\t" \ + "vpmuludq "#r1", "#h2", "#v2"\n\t" \ + "vpaddq "#t0", "#v3", "#t0"\n\t" \ + "vpmuludq "#r1", "#h3", "#v3"\n\t" \ + "vpaddq "#t1", "#v0", "#t1"\n\t" \ + "vpmuludq "#r1", "#h0", "#v0"\n\t" \ + "vpaddq "#t2", "#v1", "#t2"\n\t" \ + "vpmuludq "#r1", "#h1", "#v1"\n\t" \ + "vpaddq "#t3", "#v2", "#t3"\n\t" \ + "vpmuludq "#r2", "#h1", "#v2"\n\t" \ + "vpaddq "#t4", "#v3", "#t4"\n\t" \ + "vpmuludq "#r2", "#h2", "#v3"\n\t" \ + "vpaddq "#t1", "#v0", "#t1"\n\t" \ + "vpmuludq "#r2", "#h0", "#v0"\n\t" \ + "vpaddq "#t2", "#v1", "#t2"\n\t" \ + "vpmuludq "#r3", "#h0", "#v1"\n\t" \ + "vpaddq "#t3", "#v2", "#t3"\n\t" \ + "vpmuludq "#r3", "#h1", "#v2"\n\t" \ + "vpaddq "#t4", 
"#v3", "#t4"\n\t" \ + "vpmuludq "#r4", "#h0", "#v3"\n\t" \ + "vpaddq "#t2", "#v0", "#t2"\n\t" \ + "vpaddq "#t3", "#v1", "#t3"\n\t" \ + "vpaddq "#t4", "#v2", "#t4"\n\t" \ + "vpaddq "#t4", "#v3", "#t4"\n\t" + +/* Reduce, in place, the 64 bits of data to 26 bits. + * + * h0-h4 contain the 4 H values to reduce. + * t0-t2 are temporaries. + * mask contains the 26-bit mask for each 64 bit value in the 256 bit register. + */ +#define REDUCE_IN(h0, h1, h2, h3, h4, \ + t0, t1, t2, mask) \ + "vpsrlq $26, "#h0", "#t0"\n\t" \ + "vpsrlq $26, "#h3", "#t1"\n\t" \ + "vpand "#mask", "#h0", "#h0"\n\t" \ + "vpand "#mask", "#h3", "#h3"\n\t" \ + "vpaddq "#h1", "#t0", "#h1"\n\t" \ + "vpaddq "#h4", "#t1", "#h4"\n\t" \ + \ + "vpsrlq $26, "#h1", "#t0"\n\t" \ + "vpsrlq $26, "#h4", "#t1"\n\t" \ + "vpand "#mask", "#h1", "#h1"\n\t" \ + "vpand "#mask", "#h4", "#h4"\n\t" \ + "vpaddq "#h2", "#t0", "#h2"\n\t" \ + "vpslld $2, "#t1", "#t2"\n\t" \ + "vpaddd "#t2", "#t1", "#t2"\n\t" \ + "vpaddq "#h0", "#t2", "#h0"\n\t" \ + \ + "vpsrlq $26, "#h2", "#t0"\n\t" \ + "vpsrlq $26, "#h0", "#t1"\n\t" \ + "vpand "#mask", "#h2", "#h2"\n\t" \ + "vpand "#mask", "#h0", "#h0"\n\t" \ + "vpaddq "#h3", "#t0", "#h3"\n\t" \ + "vpaddq "#h1", "#t1", "#h1"\n\t" \ + \ + "vpsrlq $26, "#h3", "#t0"\n\t" \ + "vpand "#mask", "#h3", "#h3"\n\t" \ + "vpaddq "#h4", "#t0", "#h4"\n\t" + +/* Reduce the 64 bits of data to 26 bits. + * + * h0-h4 contain the reduced H values. + * m0-m4 contain the 4 H values to reduce. + * t0-t2 are temporaries. + * mask contains the 26-bit mask for each 64 bit value in the 256 bit register. + */ +#define REDUCE(h0, h1, h2, h3, h4, \ + m0, m1, m2, m3, m4, \ + t0, t1, t2, mask) \ + "vpsrlq $26, "#m0", "#t0"\n\t" \ + "vpsrlq $26, "#m3", "#t1"\n\t" \ + "vpand "#mask", "#m0", "#m0"\n\t" \ + "vpand "#mask", "#m3", "#m3"\n\t" \ + "vpaddq "#m1", "#t0", "#m1"\n\t" \ + "vpaddq "#m4", "#t1", "#m4"\n\t" \ + \ + "vpsrlq $26, "#m1", "#t0"\n\t" \ + "vpsrlq $26, "#m4", "#t1"\n\t" \ + "vpand "#mask", "#m1", "#h1"\n\t" \ + "vpand "#mask", "#m4", "#h4"\n\t" \ + "vpaddq "#m2", "#t0", "#m2"\n\t" \ + "vpslld $2, "#t1", "#t2"\n\t" \ + "vpaddd "#t2", "#t1", "#t2"\n\t" \ + "vpaddq "#m0", "#t2", "#m0"\n\t" \ + \ + "vpsrlq $26, "#m2", "#t0"\n\t" \ + "vpsrlq $26, "#m0", "#t1"\n\t" \ + "vpand "#mask", "#m2", "#h2"\n\t" \ + "vpand "#mask", "#m0", "#h0"\n\t" \ + "vpaddq "#m3", "#t0", "#m3"\n\t" \ + "vpaddq "#h1", "#t1", "#h1"\n\t" \ + \ + "vpsrlq $26, "#m3", "#t0"\n\t" \ + "vpand "#mask", "#m3", "#h3"\n\t" \ + "vpaddq "#h4", "#t0", "#h4"\n\t" + + +POLY1305_NOINLINE static void poly1305_blocks_avx2(Poly1305* ctx, + const unsigned char* m, size_t bytes) +{ + ALIGN256 word64 r4[5][4]; + ALIGN256 word64 s[4][4]; + register word32 t0 asm("r8"); + register word32 t1 asm("r9"); + register word32 t2 asm("r10"); + register word32 t3 asm("r11"); + register word32 t4 asm("r12"); + static const word64 mask[4] = { 0x0000000003ffffff, 0x0000000003ffffff, + 0x0000000003ffffff, 0x0000000003ffffff }; + + __asm__ __volatile__ ( + "vpxor %%ymm15, %%ymm15, %%ymm15\n\t" + "cmpb $0x0, %[started]\n\t" + "jne L_begin\n\t" + "# Load the message data\n\t" + LOAD_M(m, %%ymm0, %%ymm1, %%ymm2, %%ymm3, %%ymm4, %[hibit], %%ymm15) + "vmovdqu %[mask], %%ymm14\n\t" + "# Reduce, in place, the message data\n\t" + REDUCE_IN(%%ymm0, %%ymm1, %%ymm2, %%ymm3, %%ymm4, + %%ymm10, %%ymm11, %%ymm12, %%ymm14) + "addq $64, %[m]\n\t" + "subq $64, %[bytes]\n\t" + "jz L_store\n\t" + "\n" + "L_begin:\n\t" + "# Load the H values.\n\t" + LOAD_H(%[h], %%ymm0, %%ymm1, %%ymm2, %%ymm3, %%ymm4, %%ymm15) + "movq 
416(%[ctx]), %%r8\n\t" + "# Check if there is a power of r to load - otherwise use r^4.\n\t" + "cmpq $0x0, %%r8\n\t" + "je L_load_r4\n\t" + "\n\t" + "movq 424(%[ctx]), %%r9\n\t" + "movq 432(%[ctx]), %%r10\n\t" + "movq 440(%[ctx]), %%r11\n\t" + "# Load the 4 powers of r.\n\t" + LOAD_Rx4(%%r8, %%r9, %%r10, %%r11, \ + %%ymm5, %%ymm6, %%ymm7, %%ymm8, %%ymm9, + %%ymm10, %%ymm11, %%ymm12, %%ymm13) + "jmp L_mul_5\n\t" + "\n" + "L_load_r4:\n\t" + "# Load r^4 into all four positions.\n\t" + LOAD_R4(384(%[ctx]), %%ymm5, %%ymm6, %%ymm7, %%ymm8, %%ymm9, + %%ymm13, %%ymm14) + "\n" + "L_mul_5:\n\t" + "# Multiply top 4 26-bit values of all four H by 5\n\t" + MUL5(%%ymm10, %%ymm11, %%ymm12, %%ymm13, %%ymm6, %%ymm7, %%ymm8, %%ymm9) + "# Store powers of r and multiple of 5 for use in multiply.\n\t" + "vmovdqa %%ymm10, (%[s])\n\t" + "vmovdqa %%ymm11, 32(%[s])\n\t" + "vmovdqa %%ymm12, 64(%[s])\n\t" + "vmovdqa %%ymm13, 96(%[s])\n\t" + "vmovdqa %%ymm5, (%[r4])\n\t" + "vmovdqa %%ymm6, 32(%[r4])\n\t" + "vmovdqa %%ymm7, 64(%[r4])\n\t" + "vmovdqa %%ymm8, 96(%[r4])\n\t" + "vmovdqa %%ymm9, 128(%[r4])\n\t" + "vmovdqu %[mask], %%ymm14\n\t" + "\n" + "# If not finished then loop over data\n\t" + "cmpb $0x1, %[fin]\n\t" + "jne L_start\n\t" + "# Do last multiply, reduce, add the four H together and move to\n\t" + "# 32-bit registers\n\t" + MUL_AVX2(%%ymm0, %%ymm1, %%ymm2, %%ymm3, %%ymm4, + (%[r4]), 32(%[r4]), 64(%[r4]), 96(%[r4]), 128(%[r4]), + (%[s]), 32(%[s]), 64(%[s]), 96(%[s]), + %%ymm5, %%ymm6, %%ymm7, %%ymm8, %%ymm9, + %%ymm10, %%ymm11, %%ymm12, %%ymm13) + REDUCE(%%ymm0, %%ymm1, %%ymm2, %%ymm3, %%ymm4, + %%ymm5, %%ymm6, %%ymm7, %%ymm8, %%ymm9, + %%ymm10, %%ymm11, %%ymm12, %%ymm14) + FINALIZE_H(%%ymm0, %%ymm1, %%ymm2, %%ymm3, %%ymm4, + %%ymm5, %%ymm6, %%ymm7, %%ymm8, %%ymm9) + MOVE_TO_32(%%xmm0, %%xmm1, %%xmm2, %%xmm3, %%xmm4, + %[t0], %[t1], %[t2], %[t3], %[t4]) + "jmp L_end\n\t" + "\n" + "L_start:\n\t" + MUL_ADD_AVX2(%%ymm0, %%ymm1, %%ymm2, %%ymm3, %%ymm4, + (%[r4]), 32(%[r4]), 64(%[r4]), 96(%[r4]), 128(%[r4]), + (%[s]), 32(%[s]), 64(%[s]), 96(%[s]), + %%ymm5, %%ymm6, %%ymm7, %%ymm8, %%ymm9, + %%ymm10, %%ymm11, %%ymm12, %%ymm13, + %[hibit], %%ymm15) + REDUCE(%%ymm0, %%ymm1, %%ymm2, %%ymm3, %%ymm4, + %%ymm5, %%ymm6, %%ymm7, %%ymm8, %%ymm9, + %%ymm10, %%ymm11, %%ymm12, %%ymm14) + "addq $64, %[m]\n\t" + "subq $64, %[bytes]\n\t" + "jnz L_start\n\t" + "\n" + "L_store:\n\t" + "# Store four H values - state\n\t" + STORE_H(%[h], %%ymm0, %%ymm1, %%ymm2, %%ymm3, %%ymm4, %%xmm4) + "\n" + "L_end:\n\t" + : [m] "+r" (m), [bytes] "+r" (bytes), + [t0] "=r" (t0), [t1] "=r" (t1), [t2] "=r" (t2), + [t3] "=r" (t3), [t4] "=r" (t4) + : [ctx] "r" (ctx), [h] "r" (ctx->hh), + [r4] "r" (r4), [s] "r" (s), + [fin] "m" (ctx->finished), [started] "m" (ctx->started), + [mask] "m" (mask), [hibit] "m" (ctx->hibit) + : "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", + "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15", + "memory" + ); + + if (ctx->finished) + { + word64 h0, h1, h2, g0, g1, g2, c; + + /* Convert to 64 bit form. */ + h0 = (((word64)(t1 & 0x3FFFF)) << 26) + t0; + h1 = (((word64)(t3 & 0x3FF)) << 34) + + (((word64) t2 ) << 8) + (t1 >> 18); + h2 = (((word64) t4 ) << 16) + (t3 >> 10); + + /* Perform modulur reduction. 
*/ + c = (h1 >> 44); h1 &= 0xfffffffffff; + h2 += c; c = (h2 >> 42); h2 &= 0x3ffffffffff; + h0 += c * 5; c = (h0 >> 44); h0 &= 0xfffffffffff; + h1 += c; c = (h1 >> 44); h1 &= 0xfffffffffff; + h2 += c; c = (h2 >> 42); h2 &= 0x3ffffffffff; + h0 += c * 5; c = (h0 >> 44); h0 &= 0xfffffffffff; + h1 += c; + + /* compute h + -p */ + g0 = h0 + 5; c = (g0 >> 44); g0 &= 0xfffffffffff; + g1 = h1 + c; c = (g1 >> 44); g1 &= 0xfffffffffff; + g2 = h2 + c - ((word64)1 << 42); + + /* select h if h < p, or h + -p if h >= p */ + c = (g2 >> ((sizeof(word64) * 8) - 1)) - 1; + g0 &= c; + g1 &= c; + g2 &= c; + c = ~c; + h0 = (h0 & c) | g0; + h1 = (h1 & c) | g1; + h2 = (h2 & c) | g2; + + /* Store for return */ + ctx->h[0] = h0; + ctx->h[1] = h1; + ctx->h[2] = h2; + } + + ctx->started = 1; +} + +/* Multiply two 130-bit numbers in 64-bit registers and reduce. + * + * r0-r2 are the first operand and the result. + * a0-a2 are the second operand. + */ +#define MUL_64(r0, r1, r2, a0, a1, a2) \ + s1 = a1 * (5 << 2); \ + s2 = a2 * (5 << 2); \ + MUL(d0, r0, a0); MUL(d, r1, s2); ADD(d0, d); MUL(d, r2, s1); ADD(d0, d); \ + MUL(d1, r0, a1); MUL(d, r1, a0); ADD(d1, d); MUL(d, r2, s2); ADD(d1, d); \ + MUL(d2, r0, a2); MUL(d, r1, a1); ADD(d2, d); MUL(d, r2, a0); ADD(d2, d); \ + \ + c = SHR(d0, 44); r0 = LO(d0) & 0xfffffffffff; \ + ADDLO(d1, c); c = SHR(d1, 44); r1 = LO(d1) & 0xfffffffffff; \ + ADDLO(d2, c); c = SHR(d2, 42); r2 = LO(d2) & 0x3ffffffffff; \ + r0 += c * 5; c = (r0 >> 44); r0 = r0 & 0xfffffffffff; \ + r1 += c + +/* Store the 130-bit number in 64-bit registers as 26-bit values in 32 bits. + * + * r0-r2 contains the 130-bit number in 64-bit registers. + * r is the address of where to store the 26 bits in 32 result. + */ +#define CONV_64_TO_32(r0, r1, r2, r) \ + r[0] = (word32)( r0 ) & 0x3ffffff; \ + r[1] = (word32)((r0 >> 26) | (r1 << 18)) & 0x3ffffff; \ + r[2] = (word32)( r1 >> 8 ) & 0x3ffffff; \ + r[3] = (word32)((r1 >> 34) | (r2 << 10)) & 0x3ffffff; \ + r[4] = (word32)( r2 >> 16 ) + + +static void poly1305_setkey_avx2(Poly1305* ctx, const byte* key) +{ + word64 r0, r1, r2, t0, t1, c; + word64 r20, r21, r22; + word64 r30, r31, r32; + word64 r40, r41, r42; + word64 s1, s2; + word128 d0, d1, d2, d; + + /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ + t0 = ((word64*)key)[0]; + t1 = ((word64*)key)[1]; + r0 = ( t0 ) & 0xffc0fffffff; + r1 = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffff; + r2 = ((t1 >> 24) ) & 0x00ffffffc0f; + + __asm__ __volatile__ ( + "vpxor %%ymm0, %%ymm0, %%ymm0\n\t" + "vmovdqu %%ymm0, (%[h])\n\t" + "vmovdqu %%ymm0, 32(%[h])\n\t" + "vmovdqu %%ymm0, 64(%[h])\n\t" + "vmovdqu %%ymm0, (%[r0])\n\t" + "vmovdqu %%ymm0, (%[r1])\n\t" + "vmovdqu %%ymm0, (%[r2])\n\t" + "vmovdqu %%ymm0, (%[r3])\n\t" + "vmovdqu %%ymm0, (%[r4])\n\t" + : + : [h] "r" (ctx->hh), [r0] "r" (ctx->r0), [r1] "r" (ctx->r1), + [r2] "r" (ctx->r2), [r3] "r" (ctx->r3), [r4] "r" (ctx->r4) + : "memory", "ymm0" + ); + /* h = 0 */ + ctx->h[0] = 0; + ctx->h[1] = 0; + ctx->h[2] = 0; + + /* save pad for later */ + ctx->pad[0] = ((word64*)key)[2]; + ctx->pad[1] = ((word64*)key)[3]; + + /* Set 1 for r^0 */ + ctx->r0[0] = 1; + + /* Store r^1 */ + CONV_64_TO_32(r0, r1, r2, ctx->r1); + + /* Calc and store r^2 */ + r20 = r0; r21 = r1; r22 = r2; + MUL_64(r20, r21, r22, r0, r1, r2); + CONV_64_TO_32(r20, r21, r22, ctx->r2); + + /* Calc and store r^3 */ + r30 = r20; r31 = r21; r32 = r22; + MUL_64(r30, r31, r32, r0, r1, r2); + CONV_64_TO_32(r30, r31, r32, ctx->r3); + + /* Calc and store r^4 */ + r40 = r20; r41 = r21; r42 = r22; + MUL_64(r40, r41, r42, r20, r21, r22); + 
CONV_64_TO_32(r40, r41, r42, ctx->r4); + + /* NULL means use [r^4, r^4, r^4, r^4] */ + ctx->rp[0] = ctx->rp[1] = ctx->rp[2] = ctx->rp[3] = NULL; + + /* Message high bits set unless last partial block. */ + ctx->hibit[0] = ctx->hibit[1] = ctx->hibit[2] = ctx->hibit[3] = 0x1000000; + + ctx->leftover = 0; + ctx->finished = 0; + ctx->started = 0; +} + +static void poly1305_final_avx2(Poly1305* ctx, byte* mac) +{ + word64 h0, h1, h2, t0, t1, c; + + /* process the remaining block */ + if (ctx->leftover) { + size_t i = ctx->leftover; + + if (i & 15) + ctx->buffer[i++] = 1; + for (; i < POLY1305_BLOCK_SIZE * 4; i++) + ctx->buffer[i] = 0; + + ctx->hibit[3] = 0; + if (ctx->leftover < 48) + ctx->hibit[2] = 0; + if (ctx->leftover < 32) + ctx->hibit[1] = 0; + if (ctx->leftover < 16) + ctx->hibit[0] = 0; + + if (ctx->started) { + if (ctx->leftover <= 16) { + ctx->rp[0] = ctx->r4; + ctx->rp[1] = ctx->r4; + ctx->rp[2] = ctx->r3; + ctx->rp[3] = ctx->r2; + } + else if (ctx->leftover <= 32) { + ctx->rp[0] = ctx->r4; + ctx->rp[1] = ctx->r4; + ctx->rp[2] = ctx->r4; + ctx->rp[3] = ctx->r3; + } + } + + poly1305_blocks_avx2(ctx, ctx->buffer, POLY1305_BLOCK_SIZE * 4); + } + if (ctx->started) { + if (ctx->leftover == 0 || ctx->leftover > 48) { + ctx->rp[0] = ctx->r4; + ctx->rp[1] = ctx->r3; + ctx->rp[2] = ctx->r2; + ctx->rp[3] = ctx->r1; + } + else if (ctx->leftover > 32) { + ctx->rp[0] = ctx->r3; + ctx->rp[1] = ctx->r2; + ctx->rp[2] = ctx->r1; + ctx->rp[3] = ctx->r0; + } + else if (ctx->leftover > 16) { + ctx->rp[0] = ctx->r2; + ctx->rp[1] = ctx->r1; + ctx->rp[2] = ctx->r0; + ctx->rp[3] = ctx->r0; + } + else { + ctx->rp[0] = ctx->r1; + ctx->rp[1] = ctx->r0; + ctx->rp[2] = ctx->r0; + ctx->rp[3] = ctx->r0; + } + ctx->finished = 1; + poly1305_blocks_avx2(ctx, ctx->buffer, POLY1305_BLOCK_SIZE * 4); + } + + h0 = ctx->h[0]; + h1 = ctx->h[1]; + h2 = ctx->h[2]; + + /* h = (h + pad) */ + t0 = ctx->pad[0]; + t1 = ctx->pad[1]; + + h0 += (( t0 ) & 0xfffffffffff) ; + c = (h0 >> 44); h0 &= 0xfffffffffff; + h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffff) + c; + c = (h1 >> 44); h1 &= 0xfffffffffff; + h2 += (((t1 >> 24) ) & 0x3ffffffffff) + c; + h2 &= 0x3ffffffffff; + + /* mac = h % (2^128) */ + h0 = ((h0 ) | (h1 << 44)); + h1 = ((h1 >> 20) | (h2 << 24)); + + ((word64*)mac)[0] = h0; + ((word64*)mac)[1] = h1; + + /* zero out the state */ + __asm__ __volatile__ ( + "vpxor %%ymm0, %%ymm0, %%ymm0\n\t" + "vmovdqu %%ymm0, (%[h])\n\t" + "vmovdqu %%ymm0, 32(%[h])\n\t" + "vmovdqu %%ymm0, 64(%[h])\n\t" + "vmovdqu %%ymm0, (%[r1])\n\t" + "vmovdqu %%ymm0, (%[r2])\n\t" + "vmovdqu %%ymm0, (%[r3])\n\t" + "vmovdqu %%ymm0, (%[r4])\n\t" + : + : [h] "r" (ctx->hh), [r1] "r" (ctx->r1), [r2] "r" (ctx->r2), + [r3] "r" (ctx->r3), [r4] "r" (ctx->r4) + : "memory", "ymm0" + ); + ctx->h[0] = 0; + ctx->h[1] = 0; + ctx->h[2] = 0; + ctx->r[0] = 0; + ctx->r[1] = 0; + ctx->r[2] = 0; + ctx->pad[0] = 0; + ctx->pad[1] = 0; + + ctx->finished = 0; + ctx->started = 0; +} +#endif + +#elif defined(POLY130564) + + static word64 U8TO64(const byte* p) + { return (((word64)(p[0] & 0xff) ) | ((word64)(p[1] & 0xff) << 8) | @@ -114,7 +1131,8 @@ #else /* if not 64 bit then use 32 bit */ - static word32 U8TO32(const byte *p) { + static word32 U8TO32(const byte *p) + { return (((word32)(p[0] & 0xff) ) | ((word32)(p[1] & 0xff) << 8) | @@ -131,7 +1149,8 @@ #endif -static void U32TO64(word32 v, byte* p) { +static void U32TO64(word32 v, byte* p) +{ XMEMSET(p, 0, 8); p[0] = (v & 0xFF); p[1] = (v >> 8) & 0xFF; @@ -139,32 +1158,20 @@ static void U32TO64(word32 v, byte* p) { p[3] = (v >> 
24) & 0xFF; } - static void poly1305_blocks(Poly1305* ctx, const unsigned char *m, - size_t bytes) { - -#ifdef POLY130564 - - const word64 hibit = (ctx->final) ? 0 : ((word64)1 << 40); /* 1 << 128 */ + size_t bytes) +{ +#ifdef USE_INTEL_SPEEDUP + /* AVX2 is handled in wc_Poly1305Update. */ + poly1305_blocks_avx(ctx, m, bytes); +#elif defined(POLY130564) + const word64 hibit = (ctx->finished) ? 0 : ((word64)1 << 40); /* 1 << 128 */ word64 r0,r1,r2; word64 s1,s2; word64 h0,h1,h2; word64 c; word128 d0,d1,d2,d; -#else - - const word32 hibit = (ctx->final) ? 0 : (1 << 24); /* 1 << 128 */ - word32 r0,r1,r2,r3,r4; - word32 s1,s2,s3,s4; - word32 h0,h1,h2,h3,h4; - word64 d0,d1,d2,d3,d4; - word32 c; - -#endif - -#ifdef POLY130564 - r0 = ctx->r[0]; r1 = ctx->r[1]; r2 = ctx->r[2]; @@ -208,6 +1215,13 @@ static void poly1305_blocks(Poly1305* ctx, const unsigned char *m, ctx->h[2] = h2; #else /* if not 64 bit then use 32 bit */ + const word32 hibit = (ctx->finished) ? 0 : (1 << 24); /* 1 << 128 */ + word32 r0,r1,r2,r3,r4; + word32 s1,s2,s3,s4; + word32 h0,h1,h2,h3,h4; + word64 d0,d1,d2,d3,d4; + word32 c; + r0 = ctx->r[0]; r1 = ctx->r[1]; @@ -268,9 +1282,19 @@ static void poly1305_blocks(Poly1305* ctx, const unsigned char *m, #endif /* end of 64 bit cpu blocks or 32 bit cpu */ } +static void poly1305_block(Poly1305* ctx, const unsigned char *m) +{ +#ifdef USE_INTEL_SPEEDUP + /* AVX2 does 4 blocks at a time - this func not used. */ + poly1305_block_avx(ctx, m); +#else + poly1305_blocks(ctx, m, POLY1305_BLOCK_SIZE); +#endif +} -int wc_Poly1305SetKey(Poly1305* ctx, const byte* key, word32 keySz) { +int wc_Poly1305SetKey(Poly1305* ctx, const byte* key, word32 keySz) +{ #if defined(POLY130564) word64 t0,t1; #endif @@ -289,7 +1313,18 @@ int wc_Poly1305SetKey(Poly1305* ctx, const byte* key, word32 keySz) { if (keySz != 32 || ctx == NULL) return BAD_FUNC_ARG; -#if defined(POLY130564) +#ifdef USE_INTEL_SPEEDUP + if (!cpu_flags_set) { + intel_flags = cpuid_get_flags(); + cpu_flags_set = 1; + } + #ifdef HAVE_INTEL_AVX2 + if (IS_INTEL_AVX2(intel_flags)) + poly1305_setkey_avx2(ctx, key); + else + #endif + poly1305_setkey_avx(ctx, key); +#elif defined(POLY130564) /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ t0 = U8TO64(key + 0); @@ -308,6 +1343,9 @@ int wc_Poly1305SetKey(Poly1305* ctx, const byte* key, word32 keySz) { ctx->pad[0] = U8TO64(key + 16); ctx->pad[1] = U8TO64(key + 24); + ctx->leftover = 0; + ctx->finished = 0; + #else /* if not 64 bit then use 32 bit */ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ @@ -330,18 +1368,19 @@ int wc_Poly1305SetKey(Poly1305* ctx, const byte* key, word32 keySz) { ctx->pad[2] = U8TO32(key + 24); ctx->pad[3] = U8TO32(key + 28); -#endif - ctx->leftover = 0; - ctx->final = 0; + ctx->finished = 0; + +#endif return 0; } -int wc_Poly1305Final(Poly1305* ctx, byte* mac) { - -#if defined(POLY130564) +int wc_Poly1305Final(Poly1305* ctx, byte* mac) +{ +#ifdef USE_INTEL_SPEEDUP +#elif defined(POLY130564) word64 h0,h1,h2,c; word64 g0,g1,g2; @@ -359,7 +1398,14 @@ int wc_Poly1305Final(Poly1305* ctx, byte* mac) { if (ctx == NULL) return BAD_FUNC_ARG; -#if defined(POLY130564) +#ifdef USE_INTEL_SPEEDUP + #ifdef HAVE_INTEL_AVX2 + if (IS_INTEL_AVX2(intel_flags)) + poly1305_final_avx2(ctx, mac); + else + #endif + poly1305_final_avx(ctx, mac); +#elif defined(POLY130564) /* process the remaining block */ if (ctx->leftover) { @@ -367,8 +1413,8 @@ int wc_Poly1305Final(Poly1305* ctx, byte* mac) { ctx->buffer[i] = 1; for (i = i + 1; i < POLY1305_BLOCK_SIZE; i++) ctx->buffer[i] = 0; - ctx->final = 1; - 
poly1305_blocks(ctx, ctx->buffer, POLY1305_BLOCK_SIZE); + ctx->finished = 1; + poly1305_block(ctx, ctx->buffer); } /* fully carry h */ @@ -435,8 +1481,8 @@ int wc_Poly1305Final(Poly1305* ctx, byte* mac) { ctx->buffer[i++] = 1; for (; i < POLY1305_BLOCK_SIZE; i++) ctx->buffer[i] = 0; - ctx->final = 1; - poly1305_blocks(ctx, ctx->buffer, POLY1305_BLOCK_SIZE); + ctx->finished = 1; + poly1305_block(ctx, ctx->buffer); } /* fully carry h */ @@ -513,8 +1559,8 @@ int wc_Poly1305Final(Poly1305* ctx, byte* mac) { } -int wc_Poly1305Update(Poly1305* ctx, const byte* m, word32 bytes) { - +int wc_Poly1305Update(Poly1305* ctx, const byte* m, word32 bytes) +{ size_t i; #ifdef CHACHA_AEAD_TEST @@ -531,36 +1577,76 @@ int wc_Poly1305Update(Poly1305* ctx, const byte* m, word32 bytes) { if (ctx == NULL) return BAD_FUNC_ARG; - /* handle leftover */ - if (ctx->leftover) { - size_t want = (POLY1305_BLOCK_SIZE - ctx->leftover); - if (want > bytes) - want = bytes; - for (i = 0; i < want; i++) - ctx->buffer[ctx->leftover + i] = m[i]; - bytes -= (word32)want; - m += want; - ctx->leftover += want; - if (ctx->leftover < POLY1305_BLOCK_SIZE) - return 0; - poly1305_blocks(ctx, ctx->buffer, POLY1305_BLOCK_SIZE); - ctx->leftover = 0; +#ifdef USE_INTEL_SPEEDUP + #ifdef HAVE_INTEL_AVX2 + if (IS_INTEL_AVX2(intel_flags)) { + /* handle leftover */ + if (ctx->leftover) { + size_t want = (4 * POLY1305_BLOCK_SIZE - ctx->leftover); + if (want > bytes) + want = bytes; + for (i = 0; i < want; i++) + ctx->buffer[ctx->leftover + i] = m[i]; + bytes -= (word32)want; + m += want; + ctx->leftover += want; + if (ctx->leftover < 4 * POLY1305_BLOCK_SIZE) + return 0; + poly1305_blocks_avx2(ctx, ctx->buffer, 4 * POLY1305_BLOCK_SIZE); + ctx->leftover = 0; + } + + /* process full blocks */ + if (bytes >= 4 * POLY1305_BLOCK_SIZE) { + size_t want = (bytes & ~(4 * POLY1305_BLOCK_SIZE - 1)); + poly1305_blocks_avx2(ctx, m, want); + m += want; + bytes -= (word32)want; + } + + /* store leftover */ + if (bytes) { + for (i = 0; i < bytes; i++) + ctx->buffer[ctx->leftover + i] = m[i]; + ctx->leftover += bytes; + } + } + else + #endif +#endif + { + /* handle leftover */ + if (ctx->leftover) { + size_t want = (POLY1305_BLOCK_SIZE - ctx->leftover); + if (want > bytes) + want = bytes; + for (i = 0; i < want; i++) + ctx->buffer[ctx->leftover + i] = m[i]; + bytes -= (word32)want; + m += want; + ctx->leftover += want; + if (ctx->leftover < POLY1305_BLOCK_SIZE) + return 0; + poly1305_block(ctx, ctx->buffer); + ctx->leftover = 0; + } + + /* process full blocks */ + if (bytes >= POLY1305_BLOCK_SIZE) { + size_t want = (bytes & ~(POLY1305_BLOCK_SIZE - 1)); + poly1305_blocks(ctx, m, want); + m += want; + bytes -= (word32)want; + } + + /* store leftover */ + if (bytes) { + for (i = 0; i < bytes; i++) + ctx->buffer[ctx->leftover + i] = m[i]; + ctx->leftover += bytes; + } } - /* process full blocks */ - if (bytes >= POLY1305_BLOCK_SIZE) { - size_t want = (bytes & ~(POLY1305_BLOCK_SIZE - 1)); - poly1305_blocks(ctx, m, want); - m += want; - bytes -= (word32)want; - } - - /* store leftover */ - if (bytes) { - for (i = 0; i < bytes; i++) - ctx->buffer[ctx->leftover + i] = m[i]; - ctx->leftover += bytes; - } return 0; } diff --git a/wolfcrypt/test/test.c b/wolfcrypt/test/test.c index 95a427c74..44d4bfc8b 100644 --- a/wolfcrypt/test/test.c +++ b/wolfcrypt/test/test.c @@ -3188,7 +3188,9 @@ int poly1305_test(void) byte tag[16]; Poly1305 enc; - static const byte msg[] = + static const byte empty[] = { }; + + static const byte msg1[] = { 0x43,0x72,0x79,0x70,0x74,0x6f,0x67,0x72, 
0x61,0x70,0x68,0x69,0x63,0x20,0x46,0x6f, @@ -3230,17 +3232,28 @@ int poly1305_test(void) 0x61,0x16 }; + static const byte msg5[] = + { + 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, + 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, + }; + byte additional[] = { 0x50,0x51,0x52,0x53,0xc0,0xc1,0xc2,0xc3, 0xc4,0xc5,0xc6,0xc7 }; - static const byte correct[] = + static const byte correct0[] = + { + 0x01,0x03,0x80,0x8a,0xfb,0x0d,0xb2,0xfd, + 0x4a,0xbf,0xf6,0xaf,0x41,0x49,0xf5,0x1b + }; + + static const byte correct1[] = { 0xa8,0x06,0x1d,0xc1,0x30,0x51,0x36,0xc6, 0xc2,0x2b,0x8b,0xaf,0x0c,0x01,0x27,0xa9 - }; static const byte correct2[] = @@ -3261,6 +3274,12 @@ int poly1305_test(void) 0x7e,0x90,0x2e,0xcb,0xd0,0x60,0x06,0x91 }; + static const byte correct5[] = + { + 0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + }; + static const byte key[] = { 0x85,0xd6,0xbe,0x78,0x57,0x55,0x6d,0x33, 0x7f,0x44,0x52,0xfe,0x42,0xd5,0x06,0xa8, @@ -3282,41 +3301,49 @@ int poly1305_test(void) 0x2a,0x93,0x75,0x78,0x3e,0xd5,0x53,0xff }; - const byte* msgs[] = {msg, msg2, msg3}; - word32 szm[] = {sizeof(msg),sizeof(msg2),sizeof(msg3)}; - const byte* keys[] = {key, key2, key2}; - const byte* tests[] = {correct, correct2, correct3}; + static const byte key5[] = { + 0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 + }; - for (i = 0; i < 3; i++) { + const byte* msgs[] = {empty, msg1, msg2, msg3, msg5}; + word32 szm[] = {sizeof(empty), sizeof(msg1), sizeof(msg2), + sizeof(msg3), sizeof(msg5)}; + const byte* keys[] = {key, key, key2, key2, key5}; + const byte* tests[] = {correct0, correct1, correct2, correct3, correct5}; + + for (i = 0; i < 5; i++) { ret = wc_Poly1305SetKey(&enc, keys[i], 32); if (ret != 0) - return -3600; + return -3600 + i; ret = wc_Poly1305Update(&enc, msgs[i], szm[i]); if (ret != 0) - return -3601; + return -3605 + i; ret = wc_Poly1305Final(&enc, tag); if (ret != 0) - return -3602; + return -36108 + i; if (XMEMCMP(tag, tests[i], sizeof(tag))) - return -3603; + return -3615 + i; } /* Check TLS MAC function from 2.8.2 https://tools.ietf.org/html/rfc7539 */ XMEMSET(tag, 0, sizeof(tag)); ret = wc_Poly1305SetKey(&enc, key4, sizeof(key4)); if (ret != 0) - return -3604; + return -3614; ret = wc_Poly1305_MAC(&enc, additional, sizeof(additional), (byte*)msg4, sizeof(msg4), tag, sizeof(tag)); if (ret != 0) - return -3605; + return -3615; if (XMEMCMP(tag, correct4, sizeof(tag))) - return -3606; + return -3616; /* Check fail of TLS MAC function if altering additional data */ XMEMSET(tag, 0, sizeof(tag)); @@ -3324,10 +3351,10 @@ int poly1305_test(void) ret = wc_Poly1305_MAC(&enc, additional, sizeof(additional), (byte*)msg4, sizeof(msg4), tag, sizeof(tag)); if (ret != 0) - return -3607; + return -3617; if (XMEMCMP(tag, correct4, sizeof(tag)) == 0) - return -3608; + return -3618; return 0; diff --git a/wolfssl/wolfcrypt/poly1305.h b/wolfssl/wolfcrypt/poly1305.h index 75e30d7fb..2be815999 100644 --- a/wolfssl/wolfcrypt/poly1305.h +++ b/wolfssl/wolfcrypt/poly1305.h @@ -45,8 +45,9 @@ #define WC_HAS_GCC_4_4_64BIT #endif -#if (defined(WC_HAS_SIZEOF_INT128_64BIT) || defined(WC_HAS_MSVC_64BIT) || \ - defined(WC_HAS_GCC_4_4_64BIT)) +#ifdef USE_INTEL_SPEEDUP +#elif (defined(WC_HAS_SIZEOF_INT128_64BIT) || defined(WC_HAS_MSVC_64BIT) || \ + defined(WC_HAS_GCC_4_4_64BIT)) #define POLY130564 #else #define POLY130532 @@ -63,24 +64,44 @@ enum { /* Poly1305 state */ typedef struct Poly1305 { 
-#if defined(POLY130564) - word64 r[3]; - word64 h[3]; - word64 pad[2]; +#ifdef USE_INTEL_SPEEDUP + word64 r[3]; + word64 h[3]; + word64 pad[2]; + word64 t0[6]; + word64 t1[6]; + word64 hh[12]; + word32 r0[8]; + word32 r1[8]; + word32 r2[8]; + word32 r3[8]; + word32 r4[8]; + word32* rp[4]; + word64 hibit[4]; + size_t leftover; + unsigned char buffer[4*POLY1305_BLOCK_SIZE]; + unsigned char finished; + unsigned char started; #else - word32 r[5]; - word32 h[5]; - word32 pad[4]; +#if defined(POLY130564) + word64 r[3]; + word64 h[3]; + word64 pad[2]; +#else + word32 r[5]; + word32 h[5]; + word32 pad[4]; +#endif + size_t leftover; + unsigned char buffer[POLY1305_BLOCK_SIZE]; + unsigned char finished; #endif - size_t leftover; - unsigned char buffer[POLY1305_BLOCK_SIZE]; - unsigned char final; } Poly1305; - /* does init */ -WOLFSSL_API int wc_Poly1305SetKey(Poly1305* poly1305, const byte* key, word32 kySz); +WOLFSSL_API int wc_Poly1305SetKey(Poly1305* poly1305, const byte* key, + word32 kySz); WOLFSSL_API int wc_Poly1305Update(Poly1305* poly1305, const byte*, word32); WOLFSSL_API int wc_Poly1305Final(Poly1305* poly1305, byte* tag); WOLFSSL_API int wc_Poly1305_MAC(Poly1305* ctx, byte* additional, word32 addSz,
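
The AVX2 path in this patch keeps r and the accumulator as five 26-bit limbs packed into 32-bit words so that four blocks can be multiplied in parallel, while the key setup and the final reduction work in 44/44/42-bit limbs. For reference, the CONV_64_TO_32 macro used by poly1305_setkey_avx2() can be read as the plain-C helper below; this is a sketch only, and the function name is illustrative rather than part of the patch.

    #include <stdint.h>

    /* Repack a 130-bit value held as 44 + 44 + 42-bit limbs (r0, r1, r2)
     * into five 26-bit limbs, each stored in a 32-bit word - the layout
     * that LOAD_Rx4/LOAD_R4 expect when loading the powers of r. */
    static void conv_64_to_32_sketch(uint64_t r0, uint64_t r1, uint64_t r2,
                                     uint32_t r[5])
    {
        r[0] = (uint32_t)( r0                     ) & 0x3ffffff;
        r[1] = (uint32_t)((r0 >> 26) | (r1 << 18)) & 0x3ffffff;
        r[2] = (uint32_t)( r1 >>  8               ) & 0x3ffffff;
        r[3] = (uint32_t)((r1 >> 34) | (r2 << 10)) & 0x3ffffff;
        r[4] = (uint32_t)( r2 >> 16               );
    }

poly1305_setkey_avx2() stores r, r^2, r^3 and r^4 in this layout, which is what lets poly1305_blocks_avx2() fold four message blocks into the accumulator per iteration. The powers themselves are produced by the MUL_64 macro; a plain-C reading of that macro is sketched below, assuming a compiler with a 128-bit integer type (the patch relies on word128 for the same purpose). Again, the helper name is illustrative.

    typedef unsigned __int128 u128;

    /* Multiply two 130-bit values held as 44/44/42-bit limbs in place
     * (r *= a) and reduce modulo 2^130 - 5.  The (5 << 2) factors fold the
     * high cross-terms back down, since 2^130 == 5 (mod p) and the limbs
     * sit 44 bits apart. */
    static void mul_64_sketch(uint64_t r[3], const uint64_t a[3])
    {
        uint64_t s1 = a[1] * (5 << 2);
        uint64_t s2 = a[2] * (5 << 2);
        u128 d0, d1, d2;
        uint64_t c;

        d0 = (u128)r[0] * a[0] + (u128)r[1] * s2   + (u128)r[2] * s1;
        d1 = (u128)r[0] * a[1] + (u128)r[1] * a[0] + (u128)r[2] * s2;
        d2 = (u128)r[0] * a[2] + (u128)r[1] * a[1] + (u128)r[2] * a[0];

        /* Carry each product down to its limb width, folding the final
         * overflow back into the low limb via the factor of 5. */
        c = (uint64_t)(d0 >> 44); r[0] = (uint64_t)d0 & 0xfffffffffff;
        d1 += c; c = (uint64_t)(d1 >> 44); r[1] = (uint64_t)d1 & 0xfffffffffff;
        d2 += c; c = (uint64_t)(d2 >> 42); r[2] = (uint64_t)d2 & 0x3ffffffffff;
        r[0] += c * 5; c = r[0] >> 44; r[0] &= 0xfffffffffff;
        r[1] += c;
    }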