diff --git a/src/include.am b/src/include.am index 61f89f86d4..c3d8376a1d 100644 --- a/src/include.am +++ b/src/include.am @@ -922,6 +922,12 @@ if !BUILD_FIPS_RAND if BUILD_POLY1305 if BUILD_ARMASM src_libwolfssl@LIBSUFFIX@_la_SOURCES += wolfcrypt/src/port/arm/armv8-poly1305.c +src_libwolfssl@LIBSUFFIX@_la_SOURCES += wolfcrypt/src/port/arm/thumb2-poly1305.c +if BUILD_ARMASM_INLINE +src_libwolfssl@LIBSUFFIX@_la_SOURCES += wolfcrypt/src/port/arm/thumb2-poly1305-asm_c.c +else +src_libwolfssl@LIBSUFFIX@_la_SOURCES += wolfcrypt/src/port/arm/thumb2-poly1305-asm.S +endif !BUILD_ARMASM_INLINE endif if BUILD_RISCV_ASM src_libwolfssl@LIBSUFFIX@_la_SOURCES += wolfcrypt/src/port/riscv/riscv-64-poly1305.c diff --git a/wolfcrypt/src/poly1305.c b/wolfcrypt/src/poly1305.c index b4b5c0f7ea..48529d78c1 100644 --- a/wolfcrypt/src/poly1305.c +++ b/wolfcrypt/src/poly1305.c @@ -231,7 +231,8 @@ extern void poly1305_final_avx2(Poly1305* ctx, byte* mac); p[7] = (byte)(v >> 56); } #endif/* !WOLFSSL_ARMASM && !WOLFSSL_RISCV_ASM */ -#else /* if not 64 bit then use 32 bit */ +/* if not 64 bit then use 32 bit */ +#elif !defined(WOLFSSL_ARMASM) || !defined(__thumb__) static word32 U8TO32(const byte *p) { @@ -268,8 +269,8 @@ static WC_INLINE void u32tole64(const word32 inLe32, byte outLe64[8]) } -#if (!defined(WOLFSSL_ARMASM) || !defined(__aarch64__)) && \ - !defined(WOLFSSL_RISCV_ASM) +#if (!defined(WOLFSSL_ARMASM) || (!defined(__aarch64__) && \ + !defined(__thumb__))) && !defined(WOLFSSL_RISCV_ASM) /* This local function operates on a message with a given number of bytes with a given ctx pointer to a Poly1305 structure. 
@@ -788,7 +789,8 @@ int wc_Poly1305Final(Poly1305* ctx, byte* mac) return 0; } -#endif /* (!WOLFSSL_ARMASM || !__aarch64__) && !WOLFSSL_RISCV_ASM */ +#endif /* (!WOLFSSL_ARMASM || (!__aarch64__ && !__thumb__)) && + * !WOLFSSL_RISCV_ASM */ int wc_Poly1305Update(Poly1305* ctx, const byte* m, word32 bytes) @@ -883,8 +885,8 @@ int wc_Poly1305Update(Poly1305* ctx, const byte* m, word32 bytes) /* process full blocks */ if (bytes >= POLY1305_BLOCK_SIZE) { size_t want = ((size_t)bytes & ~((size_t)POLY1305_BLOCK_SIZE - 1)); -#if (!defined(WOLFSSL_ARMASM) || !defined(__aarch64__)) && \ - !defined(WOLFSSL_RISCV_ASM) +#if (!defined(WOLFSSL_ARMASM) || (!defined(__aarch64__) && \ + !defined(__thumb__))) && !defined(WOLFSSL_RISCV_ASM) int ret; ret = poly1305_blocks(ctx, m, want); if (ret != 0) diff --git a/wolfcrypt/src/port/arm/thumb2-poly1305-asm.S b/wolfcrypt/src/port/arm/thumb2-poly1305-asm.S new file mode 100644 index 0000000000..e7e8f16204 --- /dev/null +++ b/wolfcrypt/src/port/arm/thumb2-poly1305-asm.S @@ -0,0 +1,368 @@ +/* thumb2-poly1305-asm + * + * Copyright (C) 2006-2024 wolfSSL Inc. + * + * This file is part of wolfSSL. + * + * wolfSSL is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * wolfSSL is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA + */ + +/* Generated using (from wolfssl): + * cd ../scripts + * ruby ./poly1305/poly1305.rb thumb2 ../wolfssl/wolfcrypt/src/port/arm/thumb2-poly1305-asm.S + */ + +#ifdef HAVE_CONFIG_H + #include +#endif /* HAVE_CONFIG_H */ +#include + +#ifdef WOLFSSL_ARMASM +#if !defined(__aarch64__) && defined(__thumb__) +#ifndef WOLFSSL_ARMASM_INLINE + .thumb + .syntax unified +#ifdef HAVE_POLY1305 + .text + .align 4 + .globl poly1305_blocks_thumb2_16 + .type poly1305_blocks_thumb2_16, %function +poly1305_blocks_thumb2_16: + PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} + SUB sp, sp, #0x1c + CMP r2, #0x0 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BEQ L_poly1305_thumb2_16_done +#else + BEQ.N L_poly1305_thumb2_16_done +#endif + ADD lr, sp, #0xc + STM lr, {r0, r1, r2, r3} + /* Get h pointer */ + ADD lr, r0, #0x10 + LDM lr, {r4, r5, r6, r7, r8} +L_poly1305_thumb2_16_loop: + /* Add m to h */ + LDR r1, [sp, #16] + LDR r2, [r1] + LDR r3, [r1, #4] + LDR r9, [r1, #8] + LDR r10, [r1, #12] + LDR r11, [sp, #24] + ADDS r4, r4, r2 + ADCS r5, r5, r3 + ADCS r6, r6, r9 + ADCS r7, r7, r10 + ADD r1, r1, #0x10 + ADC r8, r8, r11 +#ifdef WOLFSSL_SP_NO_UMAAL + STM lr, {r4, r5, r6, r7, r8} +#else + /* h[0]-h[2] in r4-r6 for multiplication. 
*/ + STR r7, [lr, #12] + STR r8, [lr, #16] +#endif /* WOLFSSL_SP_NO_UMAAL */ + STR r1, [sp, #16] + LDR r1, [sp, #12] + /* Multiply h by r */ +#ifdef WOLFSSL_SP_NO_UMAAL + /* r0 = #0, r1 = r, lr = h, r2 = h[j], r3 = r[i] */ + LDR r3, [r1] + EOR r0, r0, r0 + /* r[0] * h[0] */ + /* h[0] in r4 */ + UMULL r4, r5, r3, r4 + /* r[0] * h[2] */ + /* h[2] in r6 */ + UMULL r6, r7, r3, r6 + /* r[0] * h[4] */ + /* h[4] in r8 */ + MUL r8, r3, r8 + /* r[0] * h[1] */ + LDR r2, [lr, #4] + MOV r12, r0 + UMLAL r5, r12, r3, r2 + /* r[0] * h[3] */ + LDR r2, [lr, #12] + ADDS r6, r6, r12 + ADC r7, r7, r0 + UMLAL r7, r8, r3, r2 + /* r[1] * h[0] */ + LDR r3, [r1, #4] + LDR r2, [lr] + MOV r12, r0 + UMLAL r5, r12, r3, r2 + /* r[1] * h[1] */ + LDR r2, [lr, #4] + ADDS r6, r6, r12 + ADC r12, r0, r0 + UMLAL r6, r12, r3, r2 + /* r[1] * h[2] */ + LDR r2, [lr, #8] + ADDS r7, r7, r12 + ADC r12, r0, r0 + UMLAL r7, r12, r3, r2 + /* r[1] * h[3] */ + LDR r2, [lr, #12] + ADDS r8, r8, r12 + ADC r9, r0, r0 + UMLAL r8, r9, r3, r2 + /* r[1] * h[4] */ + LDR r2, [lr, #16] + MLA r9, r3, r2, r9 + /* r[2] * h[0] */ + LDR r3, [r1, #8] + LDR r2, [lr] + MOV r12, r0 + UMLAL r6, r12, r3, r2 + /* r[2] * h[1] */ + LDR r2, [lr, #4] + ADDS r7, r7, r12 + ADC r12, r0, r0 + UMLAL r7, r12, r3, r2 + /* r[2] * h[2] */ + LDR r2, [lr, #8] + ADDS r8, r8, r12 + ADC r12, r0, r0 + UMLAL r8, r12, r3, r2 + /* r[2] * h[3] */ + LDR r2, [lr, #12] + ADDS r9, r9, r12 + ADC r10, r0, r0 + UMLAL r9, r10, r3, r2 + /* r[2] * h[4] */ + LDR r2, [lr, #16] + MLA r10, r3, r2, r10 + /* r[3] * h[0] */ + LDR r3, [r1, #12] + LDR r2, [lr] + MOV r12, r0 + UMLAL r7, r12, r3, r2 + /* r[3] * h[1] */ + LDR r2, [lr, #4] + ADDS r8, r8, r12 + ADC r12, r0, r0 + UMLAL r8, r12, r3, r2 + /* r[3] * h[2] */ + LDR r2, [lr, #8] + ADDS r9, r9, r12 + ADC r10, r10, r0 + UMLAL r9, r10, r3, r2 + /* r[3] * h[3] */ + LDR r2, [lr, #12] + MOV r11, r0 + UMLAL r10, r11, r3, r2 + /* r[3] * h[4] */ + LDR r2, [lr, #16] + MOV r12, r0 + MLA r11, r3, r2, r11 +#else + LDM r1, {r0, r1, r2, 
r3} + /* r[0] * h[0] */ + UMULL r10, r11, r0, r4 + /* r[1] * h[0] */ + UMULL r12, r7, r1, r4 + /* r[0] * h[1] */ + UMAAL r11, r12, r0, r5 + /* r[2] * h[0] */ + UMULL r8, r9, r2, r4 + /* r[1] * h[1] */ + UMAAL r12, r8, r1, r5 + /* r[0] * h[2] */ + UMAAL r12, r7, r0, r6 + /* r[3] * h[0] */ + UMAAL r8, r9, r3, r4 + STM sp, {r10, r11, r12} + /* r[2] * h[1] */ + UMAAL r7, r8, r2, r5 + /* Replace h[0] with h[3] */ + LDR r4, [lr, #12] + /* r[1] * h[2] */ + UMULL r10, r11, r1, r6 + /* r[2] * h[2] */ + UMAAL r8, r9, r2, r6 + /* r[0] * h[3] */ + UMAAL r7, r10, r0, r4 + /* r[3] * h[1] */ + UMAAL r8, r11, r3, r5 + /* r[1] * h[3] */ + UMAAL r8, r10, r1, r4 + /* r[3] * h[2] */ + UMAAL r9, r11, r3, r6 + /* r[2] * h[3] */ + UMAAL r9, r10, r2, r4 + /* Replace h[1] with h[4] */ + LDR r5, [lr, #16] + /* r[3] * h[3] */ + UMAAL r10, r11, r3, r4 + MOV r12, #0x0 + /* r[0] * h[4] */ + UMAAL r8, r12, r0, r5 + /* r[1] * h[4] */ + UMAAL r9, r12, r1, r5 + /* r[2] * h[4] */ + UMAAL r10, r12, r2, r5 + /* r[3] * h[4] */ + UMAAL r11, r12, r3, r5 + /* DONE */ + LDM sp, {r4, r5, r6} +#endif /* WOLFSSL_SP_NO_UMAAL */ + /* r12 will be zero because r is masked. */ + /* Load length */ + LDR r2, [sp, #20] + /* Reduce mod 2^130 - 5 */ + BIC r3, r8, #0x3 + AND r8, r8, #0x3 + ADDS r4, r4, r3 + LSR r3, r3, #2 + ADCS r5, r5, r9 + ORR r3, r3, r9, LSL #30 + ADCS r6, r6, r10 + LSR r9, r9, #2 + ADCS r7, r7, r11 + ORR r9, r9, r10, LSL #30 + ADC r8, r8, r12 + LSR r10, r10, #2 + ADDS r4, r4, r3 + ORR r10, r10, r11, LSL #30 + ADCS r5, r5, r9 + LSR r11, r11, #2 + ADCS r6, r6, r10 + ADCS r7, r7, r11 + ADC r8, r8, r12 + /* Sub 16 from length. */ + SUBS r2, r2, #0x10 + /* Store length. */ + STR r2, [sp, #20] + /* Loop again if more message to do. 
*/ +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BGT L_poly1305_thumb2_16_loop +#else + BGT.N L_poly1305_thumb2_16_loop +#endif + STM lr, {r4, r5, r6, r7, r8} +L_poly1305_thumb2_16_done: + ADD sp, sp, #0x1c + POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} + /* Cycle Count = 250 */ + .size poly1305_blocks_thumb2_16,.-poly1305_blocks_thumb2_16 + .text + .type L_poly1305_thumb2_rt, %object + .size L_poly1305_thumb2_rt, 16 + .align 4 +L_poly1305_thumb2_rt: + .word 0xfffffff + .word 0xffffffc + .word 0xffffffc + .word 0xffffffc + .text + .align 4 + .globl poly1305_set_key + .type poly1305_set_key, %function +poly1305_set_key: + PUSH {r4, r5, r6, r7, r8, r9, r10, lr} + /* Load and cache padding. */ + LDR r2, [r1, #16] + LDR r3, [r1, #20] + LDR r4, [r1, #24] + LDR r5, [r1, #28] + ADD r10, r0, #0x24 + STM r10, {r2, r3, r4, r5} + /* Load, mask and store r. */ + ADR r10, L_poly1305_thumb2_rt + LDR r2, [r1] + LDR r3, [r1, #4] + LDR r4, [r1, #8] + LDR r5, [r1, #12] + LDM r10, {r6, r7, r8, r9} + AND r2, r2, r6 + AND r3, r3, r7 + AND r4, r4, r8 + AND r5, r5, r9 + ADD r10, r0, #0x0 + STM r10, {r2, r3, r4, r5} + /* h (accumulator) = 0 */ + EOR r6, r6, r6 + EOR r7, r7, r7 + EOR r8, r8, r8 + EOR r9, r9, r9 + ADD r10, r0, #0x10 + EOR r5, r5, r5 + STM r10, {r5, r6, r7, r8, r9} + /* Zero leftover */ + STR r5, [r0, #52] + POP {r4, r5, r6, r7, r8, r9, r10, pc} + /* Cycle Count = 70 */ + .size poly1305_set_key,.-poly1305_set_key + .text + .align 4 + .globl poly1305_final + .type poly1305_final, %function +poly1305_final: + PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} + ADD r11, r0, #0x10 + LDM r11, {r2, r3, r4, r5, r6} + /* Add 5 and check for h larger than p. */ + ADDS r7, r2, #0x5 + ADCS r7, r3, #0x0 + ADCS r7, r4, #0x0 + ADCS r7, r5, #0x0 + ADC r7, r6, #0x0 + SUB r7, r7, #0x4 + LSR r7, r7, #31 + SUB r7, r7, #0x1 + AND r7, r7, #0x5 + /* Add 0/5 to h. 
*/ + ADDS r2, r2, r7 + ADCS r3, r3, #0x0 + ADCS r4, r4, #0x0 + ADC r5, r5, #0x0 + /* Add padding */ + ADD r11, r0, #0x24 + LDM r11, {r7, r8, r9, r10} + ADDS r2, r2, r7 + ADCS r3, r3, r8 + ADCS r4, r4, r9 + ADC r5, r5, r10 + /* Store MAC */ + STR r2, [r1] + STR r3, [r1, #4] + STR r4, [r1, #8] + STR r5, [r1, #12] + /* Zero out h. */ + EOR r2, r2, r2 + EOR r3, r3, r3 + EOR r4, r4, r4 + EOR r5, r5, r5 + EOR r6, r6, r6 + ADD r11, r0, #0x10 + STM r11, {r2, r3, r4, r5, r6} + /* Zero out r. */ + ADD r11, r0, #0x0 + STM r11, {r2, r3, r4, r5} + /* Zero out padding. */ + ADD r11, r0, #0x24 + STM r11, {r2, r3, r4, r5} + POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} + /* Cycle Count = 82 */ + .size poly1305_final,.-poly1305_final +#endif /* HAVE_POLY1305 */ +#endif /* !__aarch64__ && __thumb__ */ +#endif /* WOLFSSL_ARMASM */ + +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",%progbits +#endif +#endif /* !WOLFSSL_ARMASM_INLINE */ diff --git a/wolfcrypt/src/port/arm/thumb2-poly1305-asm_c.c b/wolfcrypt/src/port/arm/thumb2-poly1305-asm_c.c new file mode 100644 index 0000000000..6c174cb9a3 --- /dev/null +++ b/wolfcrypt/src/port/arm/thumb2-poly1305-asm_c.c @@ -0,0 +1,421 @@ +/* thumb2-poly1305-asm + * + * Copyright (C) 2006-2024 wolfSSL Inc. + * + * This file is part of wolfSSL. + * + * wolfSSL is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * wolfSSL is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA + */ + +/* Generated using (from wolfssl): + * cd ../scripts + * ruby ./poly1305/poly1305.rb thumb2 ../wolfssl/wolfcrypt/src/port/arm/thumb2-poly1305-asm.c + */ + +#ifdef HAVE_CONFIG_H + #include +#endif /* HAVE_CONFIG_H */ +#include +#include + +#ifdef WOLFSSL_ARMASM +#if !defined(__aarch64__) && defined(__thumb__) +#ifdef WOLFSSL_ARMASM_INLINE + +#ifdef __IAR_SYSTEMS_ICC__ +#define __asm__ asm +#define __volatile__ volatile +#define WOLFSSL_NO_VAR_ASSIGN_REG +#endif /* __IAR_SYSTEMS_ICC__ */ +#ifdef __KEIL__ +#define __asm__ __asm +#define __volatile__ volatile +#endif /* __KEIL__ */ +#ifdef HAVE_POLY1305 +#include + +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG +void poly1305_blocks_thumb2_16(Poly1305* ctx_p, const byte* m_p, word32 len_p, int notLast_p) +#else +void poly1305_blocks_thumb2_16(Poly1305* ctx, const byte* m, word32 len, int notLast) +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ +{ +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG + register Poly1305* ctx __asm__ ("r0") = (Poly1305*)ctx_p; + register const byte* m __asm__ ("r1") = (const byte*)m_p; + register word32 len __asm__ ("r2") = (word32)len_p; + register int notLast __asm__ ("r3") = (int)notLast_p; +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ + + __asm__ __volatile__ ( + "SUB sp, sp, #0x1c\n\t" + "CMP %[len], #0x0\n\t" +#if defined(__GNUC__) + "BEQ L_poly1305_thumb2_16_done_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BEQ.N L_poly1305_thumb2_16_done\n\t" +#else + "BEQ.N L_poly1305_thumb2_16_done_%=\n\t" +#endif + "ADD lr, sp, #0xc\n\t" + "STM lr, {%[ctx], %[m], %[len], %[notLast]}\n\t" + /* Get h pointer */ + "ADD lr, %[ctx], #0x10\n\t" + "LDM lr, {r4, r5, r6, r7, r8}\n\t" + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_poly1305_thumb2_16_loop:\n\t" +#else + 
"L_poly1305_thumb2_16_loop_%=:\n\t" +#endif + /* Add m to h */ + "LDR %[m], [sp, #16]\n\t" + "LDR %[len], [%[m]]\n\t" + "LDR %[notLast], [%[m], #4]\n\t" + "LDR r9, [%[m], #8]\n\t" + "LDR r10, [%[m], #12]\n\t" + "LDR r11, [sp, #24]\n\t" + "ADDS r4, r4, %[len]\n\t" + "ADCS r5, r5, %[notLast]\n\t" + "ADCS r6, r6, r9\n\t" + "ADCS r7, r7, r10\n\t" + "ADD %[m], %[m], #0x10\n\t" + "ADC r8, r8, r11\n\t" +#ifdef WOLFSSL_SP_NO_UMAAL + "STM lr, {r4, r5, r6, r7, r8}\n\t" +#else + /* h[0]-h[2] in r4-r6 for multiplication. */ + "STR r7, [lr, #12]\n\t" + "STR r8, [lr, #16]\n\t" +#endif /* WOLFSSL_SP_NO_UMAAL */ + "STR %[m], [sp, #16]\n\t" + "LDR %[m], [sp, #12]\n\t" + /* Multiply h by r */ +#ifdef WOLFSSL_SP_NO_UMAAL + /* r0 = #0, r1 = r, lr = h, r2 = h[j], r3 = r[i] */ + "LDR %[notLast], [%[m]]\n\t" + "EOR %[ctx], %[ctx], %[ctx]\n\t" + /* r[0] * h[0] */ + /* h[0] in r4 */ + "UMULL r4, r5, %[notLast], r4\n\t" + /* r[0] * h[2] */ + /* h[2] in r6 */ + "UMULL r6, r7, %[notLast], r6\n\t" + /* r[0] * h[4] */ + /* h[4] in r8 */ + "MUL r8, %[notLast], r8\n\t" + /* r[0] * h[1] */ + "LDR %[len], [lr, #4]\n\t" + "MOV r12, %[ctx]\n\t" + "UMLAL r5, r12, %[notLast], %[len]\n\t" + /* r[0] * h[3] */ + "LDR %[len], [lr, #12]\n\t" + "ADDS r6, r6, r12\n\t" + "ADC r7, r7, %[ctx]\n\t" + "UMLAL r7, r8, %[notLast], %[len]\n\t" + /* r[1] * h[0] */ + "LDR %[notLast], [%[m], #4]\n\t" + "LDR %[len], [lr]\n\t" + "MOV r12, %[ctx]\n\t" + "UMLAL r5, r12, %[notLast], %[len]\n\t" + /* r[1] * h[1] */ + "LDR %[len], [lr, #4]\n\t" + "ADDS r6, r6, r12\n\t" + "ADC r12, %[ctx], %[ctx]\n\t" + "UMLAL r6, r12, %[notLast], %[len]\n\t" + /* r[1] * h[2] */ + "LDR %[len], [lr, #8]\n\t" + "ADDS r7, r7, r12\n\t" + "ADC r12, %[ctx], %[ctx]\n\t" + "UMLAL r7, r12, %[notLast], %[len]\n\t" + /* r[1] * h[3] */ + "LDR %[len], [lr, #12]\n\t" + "ADDS r8, r8, r12\n\t" + "ADC r9, %[ctx], %[ctx]\n\t" + "UMLAL r8, r9, %[notLast], %[len]\n\t" + /* r[1] * h[4] */ + "LDR %[len], [lr, #16]\n\t" + "MLA r9, %[notLast], %[len], r9\n\t" + /* r[2] 
* h[0] */ + "LDR %[notLast], [%[m], #8]\n\t" + "LDR %[len], [lr]\n\t" + "MOV r12, %[ctx]\n\t" + "UMLAL r6, r12, %[notLast], %[len]\n\t" + /* r[2] * h[1] */ + "LDR %[len], [lr, #4]\n\t" + "ADDS r7, r7, r12\n\t" + "ADC r12, %[ctx], %[ctx]\n\t" + "UMLAL r7, r12, %[notLast], %[len]\n\t" + /* r[2] * h[2] */ + "LDR %[len], [lr, #8]\n\t" + "ADDS r8, r8, r12\n\t" + "ADC r12, %[ctx], %[ctx]\n\t" + "UMLAL r8, r12, %[notLast], %[len]\n\t" + /* r[2] * h[3] */ + "LDR %[len], [lr, #12]\n\t" + "ADDS r9, r9, r12\n\t" + "ADC r10, %[ctx], %[ctx]\n\t" + "UMLAL r9, r10, %[notLast], %[len]\n\t" + /* r[2] * h[4] */ + "LDR %[len], [lr, #16]\n\t" + "MLA r10, %[notLast], %[len], r10\n\t" + /* r[3] * h[0] */ + "LDR %[notLast], [%[m], #12]\n\t" + "LDR %[len], [lr]\n\t" + "MOV r12, %[ctx]\n\t" + "UMLAL r7, r12, %[notLast], %[len]\n\t" + /* r[3] * h[1] */ + "LDR %[len], [lr, #4]\n\t" + "ADDS r8, r8, r12\n\t" + "ADC r12, %[ctx], %[ctx]\n\t" + "UMLAL r8, r12, %[notLast], %[len]\n\t" + /* r[3] * h[2] */ + "LDR %[len], [lr, #8]\n\t" + "ADDS r9, r9, r12\n\t" + "ADC r10, r10, %[ctx]\n\t" + "UMLAL r9, r10, %[notLast], %[len]\n\t" + /* r[3] * h[3] */ + "LDR %[len], [lr, #12]\n\t" + "MOV r11, %[ctx]\n\t" + "UMLAL r10, r11, %[notLast], %[len]\n\t" + /* r[3] * h[4] */ + "LDR %[len], [lr, #16]\n\t" + "MOV r12, %[ctx]\n\t" + "MLA r11, %[notLast], %[len], r11\n\t" +#else + "LDM %[m], {%[ctx], %[m], %[len], %[notLast]}\n\t" + /* r[0] * h[0] */ + "UMULL r10, r11, %[ctx], r4\n\t" + /* r[1] * h[0] */ + "UMULL r12, r7, %[m], r4\n\t" + /* r[0] * h[1] */ + "UMAAL r11, r12, %[ctx], r5\n\t" + /* r[2] * h[0] */ + "UMULL r8, r9, %[len], r4\n\t" + /* r[1] * h[1] */ + "UMAAL r12, r8, %[m], r5\n\t" + /* r[0] * h[2] */ + "UMAAL r12, r7, %[ctx], r6\n\t" + /* r[3] * h[0] */ + "UMAAL r8, r9, %[notLast], r4\n\t" + "STM sp, {r10, r11, r12}\n\t" + /* r[2] * h[1] */ + "UMAAL r7, r8, %[len], r5\n\t" + /* Replace h[0] with h[3] */ + "LDR r4, [lr, #12]\n\t" + /* r[1] * h[2] */ + "UMULL r10, r11, %[m], r6\n\t" + /* r[2] * h[2] */ + 
"UMAAL r8, r9, %[len], r6\n\t" + /* r[0] * h[3] */ + "UMAAL r7, r10, %[ctx], r4\n\t" + /* r[3] * h[1] */ + "UMAAL r8, r11, %[notLast], r5\n\t" + /* r[1] * h[3] */ + "UMAAL r8, r10, %[m], r4\n\t" + /* r[3] * h[2] */ + "UMAAL r9, r11, %[notLast], r6\n\t" + /* r[2] * h[3] */ + "UMAAL r9, r10, %[len], r4\n\t" + /* Replace h[1] with h[4] */ + "LDR r5, [lr, #16]\n\t" + /* r[3] * h[3] */ + "UMAAL r10, r11, %[notLast], r4\n\t" + "MOV r12, #0x0\n\t" + /* r[0] * h[4] */ + "UMAAL r8, r12, %[ctx], r5\n\t" + /* r[1] * h[4] */ + "UMAAL r9, r12, %[m], r5\n\t" + /* r[2] * h[4] */ + "UMAAL r10, r12, %[len], r5\n\t" + /* r[3] * h[4] */ + "UMAAL r11, r12, %[notLast], r5\n\t" + /* DONE */ + "LDM sp, {r4, r5, r6}\n\t" +#endif /* WOLFSSL_SP_NO_UMAAL */ + /* r12 will be zero because r is masked. */ + /* Load length */ + "LDR %[len], [sp, #20]\n\t" + /* Reduce mod 2^130 - 5 */ + "BIC %[notLast], r8, #0x3\n\t" + "AND r8, r8, #0x3\n\t" + "ADDS r4, r4, %[notLast]\n\t" + "LSR %[notLast], %[notLast], #2\n\t" + "ADCS r5, r5, r9\n\t" + "ORR %[notLast], %[notLast], r9, LSL #30\n\t" + "ADCS r6, r6, r10\n\t" + "LSR r9, r9, #2\n\t" + "ADCS r7, r7, r11\n\t" + "ORR r9, r9, r10, LSL #30\n\t" + "ADC r8, r8, r12\n\t" + "LSR r10, r10, #2\n\t" + "ADDS r4, r4, %[notLast]\n\t" + "ORR r10, r10, r11, LSL #30\n\t" + "ADCS r5, r5, r9\n\t" + "LSR r11, r11, #2\n\t" + "ADCS r6, r6, r10\n\t" + "ADCS r7, r7, r11\n\t" + "ADC r8, r8, r12\n\t" + /* Sub 16 from length. */ + "SUBS %[len], %[len], #0x10\n\t" + /* Store length. */ + "STR %[len], [sp, #20]\n\t" + /* Loop again if more message to do. 
*/ +#if defined(__GNUC__) + "BGT L_poly1305_thumb2_16_loop_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BGT.N L_poly1305_thumb2_16_loop\n\t" +#else + "BGT.N L_poly1305_thumb2_16_loop_%=\n\t" +#endif + "STM lr, {r4, r5, r6, r7, r8}\n\t" + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_poly1305_thumb2_16_done:\n\t" +#else + "L_poly1305_thumb2_16_done_%=:\n\t" +#endif + "ADD sp, sp, #0x1c\n\t" + : [ctx] "+r" (ctx), [m] "+r" (m), [len] "+r" (len), [notLast] "+r" (notLast) + : + : "memory", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "lr", "cc" + ); +} + +XALIGNED(16) static const uint32_t L_poly1305_thumb2_rt[] = { + 0x0fffffff, 0x0ffffffc, 0x0ffffffc, 0x0ffffffc, +}; + +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG +void poly1305_set_key(Poly1305* ctx_p, const byte* key_p) +#else +void poly1305_set_key(Poly1305* ctx, const byte* key) +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ +{ +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG + register Poly1305* ctx __asm__ ("r0") = (Poly1305*)ctx_p; + register const byte* key __asm__ ("r1") = (const byte*)key_p; + register uint32_t* L_poly1305_thumb2_rt_c __asm__ ("r2") = (uint32_t*)&L_poly1305_thumb2_rt; +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ + + __asm__ __volatile__ ( + /* Load and cache padding. */ + "LDR r2, [%[key], #16]\n\t" + "LDR r3, [%[key], #20]\n\t" + "LDR r4, [%[key], #24]\n\t" + "LDR r5, [%[key], #28]\n\t" + "ADD r10, %[ctx], #0x24\n\t" + "STM r10, {r2, r3, r4, r5}\n\t" + /* Load, mask and store r. 
*/ + "MOV r10, %[L_poly1305_thumb2_rt]\n\t" + "LDR r2, [%[key]]\n\t" + "LDR r3, [%[key], #4]\n\t" + "LDR r4, [%[key], #8]\n\t" + "LDR r5, [%[key], #12]\n\t" + "LDM r10, {r6, r7, r8, r9}\n\t" + "AND r2, r2, r6\n\t" + "AND r3, r3, r7\n\t" + "AND r4, r4, r8\n\t" + "AND r5, r5, r9\n\t" + "ADD r10, %[ctx], #0x0\n\t" + "STM r10, {r2, r3, r4, r5}\n\t" + /* h (accumulator) = 0 */ + "EOR r6, r6, r6\n\t" + "EOR r7, r7, r7\n\t" + "EOR r8, r8, r8\n\t" + "EOR r9, r9, r9\n\t" + "ADD r10, %[ctx], #0x10\n\t" + "EOR r5, r5, r5\n\t" + "STM r10, {r5, r6, r7, r8, r9}\n\t" + /* Zero leftover */ + "STR r5, [%[ctx], #52]\n\t" +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG + : [ctx] "+r" (ctx), [key] "+r" (key), + [L_poly1305_thumb2_rt] "+r" (L_poly1305_thumb2_rt_c) + : + : "memory", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "cc" +#else + : [ctx] "+r" (ctx), [key] "+r" (key) + : [L_poly1305_thumb2_rt] "r" (L_poly1305_thumb2_rt) + : "memory", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "cc" +#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */ + ); +} + +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG +void poly1305_final(Poly1305* ctx_p, byte* mac_p) +#else +void poly1305_final(Poly1305* ctx, byte* mac) +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ +{ +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG + register Poly1305* ctx __asm__ ("r0") = (Poly1305*)ctx_p; + register byte* mac __asm__ ("r1") = (byte*)mac_p; +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ + + __asm__ __volatile__ ( + "ADD r11, %[ctx], #0x10\n\t" + "LDM r11, {r2, r3, r4, r5, r6}\n\t" + /* Add 5 and check for h larger than p. */ + "ADDS r7, r2, #0x5\n\t" + "ADCS r7, r3, #0x0\n\t" + "ADCS r7, r4, #0x0\n\t" + "ADCS r7, r5, #0x0\n\t" + "ADC r7, r6, #0x0\n\t" + "SUB r7, r7, #0x4\n\t" + "LSR r7, r7, #31\n\t" + "SUB r7, r7, #0x1\n\t" + "AND r7, r7, #0x5\n\t" + /* Add 0/5 to h. 
*/ + "ADDS r2, r2, r7\n\t" + "ADCS r3, r3, #0x0\n\t" + "ADCS r4, r4, #0x0\n\t" + "ADC r5, r5, #0x0\n\t" + /* Add padding */ + "ADD r11, %[ctx], #0x24\n\t" + "LDM r11, {r7, r8, r9, r10}\n\t" + "ADDS r2, r2, r7\n\t" + "ADCS r3, r3, r8\n\t" + "ADCS r4, r4, r9\n\t" + "ADC r5, r5, r10\n\t" + /* Store MAC */ + "STR r2, [%[mac]]\n\t" + "STR r3, [%[mac], #4]\n\t" + "STR r4, [%[mac], #8]\n\t" + "STR r5, [%[mac], #12]\n\t" + /* Zero out h. */ + "EOR r2, r2, r2\n\t" + "EOR r3, r3, r3\n\t" + "EOR r4, r4, r4\n\t" + "EOR r5, r5, r5\n\t" + "EOR r6, r6, r6\n\t" + "ADD r11, %[ctx], #0x10\n\t" + "STM r11, {r2, r3, r4, r5, r6}\n\t" + /* Zero out r. */ + "ADD r11, %[ctx], #0x0\n\t" + "STM r11, {r2, r3, r4, r5}\n\t" + /* Zero out padding. */ + "ADD r11, %[ctx], #0x24\n\t" + "STM r11, {r2, r3, r4, r5}\n\t" + : [ctx] "+r" (ctx), [mac] "+r" (mac) + : + : "memory", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "cc" + ); +} + +#endif /* HAVE_POLY1305 */ +#endif /* !__aarch64__ && __thumb__ */ +#endif /* WOLFSSL_ARMASM */ +#endif /* WOLFSSL_ARMASM_INLINE */ diff --git a/wolfcrypt/src/port/arm/thumb2-poly1305.c b/wolfcrypt/src/port/arm/thumb2-poly1305.c new file mode 100644 index 0000000000..a5f2d9ea34 --- /dev/null +++ b/wolfcrypt/src/port/arm/thumb2-poly1305.c @@ -0,0 +1,149 @@ +/* armv8-poly1305.c + * + * Copyright (C) 2006-2024 wolfSSL Inc. + * + * This file is part of wolfSSL. + * + * wolfSSL is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * wolfSSL is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
+ */
+
+#ifdef HAVE_CONFIG_H
+    #include <config.h>
+#endif
+
+#include <wolfssl/wolfcrypt/settings.h>
+#include <wolfssl/wolfcrypt/error-crypt.h>
+
+#ifdef WOLFSSL_ARMASM
+#ifdef __thumb__
+
+#ifdef HAVE_POLY1305
+#include <wolfssl/wolfcrypt/poly1305.h>
+#include <wolfssl/wolfcrypt/error-crypt.h>
+#include <wolfssl/wolfcrypt/logging.h>
+#include <wolfssl/wolfcrypt/cpuid.h>
+#ifdef NO_INLINE
+    #include <wolfssl/wolfcrypt/misc.h>
+#else
+    #define WOLFSSL_MISC_INCLUDED
+    #include <wolfcrypt/src/misc.c>
+#endif
+#ifdef CHACHA_AEAD_TEST
+    #include <stdio.h>
+#endif
+
+extern void poly1305_blocks_thumb2_16(Poly1305* ctx, const unsigned char* m,
+    word32 len, int notLast);
+
+/* Process 16 bytes of message at a time.
+ *
+ * @param [in] ctx    Poly1305 context.
+ * @param [in] m      Message to process.
+ * @param [in] bytes  Length of message in bytes.
+ */
+void poly1305_blocks_thumb2(Poly1305* ctx, const unsigned char* m,
+    size_t bytes)
+{
+    poly1305_blocks_thumb2_16(ctx, m, bytes, 1);
+}
+
+/* Process 16 bytes of message.
+ *
+ * @param [in] ctx  Poly1305 context.
+ * @param [in] m    Message to process.
+ */
+void poly1305_block_thumb2(Poly1305* ctx, const unsigned char* m)
+{
+    poly1305_blocks_thumb2_16(ctx, m, POLY1305_BLOCK_SIZE, 1);
+}
+
+extern void poly1305_set_key(Poly1305* ctx, const byte* key);
+
+/* Set the key for the Poly1305 operation.
+ *
+ * @param [in] ctx    Poly1305 context.
+ * @param [in] key    Key data to use.
+ * @param [in] keySz  Size of key in bytes. Must be 32.
+ * @return  0 on success.
+ * @return  BAD_FUNC_ARG when ctx or key is NULL or keySz is not 32.
+ */
+int wc_Poly1305SetKey(Poly1305* ctx, const byte* key, word32 keySz)
+{
+    int ret = 0;
+
+#ifdef CHACHA_AEAD_TEST
+    word32 k;
+    printf("Poly key used:\n");
+    if (key != NULL) {
+        for (k = 0; k < keySz; k++) {
+            printf("%02x", key[k]);
+            if ((k+1) % 8 == 0)
+                printf("\n");
+        }
+    }
+    printf("\n");
+#endif
+
+    /* Validate parameters.
*/
+    if ((ctx == NULL) || (key == NULL) || (keySz != 32)) {
+        ret = BAD_FUNC_ARG;
+    }
+
+    if (ret == 0) {
+        poly1305_set_key(ctx, key);
+    }
+
+    return ret;
+}
+
+extern void poly1305_final(Poly1305* ctx, byte* mac);
+
+/* Finalize the Poly1305 operation calculating the MAC.
+ *
+ * @param [in] ctx  Poly1305 context.
+ * @param [in] mac  Buffer to hold the MAC. Must be at least 16 bytes long.
+ * @return  0 on success.
+ * @return  BAD_FUNC_ARG when ctx or mac is NULL.
+ */
+int wc_Poly1305Final(Poly1305* ctx, byte* mac)
+{
+    int ret = 0;
+
+    /* Validate parameters. */
+    if ((ctx == NULL) || (mac == NULL)) {
+        ret = BAD_FUNC_ARG;
+    }
+
+    /* Process the remaining partial block - last block. */
+    if (ret == 0) {
+        if (ctx->leftover) {
+            size_t i = ctx->leftover;
+            ctx->buffer[i++] = 1;
+            for (; i < POLY1305_BLOCK_SIZE; i++) {
+                ctx->buffer[i] = 0;
+            }
+            poly1305_blocks_thumb2_16(ctx, ctx->buffer, POLY1305_BLOCK_SIZE,
+                0);
+        }
+
+        poly1305_final(ctx, mac);
+    }
+
+    return ret;
+}
+
+#endif /* HAVE_POLY1305 */
+#endif /* __thumb__ */
+#endif /* WOLFSSL_ARMASM */
diff --git a/wolfcrypt/test/test.c b/wolfcrypt/test/test.c
index 4103ce083c..535377393f 100644
--- a/wolfcrypt/test/test.c
+++ b/wolfcrypt/test/test.c
@@ -7857,8 +7857,7 @@ WOLFSSL_TEST_SUBROUTINE wc_test_ret_t poly1305_test(void)
     byte tag[16];
     Poly1305 enc;
 
-    WOLFSSL_SMALL_STACK_STATIC const byte msg1[] =
-    {
+    WOLFSSL_SMALL_STACK_STATIC const byte msg1[] = {
         0x43,0x72,0x79,0x70,0x74,0x6f,0x67,0x72,
         0x61,0x70,0x68,0x69,0x63,0x20,0x46,0x6f,
         0x72,0x75,0x6d,0x20,0x52,0x65,0x73,0x65,
@@ -7866,22 +7865,19 @@ WOLFSSL_TEST_SUBROUTINE wc_test_ret_t poly1305_test(void)
         0x75,0x70
     };
 
-    WOLFSSL_SMALL_STACK_STATIC const byte msg2[] =
-    {
+    WOLFSSL_SMALL_STACK_STATIC const byte msg2[] = {
         0x48,0x65,0x6c,0x6c,0x6f,0x20,0x77,0x6f,0x72,
         0x6c,0x64,0x21
     };
 
-    WOLFSSL_SMALL_STACK_STATIC const byte msg3[] =
-    {
+    WOLFSSL_SMALL_STACK_STATIC const byte msg3[] = {
         0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }; - WOLFSSL_SMALL_STACK_STATIC const byte msg4[] = - { + WOLFSSL_SMALL_STACK_STATIC const byte msg4[] = { 0xd3,0x1a,0x8d,0x34,0x64,0x8e,0x60,0xdb, 0x7b,0x86,0xaf,0xbc,0x53,0xef,0x7e,0xc2, 0xa4,0xad,0xed,0x51,0x29,0x6e,0x08,0xfe, @@ -7899,14 +7895,12 @@ WOLFSSL_TEST_SUBROUTINE wc_test_ret_t poly1305_test(void) 0x61,0x16 }; - WOLFSSL_SMALL_STACK_STATIC const byte msg5[] = - { + WOLFSSL_SMALL_STACK_STATIC const byte msg5[] = { 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, }; - WOLFSSL_SMALL_STACK_STATIC const byte msg6[] = - { + WOLFSSL_SMALL_STACK_STATIC const byte msg6[] = { 0xd3,0x1a,0x8d,0x34,0x64,0x8e,0x60,0xdb, 0x7b,0x86,0xaf,0xbc,0x53,0xef,0x7e,0xc2, 0xa4,0xad,0xed,0x51,0x29,0x6e,0x08,0xfe, @@ -7928,54 +7922,57 @@ WOLFSSL_TEST_SUBROUTINE wc_test_ret_t poly1305_test(void) 0xe5,0x76,0xd2,0x65,0x86,0xce,0xc6,0x4b, 0x61,0x16 }; + WOLFSSL_SMALL_STACK_STATIC const byte msg7[] = { + 0xe8,0x8c,0x85,0x03,0x43,0xaf,0xa7,0x85, + 0x21,0x6b,0xc3,0x45,0xc4,0x53,0x98,0xf8, + 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, + 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, + }; - byte additional[] = - { + byte additional[] = { 0x50,0x51,0x52,0x53,0xc0,0xc1,0xc2,0xc3, 0xc4,0xc5,0xc6,0xc7 }; - WOLFSSL_SMALL_STACK_STATIC const byte correct0[] = - { + WOLFSSL_SMALL_STACK_STATIC const byte correct0[] = { 0x01,0x03,0x80,0x8a,0xfb,0x0d,0xb2,0xfd, 0x4a,0xbf,0xf6,0xaf,0x41,0x49,0xf5,0x1b }; - WOLFSSL_SMALL_STACK_STATIC const byte correct1[] = - { + WOLFSSL_SMALL_STACK_STATIC const byte correct1[] = { 0xa8,0x06,0x1d,0xc1,0x30,0x51,0x36,0xc6, 0xc2,0x2b,0x8b,0xaf,0x0c,0x01,0x27,0xa9 }; - WOLFSSL_SMALL_STACK_STATIC const byte correct2[] = - { + WOLFSSL_SMALL_STACK_STATIC const byte correct2[] = { 0xa6,0xf7,0x45,0x00,0x8f,0x81,0xc9,0x16, 0xa2,0x0d,0xcc,0x74,0xee,0xf2,0xb2,0xf0 }; - WOLFSSL_SMALL_STACK_STATIC const byte correct3[] = - { + 
WOLFSSL_SMALL_STACK_STATIC const byte correct3[] = { 0x49,0xec,0x78,0x09,0x0e,0x48,0x1e,0xc6, 0xc2,0x6b,0x33,0xb9,0x1c,0xcc,0x03,0x07 }; - WOLFSSL_SMALL_STACK_STATIC const byte correct4[] = - { + WOLFSSL_SMALL_STACK_STATIC const byte correct4[] = { 0x1a,0xe1,0x0b,0x59,0x4f,0x09,0xe2,0x6a, 0x7e,0x90,0x2e,0xcb,0xd0,0x60,0x06,0x91 }; - WOLFSSL_SMALL_STACK_STATIC const byte correct5[] = - { + WOLFSSL_SMALL_STACK_STATIC const byte correct5[] = { 0x03,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, }; - WOLFSSL_SMALL_STACK_STATIC const byte correct6[] = - { + WOLFSSL_SMALL_STACK_STATIC const byte correct6[] = { 0xea,0x11,0x5c,0x4f,0xd0,0xc0,0x10,0xae, 0xf7,0xdf,0xda,0x77,0xa2,0xe9,0xaf,0xca }; + WOLFSSL_SMALL_STACK_STATIC const byte correct7[] = { + 0x14,0x00,0x00,0x88,0x5c,0x00,0x00,0x88, + 0x5c,0x00,0x00,0x88,0x5c,0x00,0x00,0x88 + }; + WOLFSSL_SMALL_STACK_STATIC const byte key[] = { 0x85,0xd6,0xbe,0x78,0x57,0x55,0x6d,0x33, @@ -8005,17 +8002,25 @@ WOLFSSL_TEST_SUBROUTINE wc_test_ret_t poly1305_test(void) 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }; - const byte* msgs[] = {NULL, msg1, msg2, msg3, msg5, msg6}; + WOLFSSL_SMALL_STACK_STATIC const byte key7[] = { + 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, + 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, + 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, + 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff + }; + + const byte* msgs[] = {NULL, msg1, msg2, msg3, msg5, msg6, msg7}; word32 szm[] = {0, sizeof(msg1), sizeof(msg2), - sizeof(msg3), sizeof(msg5), sizeof(msg6)}; - const byte* keys[] = {key, key, key2, key2, key5, key}; + sizeof(msg3), sizeof(msg5), sizeof(msg6), + sizeof(msg7)}; + const byte* keys[] = {key, key, key2, key2, key5, key, key7}; const byte* tests[] = {correct0, correct1, correct2, correct3, correct5, - correct6}; + correct6, correct7}; int i; wc_test_ret_t ret = 0; WOLFSSL_ENTER("poly1305_test"); - for (i = 0; i < 6; i++) { + for (i = 0; i < 7; i++) { ret = wc_Poly1305SetKey(&enc, keys[i], 32); if (ret 
!= 0) return WC_TEST_RET_ENC_I(i); diff --git a/wolfssl/wolfcrypt/chacha.h b/wolfssl/wolfcrypt/chacha.h index 4cdcb8694f..987dc9fb14 100644 --- a/wolfssl/wolfcrypt/chacha.h +++ b/wolfssl/wolfcrypt/chacha.h @@ -81,11 +81,11 @@ typedef struct ChaCha { /* vpshufd reads 16 bytes but we only use bottom 4. */ byte extra[12]; #endif + word32 left; /* number of bytes leftover */ #if defined(USE_INTEL_CHACHA_SPEEDUP) || defined(WOLFSSL_ARMASM) || \ defined(WOLFSSL_RISCV_ASM) word32 over[CHACHA_CHUNK_WORDS]; #endif - word32 left; /* number of bytes leftover */ } ChaCha; /** diff --git a/wolfssl/wolfcrypt/poly1305.h b/wolfssl/wolfcrypt/poly1305.h index a765a8775d..0ac04c5de6 100644 --- a/wolfssl/wolfcrypt/poly1305.h +++ b/wolfssl/wolfcrypt/poly1305.h @@ -98,6 +98,12 @@ typedef struct Poly1305 { word64 leftover; unsigned char buffer[POLY1305_BLOCK_SIZE]; unsigned char finished; +#elif defined(WOLFSSL_ARMASM) && defined(__thumb__) + word32 r[4]; + word32 h[5]; + word32 pad[4]; + word32 leftover; + unsigned char buffer[POLY1305_BLOCK_SIZE]; #elif defined(WOLFSSL_RISCV_ASM) word64 r[2]; #ifdef WOLFSSL_RISCV_VECTOR @@ -146,16 +152,25 @@ WOLFSSL_API int wc_Poly1305_MAC(Poly1305* ctx, const byte* additional, #define poly1305_block poly1305_block_aarch64 void poly1305_blocks_aarch64(Poly1305* ctx, const unsigned char *m, - size_t bytes); + size_t bytes); void poly1305_block_aarch64(Poly1305* ctx, const unsigned char *m); #endif +#if defined(__thumb__ ) && defined(WOLFSSL_ARMASM) +#define poly1305_blocks poly1305_blocks_thumb2 +#define poly1305_block poly1305_block_thumb2 + +void poly1305_blocks_thumb2(Poly1305* ctx, const unsigned char *m, + size_t bytes); +void poly1305_block_thumb2(Poly1305* ctx, const unsigned char *m); +#endif + #if defined(WOLFSSL_RISCV_ASM) #define poly1305_blocks poly1305_blocks_riscv64 #define poly1305_block poly1305_block_riscv64 void poly1305_blocks_riscv64(Poly1305* ctx, const unsigned char *m, - size_t bytes); + size_t bytes); void 
poly1305_block_riscv64(Poly1305* ctx, const unsigned char *m); #endif