/* $FreeBSD$ */
/* Do not modify. This file is auto-generated from armv4-gf2m.pl. */
#include "arm_arch.h"

.text
.code	32

@ -----------------------------------------------------------------------
@ mul_1x1_ialu — 32x32 -> 64-bit carry-less (GF(2)[x]) multiplication
@ using a caller-provided 8-entry lookup table on the stack.
@
@ Internal helper with a custom, non-AAPCS register contract:
@ In:    r0  = b (32-bit polynomial)
@        r1  = a (32-bit polynomial)
@        r12 = 7<<2 (3-bit slice mask pre-scaled to tab[] word offsets)
@        sp  -> 32 bytes of scratch for tab[8], allocated by the caller
@ Out:   r5  = low 32 bits, r4 = high 32 bits of the product a·b
@ Clobbers: r4-r9, flags.  r0-r3, r12 are left intact for the caller.
@
@ tab[i] holds (a & 0x3fffffff) multiplied by the 3-bit polynomial i;
@ the two top bits of a are handled separately at the end (eorne pairs)
@ so table entries never overflow 32 bits by more than the two guard
@ bits recovered via the lsr#29/lsr#26/... accumulations into r4.
@ -----------------------------------------------------------------------
.type	mul_1x1_ialu,%function
.align	5
mul_1x1_ialu:
	@ Build tab[0..7] = {0,a1,a2,a1^a2,a4,a1^a4,a2^a4,a1^a2^a4},
	@ i.e. every GF(2) multiple of a1 by a 3-bit polynomial.
	mov	r4,#0
	bic	r5,r1,#3<<30		@ a1=a&0x3fffffff
	str	r4,[sp,#0]		@ tab[0]=0
	add	r6,r5,r5		@ a2=a1<<1
	str	r5,[sp,#4]		@ tab[1]=a1
	eor	r7,r5,r6		@ a1^a2
	str	r6,[sp,#8]		@ tab[2]=a2
	mov	r8,r5,lsl#2		@ a4=a1<<2
	str	r7,[sp,#12]		@ tab[3]=a1^a2
	eor	r9,r5,r8		@ a1^a4
	str	r8,[sp,#16]		@ tab[4]=a4
	eor	r4,r6,r8		@ a2^a4
	str	r9,[sp,#20]		@ tab[5]=a1^a4
	eor	r7,r7,r8		@ a1^a2^a4
	str	r4,[sp,#24]		@ tab[6]=a2^a4
	and	r8,r12,r0,lsl#2
	str	r7,[sp,#28]		@ tab[7]=a1^a2^a4

	@ Walk b three bits at a time, xor-accumulating tab[b>>i & 7]<<i
	@ into the 64-bit result r4:r5.  Loads are interleaved with the
	@ eor/shift arithmetic to hide load-use latency.
	and	r9,r12,r0,lsr#1
	ldr	r5,[sp,r8]		@ tab[b       & 0x7]
	and	r8,r12,r0,lsr#4
	ldr	r7,[sp,r9]		@ tab[b >>  3 & 0x7]
	and	r9,r12,r0,lsr#7
	ldr	r6,[sp,r8]		@ tab[b >>  6 & 0x7]
	eor	r5,r5,r7,lsl#3		@ stall
	mov	r4,r7,lsr#29
	ldr	r7,[sp,r9]		@ tab[b >>  9 & 0x7]

	and	r8,r12,r0,lsr#10
	eor	r5,r5,r6,lsl#6
	eor	r4,r4,r6,lsr#26
	ldr	r6,[sp,r8]		@ tab[b >> 12 & 0x7]

	and	r9,r12,r0,lsr#13
	eor	r5,r5,r7,lsl#9
	eor	r4,r4,r7,lsr#23
	ldr	r7,[sp,r9]		@ tab[b >> 15 & 0x7]

	and	r8,r12,r0,lsr#16
	eor	r5,r5,r6,lsl#12
	eor	r4,r4,r6,lsr#20
	ldr	r6,[sp,r8]		@ tab[b >> 18 & 0x7]

	and	r9,r12,r0,lsr#19
	eor	r5,r5,r7,lsl#15
	eor	r4,r4,r7,lsr#17
	ldr	r7,[sp,r9]		@ tab[b >> 21 & 0x7]

	and	r8,r12,r0,lsr#22
	eor	r5,r5,r6,lsl#18
	eor	r4,r4,r6,lsr#14
	ldr	r6,[sp,r8]		@ tab[b >> 24 & 0x7]

	and	r9,r12,r0,lsr#25
	eor	r5,r5,r7,lsl#21
	eor	r4,r4,r7,lsr#11
	ldr	r7,[sp,r9]		@ tab[b >> 27 & 0x7]

	@ Fold in the two top bits of a (bits 30 and 31, masked out of
	@ a1 above): if set, xor in b shifted to the matching position.
	tst	r1,#1<<30
	and	r8,r12,r0,lsr#28
	eor	r5,r5,r6,lsl#24
	eor	r4,r4,r6,lsr#8
	ldr	r6,[sp,r8]		@ tab[b >> 30      ]

	eorne	r5,r5,r0,lsl#30
	eorne	r4,r4,r0,lsr#2
	tst	r1,#1<<31
	eor	r5,r5,r7,lsl#27
	eor	r4,r4,r7,lsr#5
	eorne	r5,r5,r0,lsl#31
	eorne	r4,r4,r0,lsr#1
	eor	r5,r5,r6,lsl#30
	eor	r4,r4,r6,lsr#2

	mov	pc,lr
.size	mul_1x1_ialu,.-mul_1x1_ialu
@ -----------------------------------------------------------------------
@ void bn_GF2m_mul_2x2(BN_ULONG r[4], BN_ULONG a1, BN_ULONG a0,
@                      BN_ULONG b1, BN_ULONG b0);
@
@ 64x64 -> 128-bit carry-less multiplication of the GF(2)[x] polynomials
@ a = a1<<32|a0 and b = b1<<32|b0; the four 32-bit result words are
@ stored little-endian-word-order at r[0..3].
@
@ Integer path: Karatsuba — three calls to mul_1x1_ialu (a1·b1, a0·b0,
@ (a1^a0)·(b1^b0)) recombined with xors.  NEON path: a single
@ vmull.p8-based multiply taken when the runtime capability word has
@ bit 0 set (presumably the NEON feature bit of OPENSSL_armcap_P —
@ NOTE(review): confirm against arm_arch.h).
@ -----------------------------------------------------------------------
.global	bn_GF2m_mul_2x2
.type	bn_GF2m_mul_2x2,%function
.align	5
bn_GF2m_mul_2x2:
#if __ARM_MAX_ARCH__>=7
	ldr	r12,.LOPENSSL_armcap
.Lpic:	ldr	r12,[pc,r12]		@ PC-relative load of OPENSSL_armcap_P
	tst	r12,#1
	bne	.LNEON
#endif
	stmdb	sp!,{r4-r10,lr}
	mov	r10,r0			@ reassign 1st argument
	mov	r0,r3			@ r0=b1
	ldr	r3,[sp,#32]		@ load b0 (5th arg, above the 8 saved regs)
	mov	r12,#7<<2		@ slice mask expected by mul_1x1_ialu
	sub	sp,sp,#32		@ allocate tab[8]

	bl	mul_1x1_ialu		@ a1·b1
	str	r5,[r10,#8]		@ high half of result: r[2]:r[3]
	str	r4,[r10,#12]

	@ Three-xor swaps: exchange r0<->r3 (b1<->b0) and r1<->r2 (a1<->a0)
	@ without a scratch register.
	eor	r0,r0,r3		@ flip b0 and b1
	eor	r1,r1,r2		@ flip a0 and a1
	eor	r3,r3,r0
	eor	r2,r2,r1
	eor	r0,r0,r3
	eor	r1,r1,r2
	bl	mul_1x1_ialu		@ a0·b0
	str	r5,[r10]		@ low half of result: r[0]:r[1]
	str	r4,[r10,#4]

	eor	r1,r1,r2		@ r1 = a1^a0
	eor	r0,r0,r3		@ r0 = b1^b0
	bl	mul_1x1_ialu		@ (a1+a0)·(b1+b0)
	@ Karatsuba recombination: middle term = (a1+a0)(b1+b0) xor a1·b1
	@ xor a0·b0, added (xored) into the middle words r[1]:r[2].
	ldmia	r10,{r6-r9}		@ r6:r7 = r[0]:r[1], r8:r9 = r[2]:r[3]
	eor	r5,r5,r4
	eor	r4,r4,r7
	eor	r5,r5,r6
	eor	r4,r4,r8
	eor	r5,r5,r9
	eor	r4,r4,r9
	str	r4,[r10,#8]
	eor	r5,r5,r4
	add	sp,sp,#32		@ destroy tab[8]
	str	r5,[r10,#4]

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r10,pc}
#else
	ldmia	sp!,{r4-r10,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	.word	0xe12fff1e		@ interoperable with Thumb ISA:-)
#endif
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

@ NEON path: one 64x64 carry-less multiply built from eight 8x8
@ polynomial multiplies (vmull.p8) of A against byte-rotated copies of
@ B; the partial products are masked (d28/d29/d30), folded, and
@ byte-shifted (vext.8) into place before the final accumulation.
.align	5
.LNEON:
	ldr	r12, [sp]		@ 5th argument (b0)
	vmov	d26, r2, r1		@ A  = a0 | a1<<32
	vmov	d27, r12, r3		@ B  = b0 | b1<<32
	vmov.i64	d28, #0x0000ffffffffffff	@ masks for the
	vmov.i64	d29, #0x00000000ffffffff	@ partial-product
	vmov.i64	d30, #0x000000000000ffff	@ corrections

	vext.8	d2, d26, d26, #1	@ A1
	vmull.p8	q1, d2, d27		@ F = A1*B
	vext.8	d0, d27, d27, #1	@ B1
	vmull.p8	q0, d26, d0		@ E = A*B1
	vext.8	d4, d26, d26, #2	@ A2
	vmull.p8	q2, d4, d27		@ H = A2*B
	vext.8	d16, d27, d27, #2	@ B2
	vmull.p8	q8, d26, d16		@ G = A*B2
	vext.8	d6, d26, d26, #3	@ A3
	veor	q1, q1, q0		@ L = E + F
	vmull.p8	q3, d6, d27		@ J = A3*B
	vext.8	d0, d27, d27, #3	@ B3
	veor	q2, q2, q8		@ M = G + H
	vmull.p8	q0, d26, d0		@ I = A*B3
	veor	d2, d2, d3		@ t0 = (L) (P0 + P1) << 8
	vand	d3, d3, d28
	vext.8	d16, d27, d27, #4	@ B4
	veor	d4, d4, d5		@ t1 = (M) (P2 + P3) << 16
	vand	d5, d5, d29
	vmull.p8	q8, d26, d16		@ K = A*B4
	veor	q3, q3, q0		@ N = I + J
	veor	d2, d2, d3
	veor	d4, d4, d5
	veor	d6, d6, d7		@ t2 = (N) (P4 + P5) << 24
	vand	d7, d7, d30
	vext.8	q1, q1, q1, #15
	veor	d16, d16, d17		@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d17, #0
	vext.8	q2, q2, q2, #14
	veor	d6, d6, d7
	vmull.p8	q0, d26, d27		@ D = A*B
	vext.8	q8, q8, q8, #12
	vext.8	q3, q3, q3, #13
	veor	q1, q1, q2
	veor	q3, q3, q8
	veor	q0, q0, q1
	veor	q0, q0, q3

	vst1.32	{q0}, [r0]		@ store 128-bit product at r[0..3]
	bx	lr			@ bx lr
#endif
.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
#if __ARM_MAX_ARCH__>=7
.align	5
@ Literal pool: offset of OPENSSL_armcap_P relative to .Lpic+8 (ARM
@ PC reads as instruction address + 8 in ARM state).
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-(.Lpic+8)
#endif
.asciz	"GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.align	5

#if __ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
#endif