• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/ap/gpl/openssl/crypto/ec/asm/

Lines matching references to: mov

130 	mov	8*0($a_ptr), $a0
131 mov 8*1($a_ptr), $a1
133 mov 8*2($a_ptr), $a2
135 mov 8*3($a_ptr), $a3
137 mov $a0, $t0
140 mov $a1, $t1
144 mov $a2, $t2
147 mov $a3, $t3
153 mov $a0, 8*0($r_ptr)
155 mov $a1, 8*1($r_ptr)
157 mov $a2, 8*2($r_ptr)
158 mov $a3, 8*3($r_ptr)
174 mov 8*0($a_ptr), $a0
175 mov 8*1($a_ptr), $a1
176 mov 8*2($a_ptr), $a2
177 mov $a0, $t0
178 mov 8*3($a_ptr), $a3
181 mov $a1, $t1
184 mov $a2, $t2
187 mov $a3, $t3
199 mov $a1, $t0 # a0:a3>>1
202 mov $a2, $t1
206 mov $a3, $t2
215 mov $a0, 8*0($r_ptr)
216 mov $a1, 8*1($r_ptr)
217 mov $a2, 8*2($r_ptr)
218 mov $a3, 8*3($r_ptr)
234 mov 8*0($a_ptr), $a0
236 mov 8*1($a_ptr), $a1
238 mov 8*2($a_ptr), $a2
240 mov 8*3($a_ptr), $a3
241 mov $a0, $t0
244 mov $a1, $t1
248 mov $a2, $t2
251 mov $a3, $t3
263 mov $a0, $t0
266 mov $a1, $t1
270 mov $a2, $t2
273 mov $a3, $t3
279 mov $a0, 8*0($r_ptr)
281 mov $a1, 8*1($r_ptr)
283 mov $a2, 8*2($r_ptr)
284 mov $a3, 8*3($r_ptr)
300 mov 8*0($a_ptr), $a0
302 mov 8*1($a_ptr), $a1
303 mov 8*2($a_ptr), $a2
304 mov 8*3($a_ptr), $a3
309 mov $a0, $t0
312 mov $a1, $t1
316 mov $a2, $t2
319 mov $a3, $t3
325 mov $a0, 8*0($r_ptr)
327 mov $a1, 8*1($r_ptr)
329 mov $a2, 8*2($r_ptr)
330 mov $a3, 8*3($r_ptr)
346 mov 8*0($a_ptr), $a0
348 mov 8*1($a_ptr), $a1
349 mov 8*2($a_ptr), $a2
350 mov 8*3($a_ptr), $a3
355 mov $a0, $t0
358 mov $a1, $t1
362 mov $a2, $t2
365 mov $a3, $t3
371 mov $a0, 8*0($r_ptr)
373 mov $a1, 8*1($r_ptr)
375 mov $a2, 8*2($r_ptr)
376 mov $a3, 8*3($r_ptr)
401 mov $a0, $t0
404 mov $a1, $t1
408 mov $a2, $t2
411 mov $a3, $t3
417 mov $a0, 8*0($r_ptr)
419 mov $a1, 8*1($r_ptr)
421 mov $a2, 8*2($r_ptr)
422 mov $a3, 8*3($r_ptr)
447 mov \$0x80100, %ecx
467 mov \$0x80100, %ecx
484 mov $b_org, $b_ptr
485 mov 8*0($b_org), %rax
486 mov 8*0($a_ptr), $acc1
487 mov 8*1($a_ptr), $acc2
488 mov 8*2($a_ptr), $acc3
489 mov 8*3($a_ptr), $acc4
498 mov $b_org, $b_ptr
499 mov 8*0($b_org), %rdx
500 mov 8*0($a_ptr), $acc1
501 mov 8*1($a_ptr), $acc2
502 mov 8*2($a_ptr), $acc3
503 mov 8*3($a_ptr), $acc4
524 mov %rax, $t1
526 mov .Lpoly+8*1(%rip),$poly1
527 mov %rax, $acc0
528 mov $t1, %rax
529 mov %rdx, $acc1
532 mov .Lpoly+8*3(%rip),$poly3
534 mov $t1, %rax
536 mov %rdx, $acc2
540 mov $t1, %rax
542 mov %rdx, $acc3
546 mov $acc0, %rax
549 mov %rdx, $acc4
560 mov $acc0, $t1
567 mov 8*1($b_ptr), %rax
574 mov %rax, $t1
577 mov $t1, %rax
579 mov %rdx, $t0
585 mov $t1, %rax
587 mov %rdx, $t0
593 mov $t1, %rax
595 mov %rdx, $t0
601 mov $acc1, %rax
607 mov $acc1, $t1
614 mov 8*2($b_ptr), %rax
621 mov %rax, $t1
624 mov $t1, %rax
626 mov %rdx, $t0
632 mov $t1, %rax
634 mov %rdx, $t0
640 mov $t1, %rax
642 mov %rdx, $t0
648 mov $acc2, %rax
654 mov $acc2, $t1
661 mov 8*3($b_ptr), %rax
668 mov %rax, $t1
671 mov $t1, %rax
673 mov %rdx, $t0
679 mov $t1, %rax
681 mov %rdx, $t0
687 mov $t1, %rax
689 mov %rdx, $t0
695 mov $acc3, %rax
701 mov $acc3, $t1
707 mov $acc4, $t0
710 mov $acc5, $t1
716 mov $acc0, $t2
719 mov $acc1, $t3
725 mov $acc4, 8*0($r_ptr)
727 mov $acc5, 8*1($r_ptr)
729 mov $acc0, 8*2($r_ptr)
730 mov $acc1, 8*3($r_ptr)
748 mov \$0x80100, %ecx
764 mov 8*0($a_ptr), %rax
765 mov 8*1($a_ptr), $acc6
766 mov 8*2($a_ptr), $acc7
767 mov 8*3($a_ptr), $acc0
776 mov 8*0($a_ptr), %rdx
777 mov 8*1($a_ptr), $acc6
778 mov 8*2($a_ptr), $acc7
779 mov 8*3($a_ptr), $acc0
798 mov %rax, $acc5
800 mov %rax, $acc1
801 mov $acc7, %rax
802 mov %rdx, $acc2
806 mov $acc0, %rax
808 mov %rdx, $acc3
812 mov $acc7, %rax
814 mov %rdx, $acc4
819 mov $acc0, %rax
821 mov %rdx, $t1
825 mov $acc0, %rax
828 mov %rdx, $acc5
835 mov 8*0($a_ptr), %rax
836 mov %rdx, $acc6
848 mov %rax, $acc0
849 mov 8*1($a_ptr), %rax
850 mov %rdx, $t0
855 mov 8*2($a_ptr), %rax
857 mov %rdx, $t0
862 mov 8*3($a_ptr), %rax
864 mov %rdx, $t0
869 mov $acc0, %rax
872 mov .Lpoly+8*1(%rip), $a_ptr
873 mov .Lpoly+8*3(%rip), $t1
878 mov $acc0, $t0
885 mov $acc1, %rax
890 mov $acc1, $t0
892 mov %rdx, $acc0
898 mov $acc2, %rax
903 mov $acc2, $t0
905 mov %rdx, $acc1
911 mov $acc3, %rax
916 mov $acc3, $t0
918 mov %rdx, $acc2
931 mov $acc4, $acc0
934 mov $acc5, $acc1
938 mov $acc6, $acc2
941 mov $acc7, $t0
947 mov $acc4, 8*0($r_ptr)
949 mov $acc5, 8*1($r_ptr)
951 mov $acc6, 8*2($r_ptr)
952 mov $acc7, 8*3($r_ptr)
967 mov \$32, $poly1
970 mov .Lpoly+8*3(%rip), $poly3
973 mov $acc0, %rdx
986 mov 8*1($b_ptr), %rdx
1007 mov $acc1, %rdx
1023 mov 8*2($b_ptr), %rdx
1044 mov $acc2, %rdx
1060 mov 8*3($b_ptr), %rdx
1081 mov $acc3, %rdx
1097 mov $acc4, $t2
1098 mov .Lpoly+8*1(%rip), $poly1
1100 mov $acc5, $t3
1107 mov $acc0, $t0
1111 mov $acc1, $t1
1117 mov $acc4, 8*0($r_ptr)
1119 mov $acc5, 8*1($r_ptr)
1121 mov $acc0, 8*2($r_ptr)
1122 mov $acc1, 8*3($r_ptr)
1135 mov $acc6, %rdx
1146 mov $acc7, %rdx
1153 mov 8*0+128($a_ptr), %rdx
1161 mov 8*1+128($a_ptr), %rdx
1166 mov 8*2+128($a_ptr), %rdx
1172 mov 8*3+128($a_ptr), %rdx
1176 mov \$32, $a_ptr
1180 mov $acc0, %rdx
1185 mov .Lpoly+8*3(%rip), $t1
1192 mov $acc1, %rdx
1203 mov $acc2, %rdx
1214 mov $acc3, %rdx
1230 mov .Lpoly+8*1(%rip), $a_ptr
1232 mov $acc4, $acc0
1235 mov $acc5, $acc1
1240 mov $acc6, $acc2
1243 mov $acc7, $acc3
1249 mov $acc4, 8*0($r_ptr)
1251 mov $acc5, 8*1($r_ptr)
1253 mov $acc6, 8*2($r_ptr)
1254 mov $acc7, 8*3($r_ptr)
1280 mov 8*0($in_ptr), %rax
1281 mov .Lpoly+8*3(%rip), $t2
1282 mov 8*1($in_ptr), $acc1
1283 mov 8*2($in_ptr), $acc2
1284 mov 8*3($in_ptr), $acc3
1285 mov %rax, $acc0
1286 mov .Lpoly+8*1(%rip), $t1
1290 mov %rax, $t0
1297 mov $acc1, %rax
1302 mov $acc1, $t0
1304 mov %rdx, $acc0
1310 mov $acc2, %rax
1315 mov $acc2, $t0
1317 mov %rdx, $acc1
1323 mov $acc3, %rax
1328 mov $acc3, $t0
1330 mov %rdx, $acc2
1335 mov $acc0, $t0
1337 mov $acc1, $in_ptr
1343 mov $acc2, %rax
1346 mov %rdx, $acc3
1352 mov $acc0, 8*0($r_ptr)
1354 mov $acc1, 8*1($r_ptr)
1356 mov $acc2, 8*2($r_ptr)
1357 mov $acc3, 8*3($r_ptr)
1380 mov OPENSSL_ia32cap_P+8(%rip), %eax
1413 mov \$16, %rax
1477 mov OPENSSL_ia32cap_P+8(%rip), %eax
1507 mov \$64, %rax
1599 mov \$8, %rax
1705 mov \$21, %rax
1813 " mov $b, $src0
1815 mov 8*0+$a, $acc1
1816 mov 8*1+$a, $acc2
1818 mov 8*2+$a, $acc3
1819 mov 8*3+$a, $acc4"
1826 " mov 8*0+$a, $src0
1827 mov 8*1+$a, $acc6
1829 mov 8*2+$a, $acc7
1830 mov 8*3+$a, $acc0"
1845 mov $a0, $t0
1848 mov $a1, $t1
1852 mov $a2, $t2
1855 mov $a3, $t3
1861 mov $a0, 8*0($r_ptr)
1863 mov $a1, 8*1($r_ptr)
1865 mov $a2, 8*2($r_ptr)
1866 mov $a3, 8*3($r_ptr)
1876 mov $a0, $t0
1879 mov $a1, $t1
1883 mov $a2, $t2
1886 mov $a3, $t3
1892 mov $a0, 8*0($r_ptr)
1894 mov $a1, 8*1($r_ptr)
1896 mov $a2, 8*2($r_ptr)
1897 mov $a3, 8*3($r_ptr)
1907 mov $t0, $a0
1910 mov $t1, $a1
1914 mov $t2, $a2
1917 mov $t3, $a3
1934 mov $a0, $t0
1937 mov $a1, $t1
1941 mov $a2, $t2
1944 mov $a3, $t3
1950 mov $a0, 8*0($r_ptr)
1952 mov $a1, 8*1($r_ptr)
1954 mov $a2, 8*2($r_ptr)
1955 mov $a3, 8*3($r_ptr)
1978 mov \$0x80100, %ecx
2006 mov $a_ptr, $b_ptr # backup copy
2008 mov 0x20+8*0($a_ptr), $acc4 # load in_y in "5-4-0-1" order
2009 mov 0x20+8*1($a_ptr), $acc5
2010 mov 0x20+8*2($a_ptr), $acc0
2011 mov 0x20+8*3($a_ptr), $acc1
2012 mov .Lpoly+8*1(%rip), $poly1
2013 mov .Lpoly+8*3(%rip), $poly3
2025 mov 0x40+8*0($a_ptr), $src0
2026 mov 0x40+8*1($a_ptr), $acc6
2027 mov 0x40+8*2($a_ptr), $acc7
2028 mov 0x40+8*3($a_ptr), $acc0
2037 mov 0x20($b_ptr), $src0 # $b_ptr is still valid
2038 mov 0x40+8*0($b_ptr), $acc1
2039 mov 0x40+8*1($b_ptr), $acc2
2040 mov 0x40+8*2($b_ptr), $acc3
2041 mov 0x40+8*3($b_ptr), $acc4
2048 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
2049 mov $in_x+8*1(%rsp), $acc5
2051 mov $in_x+8*2(%rsp), $acc0
2052 mov $in_x+8*3(%rsp), $acc1
2056 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
2057 mov $in_x+8*1(%rsp), $acc5
2059 mov $in_x+8*2(%rsp), $acc0
2060 mov $in_x+8*3(%rsp), $acc1
2077 mov $a0, $t0
2079 mov $a1, $t1
2081 mov $a2, $t2
2083 mov $a3, $t3
2095 mov $a1, $t0 # a0:a3>>1
2098 mov $a2, $t1
2102 mov $a3, $t2
2106 mov $a0, 8*0($r_ptr)
2108 mov $a1, 8*1($r_ptr)
2112 mov $a2, 8*2($r_ptr)
2113 mov $a3, 8*3($r_ptr)
2140 mov $acc6, $acc0 # harmonize sqr output and sub input
2141 mov $acc7, $acc1
2142 mov $a_ptr, $poly1
2143 mov $t1, $poly3
2146 mov $S+8*0(%rsp), $t0
2147 mov $S+8*1(%rsp), $t1
2148 mov $S+8*2(%rsp), $t2
2149 mov $S+8*3(%rsp), $acc2 # "4-5-0-1" order
2153 mov $M(%rsp), $src0
2155 mov $acc4, $acc6 # harmonize sub output and mul input
2157 mov $acc4, $S+8*0(%rsp) # have to save:-(
2158 mov $acc5, $acc2
2159 mov $acc5, $S+8*1(%rsp)
2161 mov $acc0, $S+8*2(%rsp)
2164 mov $acc1, $S+8*3(%rsp)
2165 mov $acc6, $acc1
2208 mov \$0x80100, %ecx
2240 mov $a_ptr, $b_ptr # reassign
2241 mov $b_org, $a_ptr # reassign
2258 mov 0x40+8*0($a_ptr), $src0 # load original in2_z
2259 mov 0x40+8*1($a_ptr), $acc6
2260 mov 0x40+8*2($a_ptr), $acc7
2261 mov 0x40+8*3($a_ptr), $acc0
2275 mov $src0, $in2_z+8*0(%rsp) # make in2_z copy
2276 mov $acc6, $in2_z+8*1(%rsp)
2277 mov $acc7, $in2_z+8*2(%rsp)
2278 mov $acc0, $in2_z+8*3(%rsp)
2291 mov 0x40+8*0($b_ptr), $src0 # load original in1_z
2292 mov 0x40+8*1($b_ptr), $acc6
2293 mov 0x40+8*2($b_ptr), $acc7
2294 mov 0x40+8*3($b_ptr), $acc0
2411 mov $acc0, $t0
2414 mov $acc1, $t1
2418 mov $acc2, $t2
2421 mov $acc3, $t3
2426 mov 8*0($a_ptr), $t0
2428 mov 8*1($a_ptr), $t1
2430 mov 8*2($a_ptr), $t2
2432 mov 8*3($a_ptr), $t3
2440 mov $U2+8*0(%rsp), $t0
2441 mov $U2+8*1(%rsp), $t1
2442 mov $U2+8*2(%rsp), $t2
2443 mov $U2+8*3(%rsp), $t3
2448 mov $acc0, 8*0($r_ptr) # save the result, as
2449 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't
2450 mov $acc2, 8*2($r_ptr)
2451 mov $acc3, 8*3($r_ptr)
2576 mov \$0x80100, %ecx
2603 mov $b_org, $b_ptr # reassign
2609 mov 0x40+8*0($a_ptr), $src0 # load original in1_z
2610 mov 0x40+8*1($a_ptr), $acc6
2611 mov 0x40+8*2($a_ptr), $acc7
2612 mov 0x40+8*3($a_ptr), $acc0
2647 mov 0x00($b_ptr), $src0 # $b_ptr is still valid
2649 mov $acc4, $acc1 # harmonize sqr output and mul input
2653 mov $acc5, $acc2
2656 mov $acc6, $acc3
2661 mov $acc7, $acc4
2716 mov $acc0, $t0
2719 mov $acc1, $t1
2723 mov $acc2, $t2
2726 mov $acc3, $t3
2731 mov 8*0($a_ptr), $t0
2733 mov 8*1($a_ptr), $t1
2735 mov 8*2($a_ptr), $t2
2737 mov 8*3($a_ptr), $t3
2745 mov $U2+8*0(%rsp), $t0
2746 mov $U2+8*1(%rsp), $t1
2747 mov $U2+8*2(%rsp), $t2
2748 mov $U2+8*3(%rsp), $t3
2753 mov $acc0, 8*0($r_ptr) # save the result, as
2754 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't
2755 mov $acc2, 8*2($r_ptr)
2756 mov $acc3, 8*3($r_ptr)
2875 mov $a0, $t0
2878 mov $a1, $t1
2883 mov $a2, $t2
2886 mov $a3, $t3
2892 mov $a0, 8*0($r_ptr)
2894 mov $a1, 8*1($r_ptr)
2896 mov $a2, 8*2($r_ptr)
2897 mov $a3, 8*3($r_ptr)
2908 mov $a0, $t0
2911 mov $a1, $t1
2916 mov $a2, $t2
2919 mov $a3, $t3
2925 mov $a0, 8*0($r_ptr)
2927 mov $a1, 8*1($r_ptr)
2929 mov $a2, 8*2($r_ptr)
2930 mov $a3, 8*3($r_ptr)
2941 mov $t0, $a0
2944 mov $t1, $a1
2949 mov $t2, $a2
2952 mov $t3, $a3
2970 mov $a0, $t0
2973 mov $a1, $t1
2978 mov $a2, $t2
2981 mov $a3, $t3
2987 mov $a0, 8*0($r_ptr)
2989 mov $a1, 8*1($r_ptr)
2991 mov $a2, 8*2($r_ptr)
2992 mov $a3, 8*3($r_ptr)