Lines matching references to NSIZE

26 #define NSIZE 16
52 for (d = 0; d < bytes; d += NSIZE*4) {
53 /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
54 asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
55 asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
56 asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
57 asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
63 /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
64 asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
65 asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
66 asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
67 asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
99 /* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
100 asm volatile("vst $vr0, %0" : "=m"(p[d+NSIZE*0]));
101 asm volatile("vst $vr1, %0" : "=m"(p[d+NSIZE*1]));
102 asm volatile("vst $vr2, %0" : "=m"(p[d+NSIZE*2]));
103 asm volatile("vst $vr3, %0" : "=m"(p[d+NSIZE*3]));
104 /* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
105 asm volatile("vst $vr4, %0" : "=m"(q[d+NSIZE*0]));
106 asm volatile("vst $vr5, %0" : "=m"(q[d+NSIZE*1]));
107 asm volatile("vst $vr6, %0" : "=m"(q[d+NSIZE*2]));
108 asm volatile("vst $vr7, %0" : "=m"(q[d+NSIZE*3]));
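The lines between the vld loads and the vst stores above are not listed because they do not mention NSIZE; they fold each remaining data disk into the running P values ($vr0-$vr3) and Q values ($vr4-$vr7). A minimal per-byte sketch of that computation, assuming the semantics of the generic int.uc template that the $$ comments mirror (the LSX loop does the same work 16 bytes at a time, four vectors per iteration; the function name below is illustrative, not from the file):

#include <stdint.h>
#include <stddef.h>

/* Illustrative scalar equivalent of gen_syndrome; not kernel code. */
static void gen_syndrome_bytewise(int disks, size_t bytes, uint8_t **ptrs)
{
	uint8_t *p = ptrs[disks - 2];	/* XOR parity block */
	uint8_t *q = ptrs[disks - 1];	/* Reed-Solomon syndrome block */
	int z0 = disks - 3;		/* highest data disk */

	for (size_t d = 0; d < bytes; d++) {
		uint8_t wp = ptrs[z0][d];	/* wp$$ */
		uint8_t wq = wp;		/* wq$$ */

		for (int z = z0 - 1; z >= 0; z--) {
			uint8_t wd = ptrs[z][d];	/* wd$$ */

			wp ^= wd;	/* P: plain XOR of all data disks */
			/* Q: multiply by {02} in GF(2^8), poly 0x11d, then XOR */
			wq = (uint8_t)((wq << 1) ^ ((wq & 0x80) ? 0x1d : 0)) ^ wd;
		}
		p[d] = wp;
		q[d] = wq;
	}
}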
134 for (d = 0; d < bytes; d += NSIZE*4) {
136 /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
137 asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
138 asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
139 asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
140 asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
146 /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
147 asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
148 asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
149 asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
150 asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
207 * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
208 * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
235 : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
236 "+m"(p[d+NSIZE*2]), "+m"(p[d+NSIZE*3]),
237 "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1]),
238 "+m"(q[d+NSIZE*2]), "+m"(q[d+NSIZE*3])
253 #undef NSIZE
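The xor_syndrome variant updates P and Q in place (the ^= in the comment at 207-208), which is why its final asm uses read-write "+m" memory operands where gen_syndrome used write-only "=m". A per-byte sketch under the same assumption as above, covering the start/stop disk range that xor_syndrome is given:

#include <stdint.h>
#include <stddef.h>

/* Illustrative scalar equivalent of xor_syndrome; not kernel code. */
static void xor_syndrome_bytewise(int disks, int start, int stop,
				  size_t bytes, uint8_t **ptrs)
{
	uint8_t *p = ptrs[disks - 2];
	uint8_t *q = ptrs[disks - 1];
	int z0 = stop;			/* P/Q right-side optimization */

	for (size_t d = 0; d < bytes; d++) {
		uint8_t wp = ptrs[z0][d];
		uint8_t wq = wp;

		for (int z = z0 - 1; z >= start; z--) {	/* remaining disks in range */
			uint8_t wd = ptrs[z][d];

			wp ^= wd;
			wq = (uint8_t)((wq << 1) ^ ((wq & 0x80) ? 0x1d : 0)) ^ wd;
		}
		for (int z = start - 1; z >= 0; z--)	/* left side: only scale Q */
			wq = (uint8_t)((wq << 1) ^ ((wq & 0x80) ? 0x1d : 0));

		p[d] ^= wp;	/* fold into existing parity, hence "+m" */
		q[d] ^= wq;
	}
}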
257 #define NSIZE 32
283 for (d = 0; d < bytes; d += NSIZE*2) {
284 /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
285 asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
286 asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
290 /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
291 asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
292 asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
312 /* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
313 asm volatile("xvst $xr0, %0" : "=m"(p[d+NSIZE*0]));
314 asm volatile("xvst $xr1, %0" : "=m"(p[d+NSIZE*1]));
315 /* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
316 asm volatile("xvst $xr2, %0" : "=m"(q[d+NSIZE*0]));
317 asm volatile("xvst $xr3, %0" : "=m"(q[d+NSIZE*1]));
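In the LASX path NSIZE becomes 32 because xvld/xvst move 256-bit $xr registers, and the loop stride drops from NSIZE*4 to NSIZE*2, so both paths still process 64 bytes of each block per iteration. A purely illustrative check of that arithmetic (the macro names below are not from the file):

#define LSX_NSIZE  16	/* $vrN registers are 128-bit */
#define LASX_NSIZE 32	/* $xrN registers are 256-bit */
_Static_assert(LSX_NSIZE * 4 == LASX_NSIZE * 2,
	       "both loops cover 64 bytes of each block per iteration");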
343 for (d = 0; d < bytes; d += NSIZE*2) {
345 /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
346 asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
347 asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
351 /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
352 asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
353 asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
390 * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
391 * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
406 : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
407 "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1])
421 #undef NSIZE
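None of the remaining glue mentions NSIZE, so it is not matched here, but routines like these are normally exported through a struct raid6_calls entry (include/linux/raid/pq.h) so the RAID-6 core can benchmark and select an implementation at boot. A sketch of such a registration; the function names, the "lsx" string and the prefer value are assumptions for illustration, not lines copied from the file:

#include <linux/raid/pq.h>

/*
 * Sketch only: the first two hooks would be the NSIZE 16 gen_syndrome and
 * xor_syndrome loops listed above, and valid() would test at runtime that
 * the CPU actually supports LSX.
 */
const struct raid6_calls raid6_lsx = {
	raid6_lsx_gen_syndrome,	/* void (*)(int disks, size_t bytes, void **ptrs) */
	raid6_lsx_xor_syndrome,	/* void (*)(int, int, int, size_t, void **) */
	raid6_has_lsx,		/* int (*)(void): 1 if this routine set is usable */
	"lsx",			/* name reported by the raid6 benchmark */
	0			/* prefer */
};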