Lines matching refs:cnst in /netgear-R7000-V1.0.7.12_1.2.5/ap/gpl/iserver/libav-0.8.8/libavcodec/ppc/

98 cnst = LD_W2; \
99 b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \
100 cnst = LD_W1; \
101 b2 = vec_madd(cnst, b2, b5); /* b2 = b5 + b2 * W1; */ \
102 cnst = LD_W0; \
103 b6 = vec_madd(cnst, b6, b5); /* b6 = b5 + b6 * W0; */ \
110 cnst = LD_W3; \
111 x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \
113 cnst = LD_W8; \
114 x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \
115 cnst = LD_W9; \
116 x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \
117 cnst = LD_WA; \
118 x2 = vec_madd(cnst, x2, x8); /* x2 = x2 * WA + x8; */ \
119 cnst = LD_WB; \
120 x3 = vec_madd(cnst, x3, x8); /* x3 = x3 * WB + x8; */ \
122 cnst = LD_W4; \
123 b7 = vec_madd(cnst, x4, x0); /* b7 = x4 * W4 + x0; */ \
124 cnst = LD_W5; \
125 b5 = vec_madd(cnst, x5, x1); /* b5 = x5 * W5 + x1; */ \
126 cnst = LD_W6; \
127 b3 = vec_madd(cnst, x6, x1); /* b3 = x6 * W6 + x1; */ \
128 cnst = LD_W7; \
129 b1 = vec_madd(cnst, x7, x0); /* b1 = x7 * W7 + x0; */ \
155 cnst = LD_W2; \
156 b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \
157 cnst = LD_W1; \
158 b2 = vec_madd(cnst, b2, b5); /* b2 = b5 + b2 * W1; */ \
159 cnst = LD_W0; \
160 b6 = vec_madd(cnst, b6, b5); /* b6 = b5 + b6 * W0; */ \
167 cnst = LD_W3; \
168 x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \
170 cnst = LD_W8; \
171 x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \
172 cnst = LD_W9; \
173 x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \
174 cnst = LD_WA; \
175 x2 = vec_madd(cnst, x2, x8); /* x2 = x2 * WA + x8; */ \
176 cnst = LD_WB; \
177 x3 = vec_madd(cnst, x3, x8); /* x3 = x3 * WB + x8; */ \
179 cnst = LD_W4; \
180 b7 = vec_madd(cnst, x4, x0); /* b7 = x4 * W4 + x0; */ \
181 cnst = LD_W5; \
182 b5 = vec_madd(cnst, x5, x1); /* b5 = x5 * W5 + x1; */ \
183 cnst = LD_W6; \
184 b3 = vec_madd(cnst, x6, x1); /* b3 = x6 * W6 + x1; */ \
185 cnst = LD_W7; \
186 b1 = vec_madd(cnst, x7, x0); /* b1 = x7 * W7 + x0; */ \
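Every match above follows the same idiom: the scratch vector cnst is reloaded with one splatted DCT weight via an LD_W* macro and immediately fed to vec_madd, AltiVec's fused multiply-add (a*b + c per element), with mzero supplying a zero addend when only a plain multiply is wanted. Below is a minimal sketch of the idiom from lines 98-103, assuming a PowerPC target with AltiVec (e.g. GCC with -maltivec); the helper name butterfly_step, the weights parameter, and the lane numbers in the stand-in LD_W* macros are illustrative assumptions, not the file's actual definitions.

    #include <altivec.h>

    /* Illustrative stand-ins for the file's LD_W0/LD_W1/LD_W2 macros:
     * splat one lane of a packed weight vector into all four elements
     * (the lane numbers are assumptions). */
    #define LD_W0 vec_splat(weights, 2)
    #define LD_W1 vec_splat(weights, 0)
    #define LD_W2 vec_splat(weights, 1)

    /* Mirrors source lines 98-103: scale b5 by W2, then give b2 and b6
     * each a fused multiply-add against a freshly splatted weight. */
    static void butterfly_step(vector float *b2, vector float *b5,
                               vector float *b6, vector float weights)
    {
        const vector float mzero = { 0.0f, 0.0f, 0.0f, 0.0f };
        vector float cnst;

        cnst = LD_W2;
        *b5 = vec_madd(cnst, *b5, mzero);  /* b5 = b5 * W2      */
        cnst = LD_W1;
        *b2 = vec_madd(cnst, *b2, *b5);    /* b2 = b5 + b2 * W1 */
        cnst = LD_W0;
        *b6 = vec_madd(cnst, *b6, *b5);    /* b6 = b5 + b6 * W0 */
    }

Reloading cnst between madds rather than keeping twelve live weight registers keeps register pressure low; vec_splat of a held constants vector is cheap compared to a fresh load from memory.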
204 vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
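Line 204 declares the registers behind the idiom: mzero as the additive identity for vec_madd, cnst as the reload target, and cnsts0/cnsts1/cnsts2 holding the packed weights. A plausible shape for that weight table and its LD_W* accessors is sketched below; the packing order, lane choice, and numeric values are placeholders assumed for illustration, not the file's actual table.

    #include <altivec.h>

    /* Hypothetical packing: three vectors cover the twelve weights
     * W0..W9, WA, WB, four per vector (values are placeholders). */
    static const vector float cnsts0 = { 1.0f, 1.0f, 1.0f, 1.0f };  /* W0..W3 */
    static const vector float cnsts1 = { 1.0f, 1.0f, 1.0f, 1.0f };  /* W4..W7 */
    static const vector float cnsts2 = { 1.0f, 1.0f, 1.0f, 1.0f };  /* W8..WB */

    /* Each LD_W* would then be a single vec_splat of the right lane,
     * e.g. W5 sitting in lane 1 of cnsts1 under this packing. */
    #define LD_W5 vec_splat(cnsts1, 1)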
304 cnst = LD_W2;
305 x0 = vec_madd(cnst, x0, mzero);
306 x1 = vec_madd(cnst, x1, mzero);
307 cnst = LD_W1;
308 b20 = vec_madd(cnst, b20, x0);
309 b21 = vec_madd(cnst, b21, x1);
310 cnst = LD_W0;
311 b60 = vec_madd(cnst, b60, x0);
312 b61 = vec_madd(cnst, b61, x1);
333 cnst = LD_W3;
334 x8 = vec_madd(cnst, x8, mzero);
336 cnst = LD_W8;
337 x0 = vec_madd(cnst, x0, mzero);
338 cnst = LD_W9;
339 x1 = vec_madd(cnst, x1, mzero);
340 cnst = LD_WA;
341 x2 = vec_madd(cnst, x2, x8);
342 cnst = LD_WB;
343 x3 = vec_madd(cnst, x3, x8);
345 cnst = LD_W4;
346 b70 = vec_madd(cnst, b70, x0);
347 cnst = LD_W5;
348 b50 = vec_madd(cnst, b50, x1);
349 cnst = LD_W6;
350 b30 = vec_madd(cnst, b30, x1);
351 cnst = LD_W7;
352 b10 = vec_madd(cnst, b10, x0);
365 cnst = LD_W3;
366 x8 = vec_madd(cnst, x8, mzero);
368 cnst = LD_W8;
369 x0 = vec_madd(cnst, x0, mzero);
370 cnst = LD_W9;
371 x1 = vec_madd(cnst, x1, mzero);
372 cnst = LD_WA;
373 x2 = vec_madd(cnst, x2, x8);
374 cnst = LD_WB;
375 x3 = vec_madd(cnst, x3, x8);
377 cnst = LD_W4;
378 b71 = vec_madd(cnst, b71, x0);
379 cnst = LD_W5;
380 b51 = vec_madd(cnst, b51, x1);
381 cnst = LD_W6;
382 b31 = vec_madd(cnst, b31, x1);
383 cnst = LD_W7;
384 b11 = vec_madd(cnst, b11, x0);
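Read per element, the vec_madd chains reduce to ordinary scalar arithmetic. The sketch below is a scalar rendering of the odd-part pass at lines 110-129 (repeated at 155-186 and, two rows at a time, at 304-384), following the formulas spelled out in the excerpts' own comments; the function name, the array parameters, and the W[] indexing convention are illustrative.

    /* Scalar equivalent of one odd-part pass: the comments on source
     * lines 110-129 state exactly these formulas. W[10] stands for WA
     * and W[11] for WB. */
    static void odd_part_scalar(float x[9], float b[8], const float W[12])
    {
        x[8] = x[8] * W[3];            /* x8 = x8 * W3       */
        x[0] = x[0] * W[8];            /* x0 *= W8           */
        x[1] = x[1] * W[9];            /* x1 *= W9           */
        x[2] = x[2] * W[10] + x[8];    /* x2 = x2 * WA + x8  */
        x[3] = x[3] * W[11] + x[8];    /* x3 = x3 * WB + x8  */

        b[7] = x[4] * W[4] + x[0];     /* b7 = x4 * W4 + x0  */
        b[5] = x[5] * W[5] + x[1];     /* b5 = x5 * W5 + x1  */
        b[3] = x[6] * W[6] + x[1];     /* b3 = x6 * W6 + x1  */
        b[1] = x[7] * W[7] + x[0];     /* b1 = x7 * W7 + x0  */
    }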