/*
 * Copyright (C) 2003  James Klicman <james@klicman.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavutil/common.h"
#include "libavcodec/dsputil.h"
#include "dsputil_altivec.h"

#define vs16(v) ((vector signed short)(v))
#define vs32(v) ((vector signed int)(v))
#define vu8(v)  ((vector unsigned char)(v))
#define vu16(v) ((vector unsigned short)(v))
#define vu32(v) ((vector unsigned int)(v))


#define C1     0.98078525066375732421875000 /* cos(1*PI/16) */
#define C2     0.92387950420379638671875000 /* cos(2*PI/16) */
#define C3     0.83146959543228149414062500 /* cos(3*PI/16) */
#define C4     0.70710676908493041992187500 /* cos(4*PI/16) */
#define C5     0.55557024478912353515625000 /* cos(5*PI/16) */
#define C6     0.38268342614173889160156250 /* cos(6*PI/16) */
#define C7     0.19509032368659973144531250 /* cos(7*PI/16) */
#define SQRT_2 1.41421353816986083984375000 /* sqrt(2)      */
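/* The cosine constants above are written out to full decimal precision so
 * that each one is an exactly representable single-precision value and the
 * table stays bit-identical across compilers. */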


#define W0 -(2 * C2)
#define W1 (2 * C6)
#define W2 (SQRT_2 * C6)
#define W3 (SQRT_2 * C3)
#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
#define W5 (SQRT_2 * ( C1 + C3 - C5 + C7))
#define W6 (SQRT_2 * ( C1 + C3 + C5 - C7))
#define W7 (SQRT_2 * ( C1 + C3 - C5 - C7))
#define W8 (SQRT_2 * ( C7 - C3))
#define W9 (SQRT_2 * (-C1 - C3))
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * ( C5 - C3))
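/* W0-WB fold the cosines above into the combined multipliers of a factored
 * 8-point DCT-II, so each rotation inside FDCTROW/FDCTCOL below costs a
 * single vec_madd per output. */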


static const vector float fdctconsts[3] = {
    { W0, W1, W2, W3 },
    { W4, W5, W6, W7 },
    { W8, W9, WA, WB }
};

#define LD_W0 vec_splat(cnsts0, 0)
#define LD_W1 vec_splat(cnsts0, 1)
#define LD_W2 vec_splat(cnsts0, 2)
#define LD_W3 vec_splat(cnsts0, 3)
#define LD_W4 vec_splat(cnsts1, 0)
#define LD_W5 vec_splat(cnsts1, 1)
#define LD_W6 vec_splat(cnsts1, 2)
#define LD_W7 vec_splat(cnsts1, 3)
#define LD_W8 vec_splat(cnsts2, 0)
#define LD_W9 vec_splat(cnsts2, 1)
#define LD_WA vec_splat(cnsts2, 2)
#define LD_WB vec_splat(cnsts2, 3)
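/* vec_splat broadcasts one 32-bit element across all four lanes, so each
 * LD_Wx expands a scalar weight from fdctconsts into a full vector that
 * can feed vec_madd directly. */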


#define FDCTROW(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */                  \
    x0 = vec_add(b0, b7);               /* x0 = b0 + b7; */         \
    x7 = vec_sub(b0, b7);               /* x7 = b0 - b7; */         \
    x1 = vec_add(b1, b6);               /* x1 = b1 + b6; */         \
    x6 = vec_sub(b1, b6);               /* x6 = b1 - b6; */         \
    x2 = vec_add(b2, b5);               /* x2 = b2 + b5; */         \
    x5 = vec_sub(b2, b5);               /* x5 = b2 - b5; */         \
    x3 = vec_add(b3, b4);               /* x3 = b3 + b4; */         \
    x4 = vec_sub(b3, b4);               /* x4 = b3 - b4; */         \
                                                                    \
    b7 = vec_add(x0, x3);               /* b7 = x0 + x3; */         \
    b1 = vec_add(x1, x2);               /* b1 = x1 + x2; */         \
    b0 = vec_add(b7, b1);               /* b0 = b7 + b1; */         \
    b4 = vec_sub(b7, b1);               /* b4 = b7 - b1; */         \
                                                                    \
    b2 = vec_sub(x0, x3);               /* b2 = x0 - x3; */         \
    b6 = vec_sub(x1, x2);               /* b6 = x1 - x2; */         \
    b5 = vec_add(b6, b2);               /* b5 = b6 + b2; */         \
    cnst = LD_W2;                                                   \
    b5 = vec_madd(cnst, b5, mzero);     /* b5 = b5 * W2; */         \
    cnst = LD_W1;                                                   \
    b2 = vec_madd(cnst, b2, b5);        /* b2 = b5 + b2 * W1; */    \
    cnst = LD_W0;                                                   \
    b6 = vec_madd(cnst, b6, b5);        /* b6 = b5 + b6 * W0; */    \
                                                                    \
    x0 = vec_add(x4, x7);               /* x0 = x4 + x7; */         \
    x1 = vec_add(x5, x6);               /* x1 = x5 + x6; */         \
    x2 = vec_add(x4, x6);               /* x2 = x4 + x6; */         \
    x3 = vec_add(x5, x7);               /* x3 = x5 + x7; */         \
    x8 = vec_add(x2, x3);               /* x8 = x2 + x3; */         \
    cnst = LD_W3;                                                   \
    x8 = vec_madd(cnst, x8, mzero);     /* x8 = x8 * W3; */         \
                                                                    \
    cnst = LD_W8;                                                   \
    x0 = vec_madd(cnst, x0, mzero);     /* x0 *= W8; */             \
    cnst = LD_W9;                                                   \
    x1 = vec_madd(cnst, x1, mzero);     /* x1 *= W9; */             \
    cnst = LD_WA;                                                   \
    x2 = vec_madd(cnst, x2, x8);        /* x2 = x2 * WA + x8; */    \
    cnst = LD_WB;                                                   \
    x3 = vec_madd(cnst, x3, x8);        /* x3 = x3 * WB + x8; */    \
                                                                    \
    cnst = LD_W4;                                                   \
    b7 = vec_madd(cnst, x4, x0);        /* b7 = x4 * W4 + x0; */    \
    cnst = LD_W5;                                                   \
    b5 = vec_madd(cnst, x5, x1);        /* b5 = x5 * W5 + x1; */    \
    cnst = LD_W6;                                                   \
    b3 = vec_madd(cnst, x6, x1);        /* b3 = x6 * W6 + x1; */    \
    cnst = LD_W7;                                                   \
    b1 = vec_madd(cnst, x7, x0);        /* b1 = x7 * W7 + x0; */    \
                                                                    \
    b7 = vec_add(b7, x2);               /* b7 = b7 + x2; */         \
    b5 = vec_add(b5, x3);               /* b5 = b5 + x3; */         \
    b3 = vec_add(b3, x2);               /* b3 = b3 + x2; */         \
    b1 = vec_add(b1, x3);               /* b1 = b1 + x3; */         \
    /* }}} */
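
/* FDCTROW performs one 8-point DCT pass on eight row vectors: the first
 * stage forms the sums x0-x3 and differences x4-x7; the even half then
 * yields b0/b4 directly and b2/b6 through the W0-W2 rotation, while the
 * odd half builds b1/b3/b5/b7 from the W3-WB multiply-adds.  FDCTCOL
 * below is the identical network applied to the column pass. */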

#define FDCTCOL(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */                  \
    x0 = vec_add(b0, b7);               /* x0 = b0 + b7; */         \
    x7 = vec_sub(b0, b7);               /* x7 = b0 - b7; */         \
    x1 = vec_add(b1, b6);               /* x1 = b1 + b6; */         \
    x6 = vec_sub(b1, b6);               /* x6 = b1 - b6; */         \
    x2 = vec_add(b2, b5);               /* x2 = b2 + b5; */         \
    x5 = vec_sub(b2, b5);               /* x5 = b2 - b5; */         \
    x3 = vec_add(b3, b4);               /* x3 = b3 + b4; */         \
    x4 = vec_sub(b3, b4);               /* x4 = b3 - b4; */         \
                                                                    \
    b7 = vec_add(x0, x3);               /* b7 = x0 + x3; */         \
    b1 = vec_add(x1, x2);               /* b1 = x1 + x2; */         \
    b0 = vec_add(b7, b1);               /* b0 = b7 + b1; */         \
    b4 = vec_sub(b7, b1);               /* b4 = b7 - b1; */         \
                                                                    \
    b2 = vec_sub(x0, x3);               /* b2 = x0 - x3; */         \
    b6 = vec_sub(x1, x2);               /* b6 = x1 - x2; */         \
    b5 = vec_add(b6, b2);               /* b5 = b6 + b2; */         \
    cnst = LD_W2;                                                   \
    b5 = vec_madd(cnst, b5, mzero);     /* b5 = b5 * W2; */         \
    cnst = LD_W1;                                                   \
    b2 = vec_madd(cnst, b2, b5);        /* b2 = b5 + b2 * W1; */    \
    cnst = LD_W0;                                                   \
    b6 = vec_madd(cnst, b6, b5);        /* b6 = b5 + b6 * W0; */    \
                                                                    \
    x0 = vec_add(x4, x7);               /* x0 = x4 + x7; */         \
    x1 = vec_add(x5, x6);               /* x1 = x5 + x6; */         \
    x2 = vec_add(x4, x6);               /* x2 = x4 + x6; */         \
    x3 = vec_add(x5, x7);               /* x3 = x5 + x7; */         \
    x8 = vec_add(x2, x3);               /* x8 = x2 + x3; */         \
    cnst = LD_W3;                                                   \
    x8 = vec_madd(cnst, x8, mzero);     /* x8 = x8 * W3; */         \
                                                                    \
    cnst = LD_W8;                                                   \
    x0 = vec_madd(cnst, x0, mzero);     /* x0 *= W8; */             \
    cnst = LD_W9;                                                   \
    x1 = vec_madd(cnst, x1, mzero);     /* x1 *= W9; */             \
    cnst = LD_WA;                                                   \
    x2 = vec_madd(cnst, x2, x8);        /* x2 = x2 * WA + x8; */    \
    cnst = LD_WB;                                                   \
    x3 = vec_madd(cnst, x3, x8);        /* x3 = x3 * WB + x8; */    \
                                                                    \
    cnst = LD_W4;                                                   \
    b7 = vec_madd(cnst, x4, x0);        /* b7 = x4 * W4 + x0; */    \
    cnst = LD_W5;                                                   \
    b5 = vec_madd(cnst, x5, x1);        /* b5 = x5 * W5 + x1; */    \
    cnst = LD_W6;                                                   \
    b3 = vec_madd(cnst, x6, x1);        /* b3 = x6 * W6 + x1; */    \
    cnst = LD_W7;                                                   \
    b1 = vec_madd(cnst, x7, x0);        /* b1 = x7 * W7 + x0; */    \
                                                                    \
    b7 = vec_add(b7, x2);               /* b7 += x2; */             \
    b5 = vec_add(b5, x3);               /* b5 += x3; */             \
    b3 = vec_add(b3, x2);               /* b3 += x2; */             \
    b1 = vec_add(b1, x3);               /* b1 += x3; */             \
    /* }}} */



/* In-place two-dimensional discrete cosine transform of an 8x8 block of
 * 16-bit coefficients: transpose the block, transform the rows, transpose
 * again, transform the columns, then round the result back to int16_t. */
void fdct_altivec(int16_t *block)
{
    vector signed short *bp;
    const vector float *cp;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;

    /* setup constants {{{ */
    /* mzero = -0.0 */
    mzero = ((vector float)vec_splat_u32(-1));
    mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));
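    /* vec_splat_u32(-1) fills each element with 0xFFFFFFFF; shifting every
     * element left by its own low five bits (31) leaves just the sign bit,
     * i.e. 0x80000000 == -0.0f, the identity addend for vec_madd. */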
    cp = fdctconsts;
    cnsts0 = vec_ld(0, cp); cp++;
    cnsts1 = vec_ld(0, cp); cp++;
    cnsts2 = vec_ld(0, cp);
    /* }}} */


    /* 8x8 matrix transpose (vector short[8]) {{{ */
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))
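    /* vec_mergeh/vec_mergel interleave the elements of the high or low
     * halves of two vectors; three rounds of these merges implement the
     * 8x8 halfword transpose without any scalar shuffling. */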

    bp = (vector signed short*)block;
    b00 = ((vector float)vec_ld(0,    bp));
    b40 = ((vector float)vec_ld(16*4, bp));
    b01 = ((vector float)MERGE_S16(h, b00, b40));
    b11 = ((vector float)MERGE_S16(l, b00, b40));
    bp++;
    b10 = ((vector float)vec_ld(0,    bp));
    b50 = ((vector float)vec_ld(16*4, bp));
    b21 = ((vector float)MERGE_S16(h, b10, b50));
    b31 = ((vector float)MERGE_S16(l, b10, b50));
    bp++;
    b20 = ((vector float)vec_ld(0,    bp));
    b60 = ((vector float)vec_ld(16*4, bp));
    b41 = ((vector float)MERGE_S16(h, b20, b60));
    b51 = ((vector float)MERGE_S16(l, b20, b60));
    bp++;
    b30 = ((vector float)vec_ld(0,    bp));
    b70 = ((vector float)vec_ld(16*4, bp));
    b61 = ((vector float)MERGE_S16(h, b30, b70));
    b71 = ((vector float)MERGE_S16(l, b30, b70));

    x0 = ((vector float)MERGE_S16(h, b01, b41));
    x1 = ((vector float)MERGE_S16(l, b01, b41));
    x2 = ((vector float)MERGE_S16(h, b11, b51));
    x3 = ((vector float)MERGE_S16(l, b11, b51));
    x4 = ((vector float)MERGE_S16(h, b21, b61));
    x5 = ((vector float)MERGE_S16(l, b21, b61));
    x6 = ((vector float)MERGE_S16(h, b31, b71));
    x7 = ((vector float)MERGE_S16(l, b31, b71));

    b00 = ((vector float)MERGE_S16(h, x0, x4));
    b10 = ((vector float)MERGE_S16(l, x0, x4));
    b20 = ((vector float)MERGE_S16(h, x1, x5));
    b30 = ((vector float)MERGE_S16(l, x1, x5));
    b40 = ((vector float)MERGE_S16(h, x2, x6));
    b50 = ((vector float)MERGE_S16(l, x2, x6));
    b60 = ((vector float)MERGE_S16(h, x3, x7));
    b70 = ((vector float)MERGE_S16(l, x3, x7));

#undef MERGE_S16
    /* }}} */


/* Some of the initial calculations can be done as vector short before
 * conversion to vector float.  The following code section takes advantage
 * of this.
 */
    /* fdct rows {{{ */
    x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
    x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
    x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
    x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
    x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
    x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
    x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
    x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));

    b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
    b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));

    b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
    b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));

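    /* CTF0 widens one row of 16-bit partial results into two float vectors:
     * vec_unpackh/vec_unpackl sign-extend the halfwords to 32 bits and
     * vec_ctf converts them to float with zero fractional bits. */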
#define CTF0(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF0(0);
    CTF0(4);

    b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
    b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));

    CTF0(2);
    CTF0(6);

#undef CTF0

    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);

    cnst = LD_W2;
    x0 = vec_madd(cnst, x0, mzero);
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20 = vec_madd(cnst, b20, x0);
    b21 = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60 = vec_madd(cnst, b60, x0);
    b61 = vec_madd(cnst, b61, x1);

#define CTFX(x,b) \
    b##0 = ((vector float)vec_unpackh(vs16(x))); \
    b##1 = ((vector float)vec_unpackl(vs16(x))); \
    b##0 = vec_ctf(vs32(b##0), 0); \
    b##1 = vec_ctf(vs32(b##1), 0);

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);

#undef CTFX


    x0 = vec_add(b70, b10);
    x1 = vec_add(b50, b30);
    x2 = vec_add(b70, b30);
    x3 = vec_add(b50, b10);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70 = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50 = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30 = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10 = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);


    x0 = vec_add(b71, b11);
    x1 = vec_add(b51, b31);
    x2 = vec_add(b71, b31);
    x3 = vec_add(b51, b11);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71 = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51 = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31 = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11 = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);
    /* }}} */


    /* 8x8 matrix transpose (vector float[8][2]) {{{ */
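    /* Each row now spans two float vectors, so the 8x8 transpose is done as
     * four 4x4 block transposes: the two diagonal blocks transpose in place
     * while the two off-diagonal blocks swap between the b*0 and b*1 halves. */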
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);
    /* }}} */


    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);


    /* round, convert back to short {{{ */
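    /* CTS reverses the input conversion: vec_round to the nearest integer,
     * vec_cts back to 32-bit ints, and vec_pack keeps the low 16 bits of
     * each element, which is enough for the coefficient range this DCT
     * produces. */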
#define CTS(n) \
    b##n##0 = vec_round(b##n##0); \
    b##n##1 = vec_round(b##n##1); \
    b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \
    b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \
    b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
    vec_st(vs16(b##n##0), 0, bp);

    bp = (vector signed short*)block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);

#undef CTS
    /* }}} */
}

/* vim:set foldmethod=marker foldlevel=0: */
