/*
 * Copyright (c) 2001 Michel Lespinasse
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * NOTE: This code is based on GPL code from the libmpeg2 project.  The
 * author, Michel Lespinasse, has given explicit permission to release
 * under LGPL as part of Libav.
 */

/*
 * Libav integration by Dieter Shirley
 *
 * This file is a direct copy of the AltiVec IDCT module from the libmpeg2
 * project.  I've deleted all of the libmpeg2-specific code, renamed the
 * functions and reordered the function parameters.  The only change to the
 * IDCT function itself was to factor out the partial transposition, and to
 * perform a full transpose at the end of the function.
 */


#include <stdlib.h>                                      /* malloc(), free() */
#include <string.h>
#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include "libavcodec/dsputil.h"
#include "types_altivec.h"
#include "dsputil_altivec.h"
#define IDCT_HALF                                       \
    /* 1st stage */                                     \
    t1 = vec_mradds (a1, vx7, vx1 );                    \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7));    \
    t7 = vec_mradds (a2, vx5, vx3);                     \
    t3 = vec_mradds (ma2, vx3, vx5);                    \
                                                        \
    /* 2nd stage */                                     \
    t5 = vec_adds (vx0, vx4);                           \
    t0 = vec_subs (vx0, vx4);                           \
    t2 = vec_mradds (a0, vx6, vx2);                     \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6));    \
    t6 = vec_adds (t8, t3);                             \
    t3 = vec_subs (t8, t3);                             \
    t8 = vec_subs (t1, t7);                             \
    t1 = vec_adds (t1, t7);                             \
                                                        \
    /* 3rd stage */                                     \
    t7 = vec_adds (t5, t2);                             \
    t2 = vec_subs (t5, t2);                             \
    t5 = vec_adds (t0, t4);                             \
    t0 = vec_subs (t0, t4);                             \
    t4 = vec_subs (t8, t3);                             \
    t3 = vec_adds (t8, t3);                             \
                                                        \
    /* 4th stage */                                     \
    vy0 = vec_adds (t7, t1);                            \
    vy7 = vec_subs (t7, t1);                            \
    vy1 = vec_mradds (c4, t3, t5);                      \
    vy6 = vec_mradds (mc4, t3, t5);                     \
    vy2 = vec_mradds (c4, t4, t0);                      \
    vy5 = vec_mradds (mc4, t4, t0);                     \
    vy3 = vec_adds (t2, t6);                            \
    vy4 = vec_subs (t2, t6);

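/* IDCT expands to the full 2-D transform of the eight vectors in block[]:
 * the input rows are shifted left by 4 and pre-multiplied by the per-row
 * factors from constants[1..4], one IDCT_HALF pass is run, the 8x8 matrix
 * of 16-bit values is transposed with three rounds of vec_mergeh/vec_mergel
 * (the rounding bias is added to the first transposed row), a second
 * IDCT_HALF pass is run, and the outputs are arithmetically shifted right
 * by 6 into vx0..vx7. */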
#define IDCT                                                            \
    vec_s16 vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;                \
    vec_s16 vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;                \
    vec_s16 a0, a1, a2, ma2, c4, mc4, zero, bias;                  \
    vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8;                    \
    vec_u16 shift;                                                 \
                                                                        \
    c4 = vec_splat (constants[0], 0);                                   \
    a0 = vec_splat (constants[0], 1);                                   \
    a1 = vec_splat (constants[0], 2);                                   \
    a2 = vec_splat (constants[0], 3);                                   \
    mc4 = vec_splat (constants[0], 4);                                  \
    ma2 = vec_splat (constants[0], 5);                                  \
    bias = (vec_s16)vec_splat ((vec_s32)constants[0], 3);     \
                                                                        \
    zero = vec_splat_s16 (0);                                           \
    shift = vec_splat_u16 (4);                                          \
                                                                        \
    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero);    \
    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero);    \
    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero);    \
    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero);    \
    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero);    \
    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero);    \
    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero);    \
    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero);    \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    vx0 = vec_mergeh (vy0, vy4);                                        \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    vy0 = vec_mergeh (vx0, vx4);                                        \
    vy1 = vec_mergel (vx0, vx4);                                        \
    vy2 = vec_mergeh (vx1, vx5);                                        \
    vy3 = vec_mergel (vx1, vx5);                                        \
    vy4 = vec_mergeh (vx2, vx6);                                        \
    vy5 = vec_mergel (vx2, vx6);                                        \
    vy6 = vec_mergeh (vx3, vx7);                                        \
    vy7 = vec_mergel (vx3, vx7);                                        \
                                                                        \
    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias);                       \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    shift = vec_splat_u16 (6);                                          \
    vx0 = vec_sra (vy0, shift);                                         \
    vx1 = vec_sra (vy1, shift);                                         \
    vx2 = vec_sra (vy2, shift);                                         \
    vx3 = vec_sra (vy3, shift);                                         \
    vx4 = vec_sra (vy4, shift);                                         \
    vx5 = vec_sra (vy5, shift);                                         \
    vx6 = vec_sra (vy6, shift);                                         \
    vx7 = vec_sra (vy7, shift);


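/* constants[0] holds the butterfly multipliers splatted in IDCT above as
 * c4, a0, a1, a2, mc4 and ma2, plus the {32, 31} pair that forms the 32-bit
 * rounding bias; constants[1..4] are the per-row scale factors applied to
 * the input block.  The multipliers act as Q15 fractions under
 * vec_mradds(). */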
static const vec_s16 constants[5] = {
    {23170, 13573,  6518, 21895, -23170, -21895,    32,    31},
    {16384, 22725, 21407, 19266,  16384,  19266, 21407, 22725},
    {22725, 31521, 29692, 26722,  22725,  26722, 29692, 31521},
    {21407, 29692, 27969, 25172,  21407,  25172, 27969, 29692},
    {19266, 26722, 25172, 22654,  19266,  22654, 25172, 26722}
};

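/* Compute the IDCT of the 8x8 coefficient block 'blk' and store the result
 * into 'dest', one row of 8 pixels per 'stride' bytes.  Each output row is
 * packed to unsigned bytes with saturation and written as two 32-bit
 * vec_ste element stores. */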
void idct_put_altivec(uint8_t* dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;

    IDCT

#define COPY(dest,src)                                          \
    tmp = vec_packsu (src, src);                                \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);       \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)
}

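/* Compute the IDCT of 'blk' and add the result to the pixels already in
 * 'dest', saturating to 0..255.  The vec_lvsl/vec_perm pairs perm0/perm1
 * zero-extend the existing destination bytes to 16-bit values for the two
 * alternating row alignments before the saturating add. */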
void idct_add_altivec(uint8_t* dest, int stride, int16_t *blk)
{
    vec_s16 *block = (vec_s16*)blk;
    vec_u8 tmp;
    vec_s16 tmp2, tmp3;
    vec_u8 perm0;
    vec_u8 perm1;
    vec_u8 p0, p1, p;

    IDCT

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

#define ADD(dest,src,perm)                                              \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                        \
    tmp = vec_ld (0, dest);                                             \
    tmp2 = (vec_s16)vec_perm (tmp, (vec_u8)zero, perm);       \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vec_u32)tmp, 0, (unsigned int *)dest);               \
    vec_ste ((vec_u32)tmp, 4, (unsigned int *)dest);

    ADD (dest, vx0, perm0)      dest += stride;
    ADD (dest, vx1, perm1)      dest += stride;
    ADD (dest, vx2, perm0)      dest += stride;
    ADD (dest, vx3, perm1)      dest += stride;
    ADD (dest, vx4, perm0)      dest += stride;
    ADD (dest, vx5, perm1)      dest += stride;
    ADD (dest, vx6, perm0)      dest += stride;
    ADD (dest, vx7, perm1)
}