/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "libavcodec/h264data.h"

#include "gcc_fixes.h"

#include "dsputil_ppc.h"
#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"

#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\

/* this code assumes that stride % 16 == 0 */
void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
   DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                             ((x) * (8 - y)),
                         ((8 - x) * (y)),
                             ((x) * (y))};
    register int i;
    vec_u8 fperm;
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
    const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
    const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
    const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
    LOAD_ZERO;
    const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vec_u16 v6us  = vec_splat_u16(6);
    register int loadSecond     = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;

    vec_u8 vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, fsum;

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                           0x14, 0x15, 0x16, 0x17,
                           0x08, 0x09, 0x0A, 0x0B,
                           0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                           0x04, 0x05, 0x06, 0x07,
                           0x18, 0x19, 0x1A, 0x1B,
                           0x1C, 0x1D, 0x1E, 0x1F};
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
        for (i = 0 ; i < h ; i++) {


            vsrcCuc = vec_ld(stride + 0, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

            vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc2uc);
            vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sra(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8)vec_packsu(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    } else {
        vec_u8 vsrcDuc;
        for (i = 0 ; i < h ; i++) {
            vsrcCuc = vec_ld(stride + 0, src);
            vsrcDuc = vec_ld(stride + 16, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
            if (reallyBadAlign)
                vsrc3uc = vsrcDuc;
            else
                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

            vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc2uc);
            vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sr(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8)vec_pack(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    }
}
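
/* Illustrative scalar sketch (not part of the build) of what the AltiVec loops
 * above compute per pixel: an H.264 chroma bilinear interpolation with the
 * same bias of 28 (v28ss = 32 - 4) and final >> 6. The function name
 * chroma_mc8_no_rnd_ref is hypothetical and only given for reference. */
#if 0
static void chroma_mc8_no_rnd_ref(uint8_t *dst, uint8_t *src, int stride,
                                  int h, int x, int y)
{
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);
    int i, j;

    for (j = 0; j < h; j++) {
        for (i = 0; i < 8; i++)
            dst[i] = (A * src[i]          + B * src[i + 1] +
                      C * src[i + stride] + D * src[i + stride + 1] + 28) >> 6;
        dst += stride;
        src += stride;
    }
}
#endif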

static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                    const uint8_t * src2, int dst_stride,
                                    int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}
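
/* Illustrative scalar sketch (not part of the build) of the "l2" helper above:
 * every output byte is the rounding average of a src1 row (stride src_stride1)
 * and a src2 row (fixed stride of 16), which is exactly what vec_avg yields.
 * The name put_pixels16_l2_ref is hypothetical. */
#if 0
static void put_pixels16_l2_ref(uint8_t *dst, const uint8_t *src1,
                                const uint8_t *src2, int dst_stride,
                                int src_stride1, int h)
{
    int i, j;
    for (i = 0; i < h; i++) {
        for (j = 0; j < 16; j++)
            dst[j] = (src1[i * src_stride1 + j] + src2[i * 16 + j] + 1) >> 1;
        dst += dst_stride;
    }
}
#endif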

static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                    const uint8_t * src2, int dst_stride,
                                    int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

/* Implemented but could be faster
#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
 */

H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)


/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
    /* 1st stage */                                               \
    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1].1/2 - Y[3] */ \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3].1/2 */ \
    /* 2nd stage: output */                                       \
    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */
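
/* Illustrative scalar sketch (not part of the build) of one VEC_1D_DCT pass,
 * spelling out the temp[]/x[] comments above (the H.264 4x4 inverse-transform
 * butterfly). The name idct4_1d_ref is hypothetical. */
#if 0
static void idct4_1d_ref(const int16_t y[4], int16_t x[4])
{
    const int z0 = y[0] + y[2];
    const int z1 = y[0] - y[2];
    const int z2 = (y[1] >> 1) - y[3];
    const int z3 = y[1] + (y[3] >> 1);

    x[0] = z0 + z3;
    x[1] = z1 + z2;
    x[2] = z1 - z2;
    x[3] = z0 - z3;
}
#endif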

#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)                      \
    vdst_orig = vec_ld(0, dst);                               \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);          \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);         \
    va = vec_add(va, vdst_ss);                                \
    va_u8 = vec_packsu(va, zero_s16v);                        \
    va_u32 = vec_splat((vec_u32)va_u8, 0);                  \
    vec_ste(va_u32, element, (uint32_t*)dst);

static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,  d0, d1, d2, d3, d4, d5, d6, d7) {\
    /*        a0  = SRC(0) + SRC(4); */ \
    vec_s16 a0v = vec_add(s0, s4);    \
    /*        a2  = SRC(0) - SRC(4); */ \
    vec_s16 a2v = vec_sub(s0, s4);    \
    /*        a4  =           (SRC(2)>>1) - SRC(6); */ \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6);    \
    /*        a6  =           (SRC(6)>>1) + SRC(2); */ \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2);    \
    /*        b0  =         a0 + a6; */ \
    vec_s16 b0v = vec_add(a0v, a6v);  \
    /*        b2  =         a2 + a4; */ \
    vec_s16 b2v = vec_add(a2v, a4v);  \
    /*        b4  =         a2 - a4; */ \
    vec_s16 b4v = vec_sub(a2v, a4v);  \
    /*        b6  =         a0 - a6; */ \
    vec_s16 b6v = vec_sub(a0v, a6v);  \
    /* a1 =  SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /*        a1 =             (SRC(5)-SRC(3)) -  (SRC(7)  +  (SRC(7)>>1)); */ \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 =  SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /*        a3 =             (SRC(7)+SRC(1)) -  (SRC(3)  +  (SRC(3)>>1)); */ \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 =  SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /*        a5 =             (SRC(7)-SRC(1)) +   SRC(5) +   (SRC(5)>>1); */ \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /*        a7 =                SRC(5)+SRC(3) +  SRC(1) +   (SRC(1)>>1); */ \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /*        b1 =                  (a7>>2)  +  a1; */ \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /*        b3 =          a3 +        (a5>>2); */ \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /*        b5 =                  (a3>>2)  -   a5; */ \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /*        b7 =           a7 -        (a1>>2); */ \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0,    b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1,    b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2,    b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3,    b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4,    b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5,    b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6,    b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7,    b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                       \
    vec_u8 hv = vec_ld( 0, dest );                           \
    vec_u8 lv = vec_ld( 7, dest );                           \
    vec_u8 dstv   = vec_perm( hv, lv, (vec_u8)perm_ldv );  \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                 \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);   \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);  \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);        \
    vec_u8 edgehv;                                           \
    /* unaligned store */                                      \
    vec_u8 bodyv  = vec_perm( idstsum8, idstsum8, perm_stv );\
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );     \
    lv    = vec_sel( lv, bodyv, edgelv );                      \
    vec_st( lv, 7, dest );                                     \
    hv    = vec_ld( 0, dest );                                 \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );              \
    hv    = vec_sel( hv, bodyv, edgehv );                      \
    vec_st( hv, 0, dest );                                     \
 }

void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0,  d1,  d2,  d3,  d4,  d5,  d6, d7 );

    IDCT8_1D_ALTIVEC(d0,  d1,  d2,  d3,  d4,  d5,  d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, DCTELEM *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    LOAD_ZERO;
    DECLARE_ALIGNED_16(int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}
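
/* Illustrative scalar sketch (not part of the build) of the DC-only add above:
 * dc may be negative, which the vector code handles with the saturating
 * dcplus/dcminus pair; the scalar equivalent is simply a clipped add.
 * idct_dc_add_ref is a hypothetical name; av_clip_uint8() is assumed to be
 * available from the common FFmpeg headers. */
#if 0
static void idct_dc_add_ref(uint8_t *dst, DCTELEM *block, int stride, int size)
{
    const int dc = (block[0] + 32) >> 6;
    int i, j;

    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++)
            dst[j] = av_clip_uint8(dst[j] + dc);
        dst += stride;
    }
}
#endif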

static void h264_idct_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}

static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_altivec   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16])
            h264_idct_dc_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}

#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8 r4;                    \
    register vec_u8 r5;                    \
    register vec_u8 r6;                    \
    register vec_u8 r7;                    \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2);  /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3);  /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3);  /*1, 3 set 1*/ \
                                             \
    r0 = vec_mergeh(r4, r6);  /*all set 0*/  \
    r1 = vec_mergel(r4, r6);  /*all set 1*/  \
    r2 = vec_mergeh(r5, r7);  /*all set 2*/  \
    r3 = vec_mergel(r5, r7);  /*all set 3*/  \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED_16(unsigned char, result[64]);
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0, 0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

/** \brief reads 16 rows from src and transposes them, leaving the six
    result vectors needed for the loop filter in r8-r13
    \todo FIXME: see if we can spare some vec_lvsl() calls by factorizing
    them out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);            \
    register vec_u8 r1  = unaligned_load(   src_stride, src);            \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);            \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);            \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);            \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);            \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);            \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);            \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);            \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);            \
                                                                           \
    r8  = unaligned_load( 8*src_stride, src);                              \
    r9  = unaligned_load( 9*src_stride, src);                              \
    r10 = unaligned_load(10*src_stride, src);                              \
    r11 = unaligned_load(11*src_stride, src);                              \
    r12 = unaligned_load(12*src_stride, src);                              \
    r13 = unaligned_load(13*src_stride, src);                              \
                                                                           \
    /*Merge first pairs*/                                                  \
    r0 = vec_mergeh(r0, r8);    /*0, 8*/                                   \
    r1 = vec_mergeh(r1, r9);    /*1, 9*/                                   \
    r2 = vec_mergeh(r2, r10);   /*2,10*/                                   \
    r3 = vec_mergeh(r3, r11);   /*3,11*/                                   \
    r4 = vec_mergeh(r4, r12);   /*4,12*/                                   \
    r5 = vec_mergeh(r5, r13);   /*5,13*/                                   \
    r6 = vec_mergeh(r6, r14);   /*6,14*/                                   \
    r7 = vec_mergeh(r7, r15);   /*7,15*/                                   \
                                                                           \
    /*Merge second pairs*/                                                 \
    r8  = vec_mergeh(r0, r4);   /*0,4, 8,12 set 0*/                        \
    r9  = vec_mergel(r0, r4);   /*0,4, 8,12 set 1*/                        \
    r10 = vec_mergeh(r1, r5);   /*1,5, 9,13 set 0*/                        \
    r11 = vec_mergel(r1, r5);   /*1,5, 9,13 set 1*/                        \
    r12 = vec_mergeh(r2, r6);   /*2,6,10,14 set 0*/                        \
    r13 = vec_mergel(r2, r6);   /*2,6,10,14 set 1*/                        \
    r14 = vec_mergeh(r3, r7);   /*3,7,11,15 set 0*/                        \
    r15 = vec_mergel(r3, r7);   /*3,7,11,15 set 1*/                        \
                                                                           \
    /*Third merge*/                                                        \
    r0 = vec_mergeh(r8,  r12);  /*0,2,4,6,8,10,12,14 set 0*/               \
    r1 = vec_mergel(r8,  r12);  /*0,2,4,6,8,10,12,14 set 1*/               \
    r2 = vec_mergeh(r9,  r13);  /*0,2,4,6,8,10,12,14 set 2*/               \
    r4 = vec_mergeh(r10, r14);  /*1,3,5,7,9,11,13,15 set 0*/               \
    r5 = vec_mergel(r10, r14);  /*1,3,5,7,9,11,13,15 set 1*/               \
    r6 = vec_mergeh(r11, r15);  /*1,3,5,7,9,11,13,15 set 2*/               \
    /* Don't need to compute 3 and 7*/                                     \
                                                                           \
    /*Final merge*/                                                        \
    r8  = vec_mergeh(r0, r4);   /*all set 0*/                              \
    r9  = vec_mergel(r0, r4);   /*all set 1*/                              \
    r10 = vec_mergeh(r1, r5);   /*all set 2*/                              \
    r11 = vec_mergel(r1, r5);   /*all set 3*/                              \
    r12 = vec_mergeh(r2, r6);   /*all set 4*/                              \
    r13 = vec_mergel(r2, r6);   /*all set 5*/                              \
    /* Don't need to compute 14 and 15*/                                   \
                                                                           \
}

// out: o = |x-y| < a
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                         register vec_u8 y,
                                         register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}

static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                           register vec_u8 p1,
                                           register vec_u8 q0,
                                           register vec_u8 q1,
                                           register vec_u8 alpha,
                                           register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                       register vec_u8 p1,
                                       register vec_u8 p2,
                                       register vec_u8 q0,
                                       register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 unclipped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);     /*avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);         /*(p2^avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}
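
/* Illustrative scalar sketch (not part of the build) of the averaging trick
 * above: vec_avg rounds up, avg(a,b) = (a + b + 1) >> 1, so the truncating
 * average (a + b) >> 1 is recovered as avg(a,b) - ((a ^ b) & 1). That yields
 * (p2 + ((p0 + q0 + 1) >> 1)) >> 1 before clamping to [p1-tc0, p1+tc0].
 * deblock_q1_ref is a hypothetical name. */
#if 0
static uint8_t deblock_q1_ref(uint8_t p0, uint8_t p1, uint8_t p2,
                              uint8_t q0, uint8_t tc0)
{
    const int avg       = (p0 + q0 + 1) >> 1;
    const int unclipped = (p2 + avg) >> 1;
    const int lo        = p1 - tc0;
    const int hi        = p1 + tc0;
    return unclipped < lo ? lo : (unclipped > hi ? hi : unclipped);
}
#endif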

#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                                           \
                                                                                                  \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));                               \
                                                                                                  \
    register vec_u8 pq0bit = vec_xor(p0,q0);                                                    \
    register vec_u8 q1minus;                                                                    \
    register vec_u8 p0minus;                                                                    \
    register vec_u8 stage1;                                                                     \
    register vec_u8 stage2;                                                                     \
    register vec_u8 vec160;                                                                     \
    register vec_u8 delta;                                                                      \
    register vec_u8 deltaneg;                                                                   \
                                                                                                  \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                                     \
    stage1 = vec_avg(p1, q1minus);             /* (p1 - q1 + 256)>>1 */                           \
    stage2 = vec_sr(stage1, vec_splat_u8(1));  /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */     \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                                     \
    stage1 = vec_avg(q0, p0minus);             /* (q0 - p0 + 256)>>1 */                           \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                                    \
    stage2 = vec_avg(stage2, pq0bit);          /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);         /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */  \
    vec160 = vec_ld(0, &A0v);                                                                     \
    deltaneg = vec_subs(vec160, stage2);       /* -d */                                           \
    delta = vec_subs(stage2, vec160);          /* d */                                            \
    deltaneg = vec_min(tc0masked, deltaneg);                                                      \
    delta = vec_min(tc0masked, delta);                                                            \
    p0 = vec_subs(p0, deltaneg);                                                                  \
    q0 = vec_subs(q0, delta);                                                                     \
    p0 = vec_adds(p0, delta);                                                                     \
    q0 = vec_adds(q0, deltaneg);                                                                  \
}
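
/* For reference, a scalar sketch (not part of the build) of the textbook H.264
 * p0/q0 update with the delta clipped to +-tc0; the macro above arrives at its
 * delta through saturating averages around the 160 bias rather than this exact
 * integer form, so rounding details may differ slightly. deblock_p0_q0_ref is
 * a hypothetical name; av_clip_uint8() is assumed available. */
#if 0
static void deblock_p0_q0_ref(uint8_t *p0, uint8_t *p1,
                              uint8_t *q0, uint8_t *q1, int tc0)
{
    int delta = (((*q0 - *p0) << 2) + (*p1 - *q1) + 4) >> 3;

    if (delta >  tc0) delta =  tc0;
    if (delta < -tc0) delta = -tc0;
    *p0 = av_clip_uint8(*p0 + delta);
    *q0 = av_clip_uint8(*q0 - delta);
}
#endif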

#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {            \
    DECLARE_ALIGNED_16(unsigned char, temp[16]);                                             \
    register vec_u8 alphavec;                                                              \
    register vec_u8 betavec;                                                               \
    register vec_u8 mask;                                                                  \
    register vec_u8 p1mask;                                                                \
    register vec_u8 q1mask;                                                                \
    register vector signed   char tc0vec;                                                    \
    register vec_u8 finaltc0;                                                              \
    register vec_u8 tc0masked;                                                             \
    register vec_u8 newp1;                                                                 \
    register vec_u8 newq1;                                                                 \
                                                                                             \
    temp[0] = alpha;                                                                         \
    temp[1] = beta;                                                                          \
    alphavec = vec_ld(0, temp);                                                              \
    betavec = vec_splat(alphavec, 0x1);                                                      \
    alphavec = vec_splat(alphavec, 0x0);                                                     \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */            \
                                                                                             \
    *((int *)temp) = *((int *)tc0);                                                          \
    tc0vec = vec_ld(0, (signed char*)temp);                                                  \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */         \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);     /* tc = tc0 */                           \
                                                                                             \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                               \
    p1mask = vec_and(p1mask, mask);                             /* if ( |p2 - p0| < beta) */ \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                           \
    finaltc0 = vec_sub(finaltc0, p1mask);                       /* tc++ */                   \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                      \
    /*end if*/                                                                               \
                                                                                             \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                               \
    q1mask = vec_and(q1mask, mask);                             /* if ( |q2 - q0| < beta ) */\
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                           \
    finaltc0 = vec_sub(finaltc0, q1mask);                       /* tc++ */                   \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                      \
    /*end if*/                                                                               \
                                                                                             \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                            \
    p1 = newp1;                                                                              \
    q1 = newq1;                                                                              \
}

static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

static av_always_inline
void weight_h264_WxH_altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED_16(int32_t, temp[4]);
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y=0; y<h; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}
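
/* Illustrative scalar sketch (not part of the build) of the weighted
 * prediction above: the offset is pre-scaled and rounding-biased exactly as in
 * the vector code, then each pixel is weighted, shifted and clipped.
 * weight_ref is a hypothetical name; av_clip_uint8() is assumed available. */
#if 0
static void weight_ref(uint8_t *block, int stride, int log2_denom,
                       int weight, int offset, int w, int h)
{
    int x, y;

    offset <<= log2_denom;
    if (log2_denom)
        offset += 1 << (log2_denom - 1);

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++)
            block[x] = av_clip_uint8((block[x] * weight + offset) >> log2_denom);
        block += stride;
    }
}
#endif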

static av_always_inline
void biweight_h264_WxH_altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom,
                               int weightd, int weights, int offset, int w, int h)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED_16(int32_t, temp[4]);
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y=0; y<h; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}
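
/* Illustrative scalar sketch (not part of the build) of the bidirectional
 * weighting above: dst and src are weighted separately, summed with the
 * ((offset + 1) | 1) << log2_denom rounding term and shifted by
 * log2_denom + 1, as in the vector code. biweight_ref is a hypothetical name;
 * av_clip_uint8() is assumed available. */
#if 0
static void biweight_ref(uint8_t *dst, uint8_t *src, int stride, int log2_denom,
                         int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    const int rnd = ((offset + 1) | 1) << log2_denom;

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++)
            dst[x] = av_clip_uint8((dst[x] * weightd + src[x] * weights + rnd)
                                   >> (log2_denom + 1));
        dst += stride;
        src += stride;
    }
}
#endif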

#define H264_WEIGHT(W,H) \
static void ff_weight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
    weight_h264_WxH_altivec(block, stride, log2_denom, weight, offset, W, H); \
}\
static void ff_biweight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    biweight_h264_WxH_altivec(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)

void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {

    if (has_altivec()) {
        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
        c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec;
        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
        c->h264_idct_add = ff_h264_idct_add_altivec;
        c->h264_idct_add8 = ff_h264_idct_add8_altivec;
        c->h264_idct_add16 = ff_h264_idct_add16_altivec;
        c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
        c->h264_idct_dc_add= h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add = ff_h264_idct8_dc_add_altivec;
        c->h264_idct8_add = ff_h264_idct8_add_altivec;
        c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec;

#define dspfunc(PFX, IDX, NUM) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

        dspfunc(put_h264_qpel, 0, 16);
        dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc

        c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels16x16_altivec;
        c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels16x8_altivec;
        c->weight_h264_pixels_tab[2] = ff_weight_h264_pixels8x16_altivec;
        c->weight_h264_pixels_tab[3] = ff_weight_h264_pixels8x8_altivec;
        c->weight_h264_pixels_tab[4] = ff_weight_h264_pixels8x4_altivec;
        c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16x16_altivec;
        c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels16x8_altivec;
        c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels8x16_altivec;
        c->biweight_h264_pixels_tab[3] = ff_biweight_h264_pixels8x8_altivec;
        c->biweight_h264_pixels_tab[4] = ff_biweight_h264_pixels8x4_altivec;
    }
}