/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006  Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * MMX-optimized DSP functions, based on H.264 optimizations by
 * Michael Niedermayer and Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/cavsdsp.h"
#include "libavcodec/idctdsp.h"
#include "constants.h"
#include "fpel.h"
#include "idctdsp.h"
#include "config.h"

#if HAVE_MMX_INLINE

/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"

/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/

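/* One pass of the 8x8 inverse transform, covering four columns at a time
 * (one MMX register holds four 16-bit coefficients).  Rows 1,3,5,7 form
 * the odd part (a0..a3), rows 0,2,4,6 the even part; the rounding bias is
 * folded into the even part so it reaches all eight outputs, and the
 * caller finishes with a plain arithmetic shift: (x + 4) >> 3 in the
 * first pass, (x + 64) >> 7 in the second.  The eight result rows are
 * left in mm7,mm5,mm3,mm1,mm0,mm2,mm4,mm6 = dst0..dst7. */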
static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
{
    __asm__ volatile(
        "movq 112(%0), %%mm4  \n\t" /* mm4 = src7 */
        "movq  16(%0), %%mm5  \n\t" /* mm5 = src1 */
        "movq  80(%0), %%mm2  \n\t" /* mm2 = src5 */
        "movq  48(%0), %%mm7  \n\t" /* mm7 = src3 */
        "movq   %%mm4, %%mm0  \n\t"
        "movq   %%mm5, %%mm3  \n\t"
        "movq   %%mm2, %%mm6  \n\t"
        "movq   %%mm7, %%mm1  \n\t"

        "paddw  %%mm4, %%mm4  \n\t" /* mm4 = 2*src7 */
        "paddw  %%mm3, %%mm3  \n\t" /* mm3 = 2*src1 */
        "paddw  %%mm6, %%mm6  \n\t" /* mm6 = 2*src5 */
        "paddw  %%mm1, %%mm1  \n\t" /* mm1 = 2*src3 */
        "paddw  %%mm4, %%mm0  \n\t" /* mm0 = 3*src7 */
        "paddw  %%mm3, %%mm5  \n\t" /* mm5 = 3*src1 */
        "paddw  %%mm6, %%mm2  \n\t" /* mm2 = 3*src5 */
        "paddw  %%mm1, %%mm7  \n\t" /* mm7 = 3*src3 */
        "psubw  %%mm4, %%mm5  \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */
        "paddw  %%mm6, %%mm7  \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */
        "psubw  %%mm2, %%mm1  \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */
        "paddw  %%mm0, %%mm3  \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */

        "movq   %%mm5, %%mm4  \n\t"
        "movq   %%mm7, %%mm6  \n\t"
        "movq   %%mm3, %%mm0  \n\t"
        "movq   %%mm1, %%mm2  \n\t"
        SUMSUB_BA( %%mm7, %%mm5 )   /* mm7 = a0 + a1  mm5 = a0 - a1 */
        "paddw  %%mm3, %%mm7  \n\t" /* mm7 = a0 + a1 + a3 */
        "paddw  %%mm1, %%mm5  \n\t" /* mm5 = a0 - a1 + a2 */
        "paddw  %%mm7, %%mm7  \n\t"
        "paddw  %%mm5, %%mm5  \n\t"
        "paddw  %%mm6, %%mm7  \n\t" /* mm7 = b4 */
        "paddw  %%mm4, %%mm5  \n\t" /* mm5 = b5 */

        SUMSUB_BA( %%mm1, %%mm3 )   /* mm1 = a3 + a2  mm3 = a3 - a2 */
        "psubw  %%mm1, %%mm4  \n\t" /* mm4 = a0 - a2 - a3 */
        "movq   %%mm4, %%mm1  \n\t" /* mm1 = a0 - a2 - a3 */
        "psubw  %%mm6, %%mm3  \n\t" /* mm3 = a3 - a2 - a1 */
        "paddw  %%mm1, %%mm1  \n\t"
        "paddw  %%mm3, %%mm3  \n\t"
        "psubw  %%mm2, %%mm1  \n\t" /* mm1 = b7 */
        "paddw  %%mm0, %%mm3  \n\t" /* mm3 = b6 */

        "movq  32(%0), %%mm2  \n\t" /* mm2 = src2 */
        "movq  96(%0), %%mm6  \n\t" /* mm6 = src6 */
        "movq   %%mm2, %%mm4  \n\t"
        "movq   %%mm6, %%mm0  \n\t"
        "psllw  $2,    %%mm4  \n\t" /* mm4 = 4*src2 */
        "psllw  $2,    %%mm6  \n\t" /* mm6 = 4*src6 */
        "paddw  %%mm4, %%mm2  \n\t" /* mm2 = 5*src2 */
        "paddw  %%mm6, %%mm0  \n\t" /* mm0 = 5*src6 */
        "paddw  %%mm2, %%mm2  \n\t"
        "paddw  %%mm0, %%mm0  \n\t"
        "psubw  %%mm0, %%mm4  \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */
        "paddw  %%mm2, %%mm6  \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */

        "movq    (%0), %%mm2  \n\t" /* mm2 = src0 */
        "movq  64(%0), %%mm0  \n\t" /* mm0 = src4 */
        SUMSUB_BA( %%mm0, %%mm2 )   /* mm0 = src0+src4  mm2 = src0-src4 */
        "psllw  $3,    %%mm0  \n\t"
        "psllw  $3,    %%mm2  \n\t"
        "paddw  %1,    %%mm0  \n\t" /* add rounding bias */
        "paddw  %1,    %%mm2  \n\t" /* add rounding bias */

        SUMSUB_BA( %%mm6, %%mm0 )   /* mm6 = a4 + a6  mm0 = a4 - a6 */
        SUMSUB_BA( %%mm4, %%mm2 )   /* mm4 = a5 + a7  mm2 = a5 - a7 */
        SUMSUB_BA( %%mm7, %%mm6 )   /* mm7 = dst0  mm6 = dst7 */
        SUMSUB_BA( %%mm5, %%mm4 )   /* mm5 = dst1  mm4 = dst6 */
        SUMSUB_BA( %%mm3, %%mm2 )   /* mm3 = dst2  mm2 = dst5 */
        SUMSUB_BA( %%mm1, %%mm0 )   /* mm1 = dst3  mm0 = dst4 */
        :: "r"(block), "m"(bias)
    );
}

#define SBUTTERFLY(a,b,t,n,m)\
    "mov" #m " " #a ", " #t "         \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a "     \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t "     \n\t" /* cgdh */\

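/* TRANSPOSE4(a,b,c,d,t) transposes a 4x4 block of 16-bit values with three
 * SBUTTERFLY passes, using t as scratch.  Note that the transposed rows
 * end up in a, d, t, c (in that order); b is left holding an
 * intermediate. */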
#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd,q) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd,q) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq,q) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq,q) /* t=cgko c=dhlp */

static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, ptrdiff_t stride)
{
    int i;
    DECLARE_ALIGNED(8, int16_t, b2)[64];

    for(i=0; i<2; i++){
        DECLARE_ALIGNED(8, uint64_t, tmp);

        cavs_idct8_1d(block+4*i, ff_pw_4.a);

        __asm__ volatile(
            "psraw     $3, %%mm7  \n\t"
            "psraw     $3, %%mm6  \n\t"
            "psraw     $3, %%mm5  \n\t"
            "psraw     $3, %%mm4  \n\t"
            "psraw     $3, %%mm3  \n\t"
            "psraw     $3, %%mm2  \n\t"
            "psraw     $3, %%mm1  \n\t"
            "psraw     $3, %%mm0  \n\t"
            "movq   %%mm7,    %0   \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq   %%mm0,  8(%1)  \n\t"
            "movq   %%mm6, 24(%1)  \n\t"
            "movq   %%mm7, 40(%1)  \n\t"
            "movq   %%mm4, 56(%1)  \n\t"
            "movq    %0,    %%mm7  \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq   %%mm7,   (%1)  \n\t"
            "movq   %%mm1, 16(%1)  \n\t"
            "movq   %%mm0, 32(%1)  \n\t"
            "movq   %%mm3, 48(%1)  \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        cavs_idct8_1d(b2+4*i, ff_pw_64.a);

        __asm__ volatile(
            "psraw     $7, %%mm7  \n\t"
            "psraw     $7, %%mm6  \n\t"
            "psraw     $7, %%mm5  \n\t"
            "psraw     $7, %%mm4  \n\t"
            "psraw     $7, %%mm3  \n\t"
            "psraw     $7, %%mm2  \n\t"
            "psraw     $7, %%mm1  \n\t"
            "psraw     $7, %%mm0  \n\t"
            "movq   %%mm7,    (%0)  \n\t"
            "movq   %%mm5,  16(%0)  \n\t"
            "movq   %%mm3,  32(%0)  \n\t"
            "movq   %%mm1,  48(%0)  \n\t"
            "movq   %%mm0,  64(%0)  \n\t"
            "movq   %%mm2,  80(%0)  \n\t"
            "movq   %%mm4,  96(%0)  \n\t"
            "movq   %%mm6, 112(%0)  \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    ff_add_pixels_clamped_mmx(b2, dst, stride);
}

#endif /* HAVE_MMX_INLINE */

#if (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE)

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/

/* vertical filter [-1 -2 96 42 -7  0]  */
#define QPEL_CAVSV1(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "pmullw "MANGLE(MUL1)", %%mm6\n\t"\
        "movq "#D", %%mm7           \n\t"\
        "pmullw "MANGLE(MUL2)", %%mm7\n\t"\
        "psllw $3, "#E"             \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "psraw $3, "#E"             \n\t"\
        "paddw %%mm7, %%mm6         \n\t"\
        "paddw "#E", %%mm6          \n\t"\
        "paddw "#B", "#B"           \n\t"\
        "pxor %%mm7, %%mm7          \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psraw $1, "#B"             \n\t"\
        "psubw "#A", %%mm6          \n\t"\
        "paddw "MANGLE(ADD)", %%mm6 \n\t"\
        "psraw $7, %%mm6            \n\t"\
        "packuswb %%mm6, %%mm6      \n\t"\
        OP(%%mm6, (%1), A, d)            \
        "add %3, %1                 \n\t"

/* vertical filter [ 0 -1  5  5 -1  0]  */
#define QPEL_CAVSV2(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "paddw "#D", %%mm6          \n\t"\
        "pmullw "MANGLE(MUL1)", %%mm6\n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "paddw "MANGLE(ADD)", %%mm6 \n\t"\
        "psraw $3, %%mm6            \n\t"\
        "packuswb %%mm6, %%mm6      \n\t"\
        OP(%%mm6, (%1), A, d)            \
        "add %3, %1                 \n\t"

/* vertical filter [ 0 -7 42 96 -2 -1]  */
#define QPEL_CAVSV3(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "pmullw "MANGLE(MUL2)", %%mm6\n\t"\
        "movq "#D", %%mm7           \n\t"\
        "pmullw "MANGLE(MUL1)", %%mm7\n\t"\
        "psllw $3, "#B"             \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psraw $3, "#B"             \n\t"\
        "paddw %%mm7, %%mm6         \n\t"\
        "paddw "#B", %%mm6          \n\t"\
        "paddw "#E", "#E"           \n\t"\
        "pxor %%mm7, %%mm7          \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "psraw $1, "#E"             \n\t"\
        "psubw "#F", %%mm6          \n\t"\
        "paddw "MANGLE(ADD)", %%mm6 \n\t"\
        "psraw $7, %%mm6            \n\t"\
        "packuswb %%mm6, %%mm6      \n\t"\
        OP(%%mm6, (%1), A, d)            \
        "add %3, %1                 \n\t"


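/* Core of the vertical filters: the while(w--) loop covers an 8-pixel-wide
 * block as two 4-pixel columns (movd loads four pixels at a time).  Five
 * source rows are loaded and unpacked up front; each VOP then writes one
 * destination row while fetching one new source row, rotating through
 * mm0-mm5 so nothing is reloaded.  The first asm block emits 8 rows; for
 * h == 16 the second block continues from the register state the first
 * one left behind. */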
#define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\
    int w= 2;\
    src -= 2*srcStride;\
    \
    while(w--){\
      __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movd (%0), %%mm0           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm1           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm2           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm3           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm4           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpcklbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
        VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
        VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
        VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
        VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, ADD, MUL1, MUL2)\
        VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, ADD, MUL1, MUL2)\
        VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
        VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
        \
        : "+a"(src), "+c"(dst)\
        : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride)\
          NAMED_CONSTRAINTS_ADD(ADD,MUL1,MUL2)\
        : "memory"\
     );\
     if(h==16){\
        __asm__ volatile(\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
            VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, ADD, MUL1, MUL2)\
            VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, ADD, MUL1, MUL2)\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
            \
           : "+a"(src), "+c"(dst)\
           : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride)\
             NAMED_CONSTRAINTS_ADD(ADD,MUL1,MUL2)\
           : "memory"\
        );\
     }\
     src += 4-(h+5)*srcStride;\
     dst += 4-h*dstStride;\
   }

#define QPEL_CAVS(OPNAME, OP, MMX)\
static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq "MANGLE(ff_pw_5)", %%mm6\n\t"\
        "1:                         \n\t"\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movq   -1(%0), %%mm2       \n\t"\
        "movq    2(%0), %%mm4       \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "movq "MANGLE(ff_pw_4)", %%mm5\n\t"\
        "paddw %%mm5, %%mm0         \n\t"\
        "paddw %%mm5, %%mm1         \n\t"\
        "psraw $3, %%mm0            \n\t"\
        "psraw $3, %%mm1            \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm5, q)         \
        "add %3, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
          NAMED_CONSTRAINTS_ADD(ff_pw_4,ff_pw_5)\
        : "memory"\
    );\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  QPEL_CAVSVNUM(QPEL_CAVSV1,OP,ff_pw_64,ff_pw_96,ff_pw_42)      \
}\
\
static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  QPEL_CAVSVNUM(QPEL_CAVSV2,OP,ff_pw_4,ff_pw_5,ff_pw_42)        \
}\
\
static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
  QPEL_CAVSVNUM(QPEL_CAVSV3,OP,ff_pw_64,ff_pw_96,ff_pw_42)      \
}\
\
static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\

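/* The _mcXY suffixes name the quarter-pel position (X horizontal, Y
 * vertical, in quarter-pel units): _mc20 is the horizontal half-pel
 * filter, _mc01/_mc02/_mc03 the three vertical sub-pel filters. */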
#define CAVS_MC(OPNAME, SIZE, MMX) \
static void OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
}\

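/* PUT_OP stores the filtered result directly; the AVG variants first
 * average it with the existing destination pixels, using pavgusb on
 * 3DNow! and pavgb on MMXEXT (both compute the rounded byte average
 * (a + b + 1) >> 1). */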
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "    \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMXEXT_OP(a, b, temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

#endif /* (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE) */

#if HAVE_MMX_EXTERNAL
static void put_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_put_pixels8_mmx(dst, src, stride, 8);
}

static void avg_cavs_qpel8_mc00_mmx(uint8_t *dst, uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_avg_pixels8_mmx(dst, src, stride, 8);
}

static void avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, uint8_t *src,
                                       ptrdiff_t stride)
{
    ff_avg_pixels8_mmxext(dst, src, stride, 8);
}

static void put_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_put_pixels16_mmx(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_mmx(uint8_t *dst, uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_avg_pixels16_mmx(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, uint8_t *src,
                                        ptrdiff_t stride)
{
    ff_avg_pixels16_mmxext(dst, src, stride, 16);
}

static void put_cavs_qpel16_mc00_sse2(uint8_t *dst, uint8_t *src,
                                      ptrdiff_t stride)
{
    ff_put_pixels16_sse2(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_sse2(uint8_t *dst, uint8_t *src,
                                      ptrdiff_t stride)
{
    ff_avg_pixels16_sse2(dst, src, stride, 16);
}
#endif

static av_cold void cavsdsp_init_mmx(CAVSDSPContext *c,
                                     AVCodecContext *avctx)
{
#if HAVE_MMX_EXTERNAL
    c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_mmx;
    c->put_cavs_qpel_pixels_tab[1][0] = put_cavs_qpel8_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmx;
#endif

#if HAVE_MMX_INLINE
    c->cavs_idct8_add = cavs_idct8_add_mmx;
    c->idct_perm      = FF_IDCT_PERM_TRANSPOSE;
#endif /* HAVE_MMX_INLINE */
}

#define DSPFUNC(PFX, IDX, NUM, EXT)                                                       \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 2] = PFX ## _cavs_qpel ## NUM ## _mc20_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 4] = PFX ## _cavs_qpel ## NUM ## _mc01_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 8] = PFX ## _cavs_qpel ## NUM ## _mc02_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][12] = PFX ## _cavs_qpel ## NUM ## _mc03_ ## EXT; \

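/* The qpel function tables are indexed by x + 4*y in quarter-pel units, so
 * DSPFUNC fills slot 2 (_mc20) and slots 4, 8, 12 (_mc01/_mc02/_mc03);
 * slot 0, the full-pel copy, is set by the mc00 functions above. */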
#if HAVE_MMXEXT_INLINE
QPEL_CAVS(put_,        PUT_OP, mmxext)
QPEL_CAVS(avg_, AVG_MMXEXT_OP, mmxext)

CAVS_MC(put_,  8, mmxext)
CAVS_MC(put_, 16, mmxext)
CAVS_MC(avg_,  8, mmxext)
CAVS_MC(avg_, 16, mmxext)
#endif /* HAVE_MMXEXT_INLINE */

#if HAVE_AMD3DNOW_INLINE
QPEL_CAVS(put_,       PUT_OP, 3dnow)
QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)

CAVS_MC(put_,  8, 3dnow)
CAVS_MC(put_, 16, 3dnow)
CAVS_MC(avg_,  8, 3dnow)
CAVS_MC(avg_, 16, 3dnow)

static av_cold void cavsdsp_init_3dnow(CAVSDSPContext *c,
                                       AVCodecContext *avctx)
{
    DSPFUNC(put, 0, 16, 3dnow);
    DSPFUNC(put, 1,  8, 3dnow);
    DSPFUNC(avg, 0, 16, 3dnow);
    DSPFUNC(avg, 1,  8, 3dnow);
}
#endif /* HAVE_AMD3DNOW_INLINE */

av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
{
    int cpu_flags = av_get_cpu_flags();

    cavsdsp_init_mmx(c, avctx);
#if HAVE_AMD3DNOW_INLINE
    if (INLINE_AMD3DNOW(cpu_flags))
        cavsdsp_init_3dnow(c, avctx);
#endif /* HAVE_AMD3DNOW_INLINE */
#if HAVE_MMXEXT_INLINE
    if (INLINE_MMXEXT(cpu_flags)) {
        DSPFUNC(put, 0, 16, mmxext);
        DSPFUNC(put, 1,  8, mmxext);
        DSPFUNC(avg, 0, 16, mmxext);
        DSPFUNC(avg, 1,  8, mmxext);
    }
#endif
#if HAVE_MMX_EXTERNAL
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmxext;
        c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmxext;
    }
#endif
#if HAVE_SSE2_EXTERNAL
    if (EXTERNAL_SSE2(cpu_flags)) {
        c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_sse2;
        c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_sse2;
    }
#endif
}