/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"

/** Add the rounder in mm7 to mm3 and mm4, then shift right to normalize */
#define NORMALIZE_MMX(SHIFT)                                    \
     "paddw     %%mm7, %%mm3           \n\t" /* +bias-r */      \
     "paddw     %%mm7, %%mm4           \n\t" /* +bias-r */      \
     "psraw     "SHIFT", %%mm3         \n\t"                    \
     "psraw     "SHIFT", %%mm4         \n\t"

#define TRANSFER_DO_PACK                        \
     "packuswb  %%mm4, %%mm3           \n\t"    \
     "movq      %%mm3, (%2)            \n\t"

#define TRANSFER_DONT_PACK                      \
     "movq      %%mm3, 0(%2)           \n\t"    \
     "movq      %%mm4, 8(%2)           \n\t"

/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)  "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)

/** Load the rounder 32-r or 8-r and broadcast it to every word of mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                 \
     "movd      "ROUND", %%mm7         \n\t"    \
     "punpcklwd %%mm7, %%mm7           \n\t"    \
     "punpckldq %%mm7, %%mm7           \n\t"
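/* movd places the rounder in the low word of mm7; punpcklwd duplicates it
 * into the low dword and punpckldq fills the full register, e.g.
 * 0x001F -> 0x001F001F001F001F, so one paddw biases four pixels at once. */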

#define SHIFT2_LINE(OFF, R0,R1,R2,R3)           \
    "paddw     %%mm"#R2", %%mm"#R1"    \n\t"    \
    "movd      (%0,%3), %%mm"#R0"      \n\t"    \
    "pmullw    %%mm6, %%mm"#R1"        \n\t"    \
    "punpcklbw %%mm0, %%mm"#R0"        \n\t"    \
    "movd      (%0,%2), %%mm"#R3"      \n\t"    \
    "psubw     %%mm"#R0", %%mm"#R1"    \n\t"    \
    "punpcklbw %%mm0, %%mm"#R3"        \n\t"    \
    "paddw     %%mm7, %%mm"#R1"        \n\t"    \
    "psubw     %%mm"#R3", %%mm"#R1"    \n\t"    \
    "psraw     %4, %%mm"#R1"           \n\t"    \
    "movq      %%mm"#R1", "#OFF"(%1)   \n\t"    \
    "add       %2, %0                  \n\t"
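/* One step of the vertical (-1,9,9,-1) filter on four pixels:
 * R1 = (9*(R1+R2) - R0 - R3 + rnd) >> shift, where R0 holds the row two
 * strides back from the current src pointer and R3 the next row. The
 * register roles rotate between invocations so the loads can be pipelined. */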

DECLARE_ALIGNED_16(const uint64_t, ff_pw_9) = 0x0009000900090009ULL;

/** Sacrificing mm6 makes it possible to pipeline loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                       const uint8_t *src, x86_reg stride,
                                       int rnd, int64_t shift)
{
    __asm__ volatile(
        "mov       $3, %%"REG_c"           \n\t"
        LOAD_ROUNDER_MMX("%5")
        "movq      "MANGLE(ff_pw_9)", %%mm6 \n\t"
        "1:                                \n\t"
        "movd      (%0), %%mm2             \n\t"
        "add       %2, %0                  \n\t"
        "movd      (%0), %%mm3             \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "punpcklbw %%mm0, %%mm3            \n\t"
        SHIFT2_LINE(  0, 1, 2, 3, 4)
        SHIFT2_LINE( 24, 2, 3, 4, 1)
        SHIFT2_LINE( 48, 3, 4, 1, 2)
        SHIFT2_LINE( 72, 4, 1, 2, 3)
        SHIFT2_LINE( 96, 1, 2, 3, 4)
        SHIFT2_LINE(120, 2, 3, 4, 1)
        SHIFT2_LINE(144, 3, 4, 1, 2)
        SHIFT2_LINE(168, 4, 1, 2, 3)
        "sub       %6, %0                  \n\t"
        "add       $8, %1                  \n\t"
        "dec       %%"REG_c"               \n\t"
        "jnz 1b                            \n\t"
        : "+r"(src), "+r"(dst)
        : "r"(stride), "r"(-2*stride),
          "m"(shift), "m"(rnd), "r"(9*stride-4)
        : "%"REG_c, "memory"
    );
}
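/* Each pass of the outer loop above filters a 4-pixel-wide column over 8
 * rows; three passes fill an intermediate buffer of 8 rows of 12 int16
 * values (24-byte row stride) for the horizontal pass that follows. */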

/**
 * Data is already unpacked, so some operations can be performed directly
 * from memory.
 */
static void vc1_put_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,
                                       const int16_t *src, int rnd)
{
    int h = 8;

    src -= 1;
    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */
    __asm__ volatile(
        LOAD_ROUNDER_MMX("%4")
        "movq      "MANGLE(ff_pw_128)", %%mm6\n\t"
        "movq      "MANGLE(ff_pw_9)", %%mm5 \n\t"
        "1:                                \n\t"
        "movq      2*0+0(%1), %%mm1        \n\t"
        "movq      2*0+8(%1), %%mm2        \n\t"
        "movq      2*1+0(%1), %%mm3        \n\t"
        "movq      2*1+8(%1), %%mm4        \n\t"
        "paddw     2*3+0(%1), %%mm1        \n\t"
        "paddw     2*3+8(%1), %%mm2        \n\t"
        "paddw     2*2+0(%1), %%mm3        \n\t"
        "paddw     2*2+8(%1), %%mm4        \n\t"
        "pmullw    %%mm5, %%mm3            \n\t"
        "pmullw    %%mm5, %%mm4            \n\t"
        "psubw     %%mm1, %%mm3            \n\t"
        "psubw     %%mm2, %%mm4            \n\t"
        NORMALIZE_MMX("$7")
        /* Remove bias */
        "paddw     %%mm6, %%mm3            \n\t"
        "paddw     %%mm6, %%mm4            \n\t"
        TRANSFER_DO_PACK
        "add       $24, %1                 \n\t"
        "add       %3, %2                  \n\t"
        "decl      %0                      \n\t"
        "jnz 1b                            \n\t"
        : "+r"(h), "+r" (src),  "+r" (dst)
        : "r"(stride), "m"(rnd)
        : "memory"
    );
}
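/* Each row computes (9*(b+c) - a - d + rnd') >> 7 on 16-bit intermediates.
 * The bias is arithmetically neutral: (16*1024)>>7 == 128, so subtracting
 * it from the rounder and re-adding 128 after the shift only serves to keep
 * the signed 16-bit intermediates in range before the packuswb clamp. */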


/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for the *9 factor.
 */
static void vc1_put_shift2_mmx(uint8_t *dst, const uint8_t *src,
                               x86_reg stride, int rnd, x86_reg offset)
{
    rnd = 8-rnd;
    __asm__ volatile(
        "mov       $8, %%"REG_c"           \n\t"
        LOAD_ROUNDER_MMX("%5")
        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"
        "1:                                \n\t"
        "movd      0(%0   ), %%mm3         \n\t"
        "movd      4(%0   ), %%mm4         \n\t"
        "movd      0(%0,%2), %%mm1         \n\t"
        "movd      4(%0,%2), %%mm2         \n\t"
        "add       %2, %0                  \n\t"
        "punpcklbw %%mm0, %%mm3            \n\t"
        "punpcklbw %%mm0, %%mm4            \n\t"
        "punpcklbw %%mm0, %%mm1            \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "paddw     %%mm1, %%mm3            \n\t"
        "paddw     %%mm2, %%mm4            \n\t"
        "movd      0(%0,%3), %%mm1         \n\t"
        "movd      4(%0,%3), %%mm2         \n\t"
        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0*/
        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0*/
        "punpcklbw %%mm0, %%mm1            \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,0*/
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,0*/
        "movd      0(%0,%2), %%mm1         \n\t"
        "movd      4(%0,%2), %%mm2         \n\t"
        "punpcklbw %%mm0, %%mm1            \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "psubw     %%mm1, %%mm3            \n\t" /*-1,9,9,-1*/
        "psubw     %%mm2, %%mm4            \n\t" /*-1,9,9,-1*/
        NORMALIZE_MMX("$4")
        "packuswb  %%mm4, %%mm3            \n\t"
        "movq      %%mm3, (%1)             \n\t"
        "add       %6, %0                  \n\t"
        "add       %4, %1                  \n\t"
        "dec       %%"REG_c"               \n\t"
        "jnz 1b                            \n\t"
        : "+r"(src),  "+r"(dst)
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),
          "g"(stride-offset)
        : "%"REG_c, "memory"
    );
}

/**
 * Filter coefficients made global so that all 1/4 and 3/4 shift
 * interpolation functions can access them.
 */
DECLARE_ASM_CONST(16, uint64_t, ff_pw_53) = 0x0035003500350035ULL;
DECLARE_ASM_CONST(16, uint64_t, ff_pw_18) = 0x0012001200120012ULL;
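/* 53 (0x35) and 18 (0x12) are the two large taps of the VC-1 bicubic
 * quarter-pel filter (-4, 53, 18, -3)/64; the *3 and *4 factors are cheap
 * enough to synthesize with ff_pw_3 and a left shift in the core below. */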

/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK  Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ    "movd 1" if the data still needs unpacking, or "movq 2" if
 *                it is already unpacked; the number is the per-element byte
 *                scale used in the A1..A4 offset expressions.
 * @param A1      Address of 1st tap (beware of unpacked/packed).
 * @param A2      Address of 2nd tap
 * @param A3      Address of 3rd tap
 * @param A4      Address of 4th tap
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4)       \
     MOVQ "*0+"A1", %%mm1       \n\t"                           \
     MOVQ "*4+"A1", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    "MANGLE(ff_pw_3)", %%mm1\n\t"                   \
     "pmullw    "MANGLE(ff_pw_3)", %%mm2\n\t"                   \
     MOVQ "*0+"A2", %%mm3       \n\t"                           \
     MOVQ "*4+"A2", %%mm4       \n\t"                           \
     UNPACK("%%mm3")                                            \
     UNPACK("%%mm4")                                            \
     "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                 \
     "pmullw    %%mm6, %%mm4    \n\t" /* *18 */                 \
     "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */               \
     "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */               \
     MOVQ "*0+"A4", %%mm1       \n\t"                           \
     MOVQ "*4+"A4", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "psllw     $2, %%mm1       \n\t" /* 4* */                  \
     "psllw     $2, %%mm2       \n\t" /* 4* */                  \
     "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */            \
     "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */            \
     MOVQ "*0+"A3", %%mm1       \n\t"                           \
     MOVQ "*4+"A3", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                 \
     "pmullw    %%mm5, %%mm2    \n\t" /* *53 */                 \
     "paddw     %%mm1, %%mm3    \n\t" /* -4,53,18,-3 */         \
     "paddw     %%mm2, %%mm4    \n\t" /* -4,53,18,-3 */
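/* On exit, mm3/mm4 hold eight 16-bit sums of
 * -3*A1 + 18*A2 + 53*A3 - 4*A4, i.e. the bicubic taps before rounding. */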

/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)                    \
static void                                                             \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src,      \
                                 x86_reg src_stride,                   \
                                 int rnd, int64_t shift)                \
{                                                                       \
    int h = 8;                                                          \
    src -= src_stride;                                                  \
    __asm__ volatile(                                                       \
        LOAD_ROUNDER_MMX("%5")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"                       \
        "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"                       \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)       \
        NORMALIZE_MMX("%6")                                             \
        TRANSFER_DONT_PACK                                              \
        /* Last 3 (in fact 4) bytes on the line */                      \
        "movd      8+"A1", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "movq      %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm1, %%mm1    \n\t"                                \
        "paddw     %%mm3, %%mm1    \n\t" /* 3* */                       \
        "movd      8+"A2", %%mm3   \n\t"                                \
        DO_UNPACK("%%mm3")                                              \
        "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                      \
        "psubw     %%mm1, %%mm3    \n\t" /*18,-3 */                     \
        "movd      8+"A3", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                      \
        "paddw     %%mm1, %%mm3    \n\t" /*53,18,-3 */                  \
        "movd      8+"A4", %%mm1   \n\t"                                \
        DO_UNPACK("%%mm1")                                              \
        "psllw     $2, %%mm1       \n\t" /* 4* */                       \
        "psubw     %%mm1, %%mm3    \n\t"                                \
        "paddw     %%mm7, %%mm3    \n\t"                                \
        "psraw     %6, %%mm3       \n\t"                                \
        "movq      %%mm3, 16(%2)   \n\t"                                \
        "add       %3, %1          \n\t"                                \
        "add       $24, %2         \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(src_stride), "r"(3*src_stride),                           \
          "m"(rnd), "m"(shift)                                          \
        : "memory"                                                      \
    );                                                                  \
}

/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * Here the offset is in 16-bit units, so the parameters passed as A1 to A4
 * can be plain offsets.
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4)                    \
static void                                                             \
vc1_put_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,         \
                                 const int16_t *src, int rnd)           \
{                                                                       \
    int h = 8;                                                          \
    src -= 1;                                                           \
    rnd -= (-4+58+13-3)*256; /* Add -256 bias */                        \
    __asm__ volatile(                                                       \
        LOAD_ROUNDER_MMX("%4")                                          \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"                    \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"                    \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$7")                                             \
        /* Remove bias */                                               \
        "paddw     "MANGLE(ff_pw_128)", %%mm3  \n\t"                    \
        "paddw     "MANGLE(ff_pw_128)", %%mm4  \n\t"                    \
        TRANSFER_DO_PACK                                                \
        "add       $24, %1         \n\t"                                \
        "add       %3, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(stride), "m"(rnd)                                         \
        : "memory"                                                      \
    );                                                                  \
}

/**
 * Macro to build the 8-bit, any direction, version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4)                         \
static void                                                             \
vc1_put_## NAME ## _mmx(uint8_t *dst, const uint8_t *src,               \
                        x86_reg stride, int rnd, x86_reg offset)      \
{                                                                       \
    int h = 8;                                                          \
    src -= offset;                                                      \
    rnd = 32-rnd;                                                       \
    __asm__ volatile (                                                      \
        LOAD_ROUNDER_MMX("%6")                                          \
        "movq      "MANGLE(ff_pw_53)", %%mm5       \n\t"                \
        "movq      "MANGLE(ff_pw_18)", %%mm6       \n\t"                \
        ASMALIGN(3)                                                     \
        "1:                        \n\t"                                \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd   1", A1, A2, A3, A4)      \
        NORMALIZE_MMX("$6")                                             \
        TRANSFER_DO_PACK                                                \
        "add       %5, %1          \n\t"                                \
        "add       %5, %2          \n\t"                                \
        "decl      %0              \n\t"                                \
        "jnz 1b                    \n\t"                                \
        : "+r"(h), "+r" (src),  "+r" (dst)                              \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd)             \
        : "memory"                                                      \
    );                                                                  \
}
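/* The bicubic taps sum to 64, hence the single-pass 8-bit case above
 * normalizes with a >>6 shift and a 32-rnd rounder. */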

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)")

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)")
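/* The 3/4 shift uses the same taps as the 1/4 shift, only with the A1..A4
 * sample addresses passed in mirrored order. */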

typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);

/**
 * Interpolate fractional pel values by applying the appropriate vertical
 * then horizontal filter.
 *
 * @param  dst     Destination buffer for interpolated pels.
 * @param  src     Source buffer.
 * @param  stride  Stride for both src and dst buffers.
 * @param  hmode   Horizontal filter (expressed in quarter pixels shift).
 * @param  vmode   Vertical filter.
 * @param  rnd     Rounding bias.
 */
static void vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,
                         int hmode, int vmode, int rnd)
{
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =
         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =
         { NULL, vc1_put_hor_16b_shift1_mmx, vc1_put_hor_16b_shift2_mmx, vc1_put_hor_16b_shift3_mmx };
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =
         { NULL, vc1_put_shift1_mmx, vc1_put_shift2_mmx, vc1_put_shift3_mmx };

    __asm__ volatile(
        "pxor %%mm0, %%mm0         \n\t"
        ::: "memory"
    );
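    /* mm0 is kept zero for the whole call: every punpcklbw in the filters
     * above relies on it to widen bytes to words. */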

    if (vmode) { /* Vertical filter to apply */
        if (hmode) { /* Horizontal filter to apply, output to tmp */
            static const int shift_value[] = { 0, 5, 1, 5 };
            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;
            int              r;
            DECLARE_ALIGNED_16(int16_t, tmp[12*8]);

            r = (1<<(shift-1)) + rnd-1;
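            /* e.g. hmode == vmode == 2 (half-pel both ways):
             * shift = (1+1)>>1 = 1 and r = (1<<0) + rnd - 1 = rnd. */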
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);

            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);
            return;
        }
        else { /* No horizontal filter, output 8 lines to dst */
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);
            return;
        }
    }

    /* Horizontal mode with no vertical mode */
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);
}

void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd);

/** Macro to ease the declaration of the bicubic filter interpolation functions */
#define DECLARE_FUNCTION(a, b)                                          \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     vc1_mspel_mc(dst, src, stride, a, b, rnd);                         \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)

void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
    dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
}