/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/vc1dsp.h"
#include "fpel.h"
#include "vc1dsp.h"
#include "config.h"

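/*
 * LOOP_FILTER(EXT) declares the external assembly 4- and 8-pixel
 * loop filters for one instruction-set suffix and builds the
 * 16-pixel variants in C from two adjacent 8-pixel calls: the
 * vertical filter splits the edge at src and src + 8, the
 * horizontal one at src and src + 8 * stride.
 */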
#define LOOP_FILTER(EXT) \
void ff_vc1_v_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_v_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
\
static void vc1_v_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_v_loop_filter8_ ## EXT(src,   stride, pq); \
    ff_vc1_v_loop_filter8_ ## EXT(src+8, stride, pq); \
} \
\
static void vc1_h_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    ff_vc1_h_loop_filter8_ ## EXT(src,          stride, pq); \
    ff_vc1_h_loop_filter8_ ## EXT(src+8*stride, stride, pq); \
}

#if HAVE_YASM
LOOP_FILTER(mmxext)
LOOP_FILTER(sse2)
LOOP_FILTER(ssse3)

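/*
 * Only the horizontal loop filter has an SSE4 implementation, so it
 * is declared and wrapped by hand instead of through LOOP_FILTER().
 */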
void ff_vc1_h_loop_filter8_sse4(uint8_t *src, int stride, int pq);

static void vc1_h_loop_filter16_sse4(uint8_t *src, int stride, int pq)
{
    ff_vc1_h_loop_filter8_sse4(src,          stride, pq);
    ff_vc1_h_loop_filter8_sse4(src+8*stride, stride, pq);
}

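/*
 * The (0,0) motion-compensation case needs no sub-pel interpolation,
 * so these wrappers forward it to the plain full-pel averaging
 * routines; the rnd argument exists only to match the
 * avg_vc1_mspel_pixels_tab prototype and is ignored.
 */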
static void avg_vc1_mspel_mc00_mmxext(uint8_t *dst, const uint8_t *src,
                                      ptrdiff_t stride, int rnd)
{
    ff_avg_pixels8_mmxext(dst, src, stride, 8);
}

static void avg_vc1_mspel_mc00_16_sse2(uint8_t *dst, const uint8_t *src,
                                       ptrdiff_t stride, int rnd)
{
    ff_avg_pixels16_sse2(dst, src, stride, 16);
}

#endif /* HAVE_YASM */

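/*
 * "nornd" chroma motion compensation: VC-1's bilinear chroma filter
 * uses a different rounding mode than the shared H.264 chroma code,
 * hence these dedicated assembly versions.
 */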
void ff_put_vc1_chroma_mc8_nornd_mmx  (uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_mmxext(uint8_t *dst, uint8_t *src,
                                        int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_3dnow(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_put_vc1_chroma_mc8_nornd_ssse3(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_ssse3(uint8_t *dst, uint8_t *src,
                                       int stride, int h, int x, int y);

av_cold void ff_vc1dsp_init_x86(VC1DSPContext *dsp)
{
    int cpu_flags = av_get_cpu_flags();

    if (HAVE_6REGS && INLINE_MMX(cpu_flags))
        ff_vc1dsp_init_mmx(dsp);

    if (HAVE_6REGS && INLINE_MMXEXT(cpu_flags))
        ff_vc1dsp_init_mmxext(dsp);

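/* Install the complete set of loop filters for one instruction-set
 * suffix: the 4- and 8-pixel versions are assembly, the 16-pixel
 * ones the C wrappers defined by LOOP_FILTER() above. */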
#define ASSIGN_LF(EXT) \
        dsp->vc1_v_loop_filter4  = ff_vc1_v_loop_filter4_ ## EXT; \
        dsp->vc1_h_loop_filter4  = ff_vc1_h_loop_filter4_ ## EXT; \
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_ ## EXT; \
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_ ## EXT; \
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_ ## EXT; \
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT

#if HAVE_YASM
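    /* The branches below run from weakest to strongest instruction
     * set; each overwrites pointers set by the previous ones, so the
     * fastest version the host CPU supports is installed last. */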
    if (EXTERNAL_MMX(cpu_flags)) {
        dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_mmx;
    }
    if (EXTERNAL_AMD3DNOW(cpu_flags)) {
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_3dnow;
    }
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        ASSIGN_LF(mmxext);
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_mmxext;

        dsp->avg_vc1_mspel_pixels_tab[1][0]      = avg_vc1_mspel_mc00_mmxext;
    }
    if (EXTERNAL_SSE2(cpu_flags)) {
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_sse2;
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse2;
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_sse2;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse2;
        dsp->avg_vc1_mspel_pixels_tab[0][0]      = avg_vc1_mspel_mc00_16_sse2;
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        ASSIGN_LF(ssse3);
        dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_ssse3;
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_ssse3;
    }
    if (EXTERNAL_SSE4(cpu_flags)) {
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse4;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4;
    }
#endif /* HAVE_YASM */
}
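/*
 * Usage sketch (illustration only): the generic initializer in
 * libavcodec/vc1dsp.c installs the C implementations first and then
 * hands the context to this function for x86 overrides, roughly:
 *
 *     av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
 *     {
 *         ...                          // assign the C versions
 *         if (ARCH_X86)
 *             ff_vc1dsp_init_x86(dsp);
 *     }
 */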