/*
 * SIMD-optimized LPC functions
 * Copyright (c) 2007 Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/lpc.h"
/* 16-byte-aligned double-precision constants used by the SSE2 loops below */
DECLARE_ASM_CONST(16, double, pd_1)[2] = { 1.0, 1.0 };
DECLARE_ASM_CONST(16, double, pd_2)[2] = { 2.0, 2.0 };

#if HAVE_SSE2_INLINE

/* Apply a Welch window to len int32 samples, widening them to doubles in
 * w_data.  The loop walks the two symmetric halves of the buffer two
 * samples at a time: i counts up over the left half while j counts down
 * over the right half.  Odd and even lengths take different code paths
 * (unaligned vs aligned stores on the mirrored half). */
static void lpc_apply_welch_window_sse2(const int32_t *data, int len,
                                        double *w_data)
{
    double c = 2.0 / (len-1.0);
    int n2 = len>>1;
    x86_reg i = -n2*sizeof(int32_t);
    x86_reg j =  n2*sizeof(int32_t);
    __asm__ volatile(
        "movsd   %4,     %%xmm7                \n\t"
        "movapd  "MANGLE(pd_1)", %%xmm6        \n\t"
        "movapd  "MANGLE(pd_2)", %%xmm5        \n\t"
        "movlhps %%xmm7, %%xmm7                \n\t"
        "subpd   %%xmm5, %%xmm7                \n\t"
        "addsd   %%xmm6, %%xmm7                \n\t"
        "test    $1,     %5                    \n\t"
        "jz      2f                            \n\t"
#define WELCH(MOVPD, offset)\
        "1:                                    \n\t"\
        "movapd   %%xmm7,  %%xmm1              \n\t"\
        "mulpd    %%xmm1,  %%xmm1              \n\t"\
        "movapd   %%xmm6,  %%xmm0              \n\t"\
        "subpd    %%xmm1,  %%xmm0              \n\t"\
        "pshufd   $0x4e,   %%xmm0, %%xmm1      \n\t"\
        "cvtpi2pd (%3,%0), %%xmm2              \n\t"\
        "cvtpi2pd "#offset"*4(%3,%1), %%xmm3   \n\t"\
        "mulpd    %%xmm0,  %%xmm2              \n\t"\
        "mulpd    %%xmm1,  %%xmm3              \n\t"\
        "movapd   %%xmm2, (%2,%0,2)            \n\t"\
        MOVPD"    %%xmm3, "#offset"*8(%2,%1,2) \n\t"\
        "subpd    %%xmm5,  %%xmm7              \n\t"\
        "sub      $8,      %1                  \n\t"\
        "add      $8,      %0                  \n\t"\
        "jl 1b                                 \n\t"\

        WELCH("movupd", -1)
        "jmp 3f                                \n\t"
        "2:                                    \n\t"
        WELCH("movapd", -2)
        "3:                                    \n\t"
        :"+&r"(i), "+&r"(j)
        :"r"(w_data+n2), "r"(data+n2), "m"(c), "r"(len)
         NAMED_CONSTRAINTS_ARRAY_ADD(pd_1,pd_2)
         XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                           "%xmm5", "%xmm6", "%xmm7")
    );
#undef WELCH
}
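
/* For orientation, a scalar sketch of the window applied above.  This is a
 * hypothetical helper, not the C fallback from libavcodec/lpc.c: it uses the
 * textbook Welch window w[i] = 1 - (i*c - 1)^2 with c = 2/(len-1), so the
 * weight runs from 0 at the edges to 1 at the center, while the asm differs
 * in traversal order (both halves at once, center constants stepped in the
 * registers) and in its odd/even handling. */
static av_unused void lpc_apply_welch_window_ref(const int32_t *data, int len,
                                                 double *w_data)
{
    int i;
    double c = 2.0 / (len - 1.0);   /* assumes len > 1, as the asm does */

    for (i = 0; i < len; i++) {
        double x = i * c - 1.0;              /* runs from -1 to +1 */
        w_data[i] = data[i] * (1.0 - x * x); /* Welch weight times sample */
    }
}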
/* Compute autocorrelation for lags 0..lag over len windowed samples.  The
 * caller (lpc.c) zero-pads the start of the buffer, which makes the reads
 * before data[0] below safe. */
static void lpc_compute_autocorr_sse2(const double *data, int len, int lag,
                                      double *autoc)
{
    int j;

    /* movapd needs 16-byte alignment; when misaligned, start one sample
       later (a one-sample shift of the analysis window is acceptable) */
    if((x86_reg)data & 15)
        data++;

    for(j=0; j<lag; j+=2){
        x86_reg i = -len*sizeof(double);
        if(j == lag-2) {
            /* even lag: the final pass computes three lags in one sweep so
               that autoc[lag], the last of the lag+1 entries, is written */
            __asm__ volatile(
                "movsd    "MANGLE(pd_1)", %%xmm0    \n\t"
                "movsd    "MANGLE(pd_1)", %%xmm1    \n\t"
                "movsd    "MANGLE(pd_1)", %%xmm2    \n\t"
                "1:                                 \n\t"
                "movapd   (%2,%0), %%xmm3           \n\t"
                "movupd -8(%3,%0), %%xmm4           \n\t"
                "movapd   (%3,%0), %%xmm5           \n\t"
                "mulpd     %%xmm3, %%xmm4           \n\t"
                "mulpd     %%xmm3, %%xmm5           \n\t"
                "mulpd -16(%3,%0), %%xmm3           \n\t"
                "addpd     %%xmm4, %%xmm1           \n\t"
                "addpd     %%xmm5, %%xmm0           \n\t"
                "addpd     %%xmm3, %%xmm2           \n\t"
                "add       $16,    %0               \n\t"
                "jl 1b                              \n\t"
                "movhlps   %%xmm0, %%xmm3           \n\t"
                "movhlps   %%xmm1, %%xmm4           \n\t"
                "movhlps   %%xmm2, %%xmm5           \n\t"
                "addsd     %%xmm3, %%xmm0           \n\t"
                "addsd     %%xmm4, %%xmm1           \n\t"
                "addsd     %%xmm5, %%xmm2           \n\t"
                "movsd     %%xmm0,   (%1)           \n\t"
                "movsd     %%xmm1,  8(%1)           \n\t"
                "movsd     %%xmm2, 16(%1)           \n\t"
                :"+&r"(i)
                :"r"(autoc+j), "r"(data+len), "r"(data+len-j)
                 NAMED_CONSTRAINTS_ARRAY_ADD(pd_1)
                :"memory"
            );
        } else {
            __asm__ volatile(
                "movsd    "MANGLE(pd_1)", %%xmm0    \n\t"
                "movsd    "MANGLE(pd_1)", %%xmm1    \n\t"
                "1:                                 \n\t"
                "movapd   (%3,%0), %%xmm3           \n\t"
                "movupd -8(%4,%0), %%xmm4           \n\t"
                "mulpd     %%xmm3, %%xmm4           \n\t"
                "mulpd    (%4,%0), %%xmm3           \n\t"
                "addpd     %%xmm4, %%xmm1           \n\t"
                "addpd     %%xmm3, %%xmm0           \n\t"
                "add       $16,    %0               \n\t"
                "jl 1b                              \n\t"
                "movhlps   %%xmm0, %%xmm3           \n\t"
                "movhlps   %%xmm1, %%xmm4           \n\t"
                "addsd     %%xmm3, %%xmm0           \n\t"
                "addsd     %%xmm4, %%xmm1           \n\t"
                "movsd     %%xmm0, %1               \n\t"
                "movsd     %%xmm1, %2               \n\t"
                :"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1])
                :"r"(data+len), "r"(data+len-j)
                 NAMED_CONSTRAINTS_ARRAY_ADD(pd_1)
            );
        }
    }
}
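
/* A scalar model of the loops above (a hypothetical sketch for orientation,
 * not the C fallback from libavcodec/lpc.c): each lag is the inner product
 * of the signal with a shifted copy of itself, seeded with 1.0 to match the
 * pd_1 accumulator init in the asm.  The asm instead runs i over the whole
 * buffer, reading into the zero padding in front of data[0], and computes
 * two (or, on the last pass, three) lags per sweep. */
static av_unused void lpc_compute_autocorr_ref(const double *data, int len,
                                               int lag, double *autoc)
{
    int i, j;

    for (j = 0; j <= lag; j++) {
        double sum = 1.0;               /* matches the pd_1 seed above */
        for (i = j; i < len; i++)
            sum += data[i] * data[i - j];
        autoc[j] = sum;
    }
}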

#endif /* HAVE_SSE2_INLINE */

av_cold void ff_lpc_init_x86(LPCContext *c)
{
#if HAVE_SSE2_INLINE
    int cpu_flags = av_get_cpu_flags();

    if (cpu_flags & (AV_CPU_FLAG_SSE2 | AV_CPU_FLAG_SSE2SLOW)) {
        c->lpc_apply_welch_window = lpc_apply_welch_window_sse2;
        c->lpc_compute_autocorr   = lpc_compute_autocorr_sse2;
    }
#endif /* HAVE_SSE2_INLINE */
}
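
/* How this gets wired up (a sketch; the authoritative call site is
 * ff_lpc_init() in libavcodec/lpc.c): the generic init installs the C
 * implementations, then hands the context to this function so the SSE2
 * versions can take over on capable CPUs, roughly:
 *
 *     ff_lpc_init(s, blocksize, max_order, FF_LPC_TYPE_LEVINSON);
 *     // ...and near the end of ff_lpc_init():
 *     // #if ARCH_X86
 *     //     ff_lpc_init_x86(s);
 *     // #endif
 */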