;******************************************************************************
;* VP9 loop filter SIMD optimizations
;*
;* Copyright (C) 2013-2014 Clément Bœsch <u pkh me>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

%if ARCH_X86_64

SECTION_RODATA

cextern pb_3
cextern pb_80

pb_4:   times 16 db 0x04
pb_10:  times 16 db 0x10
pb_40:  times 16 db 0x40
pb_81:  times 16 db 0x81
pb_f8:  times 16 db 0xf8
pb_fe:  times 16 db 0xfe

pw_4:   times  8 dw 4
pw_8:   times  8 dw 8

; with the mix functions, two 8-bit thresholds are packed into a single
; 16-bit value; the following mask is used to splat both into the same register
mask_mix: times 8 db 0
          times 8 db 1

mask_mix84: times 8 db 0xff
            times 8 db 0x00
mask_mix48: times 8 db 0x00
            times 8 db 0xff

SECTION .text

; %1 = abs(%2-%3)
%macro ABSSUB 4 ; dst, src1 (RO), src2 (RO), tmp
    psubusb             %1, %3, %2
    psubusb             %4, %2, %3
    por                 %1, %4
%endmacro

; %1 = %1<=%2
%macro CMP_LTE 3-4 ; src/dst, cmp, tmp, pb_80
%if %0 == 4
    pxor                %1, %4
%endif
    pcmpgtb             %3, %2, %1          ; cmp > src?
    pcmpeqb             %1, %2              ; cmp == src? XXX: avoid this with a -1/+1 well placed?
    por                 %1, %3              ; cmp >= src?
%endmacro
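; Note: SSE has no unsigned byte compare, so CMP_LTE works in the signed
; domain: the optional %4 (pb_80) flips the sign bit of %1, and callers
; pre-xor the threshold %2 with 0x80 themselves. Per byte this is roughly:
;   dst = ((int8_t)(src ^ 0x80) <= (int8_t)(cmp ^ 0x80)) ? 0xff : 0x00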

; %1 = abs(%2-%3) <= %4
%macro ABSSUB_CMP 6-7 [pb_80]; dst, src1, src2, cmp, tmp1, tmp2, [pb_80]
    ABSSUB              %1, %2, %3, %6      ; dst = abs(src1-src2)
    CMP_LTE             %1, %4, %6, %7      ; dst <= cmp
%endmacro

%macro MASK_APPLY 4 ; %1=new_data/dst %2=old_data %3=mask %4=tmp
    pand                %1, %3              ; new &= mask
    pandn               %4, %3, %2          ; tmp = ~mask & old
    por                 %1, %4              ; new&mask | old&~mask
%endmacro

%macro FILTER_SUBx2_ADDx2 8 ; %1=dst %2=h/l %3=cache %4=sub1 %5=sub2 %6=add1 %7=add2 %8=rshift
    punpck%2bw          %3, %4, m0
    psubw               %1, %3
    punpck%2bw          %3, %5, m0
    psubw               %1, %3
    punpck%2bw          %3, %6, m0
    paddw               %1, %3
    punpck%2bw          %3, %7, m0
    paddw               %3, %1
    psraw               %1, %3, %8
%endmacro
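; One step of the sliding-window smoothing filter: on entry %1 holds the
; previous 16-bit window sum (the low or high byte-to-word half, selected by
; %2), and m0 must be zero for the widening punpcks. Per pixel this computes:
;   sum = sum - sub1 - sub2 + add1 + add2;  out = sum >> rshift;
; The output lands in %1 and the updated sum in %3, which is why
; FILTER_UPDATE swaps its tmp/cache register pairs between consecutive calls.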

%macro FILTER_INIT 8 ; tmp1, tmp2, cacheL, cacheH, dstp, filterid, mask, source
    FILTER%6_INIT       %1, l, %3
    FILTER%6_INIT       %2, h, %4
    packuswb            %1, %2
    MASK_APPLY          %1, %8, %7, %2
    mova                %5, %1
%endmacro

%macro FILTER_UPDATE 11-14 ; tmp1, tmp2, cacheL, cacheH, dstp, -, -, +, +, rshift, mask, [source], [preload reg + value]
%if %0 == 13 ; no source + preload
    mova                %12, %13
%elif %0 == 14 ; source + preload
    mova                %13, %14
%endif
    FILTER_SUBx2_ADDx2  %1, l, %3, %6, %7, %8, %9, %10
    FILTER_SUBx2_ADDx2  %2, h, %4, %6, %7, %8, %9, %10
    packuswb            %1, %2
%if %0 == 12 || %0 == 14
    MASK_APPLY          %1, %12, %11, %2
%else
    MASK_APPLY          %1, %5, %11, %2
%endif
    mova                %5, %1
%endmacro

%macro SRSHIFT3B_2X 4 ; reg1, reg2, [pb_10], tmp
    mova                %4, [pb_f8]
    pand                %1, %4
    pand                %2, %4
    psrlq               %1, 3
    psrlq               %2, 3
    pxor                %1, %3
    pxor                %2, %3
    psubb               %1, %3
    psubb               %2, %3
%endmacro
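; Emulates an arithmetic >>3 on packed signed bytes, which SSE lacks:
; clearing the 3 low bits (pb_f8) first makes the qword-wide psrlq safe (no
; bits leak across byte boundaries), giving a per-byte logical shift; the
; xor/sub with pb_10 then sign-extends, since after the shift the original
; sign bit sits at position 4:   r = (r ^ 0x10) - 0x10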

%macro EXTRACT_POS_NEG 3 ; i8, neg, pos
    pxor                %3, %3
    pxor                %2, %2
    pcmpgtb             %3, %1                          ; i8 < 0 mask
    psubb               %2, %1                          ; 0 - i8 (originally negative values become positive)
    pand                %2, %3                          ; negative values of i8 (but stored as +)
    pandn               %3, %1                          ; positive values of i8
%endmacro

; clip_u8(u8 + i8)
%macro SIGN_ADD 5 ; dst, u8, i8, tmp1, tmp2
    EXTRACT_POS_NEG     %3, %4, %5
    psubusb             %1, %2, %4                      ; sub the negatives
    paddusb             %1, %5                          ; add the positives
%endmacro

; clip_u8(u8 - i8)
%macro SIGN_SUB 5 ; dst, u8, i8, tmp1, tmp2
    EXTRACT_POS_NEG     %3, %4, %5
    psubusb             %1, %2, %5                      ; sub the positives
    paddusb             %1, %4                          ; add the negatives
%endmacro
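; In both macros the signed offset i8 is split into the magnitudes of its
; negative and positive parts so that the unsigned saturating
; psubusb/paddusb provide the 0..255 clipping for free.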

%macro FILTER6_INIT 3 ; %1=dst %2=h/l %3=cache
    punpck%2bw          %1, m14, m0                     ; p3: B->W
    paddw               %3, %1, %1                      ; p3*2
    paddw               %3, %1                          ; p3*3
    punpck%2bw          %1, m15, m0                     ; p2: B->W
    paddw               %3, %1                          ; p3*3 + p2
    paddw               %3, %1                          ; p3*3 + p2*2
    punpck%2bw          %1, m10, m0                     ; p1: B->W
    paddw               %3, %1                          ; p3*3 + p2*2 + p1
    punpck%2bw          %1, m11, m0                     ; p0: B->W
    paddw               %3, %1                          ; p3*3 + p2*2 + p1 + p0
    punpck%2bw          %1, m12, m0                     ; q0: B->W
    paddw               %3, %1                          ; p3*3 + p2*2 + p1 + p0 + q0
    paddw               %3, [pw_4]                      ; p3*3 + p2*2 + p1 + p0 + q0 + 4
    psraw               %1, %3, 3                       ; (p3*3 + p2*2 + p1 + p0 + q0 + 4) >> 3
%endmacro

%macro FILTER14_INIT 3 ; %1=dst %2=h/l %3=cache
    punpck%2bw          %1, m2, m0                      ; p7: B->W
    psllw               %3, %1, 3                       ; p7*8
    psubw               %3, %1                          ; p7*7
    punpck%2bw          %1, m3, m0                      ; p6: B->W
    paddw               %3, %1                          ; p7*7 + p6
    paddw               %3, %1                          ; p7*7 + p6*2
    punpck%2bw          %1, m8, m0                      ; p5: B->W
    paddw               %3, %1                          ; p7*7 + p6*2 + p5
    punpck%2bw          %1, m9, m0                      ; p4: B->W
    paddw               %3, %1                          ; p7*7 + p6*2 + p5 + p4
    punpck%2bw          %1, m14, m0                     ; p3: B->W
    paddw               %3, %1                          ; p7*7 + p6*2 + p5 + p4 + p3
    punpck%2bw          %1, m15, m0                     ; p2: B->W
    paddw               %3, %1                          ; p7*7 + p6*2 + p5 + .. + p2
    punpck%2bw          %1, m10, m0                     ; p1: B->W
    paddw               %3, %1                          ; p7*7 + p6*2 + p5 + .. + p1
    punpck%2bw          %1, m11, m0                     ; p0: B->W
    paddw               %3, %1                          ; p7*7 + p6*2 + p5 + .. + p0
    punpck%2bw          %1, m12, m0                     ; q0: B->W
    paddw               %3, %1                          ; p7*7 + p6*2 + p5 + .. + p0 + q0
    paddw               %3, [pw_8]                      ; p7*7 + p6*2 + p5 + .. + p0 + q0 + 8
    psraw               %1, %3, 4                       ; (p7*7 + p6*2 + p5 + .. + p0 + q0 + 8) >> 4
%endmacro
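; FILTER6_INIT/FILTER14_INIT only build the first window sum (and output) of
; the 6- and 14-tap smoothers; every following output is then produced
; incrementally by FILTER_UPDATE through FILTER_SUBx2_ADDx2 above.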

%macro TRANSPOSE16x16B 17
    mova %17, m%16
    SBUTTERFLY bw,  %1,  %2,  %16
    SBUTTERFLY bw,  %3,  %4,  %16
    SBUTTERFLY bw,  %5,  %6,  %16
    SBUTTERFLY bw,  %7,  %8,  %16
    SBUTTERFLY bw,  %9,  %10, %16
    SBUTTERFLY bw,  %11, %12, %16
    SBUTTERFLY bw,  %13, %14, %16
    mova m%16,  %17
    mova  %17, m%14
    SBUTTERFLY bw,  %15, %16, %14
    SBUTTERFLY wd,  %1,  %3,  %14
    SBUTTERFLY wd,  %2,  %4,  %14
    SBUTTERFLY wd,  %5,  %7,  %14
    SBUTTERFLY wd,  %6,  %8,  %14
    SBUTTERFLY wd,  %9,  %11, %14
    SBUTTERFLY wd,  %10, %12, %14
    SBUTTERFLY wd,  %13, %15, %14
    mova m%14,  %17
    mova  %17, m%12
    SBUTTERFLY wd,  %14, %16, %12
    SBUTTERFLY dq,  %1,  %5,  %12
    SBUTTERFLY dq,  %2,  %6,  %12
    SBUTTERFLY dq,  %3,  %7,  %12
    SBUTTERFLY dq,  %4,  %8,  %12
    SBUTTERFLY dq,  %9,  %13, %12
    SBUTTERFLY dq,  %10, %14, %12
    SBUTTERFLY dq,  %11, %15, %12
    mova m%12, %17
    mova  %17, m%8
    SBUTTERFLY dq,  %12, %16, %8
    SBUTTERFLY qdq, %1,  %9,  %8
    SBUTTERFLY qdq, %2,  %10, %8
    SBUTTERFLY qdq, %3,  %11, %8
    SBUTTERFLY qdq, %4,  %12, %8
    SBUTTERFLY qdq, %5,  %13, %8
    SBUTTERFLY qdq, %6,  %14, %8
    SBUTTERFLY qdq, %7,  %15, %8
    mova m%8, %17
    mova %17, m%1
    SBUTTERFLY qdq, %8,  %16, %1
    mova m%1, %17
    SWAP %2,  %9
    SWAP %3,  %5
    SWAP %4,  %13
    SWAP %6,  %11
    SWAP %8,  %15
    SWAP %12, %14
%endmacro
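; A full 16x16 byte transpose needs a 17th storage slot for the SBUTTERFLY
; temporaries: %17 is a 16-byte memory slot (here [rsp]) into which whichever
; register currently serves as scratch is spilled for each butterfly pass.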

; transpose 16 half lines (high part) to 8 full centered lines
%macro TRANSPOSE16x8B 16
    punpcklbw   m%1,  m%2
    punpcklbw   m%3,  m%4
    punpcklbw   m%5,  m%6
    punpcklbw   m%7,  m%8
    punpcklbw   m%9,  m%10
    punpcklbw   m%11, m%12
    punpcklbw   m%13, m%14
    punpcklbw   m%15, m%16
    SBUTTERFLY  wd,  %1,  %3,  %2
    SBUTTERFLY  wd,  %5,  %7,  %2
    SBUTTERFLY  wd,  %9,  %11, %2
    SBUTTERFLY  wd,  %13, %15, %2
    SBUTTERFLY  dq,  %1,  %5,  %2
    SBUTTERFLY  dq,  %3,  %7,  %2
    SBUTTERFLY  dq,  %9,  %13, %2
    SBUTTERFLY  dq,  %11, %15, %2
    SBUTTERFLY  qdq, %1,  %9,  %2
    SBUTTERFLY  qdq, %3,  %11, %2
    SBUTTERFLY  qdq, %5,  %13, %2
    SBUTTERFLY  qdq, %7,  %15, %2
    SWAP %5, %1
    SWAP %6, %9
    SWAP %7, %1
    SWAP %8, %13
    SWAP %9, %3
    SWAP %10, %11
    SWAP %11, %1
    SWAP %12, %15
%endmacro

%macro DEFINE_REAL_P7_TO_Q7 0-1 0
%define P7 dst1q + 2*mstrideq  + %1
%define P6 dst1q +   mstrideq  + %1
%define P5 dst1q               + %1
%define P4 dst1q +    strideq  + %1
%define P3 dstq  + 4*mstrideq  + %1
%define P2 dstq  +   mstride3q + %1
%define P1 dstq  + 2*mstrideq  + %1
%define P0 dstq  +   mstrideq  + %1
%define Q0 dstq                + %1
%define Q1 dstq  +   strideq   + %1
%define Q2 dstq  + 2*strideq   + %1
%define Q3 dstq  +   stride3q  + %1
%define Q4 dstq  + 4*strideq   + %1
%define Q5 dst2q + mstrideq    + %1
%define Q6 dst2q               + %1
%define Q7 dst2q +  strideq    + %1
%endmacro
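; Rows p7..q7 span dst-8*stride to dst+7*stride, which is further than x86
; addressing can reach from a single base with the available stride scales,
; so three bases are used: dst1q = dst - 6*stride, dstq itself, and
; dst2q = dst + 6*stride.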

; ..............AB -> AAAAAAAABBBBBBBB
%macro SPLATB_MIX 1-2 [mask_mix]
%if cpuflag(ssse3)
    pshufb     %1, %2
%else
    punpcklbw  %1, %1
    punpcklqdq %1, %1
    pshuflw    %1, %1, 0
    pshufhw    %1, %1, 0x55
%endif
%endmacro
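; SSE2 fallback, traced from input bytes <A B x x ...>:
;   punpcklbw    -> A A B B x x ...
;   punpcklqdq   -> A A B B x x x x  A A B B x x x x
;   pshuflw 0    -> low  qword = A A A A A A A A
;   pshufhw 0x55 -> high qword = B B B B B B B B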

%macro LOOPFILTER 2 ; %1=v/h %2=size1
    mov mstrideq, strideq
    neg mstrideq

    lea stride3q, [strideq+2*strideq]
    mov mstride3q, stride3q
    neg mstride3q

%ifidn %1, h
%if %2 > 16
%define movx movh
    lea dstq, [dstq + 8*strideq - 4]
%else
%define movx movu
    lea dstq, [dstq + 8*strideq - 8] ; go from top center (h pos) to center left (v pos)
%endif
%endif

    lea dst1q, [dstq + 2*mstride3q]                         ; dst1q = &dst[stride * -6]
    lea dst2q, [dstq + 2* stride3q]                         ; dst2q = &dst[stride * +6]

    DEFINE_REAL_P7_TO_Q7

%ifidn %1, h
    movx                    m0, [P7]
    movx                    m1, [P6]
    movx                    m2, [P5]
    movx                    m3, [P4]
    movx                    m4, [P3]
    movx                    m5, [P2]
    movx                    m6, [P1]
    movx                    m7, [P0]
    movx                    m8, [Q0]
    movx                    m9, [Q1]
    movx                   m10, [Q2]
    movx                   m11, [Q3]
    movx                   m12, [Q4]
    movx                   m13, [Q5]
    movx                   m14, [Q6]
    movx                   m15, [Q7]
%define P7 rsp +   0
%define P6 rsp +  16
%define P5 rsp +  32
%define P4 rsp +  48
%define P3 rsp +  64
%define P2 rsp +  80
%define P1 rsp +  96
%define P0 rsp + 112
%define Q0 rsp + 128
%define Q1 rsp + 144
%define Q2 rsp + 160
%define Q3 rsp + 176
%define Q4 rsp + 192
%define Q5 rsp + 208
%define Q6 rsp + 224
%define Q7 rsp + 240

%if %2 == 16
    TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]
    mova           [P7],  m0
    mova           [P6],  m1
    mova           [P5],  m2
    mova           [P4],  m3
%else
    TRANSPOSE16x8B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
%endif
    mova           [P3],  m4
    mova           [P2],  m5
    mova           [P1],  m6
    mova           [P0],  m7
    mova           [Q0],  m8
    mova           [Q1],  m9
    mova           [Q2], m10
    mova           [Q3], m11
%if %2 == 16
    mova           [Q4], m12
    mova           [Q5], m13
    mova           [Q6], m14
    mova           [Q7], m15
%endif
%endif

    ; calc fm mask
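    ; fm is the standard VP9 edge-filter decision, evaluated per column:
    ;   fm = abs(p0-q0)*2 + abs(p1-q1)/2 <= E
    ;        && abs(p3-p2) <= I && abs(p2-p1) <= I && abs(p1-p0) <= I
    ;        && abs(q1-q0) <= I && abs(q2-q1) <= I && abs(q3-q2) <= I
    ; all byte comparisons below run in the signed domain, hence the 0x80 xors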
%if %2 == 16
%if cpuflag(ssse3)
    pxor                m0, m0
%endif
    SPLATB_REG          m2, I, m0                       ; I I I I ...
    SPLATB_REG          m3, E, m0                       ; E E E E ...
%else
%if cpuflag(ssse3)
    mova                m0, [mask_mix]
%endif
    movd                m2, Id
    movd                m3, Ed
    SPLATB_MIX          m2, m0
    SPLATB_MIX          m3, m0
%endif
    mova                m0, [pb_80]
    pxor                m2, m0
    pxor                m3, m0
%ifidn %1, v
    mova                m8, [P3]
    mova                m9, [P2]
    mova               m10, [P1]
    mova               m11, [P0]
    mova               m12, [Q0]
    mova               m13, [Q1]
    mova               m14, [Q2]
    mova               m15, [Q3]
%else
    ; In case of horizontal, P3..Q3 are already present in some registers due
    ; to the previous transpose, so we just swap registers.
    SWAP                 8,  4, 12
    SWAP                 9,  5, 13
    SWAP                10,  6, 14
    SWAP                11,  7, 15
%endif
    ABSSUB_CMP          m5,  m8,  m9, m2, m6, m7, m0    ; m5 = abs(p3-p2) <= I
    ABSSUB_CMP          m1,  m9, m10, m2, m6, m7, m0    ; m1 = abs(p2-p1) <= I
    pand                m5, m1
    ABSSUB_CMP          m1, m10, m11, m2, m6, m7, m0    ; m1 = abs(p1-p0) <= I
    pand                m5, m1
    ABSSUB_CMP          m1, m12, m13, m2, m6, m7, m0    ; m1 = abs(q1-q0) <= I
    pand                m5, m1
    ABSSUB_CMP          m1, m13, m14, m2, m6, m7, m0    ; m1 = abs(q2-q1) <= I
    pand                m5, m1
    ABSSUB_CMP          m1, m14, m15, m2, m6, m7, m0    ; m1 = abs(q3-q2) <= I
    pand                m5, m1
    ABSSUB              m1, m11, m12, m7                ; abs(p0-q0)
    paddusb             m1, m1                          ; abs(p0-q0) * 2
    ABSSUB              m2, m10, m13, m7                ; abs(p1-q1)
    pand                m2, [pb_fe]                     ; drop lsb so shift can work
    psrlq               m2, 1                           ; abs(p1-q1)/2
    paddusb             m1, m2                          ; abs(p0-q0)*2 + abs(p1-q1)/2
    pxor                m1, m0
    pcmpgtb             m4, m3, m1                      ; E > X?
    pcmpeqb             m3, m1                          ; E == X?
    por                 m3, m4                          ; E >= X?
    pand                m3, m5                          ; fm final value

    ; (m3: fm, m8..15: p3 p2 p1 p0 q0 q1 q2 q3)
    ; calc flat8in (if not 44_16) and hev masks
    mova                m6, [pb_81]                     ; [1 1 1 1 ...] ^ 0x80
%if %2 != 44
    ABSSUB_CMP          m2, m8, m11, m6, m4, m5         ; abs(p3 - p0) <= 1
    mova                m8, [pb_80]
    ABSSUB_CMP          m1, m9, m11, m6, m4, m5, m8     ; abs(p2 - p0) <= 1
    pand                m2, m1
    ABSSUB              m4, m10, m11, m5                ; abs(p1 - p0)
%if %2 == 16
%if cpuflag(ssse3)
    pxor                m0, m0
%endif
    SPLATB_REG          m7, H, m0                       ; H H H H ...
%else
    movd                m7, Hd
    SPLATB_MIX          m7
%endif
    pxor                m7, m8
    pxor                m4, m8
    pcmpgtb             m0, m4, m7                      ; abs(p1 - p0) > H (1/2 hev condition)
    CMP_LTE             m4, m6, m5                      ; abs(p1 - p0) <= 1
    pand                m2, m4                          ; (flat8in)
    ABSSUB              m4, m13, m12, m1                ; abs(q1 - q0)
    pxor                m4, m8
    pcmpgtb             m5, m4, m7                      ; abs(q1 - q0) > H (2/2 hev condition)
    por                 m0, m5                          ; hev final value
    CMP_LTE             m4, m6, m5                      ; abs(q1 - q0) <= 1
    pand                m2, m4                          ; (flat8in)
    ABSSUB_CMP          m1, m14, m12, m6, m4, m5, m8    ; abs(q2 - q0) <= 1
    pand                m2, m1
    ABSSUB_CMP          m1, m15, m12, m6, m4, m5, m8    ; abs(q3 - q0) <= 1
    pand                m2, m1                          ; flat8in final value
%if %2 == 84 || %2 == 48
    pand                m2, [mask_mix%2]
%endif
%else
    mova                m6, [pb_80]
    movd                m7, Hd
    SPLATB_MIX          m7
    pxor                m7, m6
    ABSSUB              m4, m10, m11, m1                ; abs(p1 - p0)
    pxor                m4, m6
    pcmpgtb             m0, m4, m7                      ; abs(p1 - p0) > H (1/2 hev condition)
    ABSSUB              m4, m13, m12, m1                ; abs(q1 - q0)
    pxor                m4, m6
    pcmpgtb             m5, m4, m7                      ; abs(q1 - q0) > H (2/2 hev condition)
    por                 m0, m5                          ; hev final value
%endif

%if %2 == 16
    ; (m0: hev, m2: flat8in, m3: fm, m6: pb_81, m9..15: p2 p1 p0 q0 q1 q2 q3)
    ; calc flat8out mask
    mova                m8, [P7]
    mova                m9, [P6]
    ABSSUB_CMP          m1, m8, m11, m6, m4, m5         ; abs(p7 - p0) <= 1
    ABSSUB_CMP          m7, m9, m11, m6, m4, m5         ; abs(p6 - p0) <= 1
    pand                m1, m7
    mova                m8, [P5]
    mova                m9, [P4]
    ABSSUB_CMP          m7, m8, m11, m6, m4, m5         ; abs(p5 - p0) <= 1
    pand                m1, m7
    ABSSUB_CMP          m7, m9, m11, m6, m4, m5         ; abs(p4 - p0) <= 1
    pand                m1, m7
    mova                m14, [Q4]
    mova                m15, [Q5]
    ABSSUB_CMP          m7, m14, m12, m6, m4, m5        ; abs(q4 - q0) <= 1
    pand                m1, m7
    ABSSUB_CMP          m7, m15, m12, m6, m4, m5        ; abs(q5 - q0) <= 1
    pand                m1, m7
    mova                m14, [Q6]
    mova                m15, [Q7]
    ABSSUB_CMP          m7, m14, m12, m6, m4, m5        ; abs(q6 - q0) <= 1
    pand                m1, m7
    ABSSUB_CMP          m7, m15, m12, m6, m4, m5        ; abs(q7 - q0) <= 1
    pand                m1, m7                          ; flat8out final value
%endif

    ; if (fm) {
    ;     if (out && in) filter_14()
    ;     else if (in)   filter_6()
    ;     else if (hev)  filter_2()
    ;     else           filter_4()
    ; }
    ;
    ; f14:                                                                            fm &  out &  in
    ; f6:  fm & ~f14 & in        => fm & ~(out & in) & in                          => fm & ~out &  in
    ; f2:  fm & ~f14 & ~f6 & hev => fm & ~(out & in) & ~(~out & in) & hev          => fm &  ~in &  hev
    ; f4:  fm & ~f14 & ~f6 & ~f2 => fm & ~(out & in) & ~(~out & in) & ~(~in & hev) => fm &  ~in & ~hev

    ; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m8..15: p5 p4 p1 p0 q0 q1 q6 q7)
    ; filter2()
%if %2 != 44
    mova                m6, [pb_80]                     ; already in m6 if 44_16
%endif
    pxor                m15, m12, m6                    ; q0 ^ 0x80
    pxor                m14, m11, m6                    ; p0 ^ 0x80
    psubsb              m15, m14                        ; (signed) q0 - p0
    pxor                m4, m10, m6                     ; p1 ^ 0x80
    pxor                m5, m13, m6                     ; q1 ^ 0x80
    psubsb              m4, m5                          ; (signed) p1 - q1
    paddsb              m4, m15                         ;   (q0 - p0) + (p1 - q1)
    paddsb              m4, m15                         ; 2*(q0 - p0) + (p1 - q1)
    paddsb              m4, m15                         ; 3*(q0 - p0) + (p1 - q1)
    paddsb              m6, m4, [pb_4]                  ; m6: f1 = clip(f + 4, 127)
    paddsb              m4, [pb_3]                      ; m4: f2 = clip(f + 3, 127)
    mova                m14, [pb_10]                    ; will be reused in filter4()
    SRSHIFT3B_2X        m6, m4, m14, m7                 ; f1 and f2 sign byte shift by 3
    SIGN_SUB            m7, m12, m6, m5, m9             ; m7 = q0 - f1
    SIGN_ADD            m8, m11, m4, m5, m9             ; m8 = p0 + f2
%if %2 != 44
    pandn               m6, m2, m3                      ;  ~mask(in) & mask(fm)
    pand                m6, m0                          ; (~mask(in) & mask(fm)) & mask(hev)
%else
    pand                m6, m3, m0
%endif
    MASK_APPLY          m7, m12, m6, m5                 ; m7 = filter2(q0) & mask / we write it in filter4()
    MASK_APPLY          m8, m11, m6, m5                 ; m8 = filter2(p0) & mask / we write it in filter4()

    ; (m0: hev, [m1: flat8out], [m2: flat8in], m3: fm, m7..m8: q0' p0', m10..13: p1 p0 q0 q1, m14: pb_10, m15: q0-p0)
    ; filter4()
    mova                m4, m15
    paddsb              m15, m4                         ; 2 * (q0 - p0)
    paddsb              m15, m4                         ; 3 * (q0 - p0)
    paddsb              m6, m15, [pb_4]                 ; m6:  f1 = clip(f + 4, 127)
    paddsb              m15, [pb_3]                     ; m15: f2 = clip(f + 3, 127)
    SRSHIFT3B_2X        m6, m15, m14, m9                ; f1 and f2 sign byte shift by 3
%if %2 != 44
%define p0tmp m7
%define q0tmp m9
    pandn               m5, m2, m3                      ;               ~mask(in) & mask(fm)
    pandn               m0, m5                          ; ~mask(hev) & (~mask(in) & mask(fm))
%else
%define p0tmp m1
%define q0tmp m2
    pandn               m0, m3
%endif
    SIGN_SUB            q0tmp, m12, m6, m4, m14         ; q0 - f1
    MASK_APPLY          q0tmp, m7, m0, m5               ; filter4(q0) & mask
    mova                [Q0], q0tmp
    SIGN_ADD            p0tmp, m11, m15, m4, m14        ; p0 + f2
    MASK_APPLY          p0tmp, m8, m0, m5               ; filter4(p0) & mask
    mova                [P0], p0tmp
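    ; f = (f1 + 1) >> 1 on signed bytes, using the unsigned pavgb: bias f1
    ; by 0x80, average with zero (pavgb computes (x + 1) >> 1), then
    ; subtract 0x40 to remove the shifted-down bias.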
    paddb               m6, [pb_80]                     ;
    pxor                m8, m8                          ;   f=(f1+1)>>1
    pavgb               m6, m8                          ;
    psubb               m6, [pb_40]                     ;
    SIGN_ADD            m7, m10, m6, m8, m9             ; p1 + f
    SIGN_SUB            m4, m13, m6, m8, m9             ; q1 - f
    MASK_APPLY          m7, m10, m0, m14                ; m7 = filter4(p1)
    MASK_APPLY          m4, m13, m0, m14                ; m4 = filter4(q1)
    mova                [P1], m7
    mova                [Q1], m4

    ; ([m1: flat8out], m2: flat8in, m3: fm, m10..13: p1 p0 q0 q1)
    ; filter6()
%if %2 != 44
    pxor                m0, m0
%if %2 > 16
    pand                m3, m2
%else
    pand                m2, m3                          ;               mask(fm) & mask(in)
    pandn               m3, m1, m2                      ; ~mask(out) & (mask(fm) & mask(in))
%endif
    mova               m14, [P3]
    mova               m15, [P2]
    mova                m8, [Q2]
    mova                m9, [Q3]
    FILTER_INIT         m4, m5, m6, m7, [P2], 6,                     m3, m15    ; [p2]
    FILTER_UPDATE       m6, m7, m4, m5, [P1], m14, m15, m10, m13, 3, m3         ; [p1] -p3 -p2 +p1 +q1
    FILTER_UPDATE       m4, m5, m6, m7, [P0], m14, m10, m11,  m8, 3, m3         ; [p0] -p3 -p1 +p0 +q2
    FILTER_UPDATE       m6, m7, m4, m5, [Q0], m14, m11, m12,  m9, 3, m3         ; [q0] -p3 -p0 +q0 +q3
    FILTER_UPDATE       m4, m5, m6, m7, [Q1], m15, m12, m13,  m9, 3, m3         ; [q1] -p2 -q0 +q1 +q3
    FILTER_UPDATE       m6, m7, m4, m5, [Q2], m10, m13,  m8,  m9, 3, m3,  m8    ; [q2] -p1 -q1 +q2 +q3
%endif

    ; (m0: 0, [m1: flat8out], m2: fm & flat8in, m8..15: q2 q3 p1 p0 q0 q1 p3 p2)
    ; filter14()
    ;
    ;                            m2  m3  m8  m9 m14 m15 m10 m11 m12 m13
    ;
    ;                                    q2  q3  p3  p2  p1  p0  q0  q1
    ; p6  -7                     p7  p6  p5  p4   .   .   .   .   .
    ; p5  -6  -p7 -p6 +p5 +q1     .   .   .                           .
    ; p4  -5  -p7 -p5 +p4 +q2     .       .   .                      q2
    ; p3  -4  -p7 -p4 +p3 +q3     .           .   .                  q3
    ; p2  -3  -p7 -p3 +p2 +q4     .               .   .              q4
    ; p1  -2  -p7 -p2 +p1 +q5     .                   .   .          q5
    ; p0  -1  -p7 -p1 +p0 +q6     .                       .   .      q6
    ; q0  +0  -p7 -p0 +q0 +q7     .                           .   .  q7
    ; q1  +1  -p6 -q0 +q1 +q7    q1   .                           .   .
    ; q2  +2  -p5 -q1 +q2 +q7     .  q2   .                           .
    ; q3  +3  -p4 -q2 +q3 +q7         .  q3   .                       .
    ; q4  +4  -p3 -q3 +q4 +q7             .  q4   .                   .
    ; q5  +5  -p2 -q4 +q5 +q7                 .  q5   .               .
    ; q6  +6  -p1 -q5 +q6 +q7                     .  q6   .           .

%if %2 == 16
    pand            m1, m2                                                              ; mask(out) & (mask(fm) & mask(in))
    mova            m2, [P7]
    mova            m3, [P6]
    mova            m8, [P5]
    mova            m9, [P4]
    FILTER_INIT     m4, m5, m6, m7, [P6],  14,                   m1,  m3
    FILTER_UPDATE   m6, m7, m4, m5, [P5],  m2,  m3,  m8, m13, 4, m1,  m8                ; [p5] -p7 -p6 +p5 +q1
    FILTER_UPDATE   m4, m5, m6, m7, [P4],  m2,  m8,  m9, m13, 4, m1,  m9, m13, [Q2]     ; [p4] -p7 -p5 +p4 +q2
    FILTER_UPDATE   m6, m7, m4, m5, [P3],  m2,  m9, m14, m13, 4, m1, m14, m13, [Q3]     ; [p3] -p7 -p4 +p3 +q3
    FILTER_UPDATE   m4, m5, m6, m7, [P2],  m2, m14, m15, m13, 4, m1,      m13, [Q4]     ; [p2] -p7 -p3 +p2 +q4
    FILTER_UPDATE   m6, m7, m4, m5, [P1],  m2, m15, m10, m13, 4, m1,      m13, [Q5]     ; [p1] -p7 -p2 +p1 +q5
    FILTER_UPDATE   m4, m5, m6, m7, [P0],  m2, m10, m11, m13, 4, m1,      m13, [Q6]     ; [p0] -p7 -p1 +p0 +q6
    FILTER_UPDATE   m6, m7, m4, m5, [Q0],  m2, m11, m12, m13, 4, m1,      m13, [Q7]     ; [q0] -p7 -p0 +q0 +q7
    FILTER_UPDATE   m4, m5, m6, m7, [Q1],  m3, m12,  m2, m13, 4, m1,       m2, [Q1]     ; [q1] -p6 -q0 +q1 +q7
    FILTER_UPDATE   m6, m7, m4, m5, [Q2],  m8,  m2,  m3, m13, 4, m1,       m3, [Q2]     ; [q2] -p5 -q1 +q2 +q7
    FILTER_UPDATE   m4, m5, m6, m7, [Q3],  m9,  m3,  m8, m13, 4, m1,  m8,  m8, [Q3]     ; [q3] -p4 -q2 +q3 +q7
    FILTER_UPDATE   m6, m7, m4, m5, [Q4], m14,  m8,  m9, m13, 4, m1,  m9,  m9, [Q4]     ; [q4] -p3 -q3 +q4 +q7
    FILTER_UPDATE   m4, m5, m6, m7, [Q5], m15,  m9, m14, m13, 4, m1, m14, m14, [Q5]     ; [q5] -p2 -q4 +q5 +q7
    FILTER_UPDATE   m6, m7, m4, m5, [Q6], m10, m14, m15, m13, 4, m1, m15, m15, [Q6]     ; [q6] -p1 -q5 +q6 +q7
%endif

%ifidn %1, h
%if %2 == 16
    mova                    m0, [P7]
    mova                    m1, [P6]
    mova                    m2, [P5]
    mova                    m3, [P4]
    mova                    m4, [P3]
    mova                    m5, [P2]
    mova                    m6, [P1]
    mova                    m7, [P0]
    mova                    m8, [Q0]
    mova                    m9, [Q1]
    mova                   m10, [Q2]
    mova                   m11, [Q3]
    mova                   m12, [Q4]
    mova                   m13, [Q5]
    mova                   m14, [Q6]
    mova                   m15, [Q7]
    TRANSPOSE16x16B 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, [rsp]
    DEFINE_REAL_P7_TO_Q7
    movu  [P7],  m0
    movu  [P6],  m1
    movu  [P5],  m2
    movu  [P4],  m3
    movu  [P3],  m4
    movu  [P2],  m5
    movu  [P1],  m6
    movu  [P0],  m7
    movu  [Q0],  m8
    movu  [Q1],  m9
    movu  [Q2], m10
    movu  [Q3], m11
    movu  [Q4], m12
    movu  [Q5], m13
    movu  [Q6], m14
    movu  [Q7], m15
%elif %2 == 44
    SWAP 0, 7   ; m0 = p1
    SWAP 3, 4   ; m3 = q1
    DEFINE_REAL_P7_TO_Q7 2
    SBUTTERFLY  bw, 0, 1, 8
    SBUTTERFLY  bw, 2, 3, 8
    SBUTTERFLY  wd, 0, 2, 8
    SBUTTERFLY  wd, 1, 3, 8
    SBUTTERFLY  dq, 0, 4, 8
    SBUTTERFLY  dq, 1, 5, 8
    SBUTTERFLY  dq, 2, 6, 8
    SBUTTERFLY  dq, 3, 7, 8
    movd  [P7], m0
    punpckhqdq m0, m8
    movd  [P6], m0
    movd  [Q0], m1
    punpckhqdq  m1, m9
    movd  [Q1], m1
    movd  [P3], m2
    punpckhqdq  m2, m10
    movd  [P2], m2
    movd  [Q4], m3
    punpckhqdq m3, m11
    movd  [Q5], m3
    movd  [P5], m4
    punpckhqdq m4, m12
    movd  [P4], m4
    movd  [Q2], m5
    punpckhqdq m5, m13
    movd  [Q3], m5
    movd  [P1], m6
    punpckhqdq m6, m14
    movd  [P0], m6
    movd  [Q6], m7
    punpckhqdq m7, m8
    movd  [Q7], m7
%else
    ; the following code does a transpose of 8 full lines to 16 half
    ; lines (high part). It is inlined to avoid the need for a staging area
    mova                    m0, [P3]
    mova                    m1, [P2]
    mova                    m2, [P1]
    mova                    m3, [P0]
    mova                    m4, [Q0]
    mova                    m5, [Q1]
    mova                    m6, [Q2]
    mova                    m7, [Q3]
    DEFINE_REAL_P7_TO_Q7
    SBUTTERFLY  bw,  0,  1, 8
    SBUTTERFLY  bw,  2,  3, 8
    SBUTTERFLY  bw,  4,  5, 8
    SBUTTERFLY  bw,  6,  7, 8
    SBUTTERFLY  wd,  0,  2, 8
    SBUTTERFLY  wd,  1,  3, 8
    SBUTTERFLY  wd,  4,  6, 8
    SBUTTERFLY  wd,  5,  7, 8
    SBUTTERFLY  dq,  0,  4, 8
    SBUTTERFLY  dq,  1,  5, 8
    SBUTTERFLY  dq,  2,  6, 8
    SBUTTERFLY  dq,  3,  7, 8
    movh  [P7], m0
    punpckhqdq m0, m8
    movh  [P6], m0
    movh  [Q0], m1
    punpckhqdq  m1, m9
    movh  [Q1], m1
    movh  [P3], m2
    punpckhqdq  m2, m10
    movh  [P2], m2
    movh  [Q4], m3
    punpckhqdq m3, m11
    movh  [Q5], m3
    movh  [P5], m4
    punpckhqdq m4, m12
    movh  [P4], m4
    movh  [Q2], m5
    punpckhqdq m5, m13
    movh  [Q3], m5
    movh  [P1], m6
    punpckhqdq m6, m14
    movh  [P0], m6
    movh  [Q6], m7
    punpckhqdq m7, m8
    movh  [Q7], m7
%endif
%endif

    RET
%endmacro

%macro LPF_16_VH 2
INIT_XMM %2
cglobal vp9_loop_filter_v_%1_16, 5,10,16,      dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3
    LOOPFILTER v, %1
cglobal vp9_loop_filter_h_%1_16, 5,10,16, 256, dst, stride, E, I, H, mstride, dst1, dst2, stride3, mstride3
    LOOPFILTER h, %1
%endmacro
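; Each size gets a vertical and a horizontal entry point; only the
; horizontal versions reserve stack space (256 bytes = 16 rows of 16 bytes,
; the P7..Q7 slots above) as the staging area for the transposed lines.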

%macro LPF_16_VH_ALL_OPTS 1
LPF_16_VH %1, sse2
LPF_16_VH %1, ssse3
LPF_16_VH %1, avx
%endmacro

LPF_16_VH_ALL_OPTS 16
LPF_16_VH_ALL_OPTS 44
LPF_16_VH_ALL_OPTS 48
LPF_16_VH_ALL_OPTS 84
LPF_16_VH_ALL_OPTS 88

%endif ; x86-64
818