;******************************************************************************
;* MMX optimized DSP utils
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86inc.asm"

section .text align=16
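; PSWAPD swaps the two 32-bit halves of an MMX register. pshufw (available
; with SSE/MMX2) does it in one instruction; plain 3DNow! has no such shuffle,
; so it is emulated with a shift and an unpack. Extended 3DNow! has a native
; pswapd, which the 3dn2 build below uses directly.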
%macro PSWAPD_SSE 2
    pshufw %1, %2, 0x4e
%endmacro
%macro PSWAPD_3DN1 2
    movq  %1, %2
    psrlq %1, 32
    punpckldq %1, %2
%endmacro

%macro FLOAT_TO_INT16_INTERLEAVE6 1
; void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len)
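; Rough scalar equivalent (comment-only sketch; float_to_int16() is just
; shorthand for the convert-and-saturate step, whose rounding follows
; cvtps2pi+packssdw in the sse version and pf2id+packssdw in the 3dnow ones):
;     for (i = 0; i < len; i++)
;         for (c = 0; c < 6; c++)
;             dst[6*i + c] = float_to_int16(src[c][i]);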
cglobal float_to_int16_interleave6_%1, 2,7,0, dst, src, src1, src2, src3, src4, src5
%ifdef ARCH_X86_64
    %define lend r10d
    mov     lend, r2d
%else
    %define lend dword r2m
%endif
    mov src1q, [srcq+1*gprsize]
    mov src2q, [srcq+2*gprsize]
    mov src3q, [srcq+3*gprsize]
    mov src4q, [srcq+4*gprsize]
    mov src5q, [srcq+5*gprsize]
    mov srcq,  [srcq]
    sub src1q, srcq
    sub src2q, srcq
    sub src3q, srcq
    sub src4q, srcq
    sub src5q, srcq
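; src1q..src5q now hold the byte offsets of channels 1..5 relative to channel 0,
; so advancing srcq alone steps through all six input channels at once.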
.loop:
    cvtps2pi   mm0, [srcq]
    cvtps2pi   mm1, [srcq+src1q]
    cvtps2pi   mm2, [srcq+src2q]
    cvtps2pi   mm3, [srcq+src3q]
    cvtps2pi   mm4, [srcq+src4q]
    cvtps2pi   mm5, [srcq+src5q]
    packssdw   mm0, mm3
    packssdw   mm1, mm4
    packssdw   mm2, mm5
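; mm0..mm2 now hold two int16 samples from each channel:
; mm0 = [a0 a1 d0 d1], mm1 = [b0 b1 e0 e1], mm2 = [c0 c1 f0 f1].
; The shuffles below transpose this into interleaved channel order so the
; three stores write a0 b0 c0 d0 e0 f0 a1 b1 c1 d1 e1 f1.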
    pswapd     mm3, mm0
    punpcklwd  mm0, mm1
    punpckhwd  mm1, mm2
    punpcklwd  mm2, mm3
    pswapd     mm3, mm0
    punpckldq  mm0, mm2
    punpckhdq  mm2, mm1
    punpckldq  mm1, mm3
    movq [dstq   ], mm0
    movq [dstq+16], mm2
    movq [dstq+ 8], mm1
    add srcq, 8
    add dstq, 24
    sub lend, 2
    jg .loop
    emms
    RET
%endmacro ; FLOAT_TO_INT16_INTERLEAVE6

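; Three instantiations: sse uses cvtps2pi and pshufw; 3dnow substitutes pf2id
; and the emulated pswapd; 3dn2 (extended 3DNow!) keeps pf2id but relies on
; the native pswapd instruction, hence the %undef.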
%define pswapd PSWAPD_SSE
FLOAT_TO_INT16_INTERLEAVE6 sse
%define cvtps2pi pf2id
%define pswapd PSWAPD_3DN1
FLOAT_TO_INT16_INTERLEAVE6 3dnow
%undef pswapd
FLOAT_TO_INT16_INTERLEAVE6 3dn2
%undef cvtps2pi



; void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top)
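; Rough scalar equivalent (comment-only sketch of the same prediction;
; mid_pred() here refers to FFmpeg's median-of-three helper):
;     l = *left; tl = *left_top;
;     for (i = 0; i < w; i++) {
;         l = mid_pred(l, top[i], (l + top[i] - tl) & 0xFF) + diff[i];
;         tl = top[i];
;         dst[i] = l;
;     }
;     *left = l; *left_top = tl;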
cglobal add_hfyu_median_prediction_mmx2, 6,6,0, dst, top, diff, w, left, left_top
    movq    mm0, [topq]
    movq    mm2, mm0
    movd    mm4, [left_topq]
    psllq   mm2, 8
    movq    mm1, mm0
    por     mm4, mm2
    movd    mm3, [leftq]
    psubb   mm0, mm4 ; t-tl
    add    dstq, wq
    add    topq, wq
    add   diffq, wq
    neg      wq
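; dst/top/diff now point just past the row; wq runs from -w up to 0, so one
; negative index addresses all three buffers.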
    jmp .skip
.loop:
    movq    mm4, [topq+wq]
    movq    mm0, mm4
    psllq   mm4, 8
    por     mm4, mm1
    movq    mm1, mm0 ; t
    psubb   mm0, mm4 ; t-tl
.skip:
    movq    mm2, [diffq+wq]
%assign i 0
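; Each output byte depends on the byte just decoded to its left, so the 8
; bytes are handled one at a time: median(l, t, l+t-tl) is built from unsigned
; byte min/max, the residual is added, and the result is shifted into the top
; of mm7 while mm0/mm1/mm2 shift down to expose the next byte.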
%rep 8
    movq    mm4, mm0
    paddb   mm4, mm3 ; t-tl+l
    movq    mm5, mm3
    pmaxub  mm3, mm1
    pminub  mm5, mm1
    pminub  mm3, mm4
    pmaxub  mm3, mm5 ; median
    paddb   mm3, mm2 ; +residual
%if i==0
    movq    mm7, mm3
    psllq   mm7, 56
%else
    movq    mm6, mm3
    psrlq   mm7, 8
    psllq   mm6, 56
    por     mm7, mm6
%endif
%if i<7
    psrlq   mm0, 8
    psrlq   mm1, 8
    psrlq   mm2, 8
%endif
%assign i i+1
%endrep
    movq [dstq+wq], mm7
    add      wq, 8
    jl .loop
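; store the last decoded pixel and the last top pixel so the caller can
; continue the prediction on the next call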
    movzx   r2d, byte [dstq-1]
    mov [leftq], r2d
    movzx   r2d, byte [topq-1]
    mov [left_topq], r2d
    RET