1/*
2 * Copyright (C) 2013 Xiaolei Yu <dreifachstein@gmail.com>
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21#include "rgb2yuv_neon_common.S"
22
/* downsampled R16G16B16 x8 */
alias_qw    r16x8,  q7
alias_qw    g16x8,  q8
alias_qw    b16x8,  q9

// scratch for the even/odd byte split in compute_y_16x1_step: after the
// vtrn.u8 against a zeroed q11, this holds the odd-indexed source bytes
// zero-extended to u16 (_l/_h are the low/high d-register halves of q11)
alias   n16x16_o,   q11
alias   n16x16_ol,  q11_l
alias   n16x16_oh,  q11_h

// s32x4 Y accumulators: even-pixel low/high quads and odd-pixel low/high quads
alias   y32x16_el,  q12
alias   y32x16_eh,  q13
alias   y32x16_ol,  q14
alias   y32x16_oh,  q15

// narrowed u16 Y results; these deliberately reuse the even accumulators
// q12/q13 (vrshrn narrows a q register into one of its own d halves)
alias   y16x16_e,   q12
alias   y16x16_el,  q12_l
alias   y16x16_eh,  q12_h
alias   y16x16_o,   q13
alias   y16x16_ol,  q13_l
alias   y16x16_oh,  q13_h


// final 16 interleaved u8 Y values end up in q12 (see vtrn.8 in compute_y_16x1)
alias   y8x16,  y16x16_e
47
/*
 * Load the 3x3 s32 coefficient matrix at \src, narrow each coefficient to
 * s16 (into CO_R/CO_G/CO_B, declared in rgb2yuv_neon_common.S), and set the
 * constant byte biases for limited-range YUV output.
 * clobber: q13-q15 (coefficient staging), advances \src past the first 24 bytes
 */
.macro init     src
    // load s32x3x3, narrow to s16x3x3
    // first vld3 de-interleaves the first 6 words (2 lanes per row); the
    // second loads the remaining lane of each row, completing 9 coefficients
    vld3.i32    {q13_l, q14_l, q15_l},          [\src]!
    vld3.i32    {q13_h[0], q14_h[0], q15_h[0]}, [\src]

    vmovn.i32   CO_R, q13
    vmovn.i32   CO_G, q14
    vmovn.i32   CO_B, q15

    // limited-range offsets: +16 for luma, +128 for chroma
    // NOTE(review): only BIAS_U is set here, yet compute_chroma_8x1 uses
    // BIAS_\C for both U and V -- presumably BIAS_V aliases the same
    // register in rgb2yuv_neon_common.S; confirm there
    vmov.u8     BIAS_Y, #16
    vmov.u8     BIAS_U, #128
.endm
60
61
/*
 * One multiply(-accumulate) step of the Y computation for one color channel.
 * vtrn.u8 against a zeroed q11 swaps the odd-indexed bytes of \s8x16 with
 * zeros: afterwards \s8x16 holds the even-indexed pixels zero-extended to
 * u16 and n16x16_o holds the odd-indexed pixels zero-extended to u16.
 * \action (vmull for the first channel, vmlal to accumulate the rest) then
 * multiplies each u16 lane by the s16 scalar \coeff into the four s32x4
 * accumulators y32x16_el/eh/ol/oh.
 * clobber: \s8x16, n16x16_o (q11); writes q12-q15
 */
.macro compute_y_16x1_step  action, s8x16, coeff
    vmov.u8     n16x16_o,   #0
    vtrn.u8     \s8x16,     n16x16_o

    \action     y32x16_el,  \s8x16\()_l,    \coeff
    \action     y32x16_eh,  \s8x16\()_h,    \coeff
    \action     y32x16_ol,  n16x16_ol,      \coeff
    \action     y32x16_oh,  n16x16_oh,      \coeff
.endm
71
72/*
73 * in:      r8x16, g8x16, b8x16
74 * out:     y8x16
75 * clobber: q11-q15, r8x16, g8x16, b8x16
76 */
77.macro compute_y_16x1
78    compute_y_16x1_step vmull, r8x16, CO_RY
79    compute_y_16x1_step vmlal, g8x16, CO_GY
80    compute_y_16x1_step vmlal, b8x16, CO_BY
81
82    vrshrn.i32  y16x16_el,  y32x16_el,  #15
83    vrshrn.i32  y16x16_eh,  y32x16_eh,  #15
84    vrshrn.i32  y16x16_ol,  y32x16_ol,  #15
85    vrshrn.i32  y16x16_oh,  y32x16_oh,  #15
86
87    vtrn.8      y16x16_e,   y16x16_o
88    vadd.u8     y8x16,      y8x16,      BIAS_Y
89.endm
90
// s32x4 chroma accumulators: low/high quads of the 8 results
alias   c32x8_l,    q14
alias   c32x8_h,    q15

// narrowed u16 chroma scratch (see compute_chroma_8x1)
alias_qw    c16x8,  q13
// presumably holds the u8x8/v8x8 output pair as the d halves of q10 --
// the individual aliases are expected in rgb2yuv_neon_common.S; confirm there
alias_qw    c8x8x2, q10
96
/*
 * One multiply(-accumulate) step of a chroma computation: apply \action
 * (vmull/vmlal) with the s16 scalar \coeff to the 8 s16 values in \s16x8,
 * accumulating into the s32x4 halves c32x8_l/c32x8_h (q14/q15).
 */
.macro compute_chroma_8x1_step  action, s16x8, coeff
    \action     c32x8_l,    \s16x8\()_l,    \coeff
    \action     c32x8_h,    \s16x8\()_h,    \coeff
.endm
101
102/*
103 * in:      r16x8, g16x8, b16x8
104 * out:     c8x8
105 * clobber: q14-q15
106 */
107.macro compute_chroma_8x1   c, C
108    compute_chroma_8x1_step vmull, r16x8, CO_R\C
109    compute_chroma_8x1_step vmlal, g16x8, CO_G\C
110    compute_chroma_8x1_step vmlal, b16x8, CO_B\C
111
112    vrshrn.i32  c16x8_l,    c32x8_l,    #15
113    vrshrn.i32  c16x8_h,    c32x8_h,    #15
114    vmovn.i16   \c\()8x8,   c16x8
115    vadd.u8     \c\()8x8,   \c\()8x8,   BIAS_\C
116.endm
117

    // Entry point: instantiate the rgbx -> nv12 (4:2:0 semi-planar)
    // converter from the loop_420sp template in rgb2yuv_neon_common.S,
    // using the init macro above and the kernel_420_16x2 body.
    // NOTE(review): kernel_420_16x2 and the meaning of the final argument
    // (32) are defined by the common file, not visible here -- confirm there.
    loop_420sp  rgbx, nv12, init, kernel_420_16x2, 32
