/*
 * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"

#include "dsputil_altivec.h"
#include "util_altivec.h"

// dst[i] *= src[i]; pointers are assumed 16-byte aligned and len a
// multiple of 8 (no scalar tail is handled).
static void vector_fmul_altivec(float *dst, const float *src, int len)
{
    int i;
    vector float d0, d1, s, zero = (vector float)vec_splat_u32(0);
    for(i=0; i<len-7; i+=8) {
        d0 = vec_ld(0, dst+i);
        s  = vec_ld(0, src+i);
        d1 = vec_ld(16, dst+i);
        d0 = vec_madd(d0, s, zero);
        d1 = vec_madd(d1, vec_ld(16, src+i), zero);
        vec_st(d0, 0, dst+i);
        vec_st(d1, 16, dst+i);
    }
}

// dst[i] = src0[i] * src1[len-1-i]; the mergeh/mergel pairs reverse the
// element order within each vector loaded from src1.
static void vector_fmul_reverse_altivec(float *dst, const float *src0,
                                        const float *src1, int len)
{
    int i;
    vector float d, s0, s1, h0, l0,
                 s2, s3, zero = (vector float)vec_splat_u32(0);
    src1 += len-4;
    for(i=0; i<len-7; i+=8) {
        s1 = vec_ld(0, src1-i);              // [a,b,c,d]
        s0 = vec_ld(0, src0+i);
        l0 = vec_mergel(s1, s1);             // [c,c,d,d]
        s3 = vec_ld(-16, src1-i);
        h0 = vec_mergeh(s1, s1);             // [a,a,b,b]
        s2 = vec_ld(16, src0+i);
        s1 = vec_mergeh(vec_mergel(l0, h0),  // [d,b,d,b]
                        vec_mergeh(l0, h0)); // [c,a,c,a]
                                             // [d,c,b,a]
        l0 = vec_mergel(s3, s3);
        d = vec_madd(s0, s1, zero);
        h0 = vec_mergeh(s3, s3);
        vec_st(d, 0, dst+i);
        s3 = vec_mergeh(vec_mergel(l0, h0),
                        vec_mergeh(l0, h0));
        d = vec_madd(s2, s3, zero);
        vec_st(d, 16, dst+i);
    }
}

// dst[i] = src0[i] * src1[i] + src2[i]; dst may be unaligned, so each
// result vector is blended back into the surrounding destination bytes
// with lvsl/lvsr permutes.
static void vector_fmul_add_altivec(float *dst, const float *src0,
                                    const float *src1, const float *src2,
                                    int len)
{
    int i;
    vector float d, s0, s1, s2, t0, t1, edges;
    vector unsigned char align = vec_lvsr(0, dst),
                         mask = vec_lvsl(0, dst);

    for (i=0; i<len-3; i+=4) {
        t0 = vec_ld(0, dst+i);
        t1 = vec_ld(15, dst+i);
        s0 = vec_ld(0, src0+i);
        s1 = vec_ld(0, src1+i);
        s2 = vec_ld(0, src2+i);
        edges = vec_perm(t1, t0, mask);
        d = vec_madd(s0, s1, s2);
        t1 = vec_perm(d, edges, align);
        t0 = vec_perm(edges, d, align);
        vec_st(t1, 15, dst+i);
        vec_st(t0, 0, dst+i);
    }
}

// Overlap-add windowing as used by MDCT-based audio decoders. With the
// dst/win/src0 pointers pre-advanced by len and j = -i-1 in element
// terms, this computes
//   dst[i] = src0[i]*win[j] - src1[j]*win[i] + add_bias
//   dst[j] = src0[i]*win[i] + src1[j]*win[j] + add_bias
// (see the scalar sketch at the end of this file).
static void vector_fmul_window_altivec(float *dst, const float *src0,
                                       const float *src1, const float *win,
                                       float add_bias, int len)
{
    union {
        vector float v;
        float s[4];
    } vadd;
    vector float vadd_bias, zero, t0, t1, s0, s1, wi, wj;
    const vector unsigned char reverse = vcprm(3,2,1,0);
    int i, j;

    dst  += len;
    win  += len;
    src0 += len;

    vadd.s[0] = add_bias;
    vadd_bias = vec_splat(vadd.v, 0);
    zero = (vector float)vec_splat_u32(0);

    // i and j are byte offsets walking in from the two ends of the window.
    for(i=-len*4, j=len*4-16; i<0; i+=16, j-=16) {
        s0 = vec_ld(i, src0);
        s1 = vec_ld(j, src1);
        wi = vec_ld(i, win);
        wj = vec_ld(j, win);

        s1 = vec_perm(s1, s1, reverse);
        wj = vec_perm(wj, wj, reverse);

        t0 = vec_madd(s0, wj, vadd_bias);
        t0 = vec_nmsub(s1, wi, t0);
        t1 = vec_madd(s0, wi, vadd_bias);
        t1 = vec_madd(s1, wj, t1);
        t1 = vec_perm(t1, t1, reverse);

        vec_st(t0, i, dst);
        vec_st(t1, j, dst);
    }
}

// dst[i] = (float)src[i] * mul
static void int32_to_float_fmul_scalar_altivec(float *dst, const int *src,
                                               float mul, int len)
{
    union {
        vector float v;
        float s[4];
    } mul_u;
    int i;
    vector float src1, src2, dst1, dst2, mul_v, zero;

    zero = (vector float)vec_splat_u32(0);
    mul_u.s[0] = mul;
    mul_v = vec_splat(mul_u.v, 0);

    for(i=0; i<len; i+=8) {
        src1 = vec_ctf(vec_ld(0, src+i), 0);
        src2 = vec_ctf(vec_ld(16, src+i), 0);
        dst1 = vec_madd(src1, mul_v, zero);
        dst2 = vec_madd(src2, mul_v, zero);
        vec_st(dst1, 0, dst+i);
        vec_st(dst2, 16, dst+i);
    }
}

// Convert 8 floats starting at src into 8 saturated int16_t values.
static vector signed short
float_to_int16_one_altivec(const float *src)
{
    vector float s0 = vec_ld(0, src);
    vector float s1 = vec_ld(16, src);
    vector signed int t0 = vec_cts(s0, 0);
    vector signed int t1 = vec_cts(s1, 0);
    return vec_packs(t0, t1);
}

static void float_to_int16_altivec(int16_t *dst, const float *src, long len)
{
    int i;
    vector signed short d0, d1, d;
    vector unsigned char align;
    if(((long)dst)&15) //FIXME
        // Unaligned dst: merge each result into the existing destination
        // bytes with lvsl/lvsr permutes.
        for(i=0; i<len-7; i+=8) {
            d0 = vec_ld(0, dst+i);
            d = float_to_int16_one_altivec(src+i);
            d1 = vec_ld(15, dst+i);
            d1 = vec_perm(d1, d0, vec_lvsl(0, dst+i));
            align = vec_lvsr(0, dst+i);
            d0 = vec_perm(d1, d, align);
            d1 = vec_perm(d, d1, align);
            vec_st(d0, 0, dst+i);
            vec_st(d1, 15, dst+i);
        }
    else
        for(i=0; i<len-7; i+=8) {
            d = float_to_int16_one_altivec(src+i);
            vec_st(d, 0, dst+i);
        }
}

static void
float_to_int16_interleave_altivec(int16_t *dst, const float **src,
                                  long len, int channels)
{
    int i;
    vector signed short d0, d1, d2, c0, c1, t0, t1;
    vector unsigned char align;
    if(channels == 1)
        float_to_int16_altivec(dst, src[0], len);
    else if (channels == 2) {
        if(((long)dst)&15)
            for(i=0; i<len-7; i+=8) {
                d0 = vec_ld(0, dst + i);
                t0 = float_to_int16_one_altivec(src[0] + i);
                d1 = vec_ld(31, dst + i);
                t1 = float_to_int16_one_altivec(src[1] + i);
                c0 = vec_mergeh(t0, t1); // L0 R0 L1 R1 L2 R2 L3 R3
                c1 = vec_mergel(t0, t1); // L4 R4 L5 R5 L6 R6 L7 R7
                d2 = vec_perm(d1, d0, vec_lvsl(0, dst + i));
                align = vec_lvsr(0, dst + i);
                d0 = vec_perm(d2, c0, align);
                d1 = vec_perm(c0, c1, align);
                vec_st(d0, 0, dst + i);
                d0 = vec_perm(c1, d2, align);
                vec_st(d1, 15, dst + i);
                vec_st(d0, 31, dst + i);
                dst += 8;
            }
        else
            for(i=0; i<len-7; i+=8) {
                t0 = float_to_int16_one_altivec(src[0] + i);
                t1 = float_to_int16_one_altivec(src[1] + i);
                d0 = vec_mergeh(t0, t1);
                d1 = vec_mergel(t0, t1);
                vec_st(d0, 0, dst + i);
                vec_st(d1, 16, dst + i);
                dst += 8;
            }
    } else {
        // Generic case: convert each channel into an aligned temporary,
        // then interleave with scalar stores.
        DECLARE_ALIGNED(16, int16_t, tmp)[len];
        int c, j;
        for (c = 0; c < channels; c++) {
            float_to_int16_altivec(tmp, src[c], len);
            for (i = 0, j = c; i < len; i++, j += channels) {
                dst[j] = tmp[i];
            }
        }
    }
}

void float_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    c->vector_fmul = vector_fmul_altivec;
    c->vector_fmul_reverse = vector_fmul_reverse_altivec;
    c->vector_fmul_add = vector_fmul_add_altivec;
    c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_altivec;
    // Saturating float->int16 conversion and the biased window function
    // are not bit-exact with the C reference, so only install them when
    // bit-exact output was not requested.
    if(!(avctx->flags & CODEC_FLAG_BITEXACT)) {
        c->vector_fmul_window = vector_fmul_window_altivec;
        c->float_to_int16 = float_to_int16_altivec;
        c->float_to_int16_interleave = float_to_int16_interleave_altivec;
    }
}
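
/* For reference: a minimal scalar sketch of the operation that
 * vector_fmul_window_altivec() vectorizes above. It is re-derived from the
 * AltiVec loop rather than copied from FFmpeg's generic C implementation,
 * and the function name is made up for illustration; it is not wired into
 * DSPContext and exists only to document the indexing. */
static void vector_fmul_window_scalar_sketch(float *dst, const float *src0,
                                             const float *src1,
                                             const float *win,
                                             float add_bias, int len)
{
    int i, j;
    dst  += len;
    win  += len;
    src0 += len;
    for (i = -len, j = len - 1; i < 0; i++, j--) {
        float s0 = src0[i]; // first half, walking forward
        float s1 = src1[j]; // second half, walking backward
        float wi = win[i];
        float wj = win[j];
        dst[i] = s0 * wj - s1 * wi + add_bias;
        dst[j] = s0 * wi + s1 * wj + add_bias;
    }
}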