/* ffmpeg/libavcodec/ppc/fdct_altivec.c, this file is part of the
 * AltiVec optimized library for the FFMPEG Multimedia System
 * Copyright (C) 2003 James Klicman <james@klicman.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */


#include "libavutil/common.h"
#include "libavcodec/dsputil.h"
#include "dsputil_ppc.h"
#include "gcc_fixes.h"


#define vs16(v) ((vector signed short)(v))
#define vs32(v) ((vector signed int)(v))
#define vu8(v)  ((vector unsigned char)(v))
#define vu16(v) ((vector unsigned short)(v))
#define vu32(v) ((vector unsigned int)(v))


#define C1     0.98078525066375732421875000 /* cos(1*PI/16) */
#define C2     0.92387950420379638671875000 /* cos(2*PI/16) */
#define C3     0.83146959543228149414062500 /* cos(3*PI/16) */
#define C4     0.70710676908493041992187500 /* cos(4*PI/16) */
#define C5     0.55557024478912353515625000 /* cos(5*PI/16) */
#define C6     0.38268342614173889160156250 /* cos(6*PI/16) */
#define C7     0.19509032368659973144531250 /* cos(7*PI/16) */
#define SQRT_2 1.41421353816986083984375000 /* sqrt(2) */


#define W0 -(2 * C2)
#define W1  (2 * C6)
#define W2 (SQRT_2 * C6)
#define W3 (SQRT_2 * C3)
#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
#define W5 (SQRT_2 * ( C1 + C3 - C5 + C7))
#define W6 (SQRT_2 * ( C1 + C3 + C5 - C7))
#define W7 (SQRT_2 * ( C1 + C3 - C5 - C7))
#define W8 (SQRT_2 * ( C7 - C3))
#define W9 (SQRT_2 * (-C1 - C3))
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * ( C5 - C3))


static vector float fdctconsts[3] = {
    { W0, W1, W2, W3 },
    { W4, W5, W6, W7 },
    { W8, W9, WA, WB }
};

#define LD_W0 vec_splat(cnsts0, 0)
#define LD_W1 vec_splat(cnsts0, 1)
#define LD_W2 vec_splat(cnsts0, 2)
#define LD_W3 vec_splat(cnsts0, 3)
#define LD_W4 vec_splat(cnsts1, 0)
#define LD_W5 vec_splat(cnsts1, 1)
#define LD_W6 vec_splat(cnsts1, 2)
#define LD_W7 vec_splat(cnsts1, 3)
#define LD_W8 vec_splat(cnsts2, 0)
#define LD_W9 vec_splat(cnsts2, 1)
#define LD_WA vec_splat(cnsts2, 2)
#define LD_WB vec_splat(cnsts2, 3)
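
/* The twelve scaled DCT weights are packed four to a vector so the whole
 * table fits in three registers (cnsts0, cnsts1, cnsts2); each LD_Wn splats
 * one element across a full vector for use with vec_madd(a, b, c), the
 * AltiVec fused multiply-add, which computes a * b + c in each float lane. */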

#define FDCTROW(b0, b1, b2, b3, b4, b5, b6, b7) /* {{{ */ \
    x0 = vec_add(b0, b7);           /* x0 = b0 + b7; */ \
    x7 = vec_sub(b0, b7);           /* x7 = b0 - b7; */ \
    x1 = vec_add(b1, b6);           /* x1 = b1 + b6; */ \
    x6 = vec_sub(b1, b6);           /* x6 = b1 - b6; */ \
    x2 = vec_add(b2, b5);           /* x2 = b2 + b5; */ \
    x5 = vec_sub(b2, b5);           /* x5 = b2 - b5; */ \
    x3 = vec_add(b3, b4);           /* x3 = b3 + b4; */ \
    x4 = vec_sub(b3, b4);           /* x4 = b3 - b4; */ \
    \
    b7 = vec_add(x0, x3);           /* b7 = x0 + x3; */ \
    b1 = vec_add(x1, x2);           /* b1 = x1 + x2; */ \
    b0 = vec_add(b7, b1);           /* b0 = b7 + b1; */ \
    b4 = vec_sub(b7, b1);           /* b4 = b7 - b1; */ \
    \
    b2 = vec_sub(x0, x3);           /* b2 = x0 - x3; */ \
    b6 = vec_sub(x1, x2);           /* b6 = x1 - x2; */ \
    b5 = vec_add(b6, b2);           /* b5 = b6 + b2; */ \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5);    /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5);    /* b6 = b5 + b6 * W0; */ \
    \
    x0 = vec_add(x4, x7);           /* x0 = x4 + x7; */ \
    x1 = vec_add(x5, x6);           /* x1 = x5 + x6; */ \
    x2 = vec_add(x4, x6);           /* x2 = x4 + x6; */ \
    x3 = vec_add(x5, x7);           /* x3 = x5 + x7; */ \
    x8 = vec_add(x2, x3);           /* x8 = x2 + x3; */ \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8);    /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8);    /* x3 = x3 * WB + x8; */ \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0);    /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1);    /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1);    /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0);    /* b1 = x7 * W7 + x0; */ \
    \
    b7 = vec_add(b7, x2);           /* b7 += x2; */ \
    b5 = vec_add(b5, x3);           /* b5 += x3; */ \
    b3 = vec_add(b3, x2);           /* b3 += x2; */ \
    b1 = vec_add(b1, x3);           /* b1 += x3; */ \
    /* }}} */

#define FDCTCOL(b0, b1, b2, b3, b4, b5, b6, b7) /* {{{ */ \
    x0 = vec_add(b0, b7);           /* x0 = b0 + b7; */ \
    x7 = vec_sub(b0, b7);           /* x7 = b0 - b7; */ \
    x1 = vec_add(b1, b6);           /* x1 = b1 + b6; */ \
    x6 = vec_sub(b1, b6);           /* x6 = b1 - b6; */ \
    x2 = vec_add(b2, b5);           /* x2 = b2 + b5; */ \
    x5 = vec_sub(b2, b5);           /* x5 = b2 - b5; */ \
    x3 = vec_add(b3, b4);           /* x3 = b3 + b4; */ \
    x4 = vec_sub(b3, b4);           /* x4 = b3 - b4; */ \
    \
    b7 = vec_add(x0, x3);           /* b7 = x0 + x3; */ \
    b1 = vec_add(x1, x2);           /* b1 = x1 + x2; */ \
    b0 = vec_add(b7, b1);           /* b0 = b7 + b1; */ \
    b4 = vec_sub(b7, b1);           /* b4 = b7 - b1; */ \
    \
    b2 = vec_sub(x0, x3);           /* b2 = x0 - x3; */ \
    b6 = vec_sub(x1, x2);           /* b6 = x1 - x2; */ \
    b5 = vec_add(b6, b2);           /* b5 = b6 + b2; */ \
    cnst = LD_W2; \
    b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \
    cnst = LD_W1; \
    b2 = vec_madd(cnst, b2, b5);    /* b2 = b5 + b2 * W1; */ \
    cnst = LD_W0; \
    b6 = vec_madd(cnst, b6, b5);    /* b6 = b5 + b6 * W0; */ \
    \
    x0 = vec_add(x4, x7);           /* x0 = x4 + x7; */ \
    x1 = vec_add(x5, x6);           /* x1 = x5 + x6; */ \
    x2 = vec_add(x4, x6);           /* x2 = x4 + x6; */ \
    x3 = vec_add(x5, x7);           /* x3 = x5 + x7; */ \
    x8 = vec_add(x2, x3);           /* x8 = x2 + x3; */ \
    cnst = LD_W3; \
    x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \
    \
    cnst = LD_W8; \
    x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \
    cnst = LD_W9; \
    x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \
    cnst = LD_WA; \
    x2 = vec_madd(cnst, x2, x8);    /* x2 = x2 * WA + x8; */ \
    cnst = LD_WB; \
    x3 = vec_madd(cnst, x3, x8);    /* x3 = x3 * WB + x8; */ \
    \
    cnst = LD_W4; \
    b7 = vec_madd(cnst, x4, x0);    /* b7 = x4 * W4 + x0; */ \
    cnst = LD_W5; \
    b5 = vec_madd(cnst, x5, x1);    /* b5 = x5 * W5 + x1; */ \
    cnst = LD_W6; \
    b3 = vec_madd(cnst, x6, x1);    /* b3 = x6 * W6 + x1; */ \
    cnst = LD_W7; \
    b1 = vec_madd(cnst, x7, x0);    /* b1 = x7 * W7 + x0; */ \
    \
    b7 = vec_add(b7, x2);           /* b7 += x2; */ \
    b5 = vec_add(b5, x3);           /* b5 += x3; */ \
    b3 = vec_add(b3, x2);           /* b3 += x2; */ \
    b1 = vec_add(b1, x3);           /* b1 += x3; */ \
    /* }}} */
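
/*
 * For readers cross-checking the arithmetic, here is the same 8-point pass
 * in plain scalar form.  This is an illustrative sketch, not FFmpeg code:
 * the name fdct_1d_ref is invented for this comment, and the block is kept
 * under "#if 0" so it is never compiled.  Note that FDCTROW and FDCTCOL
 * above are textually identical; both reduce to this function applied
 * across vector lanes.
 */
#if 0
static void fdct_1d_ref(float b[8])
{
    /* stage 1: reflection butterflies */
    float x0 = b[0] + b[7], x7 = b[0] - b[7];
    float x1 = b[1] + b[6], x6 = b[1] - b[6];
    float x2 = b[2] + b[5], x5 = b[2] - b[5];
    float x3 = b[3] + b[4], x4 = b[3] - b[4];

    /* even half: outputs 0 and 4 ... */
    float s07 = x0 + x3, s16 = x1 + x2;
    b[0] = s07 + s16;
    b[4] = s07 - s16;

    /* ... and outputs 2 and 6 via the W0/W1/W2 rotation */
    float d03 = x0 - x3, d12 = x1 - x2;
    float t   = W2 * (d12 + d03);
    b[2] = t + W1 * d03;
    b[6] = t + W0 * d12;

    /* odd half: outputs 1, 3, 5, 7 share the W3/WA/WB cross terms */
    float y0 = (x4 + x7) * W8, y1 = (x5 + x6) * W9;
    float y2 = x4 + x6,        y3 = x5 + x7;
    float y8 = W3 * (y2 + y3);
    y2 = y2 * WA + y8;
    y3 = y3 * WB + y8;

    b[7] = W4 * x4 + y0 + y2;
    b[5] = W5 * x5 + y1 + y3;
    b[3] = W6 * x6 + y1 + y2;
    b[1] = W7 * x7 + y0 + y3;
}
#endif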


/* two-dimensional discrete cosine transform */

void fdct_altivec(int16_t *block)
{
    POWERPC_PERF_DECLARE(altivec_fdct, 1);
    vector signed short *bp;
    vector float *cp;
    vector float b00, b10, b20, b30, b40, b50, b60, b70;
    vector float b01, b11, b21, b31, b41, b51, b61, b71;
    vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
    vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;

    POWERPC_PERF_START_COUNT(altivec_fdct, 1);


    /* setup constants {{{ */
    /* mzero = -0.0 */
    mzero = ((vector float)vec_splat_u32(-1));
    mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));
    cp = fdctconsts;
    cnsts0 = vec_ld(0, cp); cp++;
    cnsts1 = vec_ld(0, cp); cp++;
    cnsts2 = vec_ld(0, cp);
    /* }}} */


    /* 8x8 matrix transpose (vector short[8]) {{{ */
#define MERGE_S16(hl, a, b) vec_merge##hl(vs16(a), vs16(b))

    bp = (vector signed short *)block;
    b00 = ((vector float)vec_ld(0, bp));
    b40 = ((vector float)vec_ld(16 * 4, bp));
    b01 = ((vector float)MERGE_S16(h, b00, b40));
    b11 = ((vector float)MERGE_S16(l, b00, b40));
    bp++;
    b10 = ((vector float)vec_ld(0, bp));
    b50 = ((vector float)vec_ld(16 * 4, bp));
    b21 = ((vector float)MERGE_S16(h, b10, b50));
    b31 = ((vector float)MERGE_S16(l, b10, b50));
    bp++;
    b20 = ((vector float)vec_ld(0, bp));
    b60 = ((vector float)vec_ld(16 * 4, bp));
    b41 = ((vector float)MERGE_S16(h, b20, b60));
    b51 = ((vector float)MERGE_S16(l, b20, b60));
    bp++;
    b30 = ((vector float)vec_ld(0, bp));
    b70 = ((vector float)vec_ld(16 * 4, bp));
    b61 = ((vector float)MERGE_S16(h, b30, b70));
    b71 = ((vector float)MERGE_S16(l, b30, b70));

    x0 = ((vector float)MERGE_S16(h, b01, b41));
    x1 = ((vector float)MERGE_S16(l, b01, b41));
    x2 = ((vector float)MERGE_S16(h, b11, b51));
    x3 = ((vector float)MERGE_S16(l, b11, b51));
    x4 = ((vector float)MERGE_S16(h, b21, b61));
    x5 = ((vector float)MERGE_S16(l, b21, b61));
    x6 = ((vector float)MERGE_S16(h, b31, b71));
    x7 = ((vector float)MERGE_S16(l, b31, b71));

    b00 = ((vector float)MERGE_S16(h, x0, x4));
    b10 = ((vector float)MERGE_S16(l, x0, x4));
    b20 = ((vector float)MERGE_S16(h, x1, x5));
    b30 = ((vector float)MERGE_S16(l, x1, x5));
    b40 = ((vector float)MERGE_S16(h, x2, x6));
    b50 = ((vector float)MERGE_S16(l, x2, x6));
    b60 = ((vector float)MERGE_S16(h, x3, x7));
    b70 = ((vector float)MERGE_S16(l, x3, x7));

#undef MERGE_S16
    /* }}} */


/* Some of the initial calculations can be done as vector short before
 * conversion to vector float.  The following code section takes advantage
 * of this.
 */
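
/* Note: doing this first butterfly stage in 16-bit integers is safe for the
 * usual FDCT input (samples or prediction differences, at most 9 significant
 * bits): each two-way sum needs 10 bits and the eight-way sum feeding b00
 * needs 12, comfortably within a signed short. */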
#if 1
    /* fdct rows {{{ */
    x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
    x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
    x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
    x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
    x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
    x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
    x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
    x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));

    b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
    b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));

    b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
    b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));

#define CTF0(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF0(0);
    CTF0(4);

    b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
    b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));

    CTF0(2);
    CTF0(6);

#undef CTF0

    x0 = vec_add(b60, b20);
    x1 = vec_add(b61, b21);

    cnst = LD_W2;
    x0 = vec_madd(cnst, x0, mzero);
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_W1;
    b20 = vec_madd(cnst, b20, x0);
    b21 = vec_madd(cnst, b21, x1);
    cnst = LD_W0;
    b60 = vec_madd(cnst, b60, x0);
    b61 = vec_madd(cnst, b61, x1);

#define CTFX(x, b) \
    b##0 = ((vector float)vec_unpackh(vs16(x))); \
    b##1 = ((vector float)vec_unpackl(vs16(x))); \
    b##0 = vec_ctf(vs32(b##0), 0); \
    b##1 = vec_ctf(vs32(b##1), 0);

    CTFX(x4, b7);
    CTFX(x5, b5);
    CTFX(x6, b3);
    CTFX(x7, b1);

#undef CTFX


    x0 = vec_add(b70, b10);
    x1 = vec_add(b50, b30);
    x2 = vec_add(b70, b30);
    x3 = vec_add(b50, b10);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b70 = vec_madd(cnst, b70, x0);
    cnst = LD_W5;
    b50 = vec_madd(cnst, b50, x1);
    cnst = LD_W6;
    b30 = vec_madd(cnst, b30, x1);
    cnst = LD_W7;
    b10 = vec_madd(cnst, b10, x0);

    b70 = vec_add(b70, x2);
    b50 = vec_add(b50, x3);
    b30 = vec_add(b30, x2);
    b10 = vec_add(b10, x3);


    x0 = vec_add(b71, b11);
    x1 = vec_add(b51, b31);
    x2 = vec_add(b71, b31);
    x3 = vec_add(b51, b11);
    x8 = vec_add(x2, x3);
    cnst = LD_W3;
    x8 = vec_madd(cnst, x8, mzero);

    cnst = LD_W8;
    x0 = vec_madd(cnst, x0, mzero);
    cnst = LD_W9;
    x1 = vec_madd(cnst, x1, mzero);
    cnst = LD_WA;
    x2 = vec_madd(cnst, x2, x8);
    cnst = LD_WB;
    x3 = vec_madd(cnst, x3, x8);

    cnst = LD_W4;
    b71 = vec_madd(cnst, b71, x0);
    cnst = LD_W5;
    b51 = vec_madd(cnst, b51, x1);
    cnst = LD_W6;
    b31 = vec_madd(cnst, b31, x1);
    cnst = LD_W7;
    b11 = vec_madd(cnst, b11, x0);

    b71 = vec_add(b71, x2);
    b51 = vec_add(b51, x3);
    b31 = vec_add(b31, x2);
    b11 = vec_add(b11, x3);
    /* }}} */
#else
    /* convert to float {{{ */
#define CTF(n) \
    b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
    b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
    b##n##1 = vec_ctf(vs32(b##n##1), 0); \
    b##n##0 = vec_ctf(vs32(b##n##0), 0);

    CTF(0);
    CTF(1);
    CTF(2);
    CTF(3);
    CTF(4);
    CTF(5);
    CTF(6);
    CTF(7);

#undef CTF
    /* }}} */

    FDCTROW(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTROW(b01, b11, b21, b31, b41, b51, b61, b71);
#endif
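
/* At this point each row of the 8x8 float matrix is held as a pair of
 * vectors: bN0 carries elements 0-3 of row N and bN1 elements 4-7.  The
 * transpose below therefore works on four 4x4 quadrants with
 * vec_mergeh/vec_mergel, swapping the two off-diagonal quadrants. */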

    /* 8x8 matrix transpose (vector float[8][2]) {{{ */
    x0 = vec_mergel(b00, b20);
    x1 = vec_mergeh(b00, b20);
    x2 = vec_mergel(b10, b30);
    x3 = vec_mergeh(b10, b30);

    b00 = vec_mergeh(x1, x3);
    b10 = vec_mergel(x1, x3);
    b20 = vec_mergeh(x0, x2);
    b30 = vec_mergel(x0, x2);

    x4 = vec_mergel(b41, b61);
    x5 = vec_mergeh(b41, b61);
    x6 = vec_mergel(b51, b71);
    x7 = vec_mergeh(b51, b71);

    b41 = vec_mergeh(x5, x7);
    b51 = vec_mergel(x5, x7);
    b61 = vec_mergeh(x4, x6);
    b71 = vec_mergel(x4, x6);

    x0 = vec_mergel(b01, b21);
    x1 = vec_mergeh(b01, b21);
    x2 = vec_mergel(b11, b31);
    x3 = vec_mergeh(b11, b31);

    x4 = vec_mergel(b40, b60);
    x5 = vec_mergeh(b40, b60);
    x6 = vec_mergel(b50, b70);
    x7 = vec_mergeh(b50, b70);

    b40 = vec_mergeh(x1, x3);
    b50 = vec_mergel(x1, x3);
    b60 = vec_mergeh(x0, x2);
    b70 = vec_mergel(x0, x2);

    b01 = vec_mergeh(x5, x7);
    b11 = vec_mergel(x5, x7);
    b21 = vec_mergeh(x4, x6);
    b31 = vec_mergel(x4, x6);
    /* }}} */


    FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
    FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);


    /* round, convert back to short {{{ */
#define CTS(n) \
    b##n##0 = vec_round(b##n##0); \
    b##n##1 = vec_round(b##n##1); \
    b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \
    b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \
    b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
    vec_st(vs16(b##n##0), 0, bp);

    bp = (vector signed short *)block;
    CTS(0); bp++;
    CTS(1); bp++;
    CTS(2); bp++;
    CTS(3); bp++;
    CTS(4); bp++;
    CTS(5); bp++;
    CTS(6); bp++;
    CTS(7);

#undef CTS
    /* }}} */

    POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
}
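
/*
 * Registration sketch (an assumption for illustration, not code from this
 * file): FFmpeg builds of this era hooked the routine up from the PowerPC
 * dsputil initialization roughly as follows; exact guard and field names
 * varied between releases.
 *
 *     if (avctx->dct_algo == FF_DCT_AUTO || avctx->dct_algo == FF_DCT_ALTIVEC)
 *         c->fdct = fdct_altivec;
 */

/* vim:set foldmethod=marker foldlevel=0: */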