/*
 * Copyright (C) 1999-2001 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
 *
 * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
 * See http://libmpeg2.sourceforge.net/ for updates.
 *
 * mpeg2dec is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * mpeg2dec is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with mpeg2dec; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavcodec/dsputil.h"

#include "libavutil/x86_cpu.h"
#include "dsputil_mmx.h"

#define ROW_SHIFT 11
#define COL_SHIFT 6

#define round(bias) ((int)(((bias)+0.5) * (1<<ROW_SHIFT)))
#define rounder(bias) {round (bias), round (bias)}


#if 0
/* C row IDCT - it is just here to document the MMXEXT and MMX versions */
static inline void idct_row (int16_t * row, int offset,
                             int16_t * table, int32_t * rounder)
{
    int C1, C2, C3, C4, C5, C6, C7;
    int a0, a1, a2, a3, b0, b1, b2, b3;

    row += offset;

    C1 = table[1];
    C2 = table[2];
    C3 = table[3];
    C4 = table[4];
    C5 = table[5];
    C6 = table[6];
    C7 = table[7];

    a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + *rounder;
    a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + *rounder;
    a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + *rounder;
    a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + *rounder;

    b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7];
    b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7];
    b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7];
    b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7];

    row[0] = (a0 + b0) >> ROW_SHIFT;
    row[1] = (a1 + b1) >> ROW_SHIFT;
    row[2] = (a2 + b2) >> ROW_SHIFT;
    row[3] = (a3 + b3) >> ROW_SHIFT;
    row[4] = (a3 - b3) >> ROW_SHIFT;
    row[5] = (a2 - b2) >> ROW_SHIFT;
    row[6] = (a1 - b1) >> ROW_SHIFT;
    row[7] = (a0 - b0) >> ROW_SHIFT;
}
#endif


/* MMXEXT row IDCT */

#define mmxext_table(c1,c2,c3,c4,c5,c6,c7) { c4,  c2, -c4, -c2,   \
                                             c4,  c6,  c4,  c6,   \
                                             c1,  c3, -c1, -c5,   \
                                             c5,  c7,  c3, -c7,   \
                                             c4, -c6,  c4, -c6,   \
                                            -c4,  c2,  c4, -c2,   \
                                             c5, -c1,  c3, -c1,   \
                                             c7,  c3,  c7, -c5 }

static inline void mmxext_row_head (int16_t * const row, const int offset,
                                    const int16_t * const table)
{
    __asm__ volatile(
        "movq (%0), %%mm2 \n\t" /* mm2 = x6 x4 x2 x0 */

        "movq 8(%0), %%mm5 \n\t" /* mm5 = x7 x5 x3 x1 */
        "movq %%mm2, %%mm0 \n\t" /* mm0 = x6 x4 x2 x0 */

        "movq (%1), %%mm3 \n\t" /* mm3 = -C2 -C4 C2 C4 */
        "movq %%mm5, %%mm6 \n\t" /* mm6 = x7 x5 x3 x1 */

        "movq 8(%1), %%mm4 \n\t" /* mm4 = C6 C4 C6 C4 */
        "pmaddwd %%mm0, %%mm3 \n\t" /* mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 */

        "pshufw $0x4e, %%mm2, %%mm2 \n\t" /* mm2 = x2 x0 x6 x4 */
        :: "r" ((row+offset)), "r" (table)
    );
}

static inline void mmxext_row (const int16_t * const table,
                               const int32_t * const rounder)
{
    __asm__ volatile (
        "movq 16(%0), %%mm1 \n\t" /* mm1 = -C5 -C1 C3 C1 */
        "pmaddwd %%mm2, %%mm4 \n\t" /* mm4 = C4*x0+C6*x2 C4*x4+C6*x6 */

        "pmaddwd 32(%0), %%mm0 \n\t" /* mm0 = C4*x4-C6*x6 C4*x0-C6*x2 */
        "pshufw $0x4e, %%mm6, %%mm6 \n\t" /* mm6 = x3 x1 x7 x5 */

        "movq 24(%0), %%mm7 \n\t" /* mm7 = -C7 C3 C7 C5 */
        "pmaddwd %%mm5, %%mm1 \n\t" /* mm1= -C1*x5-C5*x7 C1*x1+C3*x3 */

        "paddd (%1), %%mm3 \n\t" /* mm3 += rounder */
        "pmaddwd %%mm6, %%mm7 \n\t" /* mm7 = C3*x1-C7*x3 C5*x5+C7*x7 */

        "pmaddwd 40(%0), %%mm2 \n\t" /* mm2= C4*x0-C2*x2 -C4*x4+C2*x6 */
        "paddd %%mm4, %%mm3 \n\t" /* mm3 = a1 a0 + rounder */

        "pmaddwd 48(%0), %%mm5 \n\t" /* mm5 = C3*x5-C1*x7 C5*x1-C1*x3 */
        "movq %%mm3, %%mm4 \n\t" /* mm4 = a1 a0 + rounder */

        "pmaddwd 56(%0), %%mm6 \n\t" /* mm6 = C7*x1-C5*x3 C7*x5+C3*x7 */
        "paddd %%mm7, %%mm1 \n\t" /* mm1 = b1 b0 */

        "paddd (%1), %%mm0 \n\t" /* mm0 += rounder */
        "psubd %%mm1, %%mm3 \n\t" /* mm3 = a1-b1 a0-b0 + rounder */

        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm3 \n\t" /* mm3 = y6 y7 */
        "paddd %%mm4, %%mm1 \n\t" /* mm1 = a1+b1 a0+b0 + rounder */

        "paddd %%mm2, %%mm0 \n\t" /* mm0 = a3 a2 + rounder */
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm1 \n\t" /* mm1 = y1 y0 */

        "paddd %%mm6, %%mm5 \n\t" /* mm5 = b3 b2 */
        "movq %%mm0, %%mm4 \n\t" /* mm4 = a3 a2 + rounder */

        "paddd %%mm5, %%mm0 \n\t" /* mm0 = a3+b3 a2+b2 + rounder */
        "psubd %%mm5, %%mm4 \n\t" /* mm4 = a3-b3 a2-b2 + rounder */
        : : "r" (table), "r" (rounder));
}

static inline void mmxext_row_tail (int16_t * const row, const int store)
{
    __asm__ volatile (
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t" /* mm0 = y3 y2 */

        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm4 \n\t" /* mm4 = y4 y5 */

        "packssdw %%mm0, %%mm1 \n\t" /* mm1 = y3 y2 y1 y0 */

        "packssdw %%mm3, %%mm4 \n\t" /* mm4 = y6 y7 y4 y5 */

        "movq %%mm1, (%0) \n\t" /* save y3 y2 y1 y0 */
        "pshufw $0xb1, %%mm4, %%mm4 \n\t" /* mm4 = y7 y6 y5 y4 */

        /* slot */

        "movq %%mm4, 8(%0) \n\t" /* save y7 y6 y5 y4 */
        :: "r" (row+store)
    );
}

static inline void mmxext_row_mid (int16_t * const row, const int store,
                                   const int offset,
                                   const int16_t * const table)
{
    __asm__ volatile (
        "movq (%0,%1), %%mm2 \n\t" /* mm2 = x6 x4 x2 x0 */
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t" /* mm0 = y3 y2 */

        "movq 8(%0,%1), %%mm5 \n\t" /* mm5 = x7 x5 x3 x1 */
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm4 \n\t" /* mm4 = y4 y5 */

        "packssdw %%mm0, %%mm1 \n\t" /* mm1 = y3 y2 y1 y0 */
        "movq %%mm5, %%mm6 \n\t" /* mm6 = x7 x5 x3 x1 */

        "packssdw %%mm3, %%mm4 \n\t" /* mm4 = y6 y7 y4 y5 */
        "movq %%mm2, %%mm0 \n\t" /* mm0 = x6 x4 x2 x0 */

        "movq %%mm1, (%0,%2) \n\t" /* save y3 y2 y1 y0 */
        "pshufw $0xb1, %%mm4, %%mm4\n\t" /* mm4 = y7 y6 y5 y4 */

        "movq (%3), %%mm3 \n\t" /* mm3 = -C2 -C4 C2 C4 */
        "movq %%mm4, 8(%0,%2) \n\t" /* save y7 y6 y5 y4 */

        "pmaddwd %%mm0, %%mm3 \n\t" /* mm3= -C4*x4-C2*x6 C4*x0+C2*x2 */

        "movq 8(%3), %%mm4 \n\t" /* mm4 = C6 C4 C6 C4 */
        "pshufw $0x4e, %%mm2, %%mm2\n\t" /* mm2 = x2 x0 x6 x4 */
        :: "r" (row), "r" ((x86_reg) (2*offset)), "r" ((x86_reg) (2*store)), "r" (table)
    );
}


/* MMX row IDCT */

#define mmx_table(c1,c2,c3,c4,c5,c6,c7) { c4,  c2,  c4,  c6,   \
                                          c4,  c6, -c4, -c2,   \
                                          c1,  c3,  c3, -c7,   \
                                          c5,  c7, -c1, -c5,   \
                                          c4, -c6,  c4, -c2,   \
                                         -c4,  c2,  c4, -c6,   \
                                          c5, -c1,  c7, -c5,   \
                                          c7,  c3,  c3, -c1 }

static inline void mmx_row_head (int16_t * const row, const int offset,
                                 const int16_t * const table)
{
    __asm__ volatile (
        "movq (%0), %%mm2 \n\t" /* mm2 = x6 x4 x2 x0 */

        "movq 8(%0), %%mm5 \n\t" /* mm5 = x7 x5 x3 x1 */
        "movq %%mm2, %%mm0 \n\t" /* mm0 = x6 x4 x2 x0 */

        "movq (%1), %%mm3 \n\t" /* mm3 = C6 C4 C2 C4 */
        "movq %%mm5, %%mm6 \n\t" /* mm6 = x7 x5 x3 x1 */

        "punpckldq %%mm0, %%mm0 \n\t" /* mm0 = x2 x0 x2 x0 */

        "movq 8(%1), %%mm4 \n\t" /* mm4 = -C2 -C4 C6 C4 */
        "pmaddwd %%mm0, %%mm3 \n\t" /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */

        "movq 16(%1), %%mm1 \n\t" /* mm1 = -C7 C3 C3 C1 */
        "punpckhdq %%mm2, %%mm2 \n\t" /* mm2 = x6 x4 x6 x4 */
        :: "r" ((row+offset)), "r" (table)
    );
}

static inline void mmx_row (const int16_t * const table,
                            const int32_t * const rounder)
{
    __asm__ volatile (
        "pmaddwd %%mm2, %%mm4 \n\t" /* mm4 = -C4*x4-C2*x6 C4*x4+C6*x6 */
        "punpckldq %%mm5, %%mm5 \n\t" /* mm5 = x3 x1 x3 x1 */

        "pmaddwd 32(%0), %%mm0 \n\t" /* mm0 = C4*x0-C2*x2 C4*x0-C6*x2 */
        "punpckhdq %%mm6, %%mm6 \n\t" /* mm6 = x7 x5 x7 x5 */

        "movq 24(%0), %%mm7 \n\t" /* mm7 = -C5 -C1 C7 C5 */
        "pmaddwd %%mm5, %%mm1 \n\t" /* mm1 = C3*x1-C7*x3 C1*x1+C3*x3 */

        "paddd (%1), %%mm3 \n\t" /* mm3 += rounder */
        "pmaddwd %%mm6, %%mm7 \n\t" /* mm7 = -C1*x5-C5*x7 C5*x5+C7*x7 */

        "pmaddwd 40(%0), %%mm2 \n\t" /* mm2 = C4*x4-C6*x6 -C4*x4+C2*x6 */
        "paddd %%mm4, %%mm3 \n\t" /* mm3 = a1 a0 + rounder */

        "pmaddwd 48(%0), %%mm5 \n\t" /* mm5 = C7*x1-C5*x3 C5*x1-C1*x3 */
        "movq %%mm3, %%mm4 \n\t" /* mm4 = a1 a0 + rounder */

        "pmaddwd 56(%0), %%mm6 \n\t" /* mm6 = C3*x5-C1*x7 C7*x5+C3*x7 */
        "paddd %%mm7, %%mm1 \n\t" /* mm1 = b1 b0 */

        "paddd (%1), %%mm0 \n\t" /* mm0 += rounder */
        "psubd %%mm1, %%mm3 \n\t" /* mm3 = a1-b1 a0-b0 + rounder */

        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm3 \n\t" /* mm3 = y6 y7 */
        "paddd %%mm4, %%mm1 \n\t" /* mm1 = a1+b1 a0+b0 + rounder */

        "paddd %%mm2, %%mm0 \n\t" /* mm0 = a3 a2 + rounder */
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm1 \n\t" /* mm1 = y1 y0 */

        "paddd %%mm6, %%mm5 \n\t" /* mm5 = b3 b2 */
        "movq %%mm0, %%mm7 \n\t" /* mm7 = a3 a2 + rounder */

        "paddd %%mm5, %%mm0 \n\t" /* mm0 = a3+b3 a2+b2 + rounder */
        "psubd %%mm5, %%mm7 \n\t" /* mm7 = a3-b3 a2-b2 + rounder */
        :: "r" (table), "r" (rounder)
    );
}

static inline void mmx_row_tail (int16_t * const row, const int store)
{
    __asm__ volatile (
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t" /* mm0 = y3 y2 */

        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm7 \n\t" /* mm7 = y4 y5 */

        "packssdw %%mm0, %%mm1 \n\t" /* mm1 = y3 y2 y1 y0 */

        "packssdw %%mm3, %%mm7 \n\t" /* mm7 = y6 y7 y4 y5 */

        "movq %%mm1, (%0) \n\t" /* save y3 y2 y1 y0 */
        "movq %%mm7, %%mm4 \n\t" /* mm4 = y6 y7 y4 y5 */

        "pslld $16, %%mm7 \n\t" /* mm7 = y7 0 y5 0 */

        "psrld $16, %%mm4 \n\t" /* mm4 = 0 y6 0 y4 */

        "por %%mm4, %%mm7 \n\t" /* mm7 = y7 y6 y5 y4 */

        /* slot */

        "movq %%mm7, 8(%0) \n\t" /* save y7 y6 y5 y4 */
        :: "r" (row+store)
    );
}

static inline void mmx_row_mid (int16_t * const row, const int store,
                                const int offset, const int16_t * const table)
{

    __asm__ volatile (
        "movq (%0,%1), %%mm2 \n\t" /* mm2 = x6 x4 x2 x0 */
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t" /* mm0 = y3 y2 */

        "movq 8(%0,%1), %%mm5 \n\t" /* mm5 = x7 x5 x3 x1 */
        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm7 \n\t" /* mm7 = y4 y5 */

        "packssdw %%mm0, %%mm1 \n\t" /* mm1 = y3 y2 y1 y0 */
        "movq %%mm5, %%mm6 \n\t" /* mm6 = x7 x5 x3 x1 */

        "packssdw %%mm3, %%mm7 \n\t" /* mm7 = y6 y7 y4 y5 */
        "movq %%mm2, %%mm0 \n\t" /* mm0 = x6 x4 x2 x0 */

        "movq %%mm1, (%0,%2) \n\t" /* save y3 y2 y1 y0 */
        "movq %%mm7, %%mm1 \n\t" /* mm1 = y6 y7 y4 y5 */

        "punpckldq %%mm0, %%mm0 \n\t" /* mm0 = x2 x0 x2 x0 */
        "psrld $16, %%mm7 \n\t" /* mm7 = 0 y6 0 y4 */

        "movq (%3), %%mm3 \n\t" /* mm3 = C6 C4 C2 C4 */
        "pslld $16, %%mm1 \n\t" /* mm1 = y7 0 y5 0 */

        "movq 8(%3), %%mm4 \n\t" /* mm4 = -C2 -C4 C6 C4 */
        "por %%mm1, %%mm7 \n\t" /* mm7 = y7 y6 y5 y4 */

        "movq 16(%3), %%mm1 \n\t" /* mm1 = -C7 C3 C3 C1 */
        "punpckhdq %%mm2, %%mm2 \n\t" /* mm2 = x6 x4 x6 x4 */

        "movq %%mm7, 8(%0,%2) \n\t" /* save y7 y6 y5 y4 */
        "pmaddwd %%mm0, %%mm3 \n\t" /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */
        : : "r" (row), "r" ((x86_reg) (2*offset)), "r" ((x86_reg) (2*store)), "r" (table)
    );
}


#if 0
/* C column IDCT - it is just here to document the MMXEXT and MMX versions */
static inline void idct_col (int16_t * col, int offset)
{
/* multiplication - as implemented on mmx */
#define F(c,x) (((c) * (x)) >> 16)

/* saturation - it helps us handle torture test cases */
#define S(x) (((x)>32767) ? 32767 : ((x)<-32768) ? -32768 : (x))

    int16_t x0, x1, x2, x3, x4, x5, x6, x7;
    int16_t y0, y1, y2, y3, y4, y5, y6, y7;
    int16_t a0, a1, a2, a3, b0, b1, b2, b3;
    int16_t u04, v04, u26, v26, u17, v17, u35, v35, u12, v12;

    col += offset;

    x0 = col[0*8];
    x1 = col[1*8];
    x2 = col[2*8];
    x3 = col[3*8];
    x4 = col[4*8];
    x5 = col[5*8];
    x6 = col[6*8];
    x7 = col[7*8];

    u04 = S (x0 + x4);
    v04 = S (x0 - x4);
    u26 = S (F (T2, x6) + x2);
    v26 = S (F (T2, x2) - x6);

    a0 = S (u04 + u26);
    a1 = S (v04 + v26);
    a2 = S (v04 - v26);
    a3 = S (u04 - u26);

    u17 = S (F (T1, x7) + x1);
    v17 = S (F (T1, x1) - x7);
    u35 = S (F (T3, x5) + x3);
    v35 = S (F (T3, x3) - x5);

    b0 = S (u17 + u35);
    b3 = S (v17 - v35);
    u12 = S (u17 - u35);
    v12 = S (v17 + v35);
    u12 = S (2 * F (C4, u12));
    v12 = S (2 * F (C4, v12));
    b1 = S (u12 + v12);
    b2 = S (u12 - v12);

    y0 = S (a0 + b0) >> COL_SHIFT;
    y1 = S (a1 + b1) >> COL_SHIFT;
    y2 = S (a2 + b2) >> COL_SHIFT;
    y3 = S (a3 + b3) >> COL_SHIFT;

    y4 = S (a3 - b3) >> COL_SHIFT;
    y5 = S (a2 - b2) >> COL_SHIFT;
    y6 = S (a1 - b1) >> COL_SHIFT;
    y7 = S (a0 - b0) >> COL_SHIFT;

    col[0*8] = y0;
    col[1*8] = y1;
    col[2*8] = y2;
    col[3*8] = y3;
    col[4*8] = y4;
    col[5*8] = y5;
    col[6*8] = y6;
    col[7*8] = y7;
}
#endif


/* MMX column IDCT */
static inline void idct_col (int16_t * const col, const int offset)
{
#define T1 13036
#define T2 27146
#define T3 43790
#define C4 23170

    DECLARE_ALIGNED(8, static const short, t1_vector)[] = {
        T1,T1,T1,T1,
        T2,T2,T2,T2,
        T3,T3,T3,T3,
        C4,C4,C4,C4
    };

    /* column code adapted from Peter Gubanov */
    /* http://www.elecard.com/peter/idct.shtml */

    __asm__ volatile (
        "movq (%0), %%mm0 \n\t" /* mm0 = T1 */

        "movq 2*8(%1), %%mm1 \n\t" /* mm1 = x1 */
        "movq %%mm0, %%mm2 \n\t" /* mm2 = T1 */

        "movq 7*2*8(%1), %%mm4 \n\t" /* mm4 = x7 */
        "pmulhw %%mm1, %%mm0 \n\t" /* mm0 = T1*x1 */

        "movq 16(%0), %%mm5 \n\t" /* mm5 = T3 */
        "pmulhw %%mm4, %%mm2 \n\t" /* mm2 = T1*x7 */

        "movq 2*5*8(%1), %%mm6 \n\t" /* mm6 = x5 */
        "movq %%mm5, %%mm7 \n\t" /* mm7 = T3-1 */

        "movq 3*8*2(%1), %%mm3 \n\t" /* mm3 = x3 */
        "psubsw %%mm4, %%mm0 \n\t" /* mm0 = v17 */

        "movq 8(%0), %%mm4 \n\t" /* mm4 = T2 */
        "pmulhw %%mm3, %%mm5 \n\t" /* mm5 = (T3-1)*x3 */

        "paddsw %%mm2, %%mm1 \n\t" /* mm1 = u17 */
        "pmulhw %%mm6, %%mm7 \n\t" /* mm7 = (T3-1)*x5 */

        /* slot */

        "movq %%mm4, %%mm2 \n\t" /* mm2 = T2 */
        "paddsw %%mm3, %%mm5 \n\t" /* mm5 = T3*x3 */

        "pmulhw 2*8*2(%1), %%mm4 \n\t" /* mm4 = T2*x2 */
        "paddsw %%mm6, %%mm7 \n\t" /* mm7 = T3*x5 */

        "psubsw %%mm6, %%mm5 \n\t" /* mm5 = v35 */
        "paddsw %%mm3, %%mm7 \n\t" /* mm7 = u35 */

        "movq 6*8*2(%1), %%mm3 \n\t" /* mm3 = x6 */
        "movq %%mm0, %%mm6 \n\t" /* mm6 = v17 */

        "pmulhw %%mm3, %%mm2 \n\t" /* mm2 = T2*x6 */
        "psubsw %%mm5, %%mm0 \n\t" /* mm0 = b3 */

        "psubsw %%mm3, %%mm4 \n\t" /* mm4 = v26 */
        "paddsw %%mm6, %%mm5 \n\t" /* mm5 = v12 */

        "movq %%mm0, 3*8*2(%1)\n\t" /* save b3 in scratch0 */
        "movq %%mm1, %%mm6 \n\t" /* mm6 = u17 */

        "paddsw 2*8*2(%1), %%mm2 \n\t" /* mm2 = u26 */
        "paddsw %%mm7, %%mm6 \n\t" /* mm6 = b0 */

        "psubsw %%mm7, %%mm1 \n\t" /* mm1 = u12 */
        "movq %%mm1, %%mm7 \n\t" /* mm7 = u12 */

        "movq 0*8(%1), %%mm3 \n\t" /* mm3 = x0 */
        "paddsw %%mm5, %%mm1 \n\t" /* mm1 = u12+v12 */

        "movq 24(%0), %%mm0 \n\t" /* mm0 = C4/2 */
        "psubsw %%mm5, %%mm7 \n\t" /* mm7 = u12-v12 */

        "movq %%mm6, 5*8*2(%1)\n\t" /* save b0 in scratch1 */
        "pmulhw %%mm0, %%mm1 \n\t" /* mm1 = b1/2 */

        "movq %%mm4, %%mm6 \n\t" /* mm6 = v26 */
        "pmulhw %%mm0, %%mm7 \n\t" /* mm7 = b2/2 */

        "movq 4*8*2(%1), %%mm5 \n\t" /* mm5 = x4 */
        "movq %%mm3, %%mm0 \n\t" /* mm0 = x0 */

        "psubsw %%mm5, %%mm3 \n\t" /* mm3 = v04 */
        "paddsw %%mm5, %%mm0 \n\t" /* mm0 = u04 */

        "paddsw %%mm3, %%mm4 \n\t" /* mm4 = a1 */
        "movq %%mm0, %%mm5 \n\t" /* mm5 = u04 */

        "psubsw %%mm6, %%mm3 \n\t" /* mm3 = a2 */
        "paddsw %%mm2, %%mm5 \n\t" /* mm5 = a0 */

        "paddsw %%mm1, %%mm1 \n\t" /* mm1 = b1 */
        "psubsw %%mm2, %%mm0 \n\t" /* mm0 = a3 */

        "paddsw %%mm7, %%mm7 \n\t" /* mm7 = b2 */
        "movq %%mm3, %%mm2 \n\t" /* mm2 = a2 */

        "movq %%mm4, %%mm6 \n\t" /* mm6 = a1 */
        "paddsw %%mm7, %%mm3 \n\t" /* mm3 = a2+b2 */

        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm3\n\t" /* mm3 = y2 */
        "paddsw %%mm1, %%mm4\n\t" /* mm4 = a1+b1 */

        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm4\n\t" /* mm4 = y1 */
        "psubsw %%mm1, %%mm6 \n\t" /* mm6 = a1-b1 */

        "movq 5*8*2(%1), %%mm1 \n\t" /* mm1 = b0 */
        "psubsw %%mm7, %%mm2 \n\t" /* mm2 = a2-b2 */

        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm6\n\t" /* mm6 = y6 */
        "movq %%mm5, %%mm7 \n\t" /* mm7 = a0 */

        "movq %%mm4, 1*8*2(%1)\n\t" /* save y1 */
        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm2\n\t" /* mm2 = y5 */

        "movq %%mm3, 2*8*2(%1)\n\t" /* save y2 */
        "paddsw %%mm1, %%mm5 \n\t" /* mm5 = a0+b0 */

        "movq 3*8*2(%1), %%mm4 \n\t" /* mm4 = b3 */
        "psubsw %%mm1, %%mm7 \n\t" /* mm7 = a0-b0 */

        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm5\n\t" /* mm5 = y0 */
        "movq %%mm0, %%mm3 \n\t" /* mm3 = a3 */

        "movq %%mm2, 5*8*2(%1)\n\t" /* save y5 */
        "psubsw %%mm4, %%mm3 \n\t" /* mm3 = a3-b3 */

        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm7\n\t" /* mm7 = y7 */
        "paddsw %%mm0, %%mm4 \n\t" /* mm4 = a3+b3 */

        "movq %%mm5, 0*8*2(%1)\n\t" /* save y0 */
        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm3\n\t" /* mm3 = y4 */

        "movq %%mm6, 6*8*2(%1)\n\t" /* save y6 */
        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm4\n\t" /* mm4 = y3 */

        "movq %%mm7, 7*8*2(%1)\n\t" /* save y7 */

        "movq %%mm3, 4*8*2(%1)\n\t" /* save y4 */

        "movq %%mm4, 3*8*2(%1)\n\t" /* save y3 */
        :: "r" (t1_vector), "r" (col+offset)
    );

#undef T1
#undef T2
#undef T3
#undef C4
}


DECLARE_ALIGNED(8, static const int32_t, rounder0)[] =
    rounder ((1 << (COL_SHIFT - 1)) - 0.5);
DECLARE_ALIGNED(8, static const int32_t, rounder4)[] = rounder (0);
DECLARE_ALIGNED(8, static const int32_t, rounder1)[] =
    rounder (1.25683487303); /* C1*(C1/C4+C1+C7)/2 */
DECLARE_ALIGNED(8, static const int32_t, rounder7)[] =
    rounder (-0.25); /* C1*(C7/C4+C7-C1)/2 */
DECLARE_ALIGNED(8, static const int32_t, rounder2)[] =
    rounder (0.60355339059); /* C2 * (C6+C2)/2 */
DECLARE_ALIGNED(8, static const int32_t, rounder6)[] =
    rounder (-0.25); /* C2 * (C6-C2)/2 */
DECLARE_ALIGNED(8, static const int32_t, rounder3)[] =
    rounder (0.087788325588); /* C3*(-C3/C4+C3+C5)/2 */
DECLARE_ALIGNED(8, static const int32_t, rounder5)[] =
    rounder (-0.441341716183); /* C3*(-C5/C4+C5-C3)/2 */

#undef COL_SHIFT
#undef ROW_SHIFT

#define declare_idct(idct,table,idct_row_head,idct_row,idct_row_tail,idct_row_mid) \
void idct (int16_t * const block)                                       \
{                                                                       \
    DECLARE_ALIGNED(16, static const int16_t, table04)[] =              \
        table (22725, 21407, 19266, 16384, 12873, 8867, 4520);          \
    DECLARE_ALIGNED(16, static const int16_t, table17)[] =              \
        table (31521, 29692, 26722, 22725, 17855, 12299, 6270);         \
    DECLARE_ALIGNED(16, static const int16_t, table26)[] =              \
        table (29692, 27969, 25172, 21407, 16819, 11585, 5906);         \
    DECLARE_ALIGNED(16, static const int16_t, table35)[] =              \
        table (26722, 25172, 22654, 19266, 15137, 10426, 5315);         \
                                                                        \
    idct_row_head (block, 0*8, table04);                                \
    idct_row (table04, rounder0);                                       \
    idct_row_mid (block, 0*8, 4*8, table04);                            \
    idct_row (table04, rounder4);                                       \
    idct_row_mid (block, 4*8, 1*8, table17);                            \
    idct_row (table17, rounder1);                                       \
    idct_row_mid (block, 1*8, 7*8, table17);                            \
    idct_row (table17, rounder7);                                       \
    idct_row_mid (block, 7*8, 2*8, table26);                            \
    idct_row (table26, rounder2);                                       \
    idct_row_mid (block, 2*8, 6*8, table26);                            \
    idct_row (table26, rounder6);                                       \
    idct_row_mid (block, 6*8, 3*8, table35);                            \
    idct_row (table35, rounder3);                                       \
    idct_row_mid (block, 3*8, 5*8, table35);                            \
    idct_row (table35, rounder5);                                       \
    idct_row_tail (block, 5*8);                                         \
                                                                        \
    idct_col (block, 0);                                                \
    idct_col (block, 4);                                                \
}

declare_idct (ff_mmxext_idct, mmxext_table,
              mmxext_row_head, mmxext_row, mmxext_row_tail, mmxext_row_mid)

declare_idct (ff_mmx_idct, mmx_table,
              mmx_row_head, mmx_row, mmx_row_tail, mmx_row_mid)
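
#if 0
/* Usage sketch - like the C reference code above, this block is only here to
 * document the generated functions and is not part of the original sources.
 * Both ff_mmx_idct and ff_mmxext_idct perform an in-place 8x8 inverse DCT on
 * an aligned block of 64 int16_t coefficients in row-major order; callers
 * would typically pick one of them at init time based on the detected CPU
 * features. The helper name and the DC-only test value below are illustrative
 * assumptions, not code taken from mpeg2dec or FFmpeg. */
static void idct_usage_example(void)
{
    DECLARE_ALIGNED(16, int16_t, block)[64] = { 0 };

    block[0] = 64;          /* DC-only input block                          */
    ff_mmx_idct(block);     /* block[] now holds the reconstructed 8x8      */
                            /* spatial samples, each roughly block[0]/8 = 8 */
}
#endif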