/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"

/* Per-word rounding constants for half-pel averaging:
 * (a + b + round_tab[1]) >> 1 and (a + b + c + d + round_tab[2]) >> 2. */
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3]={
0x0000000000000000ULL,
0x0001000100010001ULL,
0x0002000200020002ULL,
};

/* All-ones bytes, used to correct pavgb rounding in sad8_4_mmx2(). */
DECLARE_ASM_CONST(8, uint64_t, bone)= 0x0101010101010101LL;

/* SAD of one 8xh block; caller must zero %mm7, word sums accumulate in %mm6. */
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(stride*h);
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm4    \n\t"
        "add %3, %%"REG_a"              \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%"REG_a"), %%mm1    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "movq (%2, %%"REG_a"), %%mm5    \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %3, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}

/* MMXEXT version using psadbw; sums accumulate in %mm6. */
static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

/* 16-wide SAD using unaligned SSE2 loads; returns the sum directly. */
static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
{
    int ret;
    __asm__ volatile(
        "pxor %%xmm2, %%xmm2            \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movdqu (%1), %%xmm0            \n\t"
        "movdqu (%1, %4), %%xmm1        \n\t"
        "psadbw (%2), %%xmm0            \n\t"
        "psadbw (%2, %4), %%xmm1        \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "paddw %%xmm1, %%xmm2           \n\t"
        "lea (%1,%4,2), %1              \n\t"
        "lea (%2,%4,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        "movhlps %%xmm2, %%xmm0         \n\t"
        "paddw %%xmm0, %%xmm2           \n\t"
        "movd %%xmm2, %3                \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2), "=r"(ret)
        : "r" ((x86_reg)stride)
    );
    return ret;
}
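/* The half-pel variants below (x2 = horizontal, y2 = vertical, xy2 = both)
 * compare blk2 against an interpolated version of blk1. pavgb computes the
 * rounded byte average (a + b + 1) >> 1 in a single instruction; chaining
 * it for the four-point case in sad8_4_mmx2() only approximates
 * (a + b + c + d + 2) >> 2, which is presumably why dsputil_init_pix_mmx()
 * installs the MMXEXT x2/y2/xy2 versions only when CODEC_FLAG_BITEXACT
 * is unset. */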
/* SAD against the horizontal half-pel average of blk1 and blk1+1. */
static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2,
                                 int stride, int h)
{
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm0               \n\t"
        "movq (%1, %3), %%mm1           \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "pavgb 1(%1, %3), %%mm1         \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

/* SAD against the vertical half-pel average of consecutive rows. */
static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        "movq (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1, %3), %%mm2           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2, %3), %%mm1         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

/* Approximate four-point (xy) half-pel SAD built from chained pavgb;
 * not bit-exact. */
static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    __asm__ volatile(
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "movq (%1), %%mm0               \n\t"
        "pavgb 1(%1), %%mm0             \n\t"
        "add %3, %1                     \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1), %%mm1               \n\t"
        "movq (%1,%3), %%mm2            \n\t"
        "pavgb 1(%1), %%mm1             \n\t"
        "pavgb 1(%1,%3), %%mm2          \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "pavgb %%mm1, %%mm0             \n\t"
        "pavgb %%mm2, %%mm1             \n\t"
        "psadbw (%2), %%mm0             \n\t"
        "psadbw (%2,%3), %%mm1          \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm1, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "lea (%1,%3,2), %1              \n\t"
        "lea (%2,%3,2), %2              \n\t"
        "sub $2, %0                     \n\t"
        " jg 1b                         \n\t"
        : "+r" (h), "+r" (blk1), "+r" (blk2)
        : "r" ((x86_reg)stride)
    );
}

/* SAD against the rounded average of two source blocks; %mm5 must hold the
 * rounding constant (round_tab[1]) and %mm7 must be zero. */
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(stride*h);
    __asm__ volatile(
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq (%2, %%"REG_a"), %%mm1    \n\t"
        "movq (%1, %%"REG_a"), %%mm2    \n\t"
        "movq (%2, %%"REG_a"), %%mm3    \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm2    \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b -len), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}
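/* Exact four-point half-pel SAD: each interpolated pixel is
 * (a + b + c + d + 2) >> 2, with the rounding constant taken from
 * round_tab[2] (the "16+" offset below indexes the third 8-byte entry).
 * The horizontal pair sums of the previous row are carried in %mm0/%mm1
 * across iterations so each source row is only loaded once. */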
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    x86_reg len= -(stride*h);
    __asm__ volatile(
        "movq (%1, %%"REG_a"), %%mm0    \n\t"
        "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        ".p2align 4                     \n\t"
        "1:                             \n\t"
        "movq (%2, %%"REG_a"), %%mm2    \n\t"
        "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "movq %%mm4, %%mm5              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "punpcklbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm5         \n\t"
        "paddw %%mm4, %%mm2             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm3, %%mm1             \n\t"
        "paddw %%mm5, %%mm0             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "movq (%3, %%"REG_a"), %%mm4    \n\t"
        "movq (%3, %%"REG_a"), %%mm5    \n\t"
        "psrlw $2, %%mm0                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm0          \n\t"
        "psubusb %%mm0, %%mm4           \n\t"
        "psubusb %%mm5, %%mm0           \n\t"
        "por %%mm4, %%mm0               \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "paddw %%mm4, %%mm6             \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "movq %%mm3, %%mm1              \n\t"
        "add %4, %%"REG_a"              \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 -len + stride), "r" (blk2 - len), "r" ((x86_reg)stride)
    );
}

/* Horizontally add the four 16-bit partial sums in %mm6; the total fits in
 * 16 bits even for a 16x16 block (16*16*255 < 65536). */
static inline int sum_mmx(void)
{
    int ret;
    __asm__ volatile(
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret&0xFFFF;
}

/* With psadbw the low dword of %mm6 already holds the full sum. */
static inline int sum_mmx2(void)
{
    int ret;
    __asm__ volatile(
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret;
}

/* Plain-MMX half-pel variants implemented via sad8_2_mmx(). */
static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+1, blk2, stride, h);
}
static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    sad8_2_mmx(blk1, blk1+stride, blk2, stride, h);
}
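/* PIX_SAD() instantiates the public SAD entry points for one instruction
 * set: sad8/sad16 plus their x2 (horizontal), y2 (vertical) and xy2
 * (four-point) half-pel variants. Each wrapper zeroes %mm7 (the zero
 * register used for unpacking) and %mm6 (the accumulator), loads
 * round_tab[1] into %mm5 for the x2/y2 cases, and reads the result back
 * with sum_<suf>(). */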
#define PIX_SAD(suf)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_x2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_y2a_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    assert(h==8);\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
    return sum_ ## suf();\
}\
\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_1_ ## suf(blk1  , blk2  , stride, h);\
    sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_x2a_ ## suf(blk1  , blk2  , stride, h);\
    sad8_x2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     "movq %0, %%mm5        \n\t"\
                     :: "m"(round_tab[1]) \
                     );\
\
    sad8_y2a_ ## suf(blk1  , blk2  , stride, h);\
    sad8_y2a_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
    __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
                     "pxor %%mm6, %%mm6     \n\t"\
                     ::);\
\
    sad8_4_ ## suf(blk1  , blk2  , stride, h);\
    sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\
\
    return sum_ ## suf();\
}\

PIX_SAD(mmx)
PIX_SAD(mmx2)

void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();

    if (mm_flags & AV_CPU_FLAG_MMX) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0]= sad16_mmx;
        c->sad[1]= sad8_mmx;
    }
    if (mm_flags & AV_CPU_FLAG_MMX2) {
        c->pix_abs[0][0] = sad16_mmx2;
        c->pix_abs[1][0] = sad8_mmx2;

        c->sad[0]= sad16_mmx2;
        c->sad[1]= sad8_mmx2;

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->pix_abs[0][1] = sad16_x2_mmx2;
            c->pix_abs[0][2] = sad16_y2_mmx2;
            c->pix_abs[0][3] = sad16_xy2_mmx2;
            c->pix_abs[1][1] = sad8_x2_mmx2;
            c->pix_abs[1][2] = sad8_y2_mmx2;
            c->pix_abs[1][3] = sad8_xy2_mmx2;
        }
    }
    if ((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW) && avctx->codec_id != CODEC_ID_SNOW) {
        c->sad[0]= sad16_sse2;
    }
}