/*
 * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <inttypes.h>

#include "config.h"
#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavutil/cpu.h"
#include "libavutil/pixdesc.h"

#if HAVE_INLINE_ASM

#define DITHER1XBPP

DECLARE_ASM_CONST(8, uint64_t, bF8) = 0xF8F8F8F8F8F8F8F8LL;
DECLARE_ASM_CONST(8, uint64_t, bFC) = 0xFCFCFCFCFCFCFCFCLL;
DECLARE_ASM_CONST(8, uint64_t, w10) = 0x0010001000100010LL;
DECLARE_ASM_CONST(8, uint64_t, w02) = 0x0002000200020002LL;

const DECLARE_ALIGNED(8, uint64_t, ff_dither4)[2] = {
    0x0103010301030103LL,
    0x0200020002000200LL,};

const DECLARE_ALIGNED(8, uint64_t, ff_dither8)[2] = {
    0x0602060206020602LL,
    0x0004000400040004LL,};

DECLARE_ASM_CONST(8, uint64_t, b16Mask) = 0x001F001F001F001FLL;
DECLARE_ASM_CONST(8, uint64_t, g16Mask) = 0x07E007E007E007E0LL;
DECLARE_ASM_CONST(8, uint64_t, r16Mask) = 0xF800F800F800F800LL;
DECLARE_ASM_CONST(8, uint64_t, b15Mask) = 0x001F001F001F001FLL;
DECLARE_ASM_CONST(8, uint64_t, g15Mask) = 0x03E003E003E003E0LL;
DECLARE_ASM_CONST(8, uint64_t, r15Mask) = 0x7C007C007C007C00LL;

DECLARE_ALIGNED(8, const uint64_t, ff_M24A)         = 0x00FF0000FF0000FFLL;
DECLARE_ALIGNED(8, const uint64_t, ff_M24B)         = 0xFF0000FF0000FF00LL;
DECLARE_ALIGNED(8, const uint64_t, ff_M24C)         = 0x0000FF0000FF0000LL;

DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YOffset)  = 0x1010101010101010ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UVOffset) = 0x8080808080808080ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_w1111)        = 0x0001000100010001ULL;

// MMX versions
#if HAVE_MMX_INLINE
#undef RENAME
#define COMPILE_TEMPLATE_MMXEXT 0
#define RENAME(a) a ## _mmx
#include "swscale_template.c"
#endif

// MMXEXT versions
#if HAVE_MMXEXT_INLINE
#undef RENAME
#undef COMPILE_TEMPLATE_MMXEXT
#define COMPILE_TEMPLATE_MMXEXT 1
#define RENAME(a) a ## _mmxext
#include "swscale_template.c"
#endif
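
/* Refresh the per-line dither constants and rebuild the packed
 * pointer/coefficient tables (lumMmxFilter, chrMmxFilter, alpMmxFilter)
 * that the inline MMX/MMXEXT vertical scaler reads for output line dstY. */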
void updateMMXDitherTables(SwsContext *c, int dstY, int lumBufIndex, int chrBufIndex,
                           int lastInLumBuf, int lastInChrBuf)
{
    const int dstH           = c->dstH;
    const int flags          = c->flags;
    int16_t **lumPixBuf      = c->lumPixBuf;
    int16_t **chrUPixBuf     = c->chrUPixBuf;
    int16_t **alpPixBuf      = c->alpPixBuf;
    const int vLumBufSize    = c->vLumBufSize;
    const int vChrBufSize    = c->vChrBufSize;
    int32_t *vLumFilterPos   = c->vLumFilterPos;
    int32_t *vChrFilterPos   = c->vChrFilterPos;
    int16_t *vLumFilter      = c->vLumFilter;
    int16_t *vChrFilter      = c->vChrFilter;
    int32_t *lumMmxFilter    = c->lumMmxFilter;
    int32_t *chrMmxFilter    = c->chrMmxFilter;
    int32_t av_unused *alpMmxFilter = c->alpMmxFilter;
    const int vLumFilterSize = c->vLumFilterSize;
    const int vChrFilterSize = c->vChrFilterSize;
    const int chrDstY        = dstY >> c->chrDstVSubSample;
    const int firstLumSrcY   = vLumFilterPos[dstY];    // First line needed as input
    const int firstChrSrcY   = vChrFilterPos[chrDstY]; // First line needed as input

    c->blueDither = ff_dither8[dstY & 1];
    if (c->dstFormat == AV_PIX_FMT_RGB555 || c->dstFormat == AV_PIX_FMT_BGR555)
        c->greenDither = ff_dither8[dstY & 1];
    else
        c->greenDither = ff_dither4[dstY & 1];
    c->redDither = ff_dither8[(dstY + 1) & 1];

    if (dstY < dstH - 2) {
        const int16_t **lumSrcPtr  = (const int16_t **)(void *)lumPixBuf  + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
        const int16_t **chrUSrcPtr = (const int16_t **)(void *)chrUPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
        const int16_t **alpSrcPtr  = (CONFIG_SWSCALE_ALPHA && alpPixBuf) ?
            (const int16_t **)(void *)alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
        int i;

        if (firstLumSrcY < 0 || firstLumSrcY + vLumFilterSize > c->srcH) {
            const int16_t **tmpY = (const int16_t **)lumPixBuf + 2 * vLumBufSize;
            int neg = -firstLumSrcY, i, end = FFMIN(c->srcH - firstLumSrcY, vLumFilterSize);
            for (i = 0; i < neg; i++)
                tmpY[i] = lumSrcPtr[neg];
            for ( ; i < end; i++)
                tmpY[i] = lumSrcPtr[i];
            for ( ; i < vLumFilterSize; i++)
                tmpY[i] = tmpY[i - 1];
            lumSrcPtr = tmpY;

            if (alpSrcPtr) {
                const int16_t **tmpA = (const int16_t **)alpPixBuf + 2 * vLumBufSize;
                for (i = 0; i < neg; i++)
                    tmpA[i] = alpSrcPtr[neg];
                for ( ; i < end; i++)
                    tmpA[i] = alpSrcPtr[i];
                for ( ; i < vLumFilterSize; i++)
                    tmpA[i] = tmpA[i - 1];
                alpSrcPtr = tmpA;
            }
        }
        if (firstChrSrcY < 0 || firstChrSrcY + vChrFilterSize > c->chrSrcH) {
            const int16_t **tmpU = (const int16_t **)chrUPixBuf + 2 * vChrBufSize;
            int neg = -firstChrSrcY, i, end = FFMIN(c->chrSrcH - firstChrSrcY, vChrFilterSize);
            for (i = 0; i < neg; i++) {
                tmpU[i] = chrUSrcPtr[neg];
            }
            for ( ; i < end; i++) {
                tmpU[i] = chrUSrcPtr[i];
            }
            for ( ; i < vChrFilterSize; i++) {
                tmpU[i] = tmpU[i - 1];
            }
            chrUSrcPtr = tmpU;
        }
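
        /* Fill the MMX vertical filter tables: with SWS_ACCURATE_RND the
         * coefficients are packed in pairs using the APCK_* layout, otherwise
         * each tap gets a four-dword entry holding the source-line pointer and
         * the coefficient replicated into both 16-bit halves (0x10001 * coeff). */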
        if (flags & SWS_ACCURATE_RND) {
            int s = APCK_SIZE / 8;
            for (i = 0; i < vLumFilterSize; i += 2) {
                *(const void**)&lumMmxFilter[s * i                 ] = lumSrcPtr[i];
                *(const void**)&lumMmxFilter[s * i + APCK_PTR2 / 4 ] = lumSrcPtr[i + (vLumFilterSize > 1)];
                lumMmxFilter[s * i + APCK_COEF / 4    ] =
                lumMmxFilter[s * i + APCK_COEF / 4 + 1] = vLumFilter[dstY * vLumFilterSize + i]
                    + (vLumFilterSize > 1 ? vLumFilter[dstY * vLumFilterSize + i + 1] << 16 : 0);
                if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
                    *(const void**)&alpMmxFilter[s * i                 ] = alpSrcPtr[i];
                    *(const void**)&alpMmxFilter[s * i + APCK_PTR2 / 4 ] = alpSrcPtr[i + (vLumFilterSize > 1)];
                    alpMmxFilter[s * i + APCK_COEF / 4    ] =
                    alpMmxFilter[s * i + APCK_COEF / 4 + 1] = lumMmxFilter[s * i + APCK_COEF / 4];
                }
            }
            for (i = 0; i < vChrFilterSize; i += 2) {
                *(const void**)&chrMmxFilter[s * i                 ] = chrUSrcPtr[i];
                *(const void**)&chrMmxFilter[s * i + APCK_PTR2 / 4 ] = chrUSrcPtr[i + (vChrFilterSize > 1)];
                chrMmxFilter[s * i + APCK_COEF / 4    ] =
                chrMmxFilter[s * i + APCK_COEF / 4 + 1] = vChrFilter[chrDstY * vChrFilterSize + i]
                    + (vChrFilterSize > 1 ? vChrFilter[chrDstY * vChrFilterSize + i + 1] << 16 : 0);
            }
        } else {
            for (i = 0; i < vLumFilterSize; i++) {
                *(const void**)&lumMmxFilter[4 * i + 0] = lumSrcPtr[i];
                lumMmxFilter[4 * i + 2] =
                lumMmxFilter[4 * i + 3] =
                    ((uint16_t)vLumFilter[dstY * vLumFilterSize + i]) * 0x10001U;
                if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
                    *(const void**)&alpMmxFilter[4 * i + 0] = alpSrcPtr[i];
                    alpMmxFilter[4 * i + 2] =
                    alpMmxFilter[4 * i + 3] = lumMmxFilter[4 * i + 2];
                }
            }
            for (i = 0; i < vChrFilterSize; i++) {
                *(const void**)&chrMmxFilter[4 * i + 0] = chrUSrcPtr[i];
                chrMmxFilter[4 * i + 2] =
                chrMmxFilter[4 * i + 3] =
                    ((uint16_t)vChrFilter[chrDstY * vChrFilterSize + i]) * 0x10001U;
            }
        }
    }
}

#if HAVE_MMXEXT
static void yuv2yuvX_sse3(const int16_t *filter, int filterSize,
                          const int16_t **src, uint8_t *dest, int dstW,
                          const uint8_t *dither, int offset)
{
    if (((uintptr_t)dest) & 15) {
        /* unaligned destination: fall back to the MMXEXT version */
        yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset);
        return;
    }
    if (offset) {
        __asm__ volatile("movq       (%0), %%xmm3\n\t"
                         "movdqa   %%xmm3, %%xmm4\n\t"
                         "psrlq       $24, %%xmm3\n\t"
                         "psllq       $40, %%xmm4\n\t"
                         "por      %%xmm4, %%xmm3\n\t"
                         :: "r"(dither)
                         );
    } else {
        __asm__ volatile("movq       (%0), %%xmm3\n\t"
                         :: "r"(dither)
                         );
    }
    filterSize--;
    __asm__ volatile(
        "pxor       %%xmm0, %%xmm0\n\t"
        "punpcklbw  %%xmm0, %%xmm3\n\t"
        "movd           %0, %%xmm1\n\t"
        "punpcklwd  %%xmm1, %%xmm1\n\t"
        "punpckldq  %%xmm1, %%xmm1\n\t"
        "punpcklqdq %%xmm1, %%xmm1\n\t"
        "psllw          $3, %%xmm1\n\t"
        "paddw      %%xmm1, %%xmm3\n\t"
        "psraw          $4, %%xmm3\n\t"
        ::"m"(filterSize)
    );
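    /* Main vertical-scaling loop: walk the (pointer, coefficient) filter list,
     * multiply-accumulate 16 pixels per column with pmulhw/paddw, then shift,
     * pack and store the result with a non-temporal movntdq; xmm7 keeps the
     * dither/rounding bias so the accumulators can be reset for the next column. */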
    __asm__ volatile(
        "movdqa                       %%xmm3, %%xmm4      \n\t"
        "movdqa                       %%xmm3, %%xmm7      \n\t"
        "movl                             %3, %%ecx       \n\t"
        "mov                              %0, %%"REG_d"   \n\t"\
        "mov                    (%%"REG_d"), %%"REG_S"    \n\t"\
        ".p2align                          4              \n\t" /* FIXME Unroll? */\
        "1:                                               \n\t"\
        "movddup               8(%%"REG_d"), %%xmm0       \n\t" /* filterCoeff */\
        "movdqa    (%%"REG_S", %%"REG_c", 2), %%xmm2      \n\t" /* srcData */\
        "movdqa  16(%%"REG_S", %%"REG_c", 2), %%xmm5      \n\t" /* srcData */\
        "add                             $16, %%"REG_d"   \n\t"\
        "mov                    (%%"REG_d"), %%"REG_S"    \n\t"\
        "test                     %%"REG_S", %%"REG_S"    \n\t"\
        "pmulhw                       %%xmm0, %%xmm2      \n\t"\
        "pmulhw                       %%xmm0, %%xmm5      \n\t"\
        "paddw                        %%xmm2, %%xmm3      \n\t"\
        "paddw                        %%xmm5, %%xmm4      \n\t"\
        " jnz                             1b              \n\t"\
        "psraw                            $3, %%xmm3      \n\t"\
        "psraw                            $3, %%xmm4      \n\t"\
        "packuswb                     %%xmm4, %%xmm3      \n\t"
        "movntdq                      %%xmm3, (%1, %%"REG_c")\n\t"
        "add                             $16, %%"REG_c"   \n\t"\
        "cmp                              %2, %%"REG_c"   \n\t"\
        "movdqa                       %%xmm7, %%xmm3      \n\t"
        "movdqa                       %%xmm7, %%xmm4      \n\t"
        "mov                              %0, %%"REG_d"   \n\t"\
        "mov                    (%%"REG_d"), %%"REG_S"    \n\t"\
        "jb                               1b              \n\t"\
        :: "g" (filter),
           "r" (dest - offset), "g" ((x86_reg)(dstW + offset)), "m" (offset)
        : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm7",)
          "%"REG_d, "%"REG_S, "%"REG_c
    );
}
#endif

#endif /* HAVE_INLINE_ASM */
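
/* Prototypes for the horizontal/vertical scalers and packed-input converters
 * implemented in external assembly; ff_sws_init_swscale_x86() below selects
 * among them based on the detected CPU features. */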
#define SCALE_FUNC(filter_n, from_bpc, to_bpc, opt) \
void ff_hscale ## from_bpc ## to ## to_bpc ## _ ## filter_n ## _ ## opt( \
                                                SwsContext *c, int16_t *data, \
                                                int dstW, const uint8_t *src, \
                                                const int16_t *filter, \
                                                const int32_t *filterPos, int filterSize)

#define SCALE_FUNCS(filter_n, opt) \
    SCALE_FUNC(filter_n,  8, 15, opt); \
    SCALE_FUNC(filter_n,  9, 15, opt); \
    SCALE_FUNC(filter_n, 10, 15, opt); \
    SCALE_FUNC(filter_n, 12, 15, opt); \
    SCALE_FUNC(filter_n, 14, 15, opt); \
    SCALE_FUNC(filter_n, 16, 15, opt); \
    SCALE_FUNC(filter_n,  8, 19, opt); \
    SCALE_FUNC(filter_n,  9, 19, opt); \
    SCALE_FUNC(filter_n, 10, 19, opt); \
    SCALE_FUNC(filter_n, 12, 19, opt); \
    SCALE_FUNC(filter_n, 14, 19, opt); \
    SCALE_FUNC(filter_n, 16, 19, opt)

#define SCALE_FUNCS_MMX(opt) \
    SCALE_FUNCS(4, opt); \
    SCALE_FUNCS(8, opt); \
    SCALE_FUNCS(X, opt)

#define SCALE_FUNCS_SSE(opt) \
    SCALE_FUNCS(4, opt); \
    SCALE_FUNCS(8, opt); \
    SCALE_FUNCS(X4, opt); \
    SCALE_FUNCS(X8, opt)

#if ARCH_X86_32
SCALE_FUNCS_MMX(mmx);
#endif
SCALE_FUNCS_SSE(sse2);
SCALE_FUNCS_SSE(ssse3);
SCALE_FUNCS_SSE(sse4);

#define VSCALEX_FUNC(size, opt) \
void ff_yuv2planeX_ ## size ## _ ## opt(const int16_t *filter, int filterSize, \
                                        const int16_t **src, uint8_t *dest, int dstW, \
                                        const uint8_t *dither, int offset)
#define VSCALEX_FUNCS(opt) \
    VSCALEX_FUNC(8,  opt); \
    VSCALEX_FUNC(9,  opt); \
    VSCALEX_FUNC(10, opt)

#if ARCH_X86_32
VSCALEX_FUNCS(mmxext);
#endif
VSCALEX_FUNCS(sse2);
VSCALEX_FUNCS(sse4);
VSCALEX_FUNC(16, sse4);
VSCALEX_FUNCS(avx);

#define VSCALE_FUNC(size, opt) \
void ff_yuv2plane1_ ## size ## _ ## opt(const int16_t *src, uint8_t *dst, int dstW, \
                                        const uint8_t *dither, int offset)
#define VSCALE_FUNCS(opt1, opt2) \
    VSCALE_FUNC(8,  opt1); \
    VSCALE_FUNC(9,  opt2); \
    VSCALE_FUNC(10, opt2); \
    VSCALE_FUNC(16, opt1)

#if ARCH_X86_32
VSCALE_FUNCS(mmx, mmxext);
#endif
VSCALE_FUNCS(sse2, sse2);
VSCALE_FUNC(16, sse4);
VSCALE_FUNCS(avx, avx);

#define INPUT_Y_FUNC(fmt, opt) \
void ff_ ## fmt ## ToY_ ## opt(uint8_t *dst, const uint8_t *src, \
                               const uint8_t *unused1, const uint8_t *unused2, \
                               int w, uint32_t *unused)
#define INPUT_UV_FUNC(fmt, opt) \
void ff_ ## fmt ## ToUV_ ## opt(uint8_t *dstU, uint8_t *dstV, \
                                const uint8_t *unused0, \
                                const uint8_t *src1, \
                                const uint8_t *src2, \
                                int w, uint32_t *unused)
#define INPUT_FUNC(fmt, opt) \
    INPUT_Y_FUNC(fmt, opt); \
    INPUT_UV_FUNC(fmt, opt)
#define INPUT_FUNCS(opt) \
    INPUT_FUNC(uyvy, opt); \
    INPUT_FUNC(yuyv, opt); \
    INPUT_UV_FUNC(nv12, opt); \
    INPUT_UV_FUNC(nv21, opt); \
    INPUT_FUNC(rgba, opt); \
    INPUT_FUNC(bgra, opt); \
    INPUT_FUNC(argb, opt); \
    INPUT_FUNC(abgr, opt); \
    INPUT_FUNC(rgb24, opt); \
    INPUT_FUNC(bgr24, opt)

#if ARCH_X86_32
INPUT_FUNCS(mmx);
#endif
INPUT_FUNCS(sse2);
INPUT_FUNCS(ssse3);
INPUT_FUNCS(avx);
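
/* Runtime dispatch: pick the fastest available implementation of each scaler
 * and converter for this SwsContext, starting with the inline MMX/MMXEXT code
 * and then letting the SSE2/SSSE3/SSE4/AVX assembly override it where supported. */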
av_cold void ff_sws_init_swscale_x86(SwsContext *c)
{
    int cpu_flags = av_get_cpu_flags();

#if HAVE_MMX_INLINE
    if (INLINE_MMX(cpu_flags))
        sws_init_swscale_mmx(c);
#endif
#if HAVE_MMXEXT_INLINE
    if (INLINE_MMXEXT(cpu_flags))
        sws_init_swscale_mmxext(c);
    if (cpu_flags & AV_CPU_FLAG_SSE3) {
        if (c->use_mmx_vfilter && !(c->flags & SWS_ACCURATE_RND))
            c->yuv2planeX = yuv2yuvX_sse3;
    }
#endif

#define ASSIGN_SCALE_FUNC2(hscalefn, filtersize, opt1, opt2) do { \
    if (c->srcBpc == 8) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale8to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale8to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 9) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale9to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale9to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 10) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale10to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale10to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 12) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale12to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale12to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 14 || ((c->srcFormat == AV_PIX_FMT_PAL8 || isAnyRGB(c->srcFormat)) && av_pix_fmt_desc_get(c->srcFormat)->comp[0].depth_minus1 < 15)) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale14to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale14to19_ ## filtersize ## _ ## opt1; \
    } else { /* c->srcBpc == 16 */ \
        av_assert0(c->srcBpc == 16); \
        hscalefn = c->dstBpc <= 14 ? ff_hscale16to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale16to19_ ## filtersize ## _ ## opt1; \
    } \
} while (0)
#define ASSIGN_MMX_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \
    switch (filtersize) { \
    case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \
    case 8:  ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \
    default: ASSIGN_SCALE_FUNC2(hscalefn, X, opt1, opt2); break; \
    }
#define ASSIGN_VSCALEX_FUNC(vscalefn, opt, do_16_case, condition_8bit) \
switch (c->dstBpc) { \
    case 16:                          do_16_case;                          break; \
    case 10: if (!isBE(c->dstFormat)) vscalefn = ff_yuv2planeX_10_ ## opt; break; \
    case 9:  if (!isBE(c->dstFormat)) vscalefn = ff_yuv2planeX_9_  ## opt; break; \
    default: if (condition_8bit) /*vscalefn = ff_yuv2planeX_8_ ## opt;*/ break; \
    }
#define ASSIGN_VSCALE_FUNC(vscalefn, opt1, opt2, opt2chk) \
    switch (c->dstBpc) { \
    case 16: if (!isBE(c->dstFormat))            vscalefn = ff_yuv2plane1_16_ ## opt1; break; \
    case 10: if (!isBE(c->dstFormat) && opt2chk) vscalefn = ff_yuv2plane1_10_ ## opt2; break; \
    case 9:  if (!isBE(c->dstFormat) && opt2chk) vscalefn = ff_yuv2plane1_9_  ## opt2; break; \
    case 8:                                      vscalefn = ff_yuv2plane1_8_  ## opt1; break; \
    default: av_assert0(c->dstBpc > 8); \
    }
#define case_rgb(x, X, opt) \
        case AV_PIX_FMT_ ## X: \
            c->lumToYV12 = ff_ ## x ## ToY_ ## opt; \
            if (!c->chrSrcHSubSample) \
                c->chrToYV12 = ff_ ## x ## ToUV_ ## opt; \
            break
#if ARCH_X86_32
    if (EXTERNAL_MMX(cpu_flags)) {
        ASSIGN_MMX_SCALE_FUNC(c->hyScale, c->hLumFilterSize, mmx, mmx);
        ASSIGN_MMX_SCALE_FUNC(c->hcScale, c->hChrFilterSize, mmx, mmx);
        ASSIGN_VSCALE_FUNC(c->yuv2plane1, mmx, mmxext, cpu_flags & AV_CPU_FLAG_MMXEXT);

        switch (c->srcFormat) {
        case AV_PIX_FMT_Y400A:
            c->lumToYV12 = ff_yuyvToY_mmx;
            if (c->alpPixBuf)
                c->alpToYV12 = ff_uyvyToY_mmx;
            break;
        case AV_PIX_FMT_YUYV422:
            c->lumToYV12 = ff_yuyvToY_mmx;
            c->chrToYV12 = ff_yuyvToUV_mmx;
            break;
        case AV_PIX_FMT_UYVY422:
            c->lumToYV12 = ff_uyvyToY_mmx;
            c->chrToYV12 = ff_uyvyToUV_mmx;
            break;
        case AV_PIX_FMT_NV12:
            c->chrToYV12 = ff_nv12ToUV_mmx;
            break;
        case AV_PIX_FMT_NV21:
            c->chrToYV12 = ff_nv21ToUV_mmx;
            break;
        case_rgb(rgb24, RGB24, mmx);
        case_rgb(bgr24, BGR24, mmx);
        case_rgb(bgra,  BGRA,  mmx);
        case_rgb(rgba,  RGBA,  mmx);
        case_rgb(abgr,  ABGR,  mmx);
        case_rgb(argb,  ARGB,  mmx);
        default:
            break;
        }
    }
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        ASSIGN_VSCALEX_FUNC(c->yuv2planeX, mmxext, , 1);
    }
#endif /* ARCH_X86_32 */
#define ASSIGN_SSE_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \
    switch (filtersize) { \
    case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \
    case 8:  ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \
    default: if (filtersize & 4) ASSIGN_SCALE_FUNC2(hscalefn, X4, opt1, opt2); \
             else                ASSIGN_SCALE_FUNC2(hscalefn, X8, opt1, opt2); \
             break; \
    }
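
    /* The SSE2 and newer paths below are built for both 32- and 64-bit targets
     * and override the assignments made above whenever the CPU supports them. */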
    if (EXTERNAL_SSE2(cpu_flags)) {
        ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, sse2, sse2);
        ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, sse2, sse2);
        ASSIGN_VSCALEX_FUNC(c->yuv2planeX, sse2, ,
                            HAVE_ALIGNED_STACK || ARCH_X86_64);
        ASSIGN_VSCALE_FUNC(c->yuv2plane1, sse2, sse2, 1);

        switch (c->srcFormat) {
        case AV_PIX_FMT_Y400A:
            c->lumToYV12 = ff_yuyvToY_sse2;
            if (c->alpPixBuf)
                c->alpToYV12 = ff_uyvyToY_sse2;
            break;
        case AV_PIX_FMT_YUYV422:
            c->lumToYV12 = ff_yuyvToY_sse2;
            c->chrToYV12 = ff_yuyvToUV_sse2;
            break;
        case AV_PIX_FMT_UYVY422:
            c->lumToYV12 = ff_uyvyToY_sse2;
            c->chrToYV12 = ff_uyvyToUV_sse2;
            break;
        case AV_PIX_FMT_NV12:
            c->chrToYV12 = ff_nv12ToUV_sse2;
            break;
        case AV_PIX_FMT_NV21:
            c->chrToYV12 = ff_nv21ToUV_sse2;
            break;
        case_rgb(rgb24, RGB24, sse2);
        case_rgb(bgr24, BGR24, sse2);
        case_rgb(bgra,  BGRA,  sse2);
        case_rgb(rgba,  RGBA,  sse2);
        case_rgb(abgr,  ABGR,  sse2);
        case_rgb(argb,  ARGB,  sse2);
        default:
            break;
        }
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, ssse3, ssse3);
        ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, ssse3, ssse3);
        switch (c->srcFormat) {
        case_rgb(rgb24, RGB24, ssse3);
        case_rgb(bgr24, BGR24, ssse3);
        default:
            break;
        }
    }
    if (EXTERNAL_SSE4(cpu_flags)) {
        /* Xto15 don't need special sse4 functions */
        ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, sse4, ssse3);
        ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, sse4, ssse3);
        ASSIGN_VSCALEX_FUNC(c->yuv2planeX, sse4,
                            if (!isBE(c->dstFormat)) c->yuv2planeX = ff_yuv2planeX_16_sse4,
                            HAVE_ALIGNED_STACK || ARCH_X86_64);
        if (c->dstBpc == 16 && !isBE(c->dstFormat))
            c->yuv2plane1 = ff_yuv2plane1_16_sse4;
    }

    if (EXTERNAL_AVX(cpu_flags)) {
        ASSIGN_VSCALEX_FUNC(c->yuv2planeX, avx, ,
                            HAVE_ALIGNED_STACK || ARCH_X86_64);
        ASSIGN_VSCALE_FUNC(c->yuv2plane1, avx, avx, 1);

        switch (c->srcFormat) {
        case AV_PIX_FMT_YUYV422:
            c->chrToYV12 = ff_yuyvToUV_avx;
            break;
        case AV_PIX_FMT_UYVY422:
            c->chrToYV12 = ff_uyvyToUV_avx;
            break;
        case AV_PIX_FMT_NV12:
            c->chrToYV12 = ff_nv12ToUV_avx;
            break;
        case AV_PIX_FMT_NV21:
            c->chrToYV12 = ff_nv21ToUV_avx;
            break;
        case_rgb(rgb24, RGB24, avx);
        case_rgb(bgr24, BGR24, avx);
        case_rgb(bgra,  BGRA,  avx);
        case_rgb(rgba,  RGBA,  avx);
        case_rgb(abgr,  ABGR,  avx);
        case_rgb(argb,  ARGB,  avx);
        default:
            break;
        }
    }
}