/*
 * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "libavutil/avutil.h"
#include "libavutil/bswap.h"
#include "libavutil/cpu.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avassert.h"
#include "config.h"
#include "rgb2rgb.h"
#include "swscale.h"
#include "swscale_internal.h"

/* Read one 16-bit component, honoring the source format's endianness. */
#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))

/* The templates below load components 0 and 2 into the generic locals
 * r_b and b_r; for BGR-ordered formats those two are swapped, so these
 * macros select the correct channel per source format. */
#define r ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? b_r : r_b)
#define b ((origin == AV_PIX_FMT_BGR48BE || origin == AV_PIX_FMT_BGR48LE || origin == AV_PIX_FMT_BGRA64BE || origin == AV_PIX_FMT_BGRA64LE) ? r_b : b_r)

/* 64-bit RGBA/BGRA (16 bits per component, 4 components) -> 16-bit Y. */
static av_always_inline void
rgb64ToY_c_template(uint16_t *dst, const uint16_t *src, int width,
                    enum AVPixelFormat origin, int32_t *rgb2yuv)
{
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        unsigned int r_b = input_pixel(&src[i*4+0]);
        unsigned int g   = input_pixel(&src[i*4+1]);
        unsigned int b_r = input_pixel(&src[i*4+2]);

        /* fixed-point dot product; 0x2001<<(SHIFT-1) adds the Y offset
         * (16 << 8 scaled) plus 0.5 for rounding */
        dst[i] = (ry*r + gy*g + by*b + (0x2001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}

/* 64-bit RGBA/BGRA -> 16-bit U/V, one output sample per input pixel. */
static av_always_inline void
rgb64ToUV_c_template(uint16_t *dstU, uint16_t *dstV,
                     const uint16_t *src1, const uint16_t *src2,
                     int width, enum AVPixelFormat origin, int32_t *rgb2yuv)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1==src2);
    for (i = 0; i < width; i++) {
        int r_b = input_pixel(&src1[i*4+0]);
        int g   = input_pixel(&src1[i*4+1]);
        int b_r = input_pixel(&src1[i*4+2]);

        /* 0x10001<<(SHIFT-1) = chroma bias (128 << 9 scaled) + rounding */
        dstU[i] = (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}

/* 64-bit RGBA/BGRA -> 16-bit U/V with 2x horizontal chroma subsampling:
 * each output sample averages two adjacent input pixels. */
static av_always_inline void
rgb64ToUV_half_c_template(uint16_t *dstU, uint16_t *dstV,
                          const uint16_t *src1, const uint16_t *src2,
                          int width, enum AVPixelFormat origin, int32_t *rgb2yuv)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1==src2);
    for (i = 0; i < width; i++) {
        int r_b = (input_pixel(&src1[8 * i + 0]) + input_pixel(&src1[8 * i + 4]) + 1) >> 1;
        int g   = (input_pixel(&src1[8 * i + 1]) + input_pixel(&src1[8 * i + 5]) + 1) >> 1;
        int b_r = (input_pixel(&src1[8 * i + 2]) + input_pixel(&src1[8 * i + 6]) + 1) >> 1;

        dstU[i]= (ru*r + gu*g + bu*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
        dstV[i]= (rv*r + gv*g + bv*b + (0x10001<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
    }
}

/* Instantiate the three public wrappers (ToY/ToUV/ToUV_half) for one
 * 64-bit packed format; the templates are inlined per-format so the
 * origin checks constant-fold away. */
#define rgb64funcs(pattern, BE_LE, origin) \
static void pattern ## 64 ## BE_LE ## ToY_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused0, const uint8_t *unused1,\
                                    int width, uint32_t *rgb2yuv) \
{ \
    const uint16_t *src = (const uint16_t *) _src; \
    uint16_t *dst = (uint16_t *) _dst; \
    rgb64ToY_c_template(dst, src, width, origin, rgb2yuv); \
} \
 \
static void pattern ## 64 ## BE_LE ## ToUV_c(uint8_t *_dstU, uint8_t *_dstV, \
                                    const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
                                    int width, uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *) _src1, \
                   *src2 = (const uint16_t *) _src2; \
    uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
    rgb64ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
} \
 \
static void pattern ## 64 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, \
                                    const uint8_t *unused0, const uint8_t *_src1, const uint8_t *_src2, \
                                    int width, uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *) _src1, \
                   *src2 = (const uint16_t *) _src2; \
    uint16_t *dstU = (uint16_t *) _dstU, *dstV = (uint16_t *) _dstV; \
    rgb64ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
}

rgb64funcs(rgb, LE, AV_PIX_FMT_RGBA64LE)
rgb64funcs(rgb, BE, AV_PIX_FMT_RGBA64BE)
rgb64funcs(bgr, LE, AV_PIX_FMT_BGRA64LE)
rgb64funcs(bgr, BE, AV_PIX_FMT_BGRA64BE)

/* 48-bit RGB/BGR (16 bits per component, 3 components) -> 16-bit Y. */
static av_always_inline void rgb48ToY_c_template(uint16_t *dst,
                                                 const uint16_t *src, int width,
                                                 enum AVPixelFormat origin,
                                                 int32_t *rgb2yuv)
{
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        unsigned int r_b = input_pixel(&src[i * 3 + 0]);
        unsigned int g   = input_pixel(&src[i * 3 + 1]);
        unsigned int b_r = input_pixel(&src[i * 3 + 2]);

        dst[i] = (ry*r + gy*g + by*b + (0x2001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

/* 48-bit RGB/BGR -> 16-bit U/V, one output sample per input pixel. */
static av_always_inline void rgb48ToUV_c_template(uint16_t *dstU,
                                                  uint16_t *dstV,
                                                  const uint16_t *src1,
                                                  const uint16_t *src2,
                                                  int width,
                                                  enum AVPixelFormat origin,
                                                  int32_t *rgb2yuv)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        int r_b = input_pixel(&src1[i * 3 + 0]);
        int g   = input_pixel(&src1[i * 3 + 1]);
        int b_r = input_pixel(&src1[i * 3 + 2]);

        dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

/* 48-bit RGB/BGR -> 16-bit U/V, averaging two adjacent pixels (2x
 * horizontal chroma subsampling). */
static av_always_inline void rgb48ToUV_half_c_template(uint16_t *dstU,
                                                       uint16_t *dstV,
                                                       const uint16_t *src1,
                                                       const uint16_t *src2,
                                                       int width,
                                                       enum AVPixelFormat origin,
                                                       int32_t *rgb2yuv)
{
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        int r_b = (input_pixel(&src1[6 * i + 0]) +
                   input_pixel(&src1[6 * i + 3]) + 1) >> 1;
        int g   = (input_pixel(&src1[6 * i + 1]) +
                   input_pixel(&src1[6 * i + 4]) + 1) >> 1;
        int b_r = (input_pixel(&src1[6 * i + 2]) +
                   input_pixel(&src1[6 * i + 5]) + 1) >> 1;

        dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
        dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT;
    }
}

#undef r
#undef b
#undef input_pixel

/* Instantiate ToY/ToUV/ToUV_half wrappers for one 48-bit packed format. */
#define rgb48funcs(pattern, BE_LE, origin) \
static void pattern ## 48 ## BE_LE ## ToY_c(uint8_t *_dst, \
                                            const uint8_t *_src, \
                                            const uint8_t *unused0, const uint8_t *unused1,\
                                            int width, \
                                            uint32_t *rgb2yuv) \
{ \
    const uint16_t *src = (const uint16_t *)_src; \
    uint16_t *dst       = (uint16_t *)_dst; \
    rgb48ToY_c_template(dst, src, width, origin, rgb2yuv); \
} \
 \
static void pattern ## 48 ## BE_LE ## ToUV_c(uint8_t *_dstU, \
                                             uint8_t *_dstV, \
                                             const uint8_t *unused0, \
                                             const uint8_t *_src1, \
                                             const uint8_t *_src2, \
                                             int width, \
                                             uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *)_src1, \
                   *src2 = (const uint16_t *)_src2; \
    uint16_t *dstU = (uint16_t *)_dstU, \
             *dstV = (uint16_t *)_dstV; \
    rgb48ToUV_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
} \
 \
static void pattern ## 48 ## BE_LE ## ToUV_half_c(uint8_t *_dstU, \
                                                  uint8_t *_dstV, \
                                                  const uint8_t *unused0, \
                                                  const uint8_t *_src1, \
                                                  const uint8_t *_src2, \
                                                  int width, \
                                                  uint32_t *rgb2yuv) \
{ \
    const uint16_t *src1 = (const uint16_t *)_src1, \
                   *src2 = (const uint16_t *)_src2; \
    uint16_t *dstU = (uint16_t *)_dstU, \
             *dstV = (uint16_t *)_dstV; \
    rgb48ToUV_half_c_template(dstU, dstV, src1, src2, width, origin, rgb2yuv); \
}

rgb48funcs(rgb, LE, AV_PIX_FMT_RGB48LE)
rgb48funcs(rgb, BE, AV_PIX_FMT_RGB48BE)
rgb48funcs(bgr, LE, AV_PIX_FMT_BGR48LE)
rgb48funcs(bgr, BE, AV_PIX_FMT_BGR48BE)

/* Load one packed 16/32-bit pixel: 32-bit 8888 formats are read as a
 * native-order aligned 32-bit word, 16-bit formats honor endianness. */
#define input_pixel(i) ((origin == AV_PIX_FMT_RGBA ||  \
                         origin == AV_PIX_FMT_BGRA ||  \
                         origin == AV_PIX_FMT_ARGB ||  \
                         origin == AV_PIX_FMT_ABGR)    \
                        ? AV_RN32A(&src[(i) * 4])      \
                        : (isBE(origin) ? AV_RB16(&src[(i) * 2]) \
                                        : AV_RL16(&src[(i) * 2])))

/* Packed 16/32-bit RGB -> 15-bit Y (dst is int16_t).
 * shr/shg/shb extract the channels after masking; shp drops a leading
 * alpha byte; rsh/gsh/bsh pre-scale the coefficients so all formats use
 * the same fixed-point scale S. */
static av_always_inline void rgb16_32ToY_c_template(int16_t *dst,
                                                    const uint8_t *src,
                                                    int width,
                                                    enum AVPixelFormat origin,
                                                    int shr, int shg,
                                                    int shb, int shp,
                                                    int maskr, int maskg,
                                                    int maskb, int rsh,
                                                    int gsh, int bsh, int S,
                                                    int32_t *rgb2yuv)
{
    const int ry       = rgb2yuv[RY_IDX]<<rsh, gy = rgb2yuv[GY_IDX]<<gsh, by = rgb2yuv[BY_IDX]<<bsh;
    const unsigned rnd = (32<<((S)-1)) + (1<<(S-7));
    int i;

    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b  = (px & maskb) >> shb;
        int g  = (px & maskg) >> shg;
        int r  = (px & maskr) >> shr;

        dst[i] = (ry * r + gy * g + by * b + rnd) >> ((S)-6);
    }
}

/* Packed 16/32-bit RGB -> 15-bit U/V, one sample per pixel. */
static av_always_inline void rgb16_32ToUV_c_template(int16_t *dstU,
                                                     int16_t *dstV,
                                                     const uint8_t *src,
                                                     int width,
                                                     enum AVPixelFormat origin,
                                                     int shr, int shg,
                                                     int shb, int shp,
                                                     int maskr, int maskg,
                                                     int maskb, int rsh,
                                                     int gsh, int bsh, int S,
                                                     int32_t *rgb2yuv)
{
    const int ru       = rgb2yuv[RU_IDX] << rsh, gu = rgb2yuv[GU_IDX] << gsh, bu = rgb2yuv[BU_IDX] << bsh,
              rv       = rgb2yuv[RV_IDX] << rsh, gv = rgb2yuv[GV_IDX] << gsh, bv = rgb2yuv[BV_IDX] << bsh;
    const unsigned rnd = (256u<<((S)-1)) + (1<<(S-7));
    int i;

    for (i = 0; i < width; i++) {
        int px = input_pixel(i) >> shp;
        int b  = (px & maskb) >> shb;
        int g  = (px & maskg) >> shg;
        int r  = (px & maskr) >> shr;

        dstU[i] = (ru * r + gu * g + bu * b + rnd) >> ((S)-6);
        dstV[i] = (rv * r + gv * g + bv * b + rnd) >> ((S)-6);
    }
}

/* Packed 16/32-bit RGB -> 15-bit U/V with 2x horizontal subsampling.
 * Pairs of pixels are summed while still packed: widening each channel
 * mask by one bit lets the per-channel sums be extracted from the single
 * packed sum (the 565/shp special case compensates for green's carry). */
static av_always_inline void rgb16_32ToUV_half_c_template(int16_t *dstU,
                                                          int16_t *dstV,
                                                          const uint8_t *src,
                                                          int width,
                                                          enum AVPixelFormat origin,
                                                          int shr, int shg,
                                                          int shb, int shp,
                                                          int maskr, int maskg,
                                                          int maskb, int rsh,
                                                          int gsh, int bsh, int S,
                                                          int32_t *rgb2yuv)
{
    const int ru       = rgb2yuv[RU_IDX] << rsh, gu = rgb2yuv[GU_IDX] << gsh, bu = rgb2yuv[BU_IDX] << bsh,
              rv       = rgb2yuv[RV_IDX] << rsh, gv = rgb2yuv[GV_IDX] << gsh, bv = rgb2yuv[BV_IDX] << bsh,
              maskgx   = ~(maskr | maskb);
    const unsigned rnd = (256U<<(S)) + (1<<(S-6));
    int i;

    maskr |= maskr << 1;
    maskb |= maskb << 1;
    maskg |= maskg << 1;
    for (i = 0; i < width; i++) {
        unsigned px0 = input_pixel(2 * i + 0) >> shp;
        unsigned px1 = input_pixel(2 * i + 1) >> shp;
        int b, r, g = (px0 & maskgx) + (px1 & maskgx);
        int rb = px0 + px1 - g;

        b = (rb & maskb) >> shb;
        if (shp ||
            origin == AV_PIX_FMT_BGR565LE || origin == AV_PIX_FMT_BGR565BE ||
            origin == AV_PIX_FMT_RGB565LE || origin == AV_PIX_FMT_RGB565BE) {
            g >>= shg;
        } else {
            g = (g  & maskg) >> shg;
        }
        r = (rb & maskr) >> shr;

        dstU[i] = (ru * r + gu * g + bu * b + (unsigned)rnd) >> ((S)-6+1);
        dstV[i] = (rv * r + gv * g + bv * b + (unsigned)rnd) >> ((S)-6+1);
    }
}

#undef input_pixel

/* Instantiate ToY/ToUV/ToUV_half wrappers for one packed 16/32-bit
 * format; all mask/shift parameters are compile-time constants here. */
#define rgb16_32_wrapper(fmt, name, shr, shg, shb, shp, maskr, \
                         maskg, maskb, rsh, gsh, bsh, S) \
static void name ## ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, \
                          int width, uint32_t *tab) \
{ \
    rgb16_32ToY_c_template((int16_t*)dst, src, width, fmt, shr, shg, shb, shp, \
                           maskr, maskg, maskb, rsh, gsh, bsh, S, tab); \
} \
 \
static void name ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \
                           const uint8_t *unused0, const uint8_t *src, const uint8_t *dummy, \
                           int width, uint32_t *tab) \
{ \
    rgb16_32ToUV_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
                            shr, shg, shb, shp, \
                            maskr, maskg, maskb, rsh, gsh, bsh, S, tab);\
} \
 \
static void name ## ToUV_half_c(uint8_t *dstU, uint8_t *dstV, \
                                const uint8_t *unused0, const uint8_t *src, \
                                const uint8_t *dummy, \
                                int width, uint32_t *tab) \
{ \
    rgb16_32ToUV_half_c_template((int16_t*)dstU, (int16_t*)dstV, src, width, fmt, \
                                 shr, shg, shb, shp, \
                                 maskr, maskg, maskb, \
                                 rsh, gsh, bsh, S, tab); \
}

rgb16_32_wrapper(AV_PIX_FMT_BGR32,    bgr32,  16, 0,  0, 0, 0xFF0000, 0xFF00,   0x00FF,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR32_1,  bgr321, 16, 0,  0, 8, 0xFF0000, 0xFF00,   0x00FF,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32,    rgb32,   0, 0, 16, 0,   0x00FF, 0xFF00, 0xFF0000,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB32_1,  rgb321,  0, 0, 16, 8,   0x00FF, 0xFF00, 0xFF0000,  8, 0,  8, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR565LE, bgr16le, 0, 0,  0, 0,   0x001F, 0x07E0,   0xF800, 11, 5,  0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555LE, bgr15le, 0, 0,  0, 0,   0x001F, 0x03E0,   0x7C00, 10, 5,  0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444LE, bgr12le, 0, 0,  0, 0,   0x000F, 0x00F0,   0x0F00,  8, 4,  0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565LE, rgb16le, 0, 0,  0, 0,   0xF800, 0x07E0,   0x001F,  0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555LE, rgb15le, 0, 0,  0, 0,   0x7C00, 0x03E0,   0x001F,  0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444LE, rgb12le, 0, 0,  0, 0,   0x0F00, 0x00F0,   0x000F,  0, 4,  8, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_BGR565BE, bgr16be, 0, 0,  0, 0,   0x001F, 0x07E0,   0xF800, 11, 5,  0, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_BGR555BE, bgr15be, 0, 0,  0, 0,   0x001F, 0x03E0,   0x7C00, 10, 5,  0, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_BGR444BE, bgr12be, 0, 0,  0, 0,   0x000F, 0x00F0,   0x0F00,  8, 4,  0, RGB2YUV_SHIFT + 4)
rgb16_32_wrapper(AV_PIX_FMT_RGB565BE, rgb16be, 0, 0,  0, 0,   0xF800, 0x07E0,   0x001F,  0, 5, 11, RGB2YUV_SHIFT + 8)
rgb16_32_wrapper(AV_PIX_FMT_RGB555BE, rgb15be, 0, 0,  0, 0,   0x7C00, 0x03E0,   0x001F,  0, 5, 10, RGB2YUV_SHIFT + 7)
rgb16_32_wrapper(AV_PIX_FMT_RGB444BE, rgb12be, 0, 0,  0, 0,   0x0F00, 0x00F0,   0x000F,  0, 4,  8, RGB2YUV_SHIFT + 4)

/* Planar GBR (one byte per sample, separate G/B/R planes) -> U/V with 2x
 * horizontal subsampling: each output averages two adjacent samples. */
static void gbr24pToUV_half_c(uint8_t *_dstU, uint8_t *_dstV,
                              const uint8_t *gsrc, const uint8_t *bsrc, const uint8_t *rsrc,
                              int width, uint32_t *rgb2yuv)
{
    uint16_t *dstU = (uint16_t *)_dstU;
    uint16_t *dstV = (uint16_t *)_dstV;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];

    int i;
    for (i = 0; i < width; i++) {
        unsigned int g = gsrc[2*i] + gsrc[2*i+1];
        unsigned int b = bsrc[2*i] + bsrc[2*i+1];
        unsigned int r = rsrc[2*i] + rsrc[2*i+1];

        dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
        dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-6))) >> (RGB2YUV_SHIFT-6+1);
    }
}

/* Extract the 16-bit alpha plane from packed 64-bit RGBA/BGRA. */
static void rgba64ToA_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1,
                        const uint8_t *unused2, int width, uint32_t *unused)
{
    int16_t *dst = (int16_t *)_dst;
    const uint16_t *src = (const uint16_t *)_src;
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[4 * i + 3];
}

/* Extract alpha (first byte) from ABGR/ARGB, scaled to 14 bits. */
static void abgrToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int16_t *dst = (int16_t *)_dst;
    int i;
    for (i=0; i<width; i++) {
        dst[i]= src[4*i]<<6;
    }
}

/* Extract alpha (last byte) from RGBA/BGRA, scaled to 14 bits. */
static void rgbaToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int16_t *dst = (int16_t *)_dst;
    int i;
    for (i=0; i<width; i++) {
        dst[i]= src[4*i+3]<<6;
    }
}

/* PAL8: look up each index in the palette and take the alpha byte. */
static void palToA_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
{
    int16_t *dst = (int16_t *)_dst;
    int i;
    for (i=0; i<width; i++) {
        int d= src[i];

        dst[i]= (pal[d] >> 24)<<6;
    }
}

/* PAL8: palette entries are pre-converted; low byte of the entry is Y. */
static void palToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *pal)
{
    int16_t *dst = (int16_t *)_dst;
    int i;
    for (i = 0; i < width; i++) {
        int d = src[i];

        dst[i] = (pal[d] & 0xFF)<<6;
    }
}

/* PAL8: bytes 1 and 2 of each palette entry are U and V. */
static void palToUV_c(uint8_t *_dstU, uint8_t *_dstV,
                      const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                      int width, uint32_t *pal)
{
    /* NOTE(review): dstU is uint16_t* while dstV is int16_t*; values fit
     * either way, but the asymmetry looks unintentional — confirm. */
    uint16_t *dstU = (uint16_t *)_dstU;
    int16_t *dstV = (int16_t *)_dstV;
    int i;
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        int p = pal[src1[i]];

        dstU[i] = (uint8_t)(p>> 8)<<6;
        dstV[i] = (uint8_t)(p>>16)<<6;
    }
}

/* 1 bpp monochrome, white = 0: invert each byte and expand its 8 bits
 * to full-scale 14-bit luma samples. */
static void monowhite2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int16_t *dst = (int16_t *)_dst;
    int i, j;
    width = (width + 7) >> 3; /* bytes to process, rounded up */
    for (i = 0; i < width; i++) {
        int d = ~src[i];
        for (j = 0; j < 8; j++)
            dst[8*i+j]= ((d>>(7-j))&1) * 16383;
    }
    /* NOTE(review): this tail tests the already byte-rounded width, so
     * it re-reads past the loop; looks questionable — confirm intent. */
    if(width&7){
        int d= ~src[i];
        for (j = 0; j < (width&7); j++)
            dst[8*i+j]= ((d>>(7-j))&1) * 16383;
    }
}

/* 1 bpp monochrome, black = 0: expand each bit to 14-bit luma. */
static void monoblack2Y_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width, uint32_t *unused)
{
    int16_t *dst = (int16_t *)_dst;
    int i, j;
    width = (width + 7) >> 3; /* bytes to process, rounded up */
    for (i = 0; i < width; i++) {
        int d = src[i];
        for (j = 0; j < 8; j++)
            dst[8*i+j]= ((d>>(7-j))&1) * 16383;
    }
    /* NOTE(review): same post-shift width&7 tail as monowhite2Y_c. */
    if(width&7){
        int d = src[i];
        for (j = 0; j < (width&7); j++)
            dst[8*i+j] = ((d>>(7-j))&1) * 16383;
    }
}

/* YUYV: luma is every even byte. */
static void yuy2ToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i];
}

/* YUYV: chroma at bytes 1 (U) and 3 (V) of each 4-byte pair. */
static void yuy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 1];
        dstV[i] = src1[4 * i + 3];
    }
    av_assert1(src1 == src2);
}

/* YVYU: same layout as YUYV but with V and U swapped. */
static void yvy2ToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstV[i] = src1[4 * i + 1];
        dstU[i] = src1[4 * i + 3];
    }
    av_assert1(src1 == src2);
}

/* Byte-swap a 16-bit luma plane (used for opposite-endian 9-16 bit YUV). */
static void bswap16Y_c(uint8_t *_dst, const uint8_t *_src, const uint8_t *unused1, const uint8_t *unused2, int width,
                       uint32_t *unused)
{
    int i;
    const uint16_t *src = (const uint16_t *)_src;
    uint16_t *dst       = (uint16_t *)_dst;
    for (i = 0; i < width; i++)
        dst[i] = av_bswap16(src[i]);
}

/* Byte-swap 16-bit U and V planes. */
static void bswap16UV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *_src1,
                        const uint8_t *_src2, int width, uint32_t *unused)
{
    int i;
    const uint16_t *src1 = (const uint16_t *)_src1,
                   *src2 = (const uint16_t *)_src2;
    uint16_t *dstU = (uint16_t *)_dstU, *dstV = (uint16_t *)_dstV;
    for (i = 0; i < width; i++) {
        dstU[i] = av_bswap16(src1[i]);
        dstV[i] = av_bswap16(src2[i]);
    }
}

/* This is almost identical to the previous, and exists only because
 * yuy2ToY/UV(dst, src + 1, ...) would have 100% unaligned accesses. */
static void uyvyToY_c(uint8_t *dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                      uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[2 * i + 1];
}

/* UYVY: chroma at bytes 0 (U) and 2 (V) of each 4-byte pair. */
static void uyvyToUV_c(uint8_t *dstU, uint8_t *dstV, const uint8_t *unused0, const uint8_t *src1,
                       const uint8_t *src2, int width, uint32_t *unused)
{
    int i;
    for (i = 0; i < width; i++) {
        dstU[i] = src1[4 * i + 0];
        dstV[i] = src1[4 * i + 2];
    }
    av_assert1(src1 == src2);
}

/* De-interleave one NV12/NV21-style chroma plane into two planes. */
static av_always_inline void nvXXtoUV_c(uint8_t *dst1, uint8_t *dst2,
                                        const uint8_t *src, int width)
{
    int i;
    for (i = 0; i < width; i++) {
        dst1[i] = src[2 * i + 0];
        dst2[i] = src[2 * i + 1];
    }
}

static void nv12ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstU, dstV, src1, width);
}

static void nv21ToUV_c(uint8_t *dstU, uint8_t *dstV,
                       const uint8_t *unused0, const uint8_t *src1, const uint8_t *src2,
                       int width, uint32_t *unused)
{
    nvXXtoUV_c(dstV, dstU, src1, width);
}

#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))

/* Packed 24-bit BGR -> 15-bit Y (bias + rounding folded into the shift). */
static void bgr24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2,
                       int width, uint32_t *rgb2yuv)
{
    int16_t *dst = (int16_t *)_dst;
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int b = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int r = src[i * 3 + 2];

        dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
    }
}

/* Packed 24-bit BGR -> 15-bit U/V, one sample per pixel. */
static void bgr24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *rgb2yuv)
{
    int16_t *dstU = (int16_t *)_dstU;
    int16_t *dstV = (int16_t *)_dstV;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int b = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int r = src1[3 * i + 2];

        dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
        dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
    }
    av_assert1(src1 == src2);
}

/* Packed 24-bit BGR -> 15-bit U/V, averaging pairs of pixels. */
static void bgr24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *rgb2yuv)
{
    int16_t *dstU = (int16_t *)_dstU;
    int16_t *dstV = (int16_t *)_dstV;
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    for (i = 0; i < width; i++) {
        int b = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int r = src1[6 * i + 2] + src1[6 * i + 5];

        dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
        dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
    }
    av_assert1(src1 == src2);
}

/* Packed 24-bit RGB -> 15-bit Y. */
static void rgb24ToY_c(uint8_t *_dst, const uint8_t *src, const uint8_t *unused1, const uint8_t *unused2, int width,
                       uint32_t *rgb2yuv)
{
    int16_t *dst = (int16_t *)_dst;
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int r = src[i * 3 + 0];
        int g = src[i * 3 + 1];
        int b = src[i * 3 + 2];

        dst[i] = ((ry*r + gy*g + by*b + (32<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6));
    }
}

/* Packed 24-bit RGB -> 15-bit U/V, one sample per pixel. */
static void rgb24ToUV_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
                        const uint8_t *src2, int width, uint32_t *rgb2yuv)
{
    int16_t *dstU = (int16_t *)_dstU;
    int16_t *dstV = (int16_t *)_dstV;
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[3 * i + 0];
        int g = src1[3 * i + 1];
        int b = src1[3 * i + 2];

        dstU[i] = (ru*r + gu*g + bu*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
        dstV[i] = (rv*r + gv*g + bv*b + (256<<(RGB2YUV_SHIFT-1)) + (1<<(RGB2YUV_SHIFT-7)))>>(RGB2YUV_SHIFT-6);
    }
}

/* Packed 24-bit RGB -> 15-bit U/V, averaging pairs of pixels. */
static void rgb24ToUV_half_c(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *unused0, const uint8_t *src1,
                             const uint8_t *src2, int width, uint32_t *rgb2yuv)
{
    int16_t *dstU = (int16_t *)_dstU;
    int16_t *dstV = (int16_t *)_dstV;
    int i;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    av_assert1(src1 == src2);
    for (i = 0; i < width; i++) {
        int r = src1[6 * i + 0] + src1[6 * i + 3];
        int g = src1[6 * i + 1] + src1[6 * i + 4];
        int b = src1[6 * i + 2] + src1[6 * i + 5];

        dstU[i] = (ru*r + gu*g + bu*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
        dstV[i] = (rv*r + gv*g + bv*b + (256<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-6)))>>(RGB2YUV_SHIFT-5);
    }
}

/* Planar 8-bit GBR(A) -> 15-bit Y; plane order is G, B, R(, A). */
static void planar_rgb_to_y(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *rgb2yuv)
{
    uint16_t *dst = (uint16_t *)_dst;
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int g = src[0][i];
        int b = src[1][i];
        int r = src[2][i];

        dst[i] = (ry*r + gy*g + by*b + (0x801<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
    }
}

/* Planar 8-bit GBRA: copy the alpha plane, scaled to 14 bits. */
static void planar_rgb_to_a(uint8_t *_dst, const uint8_t *src[4], int width, int32_t *unused)
{
    uint16_t *dst = (uint16_t *)_dst;
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[3][i] << 6;
}

/* Planar 8-bit GBR(A) -> 15-bit U/V. */
static void planar_rgb_to_uv(uint8_t *_dstU, uint8_t *_dstV, const uint8_t *src[4], int width, int32_t *rgb2yuv)
{
    uint16_t *dstU = (uint16_t *)_dstU;
    uint16_t *dstV = (uint16_t *)_dstV;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    int i;
    for (i = 0; i < width; i++) {
        int g = src[0][i];
        int b = src[1][i];
        int r = src[2][i];

        dstU[i] = (ru*r + gu*g + bu*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
        dstV[i] = (rv*r + gv*g + bv*b + (0x4001<<(RGB2YUV_SHIFT-7))) >> (RGB2YUV_SHIFT-6);
    }
}

/* Read one 16-bit planar sample in the endianness selected by is_be. */
#define rdpx(src) \
    is_be ? AV_RB16(src) : AV_RL16(src)
/* Planar 9-16 bit GBR(A) -> Y; output is normalized to 14 (bpc<16) or
 * kept at 16 bits via the shift parameter. */
static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_src[4],
                                               int width, int bpc, int is_be, int32_t *rgb2yuv)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dst        = (uint16_t *)_dst;
    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int shift = bpc < 16 ? bpc : 14;
    for (i = 0; i < width; i++) {
        int g = rdpx(src[0] + i);
        int b = rdpx(src[1] + i);
        int r = rdpx(src[2] + i);

        dst[i] = ((ry*r + gy*g + by*b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14));
    }
}

/* Planar 9-16 bit GBR(A) -> U/V, same normalization as the Y variant. */
static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV,
                                                const uint8_t *_src[4], int width,
                                                int bpc, int is_be, int32_t *rgb2yuv)
{
    int i;
    const uint16_t **src = (const uint16_t **)_src;
    uint16_t *dstU       = (uint16_t *)_dstU;
    uint16_t *dstV       = (uint16_t *)_dstV;
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];
    int shift = bpc < 16 ? bpc : 14;
    for (i = 0; i < width; i++) {
        int g = rdpx(src[0] + i);
        int b = rdpx(src[1] + i);
        int r = rdpx(src[2] + i);

        dstU[i] = (ru*r + gu*g + bu*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
        dstV[i] = (rv*r + gv*g + bv*b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> (RGB2YUV_SHIFT + shift - 14);
    }
}
#undef rdpx

/* Instantiate LE/BE planar readers for one bit depth. */
#define rgb9plus_planar_funcs_endian(nbits, endian_name, endian) \
static void planar_rgb##nbits##endian_name##_to_y(uint8_t *dst, const uint8_t *src[4], \
                                                  int w, int32_t *rgb2yuv) \
{ \
    planar_rgb16_to_y(dst, src, w, nbits, endian, rgb2yuv); \
} \
static void planar_rgb##nbits##endian_name##_to_uv(uint8_t *dstU, uint8_t *dstV, \
                                                   const uint8_t *src[4], int w, int32_t *rgb2yuv) \
{ \
    planar_rgb16_to_uv(dstU, dstV, src, w, nbits, endian, rgb2yuv); \
} \

#define rgb9plus_planar_funcs(nbits) \
    rgb9plus_planar_funcs_endian(nbits, le, 0) \
    rgb9plus_planar_funcs_endian(nbits, be, 1)

rgb9plus_planar_funcs(9)
rgb9plus_planar_funcs(10)
rgb9plus_planar_funcs(12)
rgb9plus_planar_funcs(14)
rgb9plus_planar_funcs(16)

/* Select the per-format input conversion callbacks for this context.
 * (Definition continues beyond this chunk.) */
av_cold void ff_sws_init_input_funcs(SwsContext *c)
{
    enum AVPixelFormat srcFormat = c->srcFormat;

    c->chrToYV12 = NULL;
    switch (srcFormat) {
    case AV_PIX_FMT_YUYV422:
        c->chrToYV12 = yuy2ToUV_c;
        break;
    case AV_PIX_FMT_YVYU422:
        c->chrToYV12 = yvy2ToUV_c;
        break;
    case AV_PIX_FMT_UYVY422:
        c->chrToYV12 = uyvyToUV_c;
        break;
    case AV_PIX_FMT_NV12:
        c->chrToYV12 = nv12ToUV_c;
        break;
    case AV_PIX_FMT_NV21:
        c->chrToYV12 = nv21ToUV_c;
        break;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_PAL8:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_RGB4_BYTE:
        c->chrToYV12 = palToUV_c;
        break;
    case AV_PIX_FMT_GBRP9LE:
        c->readChrPlanar = planar_rgb9le_to_uv;
        break;
    case AV_PIX_FMT_GBRP10LE:
        c->readChrPlanar = planar_rgb10le_to_uv;
        break;
    case AV_PIX_FMT_GBRP12LE:
        c->readChrPlanar = planar_rgb12le_to_uv;
        break;
    case AV_PIX_FMT_GBRP14LE:
        c->readChrPlanar = planar_rgb14le_to_uv;
        break;
    case AV_PIX_FMT_GBRAP16LE:
    case AV_PIX_FMT_GBRP16LE:
        c->readChrPlanar = planar_rgb16le_to_uv;
        break;
    case AV_PIX_FMT_GBRP9BE:
        c->readChrPlanar = planar_rgb9be_to_uv;
        break;
    case AV_PIX_FMT_GBRP10BE:
        c->readChrPlanar = planar_rgb10be_to_uv;
        break;
    case AV_PIX_FMT_GBRP12BE:
        c->readChrPlanar = planar_rgb12be_to_uv;
        break;
    case AV_PIX_FMT_GBRP14BE:
        c->readChrPlanar = planar_rgb14be_to_uv;
        break;
    case AV_PIX_FMT_GBRAP16BE:
    case AV_PIX_FMT_GBRP16BE:
        c->readChrPlanar = planar_rgb16be_to_uv;
        break;
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_GBRP:
        c->readChrPlanar = planar_rgb_to_uv;
        break;
#if HAVE_BIGENDIAN
    case AV_PIX_FMT_YUV444P9LE:
    case AV_PIX_FMT_YUV422P9LE:
    case AV_PIX_FMT_YUV420P9LE:
    case AV_PIX_FMT_YUV422P10LE:
    case AV_PIX_FMT_YUV444P10LE:
    case AV_PIX_FMT_YUV420P10LE:
    case AV_PIX_FMT_YUV422P12LE:
    case AV_PIX_FMT_YUV444P12LE:
    case AV_PIX_FMT_YUV420P12LE:
    case AV_PIX_FMT_YUV422P14LE:
    case AV_PIX_FMT_YUV444P14LE:
    case AV_PIX_FMT_YUV420P14LE:
    case AV_PIX_FMT_YUV420P16LE:
    case AV_PIX_FMT_YUV422P16LE:
    case AV_PIX_FMT_YUV444P16LE:

    case AV_PIX_FMT_YUVA444P9LE:
    case AV_PIX_FMT_YUVA422P9LE:
    case AV_PIX_FMT_YUVA420P9LE:
    case AV_PIX_FMT_YUVA444P10LE:
    case AV_PIX_FMT_YUVA422P10LE:
    case AV_PIX_FMT_YUVA420P10LE:
    case AV_PIX_FMT_YUVA420P16LE:
    case AV_PIX_FMT_YUVA422P16LE:
    case AV_PIX_FMT_YUVA444P16LE:
        c->chrToYV12 = bswap16UV_c;
        break;
#else
    case AV_PIX_FMT_YUV444P9BE:
    case AV_PIX_FMT_YUV422P9BE:
    case AV_PIX_FMT_YUV420P9BE:
    case AV_PIX_FMT_YUV444P10BE:
    case AV_PIX_FMT_YUV422P10BE:
    case AV_PIX_FMT_YUV420P10BE:
    case AV_PIX_FMT_YUV444P12BE:
    case AV_PIX_FMT_YUV422P12BE:
    case AV_PIX_FMT_YUV420P12BE:
    case AV_PIX_FMT_YUV444P14BE:
    case AV_PIX_FMT_YUV422P14BE:
    case AV_PIX_FMT_YUV420P14BE:
    case AV_PIX_FMT_YUV420P16BE:
    case AV_PIX_FMT_YUV422P16BE:
    case AV_PIX_FMT_YUV444P16BE:

    case AV_PIX_FMT_YUVA444P9BE:
    case AV_PIX_FMT_YUVA422P9BE:
    case AV_PIX_FMT_YUVA420P9BE:
    case AV_PIX_FMT_YUVA444P10BE:
    case AV_PIX_FMT_YUVA422P10BE:
    case AV_PIX_FMT_YUVA420P10BE:
    case AV_PIX_FMT_YUVA420P16BE:
    case AV_PIX_FMT_YUVA422P16BE:
    case AV_PIX_FMT_YUVA444P16BE:
        c->chrToYV12 = bswap16UV_c;
        break;
#endif
    }
    if (c->chrSrcHSubSample) {
        switch (srcFormat) {
        case AV_PIX_FMT_RGBA64BE:
            c->chrToYV12 = rgb64BEToUV_half_c;
            break;
        case AV_PIX_FMT_RGBA64LE:
            c->chrToYV12 = rgb64LEToUV_half_c;
            break;
        case AV_PIX_FMT_BGRA64BE:
            c->chrToYV12 = bgr64BEToUV_half_c;
            break;
        case AV_PIX_FMT_BGRA64LE:
            c->chrToYV12 = bgr64LEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB48BE:
            c->chrToYV12 = rgb48BEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB48LE:
            c->chrToYV12 = rgb48LEToUV_half_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            c->chrToYV12 = bgr48BEToUV_half_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            c->chrToYV12 = bgr48LEToUV_half_c;
            break;
        case AV_PIX_FMT_RGB32:
            c->chrToYV12 = bgr32ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB32_1:
            c->chrToYV12 = bgr321ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR24:
            c->chrToYV12 = bgr24ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR565LE:
            c->chrToYV12 = bgr16leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR565BE:
            c->chrToYV12 = bgr16beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR555LE:
            c->chrToYV12 = bgr15leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR555BE:
            c->chrToYV12 = bgr15beToUV_half_c;
            break;
        case AV_PIX_FMT_GBRAP:
        case AV_PIX_FMT_GBRP:
            c->chrToYV12 = gbr24pToUV_half_c;
            break;
        case AV_PIX_FMT_BGR444LE:
            c->chrToYV12 = bgr12leToUV_half_c;
            break;
        case AV_PIX_FMT_BGR444BE:
            c->chrToYV12 = bgr12beToUV_half_c;
            break;
        case AV_PIX_FMT_BGR32:
            c->chrToYV12 = rgb32ToUV_half_c;
            break;
        case AV_PIX_FMT_BGR32_1:
            c->chrToYV12 = rgb321ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB24:
            c->chrToYV12 = rgb24ToUV_half_c;
            break;
        case AV_PIX_FMT_RGB565LE:
            c->chrToYV12 = rgb16leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB565BE:
            c->chrToYV12 = rgb16beToUV_half_c;
            break;
        case AV_PIX_FMT_RGB555LE:
            c->chrToYV12 = rgb15leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB555BE:
            c->chrToYV12 = rgb15beToUV_half_c;
            break;
        case AV_PIX_FMT_RGB444LE:
            c->chrToYV12 = rgb12leToUV_half_c;
            break;
        case AV_PIX_FMT_RGB444BE:
            c->chrToYV12 = rgb12beToUV_half_c;
            break;
        }
    } else {
        switch (srcFormat) {
        case AV_PIX_FMT_RGBA64BE:
            c->chrToYV12 = rgb64BEToUV_c;
            break;
        case AV_PIX_FMT_RGBA64LE:
            c->chrToYV12 = rgb64LEToUV_c;
            break;
        case AV_PIX_FMT_BGRA64BE:
            c->chrToYV12 = bgr64BEToUV_c;
            break;
        case AV_PIX_FMT_BGRA64LE:
            c->chrToYV12 = bgr64LEToUV_c;
            break;
        case AV_PIX_FMT_RGB48BE:
            c->chrToYV12 = rgb48BEToUV_c;
            break;
        case AV_PIX_FMT_RGB48LE:
            c->chrToYV12 = rgb48LEToUV_c;
            break;
        case AV_PIX_FMT_BGR48BE:
            c->chrToYV12 = bgr48BEToUV_c;
            break;
        case AV_PIX_FMT_BGR48LE:
            c->chrToYV12 = bgr48LEToUV_c;
            break;
        case AV_PIX_FMT_RGB32:
            c->chrToYV12 = bgr32ToUV_c;
            break;
        case AV_PIX_FMT_RGB32_1:
            c->chrToYV12 = bgr321ToUV_c;
            break;
        case AV_PIX_FMT_BGR24:
            c->chrToYV12 = bgr24ToUV_c;
            break;
        case AV_PIX_FMT_BGR565LE:
            c->chrToYV12 = bgr16leToUV_c;
            break;
        case AV_PIX_FMT_BGR565BE:
            c->chrToYV12 = bgr16beToUV_c;
            break;
        case AV_PIX_FMT_BGR555LE:
            c->chrToYV12 = bgr15leToUV_c;
            break;
        case
AV_PIX_FMT_BGR555BE: 1075 c->chrToYV12 = bgr15beToUV_c; 1076 break; 1077 case AV_PIX_FMT_BGR444LE: 1078 c->chrToYV12 = bgr12leToUV_c; 1079 break; 1080 case AV_PIX_FMT_BGR444BE: 1081 c->chrToYV12 = bgr12beToUV_c; 1082 break; 1083 case AV_PIX_FMT_BGR32: 1084 c->chrToYV12 = rgb32ToUV_c; 1085 break; 1086 case AV_PIX_FMT_BGR32_1: 1087 c->chrToYV12 = rgb321ToUV_c; 1088 break; 1089 case AV_PIX_FMT_RGB24: 1090 c->chrToYV12 = rgb24ToUV_c; 1091 break; 1092 case AV_PIX_FMT_RGB565LE: 1093 c->chrToYV12 = rgb16leToUV_c; 1094 break; 1095 case AV_PIX_FMT_RGB565BE: 1096 c->chrToYV12 = rgb16beToUV_c; 1097 break; 1098 case AV_PIX_FMT_RGB555LE: 1099 c->chrToYV12 = rgb15leToUV_c; 1100 break; 1101 case AV_PIX_FMT_RGB555BE: 1102 c->chrToYV12 = rgb15beToUV_c; 1103 break; 1104 case AV_PIX_FMT_RGB444LE: 1105 c->chrToYV12 = rgb12leToUV_c; 1106 break; 1107 case AV_PIX_FMT_RGB444BE: 1108 c->chrToYV12 = rgb12beToUV_c; 1109 break; 1110 } 1111 } 1112 1113 c->lumToYV12 = NULL; 1114 c->alpToYV12 = NULL; 1115 switch (srcFormat) { 1116 case AV_PIX_FMT_GBRP9LE: 1117 c->readLumPlanar = planar_rgb9le_to_y; 1118 break; 1119 case AV_PIX_FMT_GBRP10LE: 1120 c->readLumPlanar = planar_rgb10le_to_y; 1121 break; 1122 case AV_PIX_FMT_GBRP12LE: 1123 c->readLumPlanar = planar_rgb12le_to_y; 1124 break; 1125 case AV_PIX_FMT_GBRP14LE: 1126 c->readLumPlanar = planar_rgb14le_to_y; 1127 break; 1128 case AV_PIX_FMT_GBRAP16LE: 1129 case AV_PIX_FMT_GBRP16LE: 1130 c->readLumPlanar = planar_rgb16le_to_y; 1131 break; 1132 case AV_PIX_FMT_GBRP9BE: 1133 c->readLumPlanar = planar_rgb9be_to_y; 1134 break; 1135 case AV_PIX_FMT_GBRP10BE: 1136 c->readLumPlanar = planar_rgb10be_to_y; 1137 break; 1138 case AV_PIX_FMT_GBRP12BE: 1139 c->readLumPlanar = planar_rgb12be_to_y; 1140 break; 1141 case AV_PIX_FMT_GBRP14BE: 1142 c->readLumPlanar = planar_rgb14be_to_y; 1143 break; 1144 case AV_PIX_FMT_GBRAP16BE: 1145 case AV_PIX_FMT_GBRP16BE: 1146 c->readLumPlanar = planar_rgb16be_to_y; 1147 break; 1148 case AV_PIX_FMT_GBRAP: 1149 
c->readAlpPlanar = planar_rgb_to_a; 1150 case AV_PIX_FMT_GBRP: 1151 c->readLumPlanar = planar_rgb_to_y; 1152 break; 1153#if HAVE_BIGENDIAN 1154 case AV_PIX_FMT_YUV444P9LE: 1155 case AV_PIX_FMT_YUV422P9LE: 1156 case AV_PIX_FMT_YUV420P9LE: 1157 case AV_PIX_FMT_YUV444P10LE: 1158 case AV_PIX_FMT_YUV422P10LE: 1159 case AV_PIX_FMT_YUV420P10LE: 1160 case AV_PIX_FMT_YUV444P12LE: 1161 case AV_PIX_FMT_YUV422P12LE: 1162 case AV_PIX_FMT_YUV420P12LE: 1163 case AV_PIX_FMT_YUV444P14LE: 1164 case AV_PIX_FMT_YUV422P14LE: 1165 case AV_PIX_FMT_YUV420P14LE: 1166 case AV_PIX_FMT_YUV420P16LE: 1167 case AV_PIX_FMT_YUV422P16LE: 1168 case AV_PIX_FMT_YUV444P16LE: 1169 1170 case AV_PIX_FMT_GRAY16LE: 1171 c->lumToYV12 = bswap16Y_c; 1172 break; 1173 case AV_PIX_FMT_YUVA444P9LE: 1174 case AV_PIX_FMT_YUVA422P9LE: 1175 case AV_PIX_FMT_YUVA420P9LE: 1176 case AV_PIX_FMT_YUVA444P10LE: 1177 case AV_PIX_FMT_YUVA422P10LE: 1178 case AV_PIX_FMT_YUVA420P10LE: 1179 case AV_PIX_FMT_YUVA420P16LE: 1180 case AV_PIX_FMT_YUVA422P16LE: 1181 case AV_PIX_FMT_YUVA444P16LE: 1182 c->lumToYV12 = bswap16Y_c; 1183 c->alpToYV12 = bswap16Y_c; 1184 break; 1185#else 1186 case AV_PIX_FMT_YUV444P9BE: 1187 case AV_PIX_FMT_YUV422P9BE: 1188 case AV_PIX_FMT_YUV420P9BE: 1189 case AV_PIX_FMT_YUV444P10BE: 1190 case AV_PIX_FMT_YUV422P10BE: 1191 case AV_PIX_FMT_YUV420P10BE: 1192 case AV_PIX_FMT_YUV444P12BE: 1193 case AV_PIX_FMT_YUV422P12BE: 1194 case AV_PIX_FMT_YUV420P12BE: 1195 case AV_PIX_FMT_YUV444P14BE: 1196 case AV_PIX_FMT_YUV422P14BE: 1197 case AV_PIX_FMT_YUV420P14BE: 1198 case AV_PIX_FMT_YUV420P16BE: 1199 case AV_PIX_FMT_YUV422P16BE: 1200 case AV_PIX_FMT_YUV444P16BE: 1201 1202 case AV_PIX_FMT_GRAY16BE: 1203 c->lumToYV12 = bswap16Y_c; 1204 break; 1205 case AV_PIX_FMT_YUVA444P9BE: 1206 case AV_PIX_FMT_YUVA422P9BE: 1207 case AV_PIX_FMT_YUVA420P9BE: 1208 case AV_PIX_FMT_YUVA444P10BE: 1209 case AV_PIX_FMT_YUVA422P10BE: 1210 case AV_PIX_FMT_YUVA420P10BE: 1211 case AV_PIX_FMT_YUVA420P16BE: 1212 case AV_PIX_FMT_YUVA422P16BE: 1213 case 
AV_PIX_FMT_YUVA444P16BE: 1214 c->lumToYV12 = bswap16Y_c; 1215 c->alpToYV12 = bswap16Y_c; 1216 break; 1217#endif 1218 case AV_PIX_FMT_YUYV422: 1219 case AV_PIX_FMT_YVYU422: 1220 case AV_PIX_FMT_Y400A: 1221 c->lumToYV12 = yuy2ToY_c; 1222 break; 1223 case AV_PIX_FMT_UYVY422: 1224 c->lumToYV12 = uyvyToY_c; 1225 break; 1226 case AV_PIX_FMT_BGR24: 1227 c->lumToYV12 = bgr24ToY_c; 1228 break; 1229 case AV_PIX_FMT_BGR565LE: 1230 c->lumToYV12 = bgr16leToY_c; 1231 break; 1232 case AV_PIX_FMT_BGR565BE: 1233 c->lumToYV12 = bgr16beToY_c; 1234 break; 1235 case AV_PIX_FMT_BGR555LE: 1236 c->lumToYV12 = bgr15leToY_c; 1237 break; 1238 case AV_PIX_FMT_BGR555BE: 1239 c->lumToYV12 = bgr15beToY_c; 1240 break; 1241 case AV_PIX_FMT_BGR444LE: 1242 c->lumToYV12 = bgr12leToY_c; 1243 break; 1244 case AV_PIX_FMT_BGR444BE: 1245 c->lumToYV12 = bgr12beToY_c; 1246 break; 1247 case AV_PIX_FMT_RGB24: 1248 c->lumToYV12 = rgb24ToY_c; 1249 break; 1250 case AV_PIX_FMT_RGB565LE: 1251 c->lumToYV12 = rgb16leToY_c; 1252 break; 1253 case AV_PIX_FMT_RGB565BE: 1254 c->lumToYV12 = rgb16beToY_c; 1255 break; 1256 case AV_PIX_FMT_RGB555LE: 1257 c->lumToYV12 = rgb15leToY_c; 1258 break; 1259 case AV_PIX_FMT_RGB555BE: 1260 c->lumToYV12 = rgb15beToY_c; 1261 break; 1262 case AV_PIX_FMT_RGB444LE: 1263 c->lumToYV12 = rgb12leToY_c; 1264 break; 1265 case AV_PIX_FMT_RGB444BE: 1266 c->lumToYV12 = rgb12beToY_c; 1267 break; 1268 case AV_PIX_FMT_RGB8: 1269 case AV_PIX_FMT_BGR8: 1270 case AV_PIX_FMT_PAL8: 1271 case AV_PIX_FMT_BGR4_BYTE: 1272 case AV_PIX_FMT_RGB4_BYTE: 1273 c->lumToYV12 = palToY_c; 1274 break; 1275 case AV_PIX_FMT_MONOBLACK: 1276 c->lumToYV12 = monoblack2Y_c; 1277 break; 1278 case AV_PIX_FMT_MONOWHITE: 1279 c->lumToYV12 = monowhite2Y_c; 1280 break; 1281 case AV_PIX_FMT_RGB32: 1282 c->lumToYV12 = bgr32ToY_c; 1283 break; 1284 case AV_PIX_FMT_RGB32_1: 1285 c->lumToYV12 = bgr321ToY_c; 1286 break; 1287 case AV_PIX_FMT_BGR32: 1288 c->lumToYV12 = rgb32ToY_c; 1289 break; 1290 case AV_PIX_FMT_BGR32_1: 1291 c->lumToYV12 = 
rgb321ToY_c; 1292 break; 1293 case AV_PIX_FMT_RGB48BE: 1294 c->lumToYV12 = rgb48BEToY_c; 1295 break; 1296 case AV_PIX_FMT_RGB48LE: 1297 c->lumToYV12 = rgb48LEToY_c; 1298 break; 1299 case AV_PIX_FMT_BGR48BE: 1300 c->lumToYV12 = bgr48BEToY_c; 1301 break; 1302 case AV_PIX_FMT_BGR48LE: 1303 c->lumToYV12 = bgr48LEToY_c; 1304 break; 1305 case AV_PIX_FMT_RGBA64BE: 1306 c->lumToYV12 = rgb64BEToY_c; 1307 break; 1308 case AV_PIX_FMT_RGBA64LE: 1309 c->lumToYV12 = rgb64LEToY_c; 1310 break; 1311 case AV_PIX_FMT_BGRA64BE: 1312 c->lumToYV12 = bgr64BEToY_c; 1313 break; 1314 case AV_PIX_FMT_BGRA64LE: 1315 c->lumToYV12 = bgr64LEToY_c; 1316 } 1317 if (c->alpPixBuf) { 1318 if (is16BPS(srcFormat) || isNBPS(srcFormat)) { 1319 if (HAVE_BIGENDIAN == !isBE(srcFormat)) 1320 c->alpToYV12 = bswap16Y_c; 1321 } 1322 switch (srcFormat) { 1323 case AV_PIX_FMT_BGRA64LE: 1324 case AV_PIX_FMT_BGRA64BE: 1325 case AV_PIX_FMT_RGBA64LE: 1326 case AV_PIX_FMT_RGBA64BE: c->alpToYV12 = rgba64ToA_c; break; 1327 case AV_PIX_FMT_BGRA: 1328 case AV_PIX_FMT_RGBA: 1329 c->alpToYV12 = rgbaToA_c; 1330 break; 1331 case AV_PIX_FMT_ABGR: 1332 case AV_PIX_FMT_ARGB: 1333 c->alpToYV12 = abgrToA_c; 1334 break; 1335 case AV_PIX_FMT_Y400A: 1336 c->alpToYV12 = uyvyToY_c; 1337 break; 1338 case AV_PIX_FMT_PAL8 : 1339 c->alpToYV12 = palToA_c; 1340 break; 1341 } 1342 } 1343} 1344