/*
 * huffyuv codec for libavcodec
 *
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavcodec/huffyuv.c
 * huffyuv codec for libavcodec.
 */

#include "avcodec.h"
#include "bitstream.h"
#include "dsputil.h"

/* table depth of the first level of all VLC lookup tables */
#define VLC_BITS 11

/* Byte offsets of the B/G/R components inside a native-endian 32bit pixel. */
#ifdef WORDS_BIGENDIAN
#define B 3
#define G 2
#define R 1
#else
#define B 0
#define G 1
#define R 2
#endif

/* Spatial prediction modes as stored in the stream header. */
typedef enum Predictor{
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;

typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;        // spatial prediction mode for this stream
    GetBitContext gb;           // bit reader used while decoding
    PutBitContext pb;           // bit writer used while encoding
    int interlaced;             // prediction references line y-2 instead of y-1
    int decorrelate;            // RGB mode: green channel subtracted from R/B
    int bitstream_bpp;          // bits per pixel as coded (12/16/24/32)
    int version;                // 0: no extradata, 1: old v1, 2: current
    int yuy2;                   //use yuy2 instead of 422P
    int bgr32;                  //use bgr32 instead of bgr24
    int width, height;
    int flags;                  // copy of avctx->flags
    int context;                // adaptive (per-frame) huffman tables
    int picture_number;
    int last_slice_end;         // first row not yet passed to draw_horiz_band
    uint8_t *temp[3];           // per-plane scratch line buffers
    uint64_t stats[3][256];     // symbol occurrence counts (encoder/context)
    uint8_t len[3][256];        // per-plane code lengths
    uint32_t bits[3][256];      // per-plane code bits
    uint32_t pix_bgr_map[1<<VLC_BITS];  // joint-code -> packed BGR pixel
    VLC vlc[6];                 //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;  // byte-swapped copy of the input packet
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;

/* Run-length coded code-length tables of the original huffyuv ("classic")
 * v1 streams, consumed by read_len_table(). */
static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

/* Code bits matching the classic v1 luma length table. */
static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23,
                   21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

/* Code bits matching the classic v1 chroma length table. */
static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};

/**
 * Undo left prediction on one line: dst[i] = acc + sum(src[0..i]) (mod 256,
 * uint8_t wraps). The main loop is manually unrolled by two; the trailing
 * loop handles an odd final sample.
 * @param acc running predictor carried over from the previous line segment
 * @return the accumulator after the last sample, to seed the next call
 */
static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
    int i;

    for(i=0; i<w-1; i++){
        acc+= src[i];
        dst[i]= acc;
        i++;
        acc+= src[i];
        dst[i]= acc;
    }

    for(; i<w; i++){
        acc+= src[i];
        dst[i]= acc;
    }

    return acc;
}
150static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){ 151 int i; 152 int r,g,b; 153 r= *red; 154 g= *green; 155 b= *blue; 156 157 for(i=0; i<w; i++){ 158 b+= src[4*i+B]; 159 g+= src[4*i+G]; 160 r+= src[4*i+R]; 161 162 dst[4*i+B]= b; 163 dst[4*i+G]= g; 164 dst[4*i+R]= r; 165 } 166 167 *red= r; 168 *green= g; 169 *blue= b; 170} 171 172static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){ 173 int i; 174 if(w<32){ 175 for(i=0; i<w; i++){ 176 const int temp= src[i]; 177 dst[i]= temp - left; 178 left= temp; 179 } 180 return left; 181 }else{ 182 for(i=0; i<16; i++){ 183 const int temp= src[i]; 184 dst[i]= temp - left; 185 left= temp; 186 } 187 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16); 188 return src[w-1]; 189 } 190} 191 192static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){ 193 int i; 194 int r,g,b; 195 r= *red; 196 g= *green; 197 b= *blue; 198 for(i=0; i<FFMIN(w,4); i++){ 199 const int rt= src[i*4+R]; 200 const int gt= src[i*4+G]; 201 const int bt= src[i*4+B]; 202 dst[i*4+R]= rt - r; 203 dst[i*4+G]= gt - g; 204 dst[i*4+B]= bt - b; 205 r = rt; 206 g = gt; 207 b = bt; 208 } 209 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16); 210 *red= src[(w-1)*4+R]; 211 *green= src[(w-1)*4+G]; 212 *blue= src[(w-1)*4+B]; 213} 214 215static void read_len_table(uint8_t *dst, GetBitContext *gb){ 216 int i, val, repeat; 217 218 for(i=0; i<256;){ 219 repeat= get_bits(gb, 3); 220 val = get_bits(gb, 5); 221 if(repeat==0) 222 repeat= get_bits(gb, 8); 223//printf("%d %d\n", val, repeat); 224 while (repeat--) 225 dst[i++] = val; 226 } 227} 228 229static int generate_bits_table(uint32_t *dst, uint8_t *len_table){ 230 int len, index; 231 uint32_t bits=0; 232 233 for(len=32; len>0; len--){ 234 for(index=0; index<256; index++){ 235 if(len_table[index]==len) 236 dst[index]= bits++; 237 } 238 if(bits & 1){ 239 
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Min-heap node used while building the Huffman tree: occurrence count
 * (biased, see generate_len_table) and symbol/internal-node index. */
typedef struct {
    uint64_t val;
    int name;
} HeapElem;

/* Restore the min-heap property downwards from 'root'. */
static void heap_sift(HeapElem *h, int root, int size)
{
    while(root*2+1 < size) {
        int child = root*2+1;
        if(child < size-1 && h[child].val > h[child+1].val)
            child++;
        if(h[root].val > h[child].val) {
            FFSWAP(HeapElem, h[root], h[child]);
            root = child;
        } else
            break;
    }
}

/**
 * Build Huffman code lengths from symbol statistics.
 * Standard heap-based Huffman construction; counts are scaled by 256 and
 * biased by 'offset', which is doubled and the whole build retried until
 * no code length reaches 32 (flattens the tree for very skewed stats).
 * up[] maps each node to its parent, len[] holds internal-node depths.
 */
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    HeapElem h[size];
    int up[2*size];
    int len[2*size];
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;   // too deep -> retry with larger bias
        }
        if(i==size) break;
    }
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

/**
 * Build the joint (two-symbol, or three-channel for RGB) VLC tables that
 * let the decoder fetch several symbols with a single table lookup.
 */
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        /* YUV: pair a luma symbol with a symbol of plane p (Y/U/V). */
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 +
                             len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        /* RGB: pack G, then B and R (optionally green-decorrelated) into one
         * code and record the resulting pixel bytes in pix_bgr_map. */
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}

/**
 * Parse the three per-plane code-length tables from 'src', derive the code
 * bits, and (re)build the per-plane and joint VLCs.
 * @return number of bytes consumed, or -1 on an invalid table
 */
static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length*8);

    for(i=0; i<3; i++){
        read_len_table(s->len[i], &gb);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
#if 0
for(j=0; j<256; j++){
printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
}
#endif
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return
           (get_bits_count(&gb)+7)/8;
}

/**
 * Load the hardcoded "classic" v1 huffyuv tables (streams without
 * extradata) and build all VLCs from them.
 */
static int read_old_huffman_tables(HYuvContext *s){
#if 1
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
    read_len_table(s->len[0], &gb);
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
    read_len_table(s->len[1], &gb);

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        // RGB streams use the luma table for the first two channels
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return 0;
#else
    av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
    return -1;
#endif
}

/* Allocate the per-plane scratch line buffers sized for the coded format. */
static av_cold void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        for(i=0; i<2; i++){
            s->temp[i]= av_malloc(4*s->width + 16);
        }
    }
}

/* Shared encoder/decoder setup: cache avctx fields and init the DSP context. */
static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    // NOTE(review): only vlc[0..2] are cleared here although vlc[3..5] are
    // freed in generate_joint_tables() too; harmless because priv_data is
    // zero-allocated by libavcodec — confirm before relying on it.
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;   // historical heuristic: >288 lines => interlaced

    s->bgr32=1;
//if(avctx->extradata)
476// printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size); 477 if(avctx->extradata_size){ 478 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12) 479 s->version=1; // do such files exist at all? 480 else 481 s->version=2; 482 }else 483 s->version=0; 484 485 if(s->version==2){ 486 int method, interlace; 487 488 method= ((uint8_t*)avctx->extradata)[0]; 489 s->decorrelate= method&64 ? 1 : 0; 490 s->predictor= method&63; 491 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1]; 492 if(s->bitstream_bpp==0) 493 s->bitstream_bpp= avctx->bits_per_coded_sample&~7; 494 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4; 495 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced; 496 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0; 497 498 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0) 499 return -1; 500 }else{ 501 switch(avctx->bits_per_coded_sample&7){ 502 case 1: 503 s->predictor= LEFT; 504 s->decorrelate= 0; 505 break; 506 case 2: 507 s->predictor= LEFT; 508 s->decorrelate= 1; 509 break; 510 case 3: 511 s->predictor= PLANE; 512 s->decorrelate= avctx->bits_per_coded_sample >= 24; 513 break; 514 case 4: 515 s->predictor= MEDIAN; 516 s->decorrelate= 0; 517 break; 518 default: 519 s->predictor= LEFT; //OLD 520 s->decorrelate= 0; 521 break; 522 } 523 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7; 524 s->context= 0; 525 526 if(read_old_huffman_tables(s) < 0) 527 return -1; 528 } 529 530 switch(s->bitstream_bpp){ 531 case 12: 532 avctx->pix_fmt = PIX_FMT_YUV420P; 533 break; 534 case 16: 535 if(s->yuy2){ 536 avctx->pix_fmt = PIX_FMT_YUYV422; 537 }else{ 538 avctx->pix_fmt = PIX_FMT_YUV422P; 539 } 540 break; 541 case 24: 542 case 32: 543 if(s->bgr32){ 544 avctx->pix_fmt = PIX_FMT_RGB32; 545 }else{ 546 avctx->pix_fmt = PIX_FMT_BGR24; 547 } 548 break; 549 default: 550 assert(0); 551 } 552 553 alloc_temp(s); 554 555// av_log(NULL, AV_LOG_DEBUG, 
"pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced); 556 557 return 0; 558} 559#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */ 560 561#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER 562static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){ 563 int i; 564 int index= 0; 565 566 for(i=0; i<256;){ 567 int val= len[i]; 568 int repeat=0; 569 570 for(; i<256 && len[i]==val && repeat<255; i++) 571 repeat++; 572 573 assert(val < 32 && val >0 && repeat<256 && repeat>0); 574 if(repeat>7){ 575 buf[index++]= val; 576 buf[index++]= repeat; 577 }else{ 578 buf[index++]= val | (repeat<<5); 579 } 580 } 581 582 return index; 583} 584 585static av_cold int encode_init(AVCodecContext *avctx) 586{ 587 HYuvContext *s = avctx->priv_data; 588 int i, j; 589 590 common_init(avctx); 591 592 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772 593 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132 594 s->version=2; 595 596 avctx->coded_frame= &s->picture; 597 598 switch(avctx->pix_fmt){ 599 case PIX_FMT_YUV420P: 600 s->bitstream_bpp= 12; 601 break; 602 case PIX_FMT_YUV422P: 603 s->bitstream_bpp= 16; 604 break; 605 case PIX_FMT_RGB32: 606 s->bitstream_bpp= 24; 607 break; 608 default: 609 av_log(avctx, AV_LOG_ERROR, "format not supported\n"); 610 return -1; 611 } 612 avctx->bits_per_coded_sample= s->bitstream_bpp; 613 s->decorrelate= s->bitstream_bpp >= 24; 614 s->predictor= avctx->prediction_method; 615 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 
                                                           1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    /* Restrictions of the original huffyuv codec (ffvhuff lifts them). */
    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    /* Write the 4-byte v2 header (see decode_init for the layout).
     * NOTE(review): av_mallocz results above are not checked for NULL. */
    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        /* Two-pass: accumulate the symbol counts written out by pass 1. */
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        /* No stats: assume a laplacian-ish distribution around 0/256. */
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    /* Build the tables from the stats and append them to extradata. */
    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
            store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        /* Seed the adaptive stats with the same prior, scaled to frame size. */
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols at once via the joint table; 0xffff marks a pair that
 * is not in the joint table, in which case the symbols are read separately. */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

/**
 * Decode 'count' luma-pixel-pairs worth of 4:2:2 data into s->temp.
 * Near the end of the buffer the loop additionally checks the bit position
 * each iteration (31*4 is the worst-case bits per iteration).
 */
static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (s->gb.size_in_bits - get_bits_count(&s->gb))/(31*4)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }
}

/* Same as decode_422_bitstream but for luma-only lines (YV12 odd lines). */
static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (s->gb.size_in_bits - get_bits_count(&s->gb))/(31*2)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }
}

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Encode one line of 4:2:2 samples from s->temp; optionally only gathers
 * stats (pass 1 / no-output) or updates the adaptive context stats.
 * @return 0 on success, -1 if the output buffer could overflow
 */
static int encode_422_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
    int y0 = s->temp[0][2*i];\
    int y1 = s->temp[0][2*i+1];\
    int u0 = s->temp[1][i];\
    int v0 = s->temp[2][i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        /* adaptive context: write and count symbols at the same time */
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

/* Luma-only counterpart of encode_422_bitstream (YV12 odd lines). */
static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
    int y0 = s->temp[0][2*i];\
    int y1 = s->temp[0][2*i+1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

/**
 * Decode 'count' packed BGR(A) pixels into s->temp[0].
 * Tries the joint 3-channel table first (code != -1 -> a whole pixel from
 * pix_bgr_map); otherwise reads B/G/R one VLC at a time, undoing the green
 * decorrelation if requested. Always inlined so the constant
 * decorrelate/alpha flags fold away per call site.
 */
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
    }
}

/* Dispatch to decode_bgr_1 with compile-time decorrelate/alpha flags. */
static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}

/**
 * Encode one line of green-decorrelated BGR pixels from s->temp[0].
 * @return 0 on success, -1 if the output buffer could overflow
 */
static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
    int g= s->temp[0][4*i+G];\
    int b= (s->temp[0][4*i+B] - g) & 0xff;\
    int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
    s->stats[0][b]++;\
    s->stats[1][g]++;\
    s->stats[2][r]++;
#define WRITE3\
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/**
 * Hand the rows [last_slice_end, y) to the application via draw_horiz_band.
 */
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
return; 937 938 h= y - s->last_slice_end; 939 y -= h; 940 941 if(s->bitstream_bpp==12){ 942 cy= y>>1; 943 }else{ 944 cy= y; 945 } 946 947 offset[0] = s->picture.linesize[0]*y; 948 offset[1] = s->picture.linesize[1]*cy; 949 offset[2] = s->picture.linesize[2]*cy; 950 offset[3] = 0; 951 emms_c(); 952 953 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h); 954 955 s->last_slice_end= y + h; 956} 957 958static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size){ 959 HYuvContext *s = avctx->priv_data; 960 const int width= s->width; 961 const int width2= s->width>>1; 962 const int height= s->height; 963 int fake_ystride, fake_ustride, fake_vstride; 964 AVFrame * const p= &s->picture; 965 int table_size= 0; 966 967 AVFrame *picture = data; 968 969 s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE); 970 971 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4); 972 973 if(p->data[0]) 974 avctx->release_buffer(avctx, p); 975 976 p->reference= 0; 977 if(avctx->get_buffer(avctx, p) < 0){ 978 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); 979 return -1; 980 } 981 982 if(s->context){ 983 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size); 984 if(table_size < 0) 985 return -1; 986 } 987 988 if((unsigned)(buf_size-table_size) >= INT_MAX/8) 989 return -1; 990 991 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8); 992 993 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0]; 994 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1]; 995 fake_vstride= s->interlaced ? 
p->linesize[2]*2 : p->linesize[2]; 996 997 s->last_slice_end= 0; 998 999 if(s->bitstream_bpp<24){ 1000 int y, cy; 1001 int lefty, leftu, leftv; 1002 int lefttopy, lefttopu, lefttopv; 1003 1004 if(s->yuy2){ 1005 p->data[0][3]= get_bits(&s->gb, 8); 1006 p->data[0][2]= get_bits(&s->gb, 8); 1007 p->data[0][1]= get_bits(&s->gb, 8); 1008 p->data[0][0]= get_bits(&s->gb, 8); 1009 1010 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n"); 1011 return -1; 1012 }else{ 1013 1014 leftv= p->data[2][0]= get_bits(&s->gb, 8); 1015 lefty= p->data[0][1]= get_bits(&s->gb, 8); 1016 leftu= p->data[1][0]= get_bits(&s->gb, 8); 1017 p->data[0][0]= get_bits(&s->gb, 8); 1018 1019 switch(s->predictor){ 1020 case LEFT: 1021 case PLANE: 1022 decode_422_bitstream(s, width-2); 1023 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty); 1024 if(!(s->flags&CODEC_FLAG_GRAY)){ 1025 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu); 1026 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv); 1027 } 1028 1029 for(cy=y=1; y<s->height; y++,cy++){ 1030 uint8_t *ydst, *udst, *vdst; 1031 1032 if(s->bitstream_bpp==12){ 1033 decode_gray_bitstream(s, width); 1034 1035 ydst= p->data[0] + p->linesize[0]*y; 1036 1037 lefty= add_left_prediction(ydst, s->temp[0], width, lefty); 1038 if(s->predictor == PLANE){ 1039 if(y>s->interlaced) 1040 s->dsp.add_bytes(ydst, ydst - fake_ystride, width); 1041 } 1042 y++; 1043 if(y>=s->height) break; 1044 } 1045 1046 draw_slice(s, y); 1047 1048 ydst= p->data[0] + p->linesize[0]*y; 1049 udst= p->data[1] + p->linesize[1]*cy; 1050 vdst= p->data[2] + p->linesize[2]*cy; 1051 1052 decode_422_bitstream(s, width); 1053 lefty= add_left_prediction(ydst, s->temp[0], width, lefty); 1054 if(!(s->flags&CODEC_FLAG_GRAY)){ 1055 leftu= add_left_prediction(udst, s->temp[1], width2, leftu); 1056 leftv= add_left_prediction(vdst, s->temp[2], width2, leftv); 1057 } 1058 if(s->predictor == PLANE){ 1059 if(cy>s->interlaced){ 1060 
s->dsp.add_bytes(ydst, ydst - fake_ystride, width); 1061 if(!(s->flags&CODEC_FLAG_GRAY)){ 1062 s->dsp.add_bytes(udst, udst - fake_ustride, width2); 1063 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2); 1064 } 1065 } 1066 } 1067 } 1068 draw_slice(s, height); 1069 1070 break; 1071 case MEDIAN: 1072 /* first line except first 2 pixels is left predicted */ 1073 decode_422_bitstream(s, width-2); 1074 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty); 1075 if(!(s->flags&CODEC_FLAG_GRAY)){ 1076 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu); 1077 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv); 1078 } 1079 1080 cy=y=1; 1081 1082 /* second line is left predicted for interlaced case */ 1083 if(s->interlaced){ 1084 decode_422_bitstream(s, width); 1085 lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty); 1086 if(!(s->flags&CODEC_FLAG_GRAY)){ 1087 leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu); 1088 leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv); 1089 } 1090 y++; cy++; 1091 } 1092 1093 /* next 4 pixels are left predicted too */ 1094 decode_422_bitstream(s, 4); 1095 lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty); 1096 if(!(s->flags&CODEC_FLAG_GRAY)){ 1097 leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu); 1098 leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv); 1099 } 1100 1101 /* next line except the first 4 pixels is median predicted */ 1102 lefttopy= p->data[0][3]; 1103 decode_422_bitstream(s, width-4); 1104 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy); 1105 if(!(s->flags&CODEC_FLAG_GRAY)){ 1106 lefttopu= p->data[1][1]; 1107 lefttopv= p->data[2][1]; 1108 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, 
&leftu, &lefttopu); 1109 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv); 1110 } 1111 y++; cy++; 1112 1113 for(; y<height; y++,cy++){ 1114 uint8_t *ydst, *udst, *vdst; 1115 1116 if(s->bitstream_bpp==12){ 1117 while(2*cy > y){ 1118 decode_gray_bitstream(s, width); 1119 ydst= p->data[0] + p->linesize[0]*y; 1120 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy); 1121 y++; 1122 } 1123 if(y>=height) break; 1124 } 1125 draw_slice(s, y); 1126 1127 decode_422_bitstream(s, width); 1128 1129 ydst= p->data[0] + p->linesize[0]*y; 1130 udst= p->data[1] + p->linesize[1]*cy; 1131 vdst= p->data[2] + p->linesize[2]*cy; 1132 1133 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy); 1134 if(!(s->flags&CODEC_FLAG_GRAY)){ 1135 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu); 1136 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv); 1137 } 1138 } 1139 1140 draw_slice(s, height); 1141 break; 1142 } 1143 } 1144 }else{ 1145 int y; 1146 int leftr, leftg, leftb; 1147 const int last_line= (height-1)*p->linesize[0]; 1148 1149 if(s->bitstream_bpp==32){ 1150 skip_bits(&s->gb, 8); 1151 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8); 1152 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8); 1153 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8); 1154 }else{ 1155 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8); 1156 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8); 1157 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8); 1158 skip_bits(&s->gb, 8); 1159 } 1160 1161 if(s->bgr32){ 1162 switch(s->predictor){ 1163 case LEFT: 1164 case PLANE: 1165 decode_bgr_bitstream(s, width-1); 1166 add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb); 1167 1168 for(y=s->height-2; y>=0; y--){ //Yes 
// it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        /* NOTE(review): frame is stored bottom-up, so the row at
                         * +fake_ystride is the previously-decoded one — confirm
                         * against add_bytes() semantics. */
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    /* consumed bytes, rounded up to a whole 32bit word, plus the table bytes */
    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

/**
 * Free the per-plane scratch buffers shared by encoder and decoder.
 * Always returns 0.
 */
static int common_end(HYuvContext *s){
    int i;

    for(i=0; i<3; i++){
        av_freep(&s->temp[i]);
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/**
 * Decoder cleanup: release scratch buffers, the byte-swapped bitstream
 * copy and all six VLC tables (Y, U, V, YY, YU, YV).
 */
static av_cold int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for(i=0; i<6; i++){
        free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/**
 * Encode one frame into buf.
 *
 * @param avctx    codec context (priv_data is the HYuvContext)
 * @param buf      output buffer for tables + bitstream
 * @param buf_size size of buf in bytes
 * @param data     input AVFrame
 * @return number of bytes written, or -1 on table-generation failure
 */
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    /* when interlaced, "previous row" for prediction is two rows up */
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ?
pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    if(s->context){
        /* adaptive-context mode: emit the current Huffman tables into the
         * packet, then halve the stats so they adapt over time */
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i], 256);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        /* store the first pixel's samples raw; they seed the left predictors */
        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        /* first row minus the seed samples, left-predicted */
        lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);

        encode_422_bitstream(s, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                /* second field's first row is also left-predicted only */
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, width);
                y++; cy++;
            }

            /* the first few samples of the next row still use left prediction
             * (4 luma / 2 chroma), matching the decoder */
            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 4);

            lefttopy=
p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            /* rest of that row: median prediction against the row above */
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, width-4);
            y++; cy++;

            /* remaining rows: median prediction; for 4:2:0 emit luma-only
             * rows so y keeps pace with 2*cy */
            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, width);
            }
        }else{
            /* LEFT / PLANE predictors */
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    /* interlaced: row 1 has no "row above" in the same field,
                     * hence the s->interlaced < y guard */
                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    /* vertical diffs: Y into temp[1], U into temp[2],
                     * V into temp[2]+width2; left prediction then consumes
                     * them in that order (so temp[1] can be reused) */
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        /* RGB is coded bottom-up: start at the last row and walk backwards
         * via a negative stride */
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        /* seed pixel stored raw: R, G, B, then a padding byte */
        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    /* round the bitstream up to whole 32bit words (the buffer is
     * byte-swapped per 32bit word below) */
    size+= (put_bits_count(&s->pb)+31)/8;
    size/=
4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        /* dump the accumulated stats for 2-pass encoding, every 32 frames */
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                /* NOTE(review): p++ / p+=strlen(p) assume snprintf never
                 * truncates within the 1024*30 buffer — verify sizing */
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        /* output is stored as byte-swapped 32bit words */
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}

/**
 * Encoder cleanup: release scratch buffers, extradata and the
 * two-pass stats buffer.
 */
static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

#if CONFIG_HUFFYUV_DECODER
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_DECODER
AVCodec ffvhuff_decoder = {
    "ffvhuff",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif

#if CONFIG_HUFFYUV_ENCODER
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
1476#endif 1477 1478#if CONFIG_FFVHUFF_ENCODER 1479AVCodec ffvhuff_encoder = { 1480 "ffvhuff", 1481 CODEC_TYPE_VIDEO, 1482 CODEC_ID_FFVHUFF, 1483 sizeof(HYuvContext), 1484 encode_init, 1485 encode_frame, 1486 encode_end, 1487 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE}, 1488 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"), 1489}; 1490#endif 1491