/*
 * various utility functions for use within FFmpeg
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avformat.h"
#include "internal.h"
#include "libavcodec/opt.h"
#include "metadata.h"
#include "libavutil/avstring.h"
#include "riff.h"
#include <sys/time.h>
#include <time.h>
#include <strings.h>

/* Keep assert() active in this file even in release builds. */
#undef NDEBUG
#include <assert.h>

#define OOM_DEBUG 0

/**
 * @file libavformat/utils.c
 * various utility functions for use within FFmpeg
 */

/** Return the LIBAVFORMAT_VERSION_INT this library was compiled with. */
unsigned avformat_version(void)
{
    return LIBAVFORMAT_VERSION_INT;
}

/* fraction handling */

/**
 * f = val + (num / den) + 0.5.
 *
 * 'num' is normalized so that it is such as 0 <= num < den.
 *
 * @param f fractional number
 * @param val integer value
 * @param num must be >= 0
 * @param den must be >= 1
 */
static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
{
    /* bias by den/2 so the stored fraction is rounded to nearest */
    num += (den >> 1);
    if (num >= den) {
        val += num / den;
        num = num % den;
    }
    f->val = val;
    f->num = num;
    f->den = den;
}

/**
 * Fractional addition to f: f = f + (incr / f->den).
 *
 * @param f fractional number
 * @param incr increment, can be positive or negative
 */
static void av_frac_add(AVFrac *f, int64_t incr)
{
    int64_t num, den;

    num = f->num + incr;
    den = f->den;
    if (num < 0) {
        /* renormalize so that 0 <= num < den after a negative increment */
        f->val += num / den;
        num = num % den;
        if (num < 0) {
            num += den;
            f->val--;
        }
    } else if (num >= den) {
        f->val += num / den;
        num = num % den;
    }
    f->num = num;
}

/** head of registered input format linked list */
AVInputFormat *first_iformat = NULL;
/** head of registered output format linked list */
AVOutputFormat *first_oformat = NULL;

/** Iterate over registered input formats; pass NULL to get the first one. */
AVInputFormat *av_iformat_next(AVInputFormat *f)
{
    if(f) return f->next;
    else  return first_iformat;
}

/** Iterate over registered output formats; pass NULL to get the first one. */
AVOutputFormat *av_oformat_next(AVOutputFormat *f)
{
    if(f) return f->next;
    else  return first_oformat;
}

/** Append 'format' to the global input format list (not thread safe). */
void av_register_input_format(AVInputFormat *format)
{
    AVInputFormat **p;
    p = &first_iformat;
    while (*p != NULL) p = &(*p)->next;
    *p = format;
    format->next = NULL;
}

/** Append 'format' to the global output format list (not thread safe). */
void av_register_output_format(AVOutputFormat *format)
{
    AVOutputFormat **p;
    p = &first_oformat;
    while (*p != NULL) p = &(*p)->next;
    *p = format;
    format->next = NULL;
}

/**
 * Return 1 if the extension of 'filename' matches one of the entries in
 * 'extensions', a comma-separated list; 0 otherwise.
 * The comparison is case-insensitive.
 */
int match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if(!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for(;;) {
            /* copy the next comma-separated entry, truncated to ext1 */
            q = ext1;
            while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
                *q++ = *p++;
            *q = '\0';
            if (!strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}

/**
 * Return 1 if 'name' matches one of the comma-separated entries in
 * 'names', case-insensitively; 0 otherwise.
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* FFMAX prevents a shorter name from prefix-matching a longer entry */
        len = FFMAX(p - names, namelen);
        if (!strncasecmp(name, names, len))
            return 1;
        names = p+1;
    }
    return !strcasecmp(name, names);
}

/**
 * Pick the best-scoring registered output format for the given short
 * name, filename and/or MIME type. Any argument may be NULL.
 * @return the matching format, or NULL if none scores above zero
 */
AVOutputFormat *guess_format(const char *short_name, const char *filename,
                             const char *mime_type)
{
    AVOutputFormat *fmt, *fmt_found;
    int score_max, score;

    /* specific test for image sequences */
#if CONFIG_IMAGE2_MUXER
    if (!short_name && filename &&
        av_filename_number_test(filename) &&
        av_guess_image2_codec(filename) != CODEC_ID_NONE) {
        return guess_format("image2", NULL, NULL);
    }
#endif
    /* Find the proper file type. */
    fmt_found = NULL;
    score_max = 0;
    fmt = first_oformat;
    while (fmt != NULL) {
        score = 0;
        /* exact short name match dominates, then MIME type, then extension */
        if (fmt->name && short_name && !strcmp(fmt->name, short_name))
            score += 100;
        if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
            score += 10;
        if (filename && fmt->extensions &&
            match_ext(filename, fmt->extensions)) {
            score += 5;
        }
        if (score > score_max) {
            score_max = score;
            fmt_found = fmt;
        }
        fmt = fmt->next;
    }
    return fmt_found;
}

/**
 * Like guess_format(), but prefer a "<name>_stream" variant of the
 * guessed format when one is registered.
 */
AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
                                    const char *mime_type)
{
    AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);

    if (fmt) {
        AVOutputFormat *stream_fmt;
        char stream_format_name[64];

        snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
        stream_fmt = guess_format(stream_format_name, NULL, NULL);

        if (stream_fmt)
            fmt = stream_fmt;
    }

    return fmt;
}

/**
 * Return the default codec id of 'fmt' for the given media type,
 * guessing an image codec from the filename for the image2 muxers.
 */
enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
                            const char *filename, const char *mime_type, enum CodecType type){
    if(type == CODEC_TYPE_VIDEO){
        enum CodecID codec_id= CODEC_ID_NONE;

#if CONFIG_IMAGE2_MUXER
        if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
            codec_id= av_guess_image2_codec(filename);
        }
#endif
        if(codec_id == CODEC_ID_NONE)
            codec_id= fmt->video_codec;
        return codec_id;
    }else if(type == CODEC_TYPE_AUDIO)
        return fmt->audio_codec;
    else
        return CODEC_ID_NONE;
}

/** Find a registered input format whose name list matches 'short_name'. */
AVInputFormat *av_find_input_format(const char *short_name)
{
    AVInputFormat *fmt;
    for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
        if (match_format(short_name, fmt->name))
            return fmt;
    }
    return NULL;
}

/* memory handling */

/** Default packet destructor: frees the payload owned by the packet. */
void av_destruct_packet(AVPacket *pkt)
{
    av_free(pkt->data);
    pkt->data = NULL; pkt->size = 0;
}

/** Initialize optional packet fields to their "unset" values. */
void av_init_packet(AVPacket *pkt)
{
    pkt->pts   = AV_NOPTS_VALUE;
    pkt->dts   = AV_NOPTS_VALUE;
    pkt->pos   = -1;
    pkt->duration = 0;
    pkt->convergence_duration = 0;
    pkt->flags = 0;
    pkt->stream_index = 0;
    pkt->destruct= av_destruct_packet_nofree;
}

/**
 * Allocate a packet with a zero-padded payload of 'size' bytes.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure or overflow
 */
int av_new_packet(AVPacket *pkt, int size)
{
    uint8_t *data;
    /* unsigned wrap check: size + padding must not overflow */
    if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
        return AVERROR(ENOMEM);
    data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!data)
        return AVERROR(ENOMEM);
    memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    av_init_packet(pkt);
    pkt->data = data;
    pkt->size = size;
    pkt->destruct = av_destruct_packet;
    return 0;
}

/**
 * Allocate a packet and fill it with up to 'size' bytes read from 's'.
 * On short reads pkt->size is shrunk; on read failure the packet is freed.
 * @return number of bytes read, or a negative error code
 */
int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
{
    int ret= av_new_packet(pkt, size);

    if(ret<0)
        return ret;

    pkt->pos= url_ftell(s);

    ret= get_buffer(s, pkt->data, size);
    if(ret<=0)
        av_free_packet(pkt);
    else
        pkt->size= ret;

    return ret;
}

/**
 * Make the packet own its payload: if the data is currently borrowed
 * (destruct is the no-op destructor or NULL), copy it into a freshly
 * allocated, padded buffer.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure or overflow
 */
int av_dup_packet(AVPacket *pkt)
{
    if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
        uint8_t *data;
        /* We duplicate the packet and don't forget to add the padding again. */
        if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
            return AVERROR(ENOMEM);
        data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!data) {
            return AVERROR(ENOMEM);
        }
        memcpy(data, pkt->data, pkt->size);
        memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
        pkt->data = data;
        pkt->destruct = av_destruct_packet;
    }
    return 0;
}

/** Return 1 if 'filename' contains a valid frame-number pattern. */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}

/**
 * Probe every registered input format against 'pd' and return the one
 * scoring strictly above *score_max; *score_max is updated in place.
 * Ties are rejected (NULL is returned) to avoid ambiguous detection.
 */
static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
{
    AVInputFormat *fmt1, *fmt;
    int score;

    fmt = NULL;
    for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
        /* skip formats whose NOFILE flag does not match the open state */
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
            continue;
        score = 0;
        if (fmt1->read_probe) {
            score = fmt1->read_probe(pd);
        } else if (fmt1->extensions) {
            if (match_ext(pd->filename, fmt1->extensions)) {
                score = 50;
            }
        }
        if (score > *score_max) {
            *score_max = score;
            fmt = fmt1;
        }else if (score == *score_max)
            fmt = NULL;
    }
    return fmt;
}

/** Probe with no minimum score requirement. */
AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
    int score=0;
    return av_probe_input_format2(pd, is_opened, &score);
}

/**
 * Probe the buffered data of a CODEC_ID_PROBE stream and, on a match
 * with a known raw format, set the stream's codec id/type accordingly.
 * @return 1 if a format was detected, 0 otherwise
 */
static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
{
    AVInputFormat *fmt;
    fmt = av_probe_input_format2(pd, 1, &score);

    if (fmt) {
        /* map raw demuxer names to the corresponding codec ids */
        if (!strcmp(fmt->name, "mp3")) {
            st->codec->codec_id = CODEC_ID_MP3;
            st->codec->codec_type = CODEC_TYPE_AUDIO;
        } else if (!strcmp(fmt->name, "ac3")) {
            st->codec->codec_id = CODEC_ID_AC3;
            st->codec->codec_type = CODEC_TYPE_AUDIO;
        } else if (!strcmp(fmt->name, "mpegvideo")) {
            st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
            st->codec->codec_type = CODEC_TYPE_VIDEO;
        } else if (!strcmp(fmt->name, "m4v")) {
            st->codec->codec_id = CODEC_ID_MPEG4;
            st->codec->codec_type = CODEC_TYPE_VIDEO;
        } else if (!strcmp(fmt->name, "h264")) {
            st->codec->codec_id = CODEC_ID_H264;
            st->codec->codec_type = CODEC_TYPE_VIDEO;
        }
    }
    return !!fmt;
}

/************************************************************/
/* input media file */

/**
 * Open a media file from an IO stream. 'fmt' must be specified.
 */
int av_open_input_stream(AVFormatContext **ic_ptr,
                         ByteIOContext *pb, const char *filename,
                         AVInputFormat *fmt, AVFormatParameters *ap)
{
    int err;
    AVFormatContext *ic;
    AVFormatParameters default_ap;

    if(!ap){
        ap=&default_ap;
        memset(ap, 0, sizeof(default_ap));
    }

    /* the caller may supply a preallocated context via *ic_ptr */
    if(!ap->prealloced_context)
        ic = avformat_alloc_context();
    else
        ic = *ic_ptr;
    if (!ic) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ic->iformat = fmt;
    ic->pb = pb;
    ic->duration = AV_NOPTS_VALUE;
    ic->start_time = AV_NOPTS_VALUE;
    av_strlcpy(ic->filename, filename, sizeof(ic->filename));

    /* allocate private data */
    if (fmt->priv_data_size > 0) {
        ic->priv_data = av_mallocz(fmt->priv_data_size);
        if (!ic->priv_data) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
    } else {
        ic->priv_data = NULL;
    }

    if (ic->iformat->read_header) {
        err = ic->iformat->read_header(ic, ap);
        if (err < 0)
            goto fail;
    }

    if (pb && !ic->data_offset)
        ic->data_offset = url_ftell(ic->pb);

#if LIBAVFORMAT_VERSION_MAJOR < 53
    ff_metadata_demux_compat(ic);
#endif

    *ic_ptr = ic;
    return 0;
 fail:
    /* release everything read_header may have allocated */
    if (ic) {
        int i;
        av_freep(&ic->priv_data);
        for(i=0;i<ic->nb_streams;i++) {
            AVStream *st = ic->streams[i];
            if (st) {
                av_free(st->priv_data);
                av_free(st->codec->extradata);
            }
            av_free(st);
        }
    }
    av_free(ic);
    *ic_ptr = NULL;
    return err;
}

/** size of probe buffer, for
guessing file type from file contents */ 476#define PROBE_BUF_MIN 2048 477#define PROBE_BUF_MAX (1<<20) 478 479int av_open_input_file(AVFormatContext **ic_ptr, const char *filename, 480 AVInputFormat *fmt, 481 int buf_size, 482 AVFormatParameters *ap) 483{ 484 int err, probe_size; 485 AVProbeData probe_data, *pd = &probe_data; 486 ByteIOContext *pb = NULL; 487 488 pd->filename = ""; 489 if (filename) 490 pd->filename = filename; 491 pd->buf = NULL; 492 pd->buf_size = 0; 493 494 if (!fmt) { 495 /* guess format if no file can be opened */ 496 fmt = av_probe_input_format(pd, 0); 497 } 498 499 /* Do not open file if the format does not need it. XXX: specific 500 hack needed to handle RTSP/TCP */ 501 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) { 502 /* if no file needed do not try to open one */ 503 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) { 504 goto fail; 505 } 506 if (buf_size > 0) { 507 url_setbufsize(pb, buf_size); 508 } 509 510 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){ 511 int score= probe_size < PROBE_BUF_MAX ? 
AVPROBE_SCORE_MAX/4 : 0; 512 /* read probe data */ 513 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE); 514 pd->buf_size = get_buffer(pb, pd->buf, probe_size); 515 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE); 516 if (url_fseek(pb, 0, SEEK_SET) < 0) { 517 url_fclose(pb); 518 if (url_fopen(&pb, filename, URL_RDONLY) < 0) { 519 pb = NULL; 520 err = AVERROR(EIO); 521 goto fail; 522 } 523 } 524 /* guess file format */ 525 fmt = av_probe_input_format2(pd, 1, &score); 526 } 527 av_freep(&pd->buf); 528 } 529 530 /* if still no format found, error */ 531 if (!fmt) { 532 err = AVERROR_NOFMT; 533 goto fail; 534 } 535 536 /* check filename in case an image number is expected */ 537 if (fmt->flags & AVFMT_NEEDNUMBER) { 538 if (!av_filename_number_test(filename)) { 539 err = AVERROR_NUMEXPECTED; 540 goto fail; 541 } 542 } 543 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap); 544 if (err) 545 goto fail; 546 return 0; 547 fail: 548 av_freep(&pd->buf); 549 if (pb) 550 url_fclose(pb); 551 *ic_ptr = NULL; 552 return err; 553 554} 555 556/*******************************************************/ 557 558static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt, 559 AVPacketList **plast_pktl){ 560 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList)); 561 if (!pktl) 562 return NULL; 563 564 if (*packet_buffer) 565 (*plast_pktl)->next = pktl; 566 else 567 *packet_buffer = pktl; 568 569 /* add the packet in the buffered packet list */ 570 *plast_pktl = pktl; 571 pktl->pkt= *pkt; 572 return &pktl->pkt; 573} 574 575int av_read_packet(AVFormatContext *s, AVPacket *pkt) 576{ 577 int ret; 578 AVStream *st; 579 int alloc_count = 0; 580 /* Foxconn, add by Michael for OOM issue. */ 581 int mem_alloc_size = 0; 582 /* Foxconn, end by Michael for OOM issue. 
*/ 583 for(;;){ 584 AVPacketList *pktl = s->raw_packet_buffer; 585 586 if (pktl) { 587 *pkt = pktl->pkt; 588 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){ 589 s->raw_packet_buffer = pktl->next; 590 av_free(pktl); 591 //fprintf(stdout, "successfully decoded alloc_count = %d\n",alloc_count); 592 return 0; 593 } 594 /* wklin debug start */ 595 else if (alloc_count > 0) { 596 AVProbeData *pd = &st->probe_data; 597 int i = 0; 598 while (pktl) { 599 s->raw_packet_buffer = pktl->next; 600 av_free(pktl); 601 pktl = s->raw_packet_buffer; 602 i++; 603 } 604 pd->buf_size=0; 605 av_freep(&pd->buf); 606 //fprintf(stdout, "cannot decode...%d entries freed\n",i); 607 return -1; 608 } 609 /* wklin debug end */ 610 } 611 612 av_init_packet(pkt); 613 ret= s->iformat->read_packet(s, pkt); 614 615 if (ret < 0){ 616 fprintf(stdout, "cannot decode...alloced cnt = %d \n",alloc_count); 617 return ret; 618 } 619 st= s->streams[pkt->stream_index]; 620 621 switch(st->codec->codec_type){ 622 case CODEC_TYPE_VIDEO: 623 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id; 624 break; 625 case CODEC_TYPE_AUDIO: 626 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id; 627 break; 628 case CODEC_TYPE_SUBTITLE: 629 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id; 630 break; 631 } 632 633 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE){ 634 //fprintf(stdout, "decode ok...alloced cnt = %d \n",alloc_count); 635 return ret; 636 } 637 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end); 638 alloc_count ++; /* wklin debug */ 639 640 if(st->codec->codec_id == CODEC_ID_PROBE){ 641 AVProbeData *pd = &st->probe_data; 642 643 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE); 644 if (pd->buf == NULL) 645 fprintf(stdout, "pd->buf=NULL\n"); 646 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size); 647 pd->buf_size += pkt->size; 648 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE); 649 650 
if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){ 651 set_codec_from_probe_data(st, pd, 1); 652 if(st->codec->codec_id != CODEC_ID_PROBE){ 653 pd->buf_size=0; 654 av_freep(&pd->buf); 655 } 656 } 657 } 658 } 659} 660 661/**********************************************************/ 662 663/** 664 * Get the number of samples of an audio frame. Return -1 on error. 665 */ 666static int get_audio_frame_size(AVCodecContext *enc, int size) 667{ 668 int frame_size; 669 670 if(enc->codec_id == CODEC_ID_VORBIS) 671 return -1; 672 673 if (enc->frame_size <= 1) { 674 int bits_per_sample = av_get_bits_per_sample(enc->codec_id); 675 676 if (bits_per_sample) { 677 if (enc->channels == 0) 678 return -1; 679 frame_size = (size << 3) / (bits_per_sample * enc->channels); 680 } else { 681 /* used for example by ADPCM codecs */ 682 if (enc->bit_rate == 0) 683 return -1; 684 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate; 685 } 686 } else { 687 frame_size = enc->frame_size; 688 } 689 return frame_size; 690} 691 692 693/** 694 * Return the frame duration in seconds. Return 0 if not available. 
 */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
{
    int frame_size;

    *pnum = 0;
    *pden = 0;
    switch(st->codec->codec_type) {
    case CODEC_TYPE_VIDEO:
        /* prefer the stream time base when it is plausible (num*1000 > den
           rejects degenerate 1/90000-style per-tick bases) */
        if(st->time_base.num*1000LL > st->time_base.den){
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            if (pc && pc->repeat_pict) {
                /* repeated fields/frames lengthen the duration */
                *pnum = (*pnum) * (1 + pc->repeat_pict);
            }
        }
        break;
    case CODEC_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);
        if (frame_size < 0)
            break;
        *pnum = frame_size;
        *pden = st->codec->sample_rate;
        break;
    default:
        break;
    }
}

/** Return 1 if every frame of the given codec is a keyframe. */
static int is_intra_only(AVCodecContext *enc){
    if(enc->codec_type == CODEC_TYPE_AUDIO){
        return 1;
    }else if(enc->codec_type == CODEC_TYPE_VIDEO){
        switch(enc->codec_id){
        case CODEC_ID_MJPEG:
        case CODEC_ID_MJPEGB:
        case CODEC_ID_LJPEG:
        case CODEC_ID_RAWVIDEO:
        case CODEC_ID_DVVIDEO:
        case CODEC_ID_HUFFYUV:
        case CODEC_ID_FFVHUFF:
        case CODEC_ID_ASV1:
        case CODEC_ID_ASV2:
        case CODEC_ID_VCR1:
        case CODEC_ID_DNXHD:
        case CODEC_ID_JPEG2000:
            return 1;
        default: break;
        }
    }
    return 0;
}

/**
 * Once the first DTS of a stream becomes known, shift the timestamps of
 * its already-buffered packets by the stream's first_dts offset and
 * derive the stream start time.
 */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
{
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->packet_buffer;

    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
        return;

    st->first_dts= dts - st->cur_dts;
    st->cur_dts= dts;

    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != stream_index)
            continue;
        //FIXME think more about this check
        if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;

        if(pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;
    }
    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
}

/**
 * Backfill DTS/PTS/duration of buffered packets of pkt's stream once a
 * packet with a known duration arrives.
 */
static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    AVPacketList *pktl= s->packet_buffer;
    int64_t cur_dts= 0;

    if(st->first_dts != AV_NOPTS_VALUE){
        /* walk back from first_dts over the undated buffered packets */
        cur_dts= st->first_dts;
        for(; pktl; pktl= pktl->next){
            if(pktl->pkt.stream_index == pkt->stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                    break;
                cur_dts -= pkt->duration;
            }
        }
        pktl= s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts)
        return;

    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != pkt->stream_index)
            continue;
        if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
            cur_dts += pkt->duration;
            pktl->pkt.duration= pkt->duration;
        }else
            break;
    }
    if(st->first_dts == AV_NOPTS_VALUE)
        st->cur_dts= cur_dts;
}

/**
 * Fill in missing packet fields (duration, PTS, DTS, key flag) using the
 * stream state and, when available, the parser context.
 * NOTE(review): the ordering of these fix-ups is load-bearing; do not
 * reorder without cross-checking against the demuxers that rely on it.
 */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
{
    int num, den, presentation_delayed, delay, i;
    int64_t offset;

    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;
    /* XXX: need has_b_frame, but cannot get it if the codec is
       not initialized */
    if (delay &&
        pc && pc->pict_type != FF_B_TYPE)
        presentation_delayed = 1;

    /* undo an apparent timestamp wrap (dts > pts can only happen after a
       wraparound when pts_wrap_bits < 63) */
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
       /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
        pkt->dts -= 1LL<<st->pts_wrap_bits;
    }

    // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
    // we take the conservative approach and discard both
    // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
    if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
        av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
        pkt->dts= pkt->pts= AV_NOPTS_VALUE;
    }

    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);

            if(pkt->duration != 0 && s->packet_buffer)
                update_initial_durations(s, st, pkt);
        }
    }

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;
        if(pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
    }

    if (pc && pc->dts_sync_point >= 0) {
        // we have synchronization info from the parser
        int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
        if (den > 0) {
            int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
            if (pkt->dts != AV_NOPTS_VALUE) {
                // got DTS from the stream, update reference timestamp
                st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            } else if (st->reference_dts != AV_NOPTS_VALUE) {
                // compute DTS based on reference timestamp
                pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            }
            if (pc->dts_sync_point > 0)
                st->reference_dts = pkt->dts; // new reference
        }
    }

    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;

//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    /* interpolate PTS and DTS if they are not present */
    //We skip H264 currently because delay and has_b_frames are not reliably set
    if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;

            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
            /* detect a PTS that lags cur_dts by exactly one frame and
               nudge it forward */
            if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
                int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
                    pkt->pts += pkt->duration;
    //                av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
                }
            }

            /* presentation is not delayed : PTS and DTS are the same */
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            pkt->dts = pkt->pts;
            if(pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + pkt->duration;
        }
    }

    if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
        /* keep a sorted window of recent PTS values to reconstruct DTS */
        st->pts_buffer[0]= pkt->pts;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        }
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
    }

//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);

    /* update flags */
    if(is_intra_only(st->codec))
        pkt->flags |= PKT_FLAG_KEY;
    else if (pc) {
        pkt->flags = 0;
        /* keyframe computation */
        if (pc->key_frame == 1)
            pkt->flags |= PKT_FLAG_KEY;
        else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
            pkt->flags |= PKT_FLAG_KEY;
    }
    if (pc)
        pkt->convergence_duration = pc->convergence_duration;
}

/** No-op packet destructor for payloads not owned by the packet. */
void av_destruct_packet_nofree(AVPacket *pkt)
{
    pkt->data = NULL; pkt->size = 0;
}

/**
 * Read the next frame, running raw packets through the stream parsers
 * when needed and filling in the timestamp fields.
 * @return 0 on success, a negative error code on failure/EOF
 */
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st;
    int len, ret, i;
    int count = 0;

    av_init_packet(pkt);

    for(;;) {
        /* select current input stream component */
        st = s->cur_st;
        if (st) {
            if (!st->need_parsing || !st->parser) {
                /* no parsing needed: we just output the packet as is */
                /* raw data support */
                *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
                compute_pkt_fields(s, st, NULL, pkt);
                s->cur_st = NULL;
                break;
            } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
                /* feed the remaining bytes of the current packet to the parser */
                len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
                                      st->cur_ptr, st->cur_len,
                                      st->cur_pkt.pts, st->cur_pkt.dts);
                st->cur_pkt.pts = AV_NOPTS_VALUE;
                st->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
                st->cur_ptr += len;
                st->cur_len -= len;

                /* return packet if any */
                if (pkt->size) {
                    pkt->pos = st->cur_pkt.pos; // Isn't quite accurate but close.
                got_packet:
                    pkt->duration = 0;
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    pkt->destruct = av_destruct_packet_nofree;
                    compute_pkt_fields(s, st, st->parser, pkt);

                    if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
                        ff_reduce_index(s, st->index);
                        av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                                           0, 0, AVINDEX_KEYFRAME);
                    }

                    break;
                }
            } else {
                /* free packet */
                av_free_packet(&st->cur_pkt);
                s->cur_st = NULL;
            }
        } else {
            AVPacket cur_pkt;

            /* read next packet */
            ret = av_read_packet(s, &cur_pkt);

            if (ret < 0) {
                if (ret == AVERROR(EAGAIN))
                    return ret;
                /* return the last frames, if any */
                for(i = 0; i < s->nb_streams; i++) {
                    st = s->streams[i];
                    if (st->parser && st->need_parsing) {
                        /* flush the parser with a NULL input */
                        av_parser_parse(st->parser, st->codec,
                                        &pkt->data, &pkt->size,
                                        NULL, 0,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE);
                        if (pkt->size)
                            goto got_packet;
                    }
                }
                /* no more packets: really terminate parsing */
                return ret;
            }
            st = s->streams[cur_pkt.stream_index];
            st->cur_pkt= cur_pkt;

            if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
               st->cur_pkt.dts != AV_NOPTS_VALUE &&
               st->cur_pkt.pts < st->cur_pkt.dts){
                av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                       st->cur_pkt.stream_index,
                       st->cur_pkt.pts,
                       st->cur_pkt.dts,
                       st->cur_pkt.size);
//                av_free_packet(&st->cur_pkt);
//                return -1;
            }

            if(s->debug & FF_FDEBUG_TS)
                av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
                       st->cur_pkt.stream_index,
                       st->cur_pkt.pts,
                       st->cur_pkt.dts,
                       st->cur_pkt.size,
                       st->cur_pkt.flags);

            s->cur_st = st;
            st->cur_ptr = st->cur_pkt.data;
            st->cur_len = st->cur_pkt.size;
            if (st->need_parsing && !st->parser) {
                /* lazily create the parser on first use */
                st->parser = av_parser_init(st->codec->codec_id);
                if (!st->parser) {
                    /* no parser available: just output the raw packets */
                    st->need_parsing = AVSTREAM_PARSE_NONE;
                }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                }
                if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
                    st->parser->next_frame_offset=
                    st->parser->cur_offset= st->cur_pkt.pos;
                }
            }
        }
    }
    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
               pkt->stream_index,
               pkt->pts,
               pkt->dts,
               pkt->size,
               pkt->flags);
    return 0;
}

/**
 * Return the next frame of a stream, optionally generating missing PTS
 * values (AVFMT_FLAG_GENPTS) by buffering and scanning ahead.
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    AVPacketList *pktl;
    int eof=0;
    const int genpts= s->flags & AVFMT_FLAG_GENPTS;

    for(;;){
        pktl = s->packet_buffer;
        if (pktl) {
            AVPacket *next_pkt= &pktl->pkt;

            if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
                /* borrow the DTS of a later non-B packet as this one's PTS */
                while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
                    if(   pktl->pkt.stream_index == next_pkt->stream_index
                       && next_pkt->dts < pktl->pkt.dts
                       && pktl->pkt.pts != pktl->pkt.dts //not b frame
                       /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
                        next_pkt->pts= pktl->pkt.dts;
                    }
                    pktl= pktl->next;
                }
                pktl = s->packet_buffer;
            }

            if(   next_pkt->pts != AV_NOPTS_VALUE
               || next_pkt->dts == AV_NOPTS_VALUE
               || !genpts || eof){
                /* read packet from packet buffer, if there is data */
                *pkt = *next_pkt;
                s->packet_buffer = pktl->next;
                av_free(pktl);
                return 0;
            }
        }
        if(genpts){
            int ret= av_read_frame_internal(s, pkt);
            if(ret<0){
                if(pktl && ret != AVERROR(EAGAIN)){
                    eof=1;
                    continue;
                }else
                    return ret;
            }

            if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                           &s->packet_buffer_end)) < 0)
                return AVERROR(ENOMEM);
        }else{
            assert(!s->packet_buffer);
            return av_read_frame_internal(s, pkt);
        }
    }
}

/* XXX: suppress the packet queue */
static void flush_packet_queue(AVFormatContext *s)
{
    AVPacketList *pktl;

    for(;;) {
        pktl = s->packet_buffer;
        if (!pktl)
            break;
        s->packet_buffer = pktl->next;
        av_free_packet(&pktl->pkt);
        av_free(pktl);
    }
}

/*******************************************************/
/* seek support */

/**
 * Return the index of the first video stream, else the first audio
 * stream, else 0; -1 when there are no streams.
 */
int av_find_default_stream_index(AVFormatContext *s)
{
    int first_audio_index = -1;
    int i;
    AVStream *st;

    if (s->nb_streams <= 0)
        return -1;
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
            return i;
        }
        if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
            first_audio_index = i;
    }
    return first_audio_index >= 0 ? first_audio_index : 0;
}

/**
 * Flush the frame reader.
 */
static void av_read_frame_flush(AVFormatContext *s)
{
    AVStream *st;
    int i;

    flush_packet_queue(s);

    s->cur_st = NULL;

    /* for each stream, reset read state */
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];

        if (st->parser) {
            av_parser_close(st->parser);
            st->parser = NULL;
            av_free_packet(&st->cur_pkt);
        }
        st->last_IP_pts = AV_NOPTS_VALUE;
        st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
        st->reference_dts = AV_NOPTS_VALUE;
        /* fail safe */
        st->cur_ptr = NULL;
        st->cur_len = 0;
    }
}

/**
 * Propagate a new current dts (given in ref_st's time base) to every
 * stream, rescaling it into each stream's own time base.
 */
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
    int i;

    for(i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

        st->cur_dts = av_rescale(timestamp,
                                 st->time_base.den * (int64_t)ref_st->time_base.num,
                                 st->time_base.num * (int64_t)ref_st->time_base.den);
    }
}

/**
 * If the stream's index grew beyond s->max_index_size, halve it by
 * keeping only every second entry (coarser but bounded memory).
 */
void ff_reduce_index(AVFormatContext *s, int stream_index)
{
    AVStream *st= s->streams[stream_index];
    unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);

    if((unsigned)st->nb_index_entries >= max_entries){
        int i;
        for(i=0; 2*i<st->nb_index_entries; i++)
            st->index_entries[i]= st->index_entries[2*i];
        st->nb_index_entries= i;
    }
}

/**
 * Add an entry to the stream's seek index, keeping the index sorted
 * by timestamp. An existing entry with the same timestamp is updated
 * in place.
 *
 * @param pos       byte position of the entry in the file
 * @param timestamp timestamp in st->time_base units
 * @param distance  distance to the previous keyframe (min_distance)
 * @param flags     AVINDEX_* flags
 * @return index of the entry, or -1 on failure
 */
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
{
    AVIndexEntry *entries, *ie;
    int index;

    /* guard the (nb+1)*sizeof multiplication against overflow */
    if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
        return -1;

    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    if(!entries)
        return -1;

    st->index_entries= entries;

    index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);

    if(index<0){
        /* timestamp is larger than everything in the index: append */
        index= st->nb_index_entries++;
        ie= &entries[index];
        assert(index==0 || ie[-1].timestamp < timestamp);
    }else{
        ie= &entries[index];
        if(ie->timestamp != timestamp){
            if(ie->timestamp <= timestamp)
                return -1;
            /* insert before the found entry to keep the index sorted */
            memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
            st->nb_index_entries++;
        }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
            distance= ie->min_distance;
    }

    ie->pos = pos;
    ie->timestamp = timestamp;
    ie->min_distance= distance;
    ie->size= size;
    ie->flags = flags;

    return index;
}

/**
 * Binary-search the stream's index for wanted_timestamp.
 *
 * @param flags AVSEEK_FLAG_BACKWARD selects the entry at or before the
 *              timestamp (otherwise at or after); AVSEEK_FLAG_ANY also
 *              accepts non-keyframe entries
 * @return index of the matching entry, or -1 if none
 */
int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                              int flags)
{
    AVIndexEntry *entries= st->index_entries;
    int nb_entries= st->nb_index_entries;
    int a, b, m;
    int64_t timestamp;

    a = - 1;
    b = nb_entries;

    /* invariant: entries[a].timestamp <= wanted <= entries[b].timestamp
       (with a/b out of range meaning -inf/+inf) */
    while (b - a > 1) {
        m = (a + b) >> 1;
        timestamp = entries[m].timestamp;
        if(timestamp >= wanted_timestamp)
            b = m;
        if(timestamp <= wanted_timestamp)
            a = m;
    }
    m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;

    if(!(flags & AVSEEK_FLAG_ANY)){
        /* walk to the nearest keyframe entry in the requested direction */
        while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
            m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
        }
    }

    if(m == nb_entries)
        return -1;
    return m;
}

#define DEBUG_SEEK

/**
 * Seek by timestamp using the format's read_timestamp() callback,
 * narrowing the search with any cached index entries before calling
 * av_gen_search().
 *
 * @return 0 on success, -1 on failure
 */
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
    AVInputFormat *avif= s->iformat;
    int64_t pos_min, pos_max, pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    int index;
    AVStream *st;

    if (stream_index < 0)
        return -1;

#ifdef DEBUG_SEEK
    av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
#endif

    ts_max=
    ts_min= AV_NOPTS_VALUE;
    pos_limit= -1; //gcc falsely says it may be uninitialized

    st= s->streams[stream_index];
    if(st->index_entries){
        AVIndexEntry *e;

        /* bound the search from below with the closest preceding entry */
        index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
        index= FFMAX(index, 0);
        e= &st->index_entries[index];

        if(e->timestamp <= target_ts || e->pos == e->min_distance){
            pos_min= e->pos;
            ts_min= e->timestamp;
#ifdef DEBUG_SEEK
            av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
                   pos_min,ts_min);
#endif
        }else{
            assert(index==0);
        }

        /* and from above with the closest following entry */
        index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
        assert(index < st->nb_index_entries);
        if(index >= 0){
            e= &st->index_entries[index];
            assert(e->timestamp >= target_ts);
            pos_max= e->pos;
            ts_max= e->timestamp;
            pos_limit= pos_max - e->min_distance;
#ifdef DEBUG_SEEK
            av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
                   pos_max,pos_limit, ts_max);
#endif
        }
    }

    pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
    if(pos<0)
        return -1;

    /* do the
seek */
    url_fseek(s->pb, pos, SEEK_SET);

    av_update_cur_dts(s, st, ts);

    return 0;
}

/**
 * Generic search for the byte position whose timestamp matches target_ts,
 * using interpolation first, then bisection, then linear scan. The
 * [pos_min,pos_max]/[ts_min,ts_max] bounds may be AV_NOPTS_VALUE, in which
 * case they are discovered via read_timestamp().
 *
 * @param ts_ret filled with the timestamp found at the returned position
 * @return byte position to seek to, or -1 on failure
 */
int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
    int64_t pos, ts;
    int64_t start_pos, filesize;
    int no_change;

#ifdef DEBUG_SEEK
    av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
#endif

    if(ts_min == AV_NOPTS_VALUE){
        pos_min = s->data_offset;
        ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
        if (ts_min == AV_NOPTS_VALUE)
            return -1;
    }

    if(ts_max == AV_NOPTS_VALUE){
        /* probe backwards from EOF with a doubling step until a
           timestamp is found */
        int step= 1024;
        filesize = url_fsize(s->pb);
        pos_max = filesize - 1;
        do{
            pos_max -= step;
            ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
            step += step;
        }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
        if (ts_max == AV_NOPTS_VALUE)
            return -1;

        /* then walk forward to the very last timestamp in the file */
        for(;;){
            int64_t tmp_pos= pos_max + 1;
            int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
            if(tmp_ts == AV_NOPTS_VALUE)
                break;
            ts_max= tmp_ts;
            pos_max= tmp_pos;
            if(tmp_pos >= filesize)
                break;
        }
        pos_limit= pos_max;
    }

    if(ts_min > ts_max){
        return -1;
    }else if(ts_min == ts_max){
        pos_limit= pos_min;
    }

    no_change=0;
    while (pos_min < pos_limit) {
#ifdef DEBUG_SEEK
        av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
               pos_min, pos_max,
               ts_min, ts_max);
#endif
        assert(pos_limit <= pos_max);

        if(no_change==0){
            int64_t approximate_keyframe_distance= pos_max - pos_limit;
            // interpolate position (better than dichotomy)
            pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
                + pos_min - approximate_keyframe_distance;
        }else if(no_change==1){
            // bisection, if interpolation failed to change min or max pos last time
            pos = (pos_min + pos_limit)>>1;
        }else{
            /* linear search if bisection failed, can only happen if there
               are very few or no keyframes between min/max */
            pos=pos_min;
        }
        /* clamp into the open interval so the loop always makes progress */
        if(pos <= pos_min)
            pos= pos_min + 1;
        else if(pos > pos_limit)
            pos= pos_limit;
        start_pos= pos;

        ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
        if(pos == pos_max)
            no_change++;
        else
            no_change=0;
#ifdef DEBUG_SEEK
av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
#endif
        if(ts == AV_NOPTS_VALUE){
            av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
            return -1;
        }
        assert(ts != AV_NOPTS_VALUE);
        /* shrink the bracket around target_ts */
        if (target_ts <= ts) {
            pos_limit = start_pos - 1;
            pos_max = pos;
            ts_max = ts;
        }
        if (target_ts >= ts) {
            pos_min = pos;
            ts_min = ts;
        }
    }

    pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
    ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
#ifdef DEBUG_SEEK
    pos_min = pos;
    ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    pos_min++;
    ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
           pos, ts_min, target_ts, ts_max);
#endif
    *ts_ret= ts;
    return pos;
}

/**
 * Seek to an absolute byte position (AVSEEK_FLAG_BYTE), clamped to
 * [data_offset, filesize-1].
 */
static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
    int64_t pos_min, pos_max;
#if 0
    AVStream *st;

    if (stream_index < 0)
        return -1;

    st= s->streams[stream_index];
#endif

    pos_min = s->data_offset;
    pos_max = url_fsize(s->pb) - 1;

    if     (pos < pos_min) pos= pos_min;
    else if(pos > pos_max) pos= pos_max;

    url_fseek(s->pb, pos, SEEK_SET);

#if 0
    av_update_cur_dts(s, st, ts);
#endif
    return 0;
}

/**
 * Fallback seek for formats with neither read_seek nor read_timestamp:
 * if the timestamp is not in the index yet, read frames sequentially
 * (filling the generic index as a side effect) until it is, then seek
 * to the indexed position.
 */
static int av_seek_frame_generic(AVFormatContext *s,
                                 int stream_index, int64_t timestamp, int flags)
{
    int index, ret;
    AVStream *st;
    AVIndexEntry *ie;

    st = s->streams[stream_index];

    index = av_index_search_timestamp(st, timestamp, flags);

    if(index < 0 || index==st->nb_index_entries-1){
        int i;
        AVPacket pkt;

        /* resume reading from the last indexed position (or file start) */
        if(st->nb_index_entries){
            assert(st->index_entries);
            ie= &st->index_entries[st->nb_index_entries-1];
            if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
                return ret;
            av_update_cur_dts(s, st, ie->timestamp);
        }else{
            if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
                return ret;
        }
        /* read until a keyframe past the target extends the index */
        for(i=0;; i++) {
            int ret = av_read_frame(s, &pkt);
            if(ret<0)
                break;
            av_free_packet(&pkt);
            if(stream_index == pkt.stream_index){
                if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
                    break;
            }
        }
        index = av_index_search_timestamp(st, timestamp, flags);
    }
    if (index < 0)
        return -1;
    av_read_frame_flush(s);
    if (s->iformat->read_seek){
        /* a format-specific seek became possible (index now populated):
           prefer it */
        if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
            return 0;
    }
    ie = &st->index_entries[index];
    if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
        return ret;
    av_update_cur_dts(s, st, ie->timestamp);

    return 0;
}

/**
 * Public seek entry point: flushes the read state, then dispatches to
 * byte seek, the demuxer's own read_seek, binary timestamp search, or
 * the generic index-based seek, in that order of preference.
 *
 * @param stream_index stream the timestamp refers to; -1 selects the
 *                     default stream and interprets timestamp in
 *                     AV_TIME_BASE units
 * @return >= 0 on success
 */
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    int ret;
    AVStream *st;

    av_read_frame_flush(s);

    if(flags & AVSEEK_FLAG_BYTE)
        return av_seek_frame_byte(s, stream_index, timestamp, flags);

    if(stream_index < 0){
        stream_index= av_find_default_stream_index(s);
        if(stream_index < 0)
            return -1;

        st= s->streams[stream_index];
        /* timestamp for default must be expressed in AV_TIME_BASE units */
        timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
    }

    /* first, we try the format specific seek */
    if (s->iformat->read_seek)
        ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
    else
        ret = -1;
    if (ret >= 0) {
        return 0;
    }

    if(s->iformat->read_timestamp)
        return av_seek_frame_binary(s, stream_index, timestamp, flags);
    else
        return av_seek_frame_generic(s, stream_index, timestamp, flags);
}

/*******************************************************/

/**
 * Returns TRUE if the stream has accurate duration in any stream.
 *
 * @return TRUE if the stream has accurate duration for at least one component.
 */
static int av_has_duration(AVFormatContext *ic)
{
    int i;
    AVStream *st;

    for(i = 0;i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->duration != AV_NOPTS_VALUE)
            return 1;
    }
    return 0;
}

/**
 * Estimate the global (context-level) timings from those of each component
 * stream: earliest start time, longest duration / latest end time.
 *
 * Also computes the global bitrate if possible (from file size and duration).
 */
static void av_update_stream_timings(AVFormatContext *ic)
{
    int64_t start_time, start_time1, end_time, end_time1;
    int64_t duration, duration1;
    int i;
    AVStream *st;

    start_time = INT64_MAX;
    end_time = INT64_MIN;
    duration = INT64_MIN;
    for(i = 0;i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
            /* all comparisons are done in AV_TIME_BASE units */
            start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
            if (start_time1 < start_time)
                start_time = start_time1;
            if (st->duration != AV_NOPTS_VALUE) {
                end_time1 = start_time1
                          + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
                if (end_time1 > end_time)
                    end_time = end_time1;
            }
        }
        if (st->duration != AV_NOPTS_VALUE) {
            duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
            if (duration1 > duration)
                duration = duration1;
        }
    }
    if (start_time != INT64_MAX) {
        ic->start_time = start_time;
        if (end_time != INT64_MIN) {
            /* span of all streams may exceed the longest single duration */
            if (end_time - start_time > duration)
                duration = end_time - start_time;
        }
    }
    if (duration != INT64_MIN) {
        ic->duration = duration;
        if (ic->file_size > 0) {
            /* compute the bitrate */
            ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
                (double)ic->duration;
        }
    }
}

/* Propagate context-level start_time/duration down to streams that
   lack their own values, converted to each stream's time base. */
static void fill_all_stream_timings(AVFormatContext *ic)
{
    int i;
    AVStream *st;

    av_update_stream_timings(ic);
    for(i = 0;i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time == AV_NOPTS_VALUE) {
            if(ic->start_time != AV_NOPTS_VALUE)
                st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
            if(ic->duration != AV_NOPTS_VALUE)
                st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
        }
    }
}

/* Least precise fallback: derive durations from the total bitrate
   (summed from per-stream codec bitrates if unset) and the file size. */
static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
{
    int64_t filesize, duration;
    int bit_rate, i;
    AVStream *st;

    /* if bit_rate is already set, we believe it */
    if (ic->bit_rate == 0) {
        bit_rate = 0;
        for(i=0;i<ic->nb_streams;i++) {
            st = ic->streams[i];
            bit_rate += st->codec->bit_rate;
        }
        ic->bit_rate = bit_rate;
    }

    /* if duration is already set, we believe it */
    if (ic->duration == AV_NOPTS_VALUE &&
        ic->bit_rate != 0 &&
        ic->file_size != 0)  {
        filesize = ic->file_size;
        if (filesize > 0) {
            for(i = 0; i < ic->nb_streams; i++) {
                st = ic->streams[i];
                /* duration = 8*filesize/bit_rate, in st->time_base units */
                duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
                if (st->duration == AV_NOPTS_VALUE)
                    st->duration = duration;
            }
        }
    }
}

#define DURATION_MAX_READ_SIZE 250000

/* only usable for MPEG-PS streams */
/* Estimate start time and duration by reading packets at the beginning
   of the file (first pts per stream) and near its end (last pts per
   stream); restores the read position to old_offset afterwards. */
static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
{
    AVPacket pkt1, *pkt = &pkt1;
    AVStream *st;
    int read_size, i, ret;
    int64_t end_time;
    int64_t filesize, offset, duration;

    ic->cur_st = NULL;

    /* flush packet queue */
    flush_packet_queue(ic);

    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if (st->parser) {
            av_parser_close(st->parser);
            st->parser= NULL;
            av_free_packet(&st->cur_pkt);
        }
    }

    /* we read the first packets to get the first PTS (not fully
       accurate, but it is enough now) */
    url_fseek(ic->pb, 0, SEEK_SET);
    read_size = 0;
    for(;;) {
        if (read_size >= DURATION_MAX_READ_SIZE)
            break;
        /* if all info is available, we can stop */
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->start_time == AV_NOPTS_VALUE)
                break;
        }
        if (i == ic->nb_streams)
            break;

        ret = av_read_packet(ic, pkt);
        if (ret != 0)
            break;
        read_size += pkt->size;
        st = ic->streams[pkt->stream_index];
        if (pkt->pts != AV_NOPTS_VALUE) {
            if (st->start_time == AV_NOPTS_VALUE)
                st->start_time = pkt->pts;
        }
        av_free_packet(pkt);
    }

    /* estimate the end time (duration) */
    /* XXX: may need to support wrapping */
    filesize = ic->file_size;
    offset = filesize - DURATION_MAX_READ_SIZE;
    if (offset < 0)
        offset = 0;

    url_fseek(ic->pb, offset, SEEK_SET);
    read_size = 0;
    for(;;) {
        if (read_size >= DURATION_MAX_READ_SIZE)
            break;

        ret = av_read_packet(ic, pkt);
        if (ret != 0)
            break;
        read_size += pkt->size;
        st = ic->streams[pkt->stream_index];
        if (pkt->pts != AV_NOPTS_VALUE &&
            st->start_time != AV_NOPTS_VALUE) {
            /* keep the largest last_pts - first_pts seen per stream */
            end_time = pkt->pts;
            duration = end_time - st->start_time;
            if (duration > 0) {
                if (st->duration == AV_NOPTS_VALUE ||
                    st->duration < duration)
                    st->duration = duration;
            }
        }
        av_free_packet(pkt);
    }

    fill_all_stream_timings(ic);

    /* restore the original read position and reset per-stream state */
    url_fseek(ic->pb, old_offset, SEEK_SET);
    for(i=0; i<ic->nb_streams; i++){
        st= ic->streams[i];
        st->cur_dts= st->first_dts;
        st->last_IP_pts = AV_NOPTS_VALUE;
    }
}

/* Choose the best available timing-estimation strategy for the file. */
static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
{
    int64_t file_size;

    /* get the file size, if possible */
    if (ic->iformat->flags & AVFMT_NOFILE) {
        file_size = 0;
    } else {
        file_size = url_fsize(ic->pb);
        if (file_size < 0)
            file_size = 0;
    }
    ic->file_size = file_size;

    /* Foxconn, marked by MJ., for OOM.
 */
#if 1
    if ((!strcmp(ic->iformat->name, "mpeg") ||
         !strcmp(ic->iformat->name, "mpegts")) &&
        file_size && !url_is_streamed(ic->pb)) {
        /* get accurate estimate from the PTSes */
        av_estimate_timings_from_pts(ic, old_offset);
    } else if (av_has_duration(ic)) {
        /* at least one component has timings - we use them for all
           the components */
        fill_all_stream_timings(ic);
    } else
#endif
    /* Foxconn, mared-end by MJ., for OOM . */
    {
        /* less precise: use bitrate info */
        av_estimate_timings_from_bit_rate(ic);
    }

    av_update_stream_timings(ic);

#if 0
    {
        int i;
        AVStream *st;
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            printf("%d: start_time: %0.3f duration: %0.3f\n",
                   i, (double)st->start_time / AV_TIME_BASE,
                   (double)st->duration / AV_TIME_BASE);
        }
        printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
               (double)ic->start_time / AV_TIME_BASE,
               (double)ic->duration / AV_TIME_BASE,
               ic->bit_rate / 1000);
    }
#endif
}

/**
 * Check whether the codec context already carries enough parameters
 * for decoding (sample rate/channels/format for audio, width/pixel
 * format for video, plus a known codec id).
 *
 * @return 1 if the parameters are complete, 0 otherwise
 */
static int has_codec_parameters(AVCodecContext *enc)
{
    int val;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
        /* Vorbis/AAC additionally need a known frame size */
        if(!enc->frame_size &&
           (enc->codec_id == CODEC_ID_VORBIS ||
            enc->codec_id == CODEC_ID_AAC))
            return 0;
        break;
    case CODEC_TYPE_VIDEO:
        val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
        break;
    default:
        val = 1;
        break;
    }
    return enc->codec_id != CODEC_ID_NONE && val != 0;
}

/**
 * Last-resort probing: open a decoder and decode one packet so the
 * decoder fills in the missing codec parameters as a side effect.
 * Decoded output is discarded.
 *
 * @return decoder return value, or negative on failure
 */
static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
{
    int16_t *samples;
    AVCodec *codec;
    int got_picture, data_size, ret=0;
    AVFrame picture;

    if(!st->codec->codec){
        codec = avcodec_find_decoder(st->codec->codec_id);
        if (!codec)
            return -1;
        ret = avcodec_open(st->codec, codec);
        if (ret < 0)
            return ret;
    }

    if(!has_codec_parameters(st->codec)){
        switch(st->codec->codec_type) {
        case CODEC_TYPE_VIDEO:
            ret = avcodec_decode_video(st->codec, &picture,
                                       &got_picture, data, size);
            break;
        case CODEC_TYPE_AUDIO:
            /* temporary buffer for the decoded samples, freed below */
            data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
            samples = av_malloc(data_size);
            if (!samples)
                goto fail;
            ret = avcodec_decode_audio2(st->codec, samples,
                                        &data_size, data, size);
            av_free(samples);
            break;
        default:
            break;
        }
    }
 fail:
    return ret;
}

/* Look up the container tag for a codec id in a single tag table. */
unsigned int codec_get_tag(const AVCodecTag *tags, int id)
{
    while (tags->id != CODEC_ID_NONE) {
        if (tags->id == id)
            return tags->tag;
        tags++;
    }
    return 0;
}

/* Look up the codec id for a container tag in a single tag table;
   a second pass compares the four tag bytes case-insensitively. */
enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
{
    int i;
    for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
        if(tag == tags[i].tag)
            return tags[i].id;
    }
    for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
        if(   toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
           && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
           && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
           && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
            return tags[i].id;
    }
    return CODEC_ID_NONE;
}

/* As codec_get_tag(), but over a NULL-terminated list of tag tables. */
unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
{
    int i;
    for(i=0; tags && tags[i]; i++){
        int tag= codec_get_tag(tags[i], id);
        if(tag) return tag;
    }
    return 0;
}

/* As codec_get_id(), but over a NULL-terminated list of tag tables. */
enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
{
    int i;
    for(i=0; tags && tags[i]; i++){
        enum CodecID id= codec_get_id(tags[i], tag);
        if(id!=CODEC_ID_NONE) return id;
    }
    return CODEC_ID_NONE;
}

/* Fill in missing chapter end times: each chapter ends where the next
   one starts; the last one ends at start_time + duration. */
static void compute_chapters_end(AVFormatContext *s)
{
    unsigned int i;

    for (i=0; i+1<s->nb_chapters; i++)
        if (s->chapters[i]->end == AV_NOPTS_VALUE) {
            assert(s->chapters[i]->start <= s->chapters[i+1]->start);
            assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
            s->chapters[i]->end = s->chapters[i+1]->start;
        }

    /* i is nb_chapters-1 here (last chapter) when nb_chapters > 0 */
    if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
        assert(s->start_time != AV_NOPTS_VALUE);
        assert(s->duration > 0);
        s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
                                           AV_TIME_BASE_Q,
                                           s->chapters[i]->time_base);
    }
}

/* absolute maximum size we read until we abort */
#define MAX_READ_SIZE 5000000

#define MAX_STD_TIMEBASES (60*12+5)
/* Map an index to a candidate standard frame rate (scaled): indices
   below 60*12 cover i/1001 rates, the rest are a fixed extra set. */
static int get_std_framerate(int i){
    if(i<60*12)   return i*1001;
    else          return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
}

/*
 * Is the time base unreliable.
 * This is a heuristic to balance between quick acceptance of the values in
 * the headers vs. some extra checks.
 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
 * MPEG-2 commonly misuses field repeat flags to store different framerates.
 * And there are "variable" fps files this needs to detect as well.
 */
static int tb_unreliable(AVCodecContext *c){
    if(   c->time_base.den >= 101L*c->time_base.num
       || c->time_base.den <    5L*c->time_base.num
/*       || c->codec_tag == AV_RL32("DIVX")
       || c->codec_tag == AV_RL32("XVID")*/
       || c->codec_id == CODEC_ID_MPEG2VIDEO
       || c->codec_id == CODEC_ID_H264
       )
        return 1;
    return 0;
}

/**
 * Read packets from the input to fill in missing codec parameters
 * (dimensions, sample rate, frame rate, extradata, ...) for every
 * stream, buffering the packets read so they are not lost for playback.
 * Also estimates timings and chapter end times.
 *
 * @return >= 0 on success, negative AVERROR on failure
 */
int av_find_stream_info(AVFormatContext *ic)
{
    int i, count, ret, read_size, j;
    AVStream *st;
    AVPacket pkt1, *pkt;
    int64_t last_dts[MAX_STREAMS];
    int64_t duration_gcd[MAX_STREAMS]={0};
    int duration_count[MAX_STREAMS]={0};
    double (*duration_error)[MAX_STD_TIMEBASES];
    int64_t old_offset = url_ftell(ic->pb);
    int64_t codec_info_duration[MAX_STREAMS]={0};
    int codec_info_nb_frames[MAX_STREAMS]={0};
    int try_count;

    //av_log(NULL, AV_LOG_DEBUG, "==> %s \n\n", __FUNCTION__);

    /* per-stream accumulated squared error for each candidate frame rate */
    duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
    if (!duration_error) return AVERROR(ENOMEM);

    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if(st->codec->codec_type == CODEC_TYPE_VIDEO){
/*            if(!st->time_base.num)
                st->time_base= */
            if(!st->codec->time_base.num)
                st->codec->time_base= st->time_base;
        }
        //only for the split stuff
        if (!st->parser) {
            st->parser = av_parser_init(st->codec->codec_id);
            if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            }
        }
    }

    for(i=0;i<MAX_STREAMS;i++){
        last_dts[i]= AV_NOPTS_VALUE;
    }

    try_count = 0;
    count = 0;
    read_size = 0;
    for(;;) {
        if(url_interrupt_cb()){
            ret= AVERROR(EINTR);
            break;
        }
        /* check if one codec still needs to be handled */
        for(i=0;i<ic->nb_streams;i++) {
            st = ic->streams[i];
            if (!has_codec_parameters(st->codec))
                break;
            /* variable fps and no guess at the real fps */
            if(   tb_unreliable(st->codec)
               && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
                break;
            if(st->parser && st->parser->parser->split && !st->codec->extradata)
                break;
            if(st->first_dts == AV_NOPTS_VALUE)
                break;
        }
        if (i == ic->nb_streams) {
            /* NOTE: if the format has no header, then we need to read
               some packets to get most of the streams, so we cannot
               stop here */
            if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
                /* if we found the info for all the codecs, we can stop */
                ret = count;
                break;
            }
        }
        /* we did not get all the codec info, but we read too much data */
        if (read_size >= MAX_READ_SIZE) {
            ret = count;
            break;
        }

        /* NOTE: a new stream can be added there if no header in file
           (AVFMTCTX_NOHEADER) */
        ret = av_read_frame_internal(ic, &pkt1);

        if (ret < 0) {
            /* EOF or error */
            ret = -1; /* we could not have all the codec parameters before EOF */
            for(i=0;i<ic->nb_streams;i++) {
                st = ic->streams[i];
                if (!has_codec_parameters(st->codec)){
                    char buf[256];
                    avcodec_string(buf, sizeof(buf), st->codec, 0);
                    av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
                } else {
                    ret = 0;
                }
            }
            break;
        }

        /* keep the packet in the buffer so demuxing can replay it later */
        pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
        if(av_dup_packet(pkt) < 0) {
            av_free(duration_error);
            return AVERROR(ENOMEM);
        }

        read_size += pkt->size;

        st = ic->streams[pkt->stream_index];
        if(codec_info_nb_frames[st->index]>1)
            codec_info_duration[st->index] += pkt->duration;
        if (pkt->duration != 0)
            codec_info_nb_frames[st->index]++;

        {
            int index= pkt->stream_index;
            int64_t last= last_dts[index];
            int64_t duration= pkt->dts - last;

            if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
                double dur= duration * av_q2d(st->time_base);

//                if(st->codec->codec_type == CODEC_TYPE_VIDEO)
//                    av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
                if(duration_count[index] < 2)
                    memset(duration_error[index], 0, sizeof(*duration_error));
                /* accumulate the fit error of this inter-packet duration
                   against every candidate standard frame rate */
                for(i=1; i<MAX_STD_TIMEBASES; i++){
                    int framerate= get_std_framerate(i);
                    int ticks= lrintf(dur*framerate/(1001*12));
                    double error= dur - ticks*1001*12/(double)framerate;
                    duration_error[index][i] += error*error;
                }
                duration_count[index]++;
                // ignore the first 4 values, they might have some random jitter
                if (duration_count[index] > 3)
                    duration_gcd[index] = av_gcd(duration_gcd[index], duration);
            }
            if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
                last_dts[pkt->stream_index]= pkt->dts;
        }
        if(st->parser && st->parser->parser->split && !st->codec->extradata){
            int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
            if(i){
                /* NOTE(review): av_malloc() result is not checked before the
                   memcpy below — on OOM this dereferences NULL; verify
                   against upstream which adds a NULL check here */
                st->codec->extradata_size= i;
                st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
                memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
            }
        }

        /* if still no information, we try to open the codec and to
           decompress the frame. We try to avoid that in most cases as
           it takes longer and uses more memory. For MPEG-4, we need to
           decompress for QuickTime. */
        if (!has_codec_parameters(st->codec) /*&&
            (st->codec->codec_id == CODEC_ID_FLV1 ||
             st->codec->codec_id == CODEC_ID_H264 ||
             st->codec->codec_id == CODEC_ID_H263 ||
             st->codec->codec_id == CODEC_ID_H261 ||
             st->codec->codec_id == CODEC_ID_VORBIS ||
             st->codec->codec_id == CODEC_ID_MJPEG ||
             st->codec->codec_id == CODEC_ID_PNG ||
             st->codec->codec_id == CODEC_ID_PAM ||
             st->codec->codec_id == CODEC_ID_PGM ||
             st->codec->codec_id == CODEC_ID_PGMYUV ||
             st->codec->codec_id == CODEC_ID_PBM ||
             st->codec->codec_id == CODEC_ID_PPM ||
             st->codec->codec_id == CODEC_ID_SHORTEN ||
             (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/
            ){
            try_decode_frame(st, pkt->data, pkt->size);
        }

        /* stop once enough of the stream has been analyzed */
        if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
            break;
        }
    }

    // close codecs which were opened in try_decode_frame()
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if(st->codec->codec)
            avcodec_close(st->codec);
    }
    /* FFMEPG free memory in here. */
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
            if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
                st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);

            // the check for tb_unreliable() is not completely correct, since this is not about handling
            // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
            // ipmovie.c produces.
            if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
                av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
            if(duration_count[i]
               && tb_unreliable(st->codec) /*&&
               //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
               st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
                /* pick the standard frame rate with the smallest
                   accumulated fit error */
                int num = 0;
                double best_error= 2*av_q2d(st->time_base);
                best_error= best_error*best_error*duration_count[i]*1000*12*30;

                for(j=1; j<MAX_STD_TIMEBASES; j++){
                    double error= duration_error[i][j] * get_std_framerate(j);
//                    if(st->codec->codec_type == CODEC_TYPE_VIDEO)
//                        av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
                    if(error < best_error){
                        best_error= error;
                        num = get_std_framerate(j);
                    }
                }
                // do not increase frame rate by more than 1 % in order to match a standard rate.
                if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
                    av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
            }

            if (!st->r_frame_rate.num){
                /* fall back to the coarser of codec and stream time base */
                if(    st->codec->time_base.den * (int64_t)st->time_base.num
                    <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
                    st->r_frame_rate.num = st->codec->time_base.den;
                    st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
                }else{
                    st->r_frame_rate.num = st->time_base.den;
                    st->r_frame_rate.den = st->time_base.num;
                }
            }
        }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
            if(!st->codec->bits_per_coded_sample)
                st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
        }
    }

    av_estimate_timings(ic, old_offset);

    compute_chapters_end(ic);

#if 0
    /* correct DTS for B-frame streams with no timestamps */
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
            if(b-frames){
                ppktl = &ic->packet_buffer;
                while(ppkt1){
                    if(ppkt1->stream_index != i)
                        continue;
                    if(ppkt1->pkt->dts < 0)
                        break;
                    if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
                        break;
                    ppkt1->pkt->dts -= delta;
                    ppkt1= ppkt1->next;
                }
                if(ppkt1)
                    continue;
                st->cur_dts -= delta;
            }
        }
    }
#endif

    av_free(duration_error);

    return ret;
}

/*******************************************************/

/* Resume a paused network stream, via the demuxer if it supports it,
   otherwise via the protocol layer. */
int av_read_play(AVFormatContext *s)
{
    if (s->iformat->read_play)
        return s->iformat->read_play(s);
    if (s->pb)
        return av_url_read_fpause(s->pb, 0);
    return AVERROR(ENOSYS);
}

int av_read_pause(AVFormatContext *s)
{
    if (s->iformat->read_pause)
        return s->iformat->read_pause(s);
    if (s->pb)
        return
av_url_read_fpause(s->pb, 1); 2325 return AVERROR(ENOSYS); 2326} 2327 2328void av_close_input_stream(AVFormatContext *s) 2329{ 2330 int i; 2331 AVStream *st; 2332 2333 if (s->iformat->read_close) 2334 s->iformat->read_close(s); 2335 for(i=0;i<s->nb_streams;i++) { 2336 /* free all data in a stream component */ 2337 st = s->streams[i]; 2338 if (st->parser) { 2339 av_parser_close(st->parser); 2340 av_free_packet(&st->cur_pkt); 2341 } 2342 av_metadata_free(&st->metadata); 2343 av_free(st->index_entries); 2344 av_free(st->codec->extradata); 2345 av_free(st->codec); 2346#if LIBAVFORMAT_VERSION_INT < (53<<16) 2347 av_free(st->filename); 2348#endif 2349 av_free(st->priv_data); 2350 av_free(st); 2351 } 2352 for(i=s->nb_programs-1; i>=0; i--) { 2353#if LIBAVFORMAT_VERSION_INT < (53<<16) 2354 av_freep(&s->programs[i]->provider_name); 2355 av_freep(&s->programs[i]->name); 2356#endif 2357 av_metadata_free(&s->programs[i]->metadata); 2358 av_freep(&s->programs[i]->stream_index); 2359 av_freep(&s->programs[i]); 2360 } 2361 av_freep(&s->programs); 2362 flush_packet_queue(s); 2363 av_freep(&s->priv_data); 2364 while(s->nb_chapters--) { 2365#if LIBAVFORMAT_VERSION_INT < (53<<16) 2366 av_free(s->chapters[s->nb_chapters]->title); 2367#endif 2368 av_metadata_free(&s->chapters[s->nb_chapters]->metadata); 2369 av_free(s->chapters[s->nb_chapters]); 2370 } 2371 av_freep(&s->chapters); 2372 av_metadata_free(&s->metadata); 2373 av_free(s); 2374} 2375 2376void av_close_input_file(AVFormatContext *s) 2377{ 2378 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? 
NULL : s->pb; 2379 av_close_input_stream(s); 2380 if (pb) 2381 url_fclose(pb); 2382} 2383 2384AVStream *av_new_stream(AVFormatContext *s, int id) 2385{ 2386 AVStream *st; 2387 int i; 2388 2389 if (s->nb_streams >= MAX_STREAMS) 2390 return NULL; 2391 2392 st = av_mallocz(sizeof(AVStream)); 2393 if (!st) 2394 return NULL; 2395 2396 st->codec= avcodec_alloc_context(); 2397 if (s->iformat) { 2398 /* no default bitrate if decoding */ 2399 st->codec->bit_rate = 0; 2400 } 2401 st->index = s->nb_streams; 2402 st->id = id; 2403 st->start_time = AV_NOPTS_VALUE; 2404 st->duration = AV_NOPTS_VALUE; 2405 /* we set the current DTS to 0 so that formats without any timestamps 2406 but durations get some timestamps, formats with some unknown 2407 timestamps have their first few packets buffered and the 2408 timestamps corrected before they are returned to the user */ 2409 st->cur_dts = 0; 2410 st->first_dts = AV_NOPTS_VALUE; 2411 2412 /* default pts setting is MPEG-like */ 2413 av_set_pts_info(st, 33, 1, 90000); 2414 st->last_IP_pts = AV_NOPTS_VALUE; 2415 for(i=0; i<MAX_REORDER_DELAY+1; i++) 2416 st->pts_buffer[i]= AV_NOPTS_VALUE; 2417 st->reference_dts = AV_NOPTS_VALUE; 2418 2419 st->sample_aspect_ratio = (AVRational){0,1}; 2420 2421 s->streams[s->nb_streams++] = st; 2422 return st; 2423} 2424 2425AVProgram *av_new_program(AVFormatContext *ac, int id) 2426{ 2427 AVProgram *program=NULL; 2428 int i; 2429 2430#ifdef DEBUG_SI 2431 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id); 2432#endif 2433 2434 for(i=0; i<ac->nb_programs; i++) 2435 if(ac->programs[i]->id == id) 2436 program = ac->programs[i]; 2437 2438 if(!program){ 2439 program = av_mallocz(sizeof(AVProgram)); 2440 if (!program) 2441 return NULL; 2442 dynarray_add(&ac->programs, &ac->nb_programs, program); 2443 program->discard = AVDISCARD_NONE; 2444 } 2445 program->id = id; 2446 2447 return program; 2448} 2449 2450AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, 
const char *title) 2451{ 2452 AVChapter *chapter = NULL; 2453 int i; 2454 2455 for(i=0; i<s->nb_chapters; i++) 2456 if(s->chapters[i]->id == id) 2457 chapter = s->chapters[i]; 2458 2459 if(!chapter){ 2460 chapter= av_mallocz(sizeof(AVChapter)); 2461 if(!chapter) 2462 return NULL; 2463 dynarray_add(&s->chapters, &s->nb_chapters, chapter); 2464 } 2465#if LIBAVFORMAT_VERSION_INT < (53<<16) 2466 av_free(chapter->title); 2467#endif 2468 av_metadata_set(&chapter->metadata, "title", title); 2469 chapter->id = id; 2470 chapter->time_base= time_base; 2471 chapter->start = start; 2472 chapter->end = end; 2473 2474 return chapter; 2475} 2476 2477/************************************************************/ 2478/* output media file */ 2479 2480int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap) 2481{ 2482 int ret; 2483 2484 if (s->oformat->priv_data_size > 0) { 2485 s->priv_data = av_mallocz(s->oformat->priv_data_size); 2486 if (!s->priv_data) 2487 return AVERROR(ENOMEM); 2488 } else 2489 s->priv_data = NULL; 2490 2491 if (s->oformat->set_parameters) { 2492 ret = s->oformat->set_parameters(s, ap); 2493 if (ret < 0) 2494 return ret; 2495 } 2496 return 0; 2497} 2498 2499int av_write_header(AVFormatContext *s) 2500{ 2501 int ret, i; 2502 AVStream *st; 2503 2504 // some sanity checks 2505 for(i=0;i<s->nb_streams;i++) { 2506 st = s->streams[i]; 2507 2508 switch (st->codec->codec_type) { 2509 case CODEC_TYPE_AUDIO: 2510 if(st->codec->sample_rate<=0){ 2511 av_log(s, AV_LOG_ERROR, "sample rate not set\n"); 2512 return -1; 2513 } 2514 if(!st->codec->block_align) 2515 st->codec->block_align = st->codec->channels * 2516 av_get_bits_per_sample(st->codec->codec_id) >> 3; 2517 break; 2518 case CODEC_TYPE_VIDEO: 2519 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too? 
2520 av_log(s, AV_LOG_ERROR, "time base not set\n"); 2521 return -1; 2522 } 2523 if(st->codec->width<=0 || st->codec->height<=0){ 2524 av_log(s, AV_LOG_ERROR, "dimensions not set\n"); 2525 return -1; 2526 } 2527 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){ 2528 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n"); 2529 return -1; 2530 } 2531 break; 2532 } 2533 2534 if(s->oformat->codec_tag){ 2535 if(st->codec->codec_tag){ 2536 //FIXME 2537 //check that tag + id is in the table 2538 //if neither is in the table -> OK 2539 //if tag is in the table with another id -> FAIL 2540 //if id is in the table with another tag -> FAIL unless strict < ? 2541 }else 2542 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id); 2543 } 2544 2545 if(s->oformat->flags & AVFMT_GLOBALHEADER && 2546 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER)) 2547 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i); 2548 } 2549 2550 if (!s->priv_data && s->oformat->priv_data_size > 0) { 2551 s->priv_data = av_mallocz(s->oformat->priv_data_size); 2552 if (!s->priv_data) 2553 return AVERROR(ENOMEM); 2554 } 2555 2556#if LIBAVFORMAT_VERSION_MAJOR < 53 2557 ff_metadata_mux_compat(s); 2558#endif 2559 2560 if(s->oformat->write_header){ 2561 ret = s->oformat->write_header(s); 2562 if (ret < 0) 2563 return ret; 2564 } 2565 2566 /* init PTS generation */ 2567 for(i=0;i<s->nb_streams;i++) { 2568 int64_t den = AV_NOPTS_VALUE; 2569 st = s->streams[i]; 2570 2571 switch (st->codec->codec_type) { 2572 case CODEC_TYPE_AUDIO: 2573 den = (int64_t)st->time_base.num * st->codec->sample_rate; 2574 break; 2575 case CODEC_TYPE_VIDEO: 2576 den = (int64_t)st->time_base.num * st->codec->time_base.den; 2577 break; 2578 default: 2579 break; 2580 } 2581 if (den != AV_NOPTS_VALUE) { 2582 if (den <= 0) 2583 return AVERROR_INVALIDDATA; 2584 av_frac_init(&st->pts, 0, 0, den); 
2585 } 2586 } 2587 return 0; 2588} 2589 2590//FIXME merge with compute_pkt_fields 2591static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){ 2592 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames); 2593 int num, den, frame_size, i; 2594 2595// av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index); 2596 2597/* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE) 2598 return -1;*/ 2599 2600 /* duration field */ 2601 if (pkt->duration == 0) { 2602 compute_frame_duration(&num, &den, st, NULL, pkt); 2603 if (den && num) { 2604 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num); 2605 } 2606 } 2607 2608 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0) 2609 pkt->pts= pkt->dts; 2610 2611 //XXX/FIXME this is a temporary hack until all encoders output pts 2612 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){ 2613 pkt->dts= 2614// pkt->pts= st->cur_dts; 2615 pkt->pts= st->pts.val; 2616 } 2617 2618 //calculate dts from pts 2619 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){ 2620 st->pts_buffer[0]= pkt->pts; 2621 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++) 2622 st->pts_buffer[i]= (i-delay-1) * pkt->duration; 2623 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++) 2624 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); 2625 2626 pkt->dts= st->pts_buffer[0]; 2627 } 2628 2629 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){ 2630 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts); 2631 return -1; 2632 } 2633 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){ 2634 
av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n"); 2635 return -1; 2636 } 2637 2638// av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts); 2639 st->cur_dts= pkt->dts; 2640 st->pts.val= pkt->dts; 2641 2642 /* update pts */ 2643 switch (st->codec->codec_type) { 2644 case CODEC_TYPE_AUDIO: 2645 frame_size = get_audio_frame_size(st->codec, pkt->size); 2646 2647 /* HACK/FIXME, we skip the initial 0 size packets as they are most 2648 likely equal to the encoder delay, but it would be better if we 2649 had the real timestamps from the encoder */ 2650 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) { 2651 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size); 2652 } 2653 break; 2654 case CODEC_TYPE_VIDEO: 2655 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num); 2656 break; 2657 default: 2658 break; 2659 } 2660 return 0; 2661} 2662 2663int av_write_frame(AVFormatContext *s, AVPacket *pkt) 2664{ 2665 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt); 2666 2667 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 2668 return ret; 2669 2670 ret= s->oformat->write_packet(s, pkt); 2671 if(!ret) 2672 ret= url_ferror(s->pb); 2673 return ret; 2674} 2675 2676void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt, 2677 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *)) 2678{ 2679 AVPacketList **next_point, *this_pktl; 2680 2681 this_pktl = av_mallocz(sizeof(AVPacketList)); 2682 this_pktl->pkt= *pkt; 2683 if(pkt->destruct == av_destruct_packet) 2684 pkt->destruct= NULL; // not shared -> must keep original from being freed 2685 else 2686 av_dup_packet(&this_pktl->pkt); //shared -> must dup 2687 2688 next_point = &s->packet_buffer; 2689 while(*next_point){ 2690 if(compare(s, &(*next_point)->pkt, pkt)) 2691 break; 2692 next_point= &(*next_point)->next; 2693 } 2694 this_pktl->next= *next_point; 2695 *next_point= this_pktl; 2696} 2697 
2698int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt) 2699{ 2700 AVStream *st = s->streams[ pkt ->stream_index]; 2701 AVStream *st2= s->streams[ next->stream_index]; 2702 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den; 2703 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den; 2704 2705 if (pkt->dts == AV_NOPTS_VALUE) 2706 return 0; 2707 2708 return next->dts * left > pkt->dts * right; //FIXME this can overflow 2709} 2710 2711int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){ 2712 AVPacketList *pktl; 2713 int stream_count=0; 2714 int streams[MAX_STREAMS]; 2715 2716 if(pkt){ 2717 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts); 2718 } 2719 2720 memset(streams, 0, sizeof(streams)); 2721 pktl= s->packet_buffer; 2722 while(pktl){ 2723//av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts); 2724 if(streams[ pktl->pkt.stream_index ] == 0) 2725 stream_count++; 2726 streams[ pktl->pkt.stream_index ]++; 2727 pktl= pktl->next; 2728 } 2729 2730 if(stream_count && (s->nb_streams == stream_count || flush)){ 2731 pktl= s->packet_buffer; 2732 *out= pktl->pkt; 2733 2734 s->packet_buffer= pktl->next; 2735 av_freep(&pktl); 2736 return 1; 2737 }else{ 2738 av_init_packet(out); 2739 return 0; 2740 } 2741} 2742 2743/** 2744 * Interleaves an AVPacket correctly so it can be muxed. 
2745 * @param out the interleaved packet will be output here 2746 * @param in the input packet 2747 * @param flush 1 if no further packets are available as input and all 2748 * remaining packets should be output 2749 * @return 1 if a packet was output, 0 if no packet could be output, 2750 * < 0 if an error occurred 2751 */ 2752static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){ 2753 if(s->oformat->interleave_packet) 2754 return s->oformat->interleave_packet(s, out, in, flush); 2755 else 2756 return av_interleave_packet_per_dts(s, out, in, flush); 2757} 2758 2759int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){ 2760 AVStream *st= s->streams[ pkt->stream_index]; 2761 2762 //FIXME/XXX/HACK drop zero sized packets 2763 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0) 2764 return 0; 2765 2766//av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts); 2767 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 2768 return -1; 2769 2770 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 2771 return -1; 2772 2773 for(;;){ 2774 AVPacket opkt; 2775 int ret= av_interleave_packet(s, &opkt, pkt, 0); 2776 if(ret<=0) //FIXME cleanup needed for ret<0 ? 2777 return ret; 2778 2779 ret= s->oformat->write_packet(s, &opkt); 2780 2781 av_free_packet(&opkt); 2782 pkt= NULL; 2783 2784 if(ret<0) 2785 return ret; 2786 if(url_ferror(s->pb)) 2787 return url_ferror(s->pb); 2788 } 2789} 2790 2791int av_write_trailer(AVFormatContext *s) 2792{ 2793 int ret, i; 2794 2795 for(;;){ 2796 AVPacket pkt; 2797 ret= av_interleave_packet(s, &pkt, NULL, 1); 2798 if(ret<0) //FIXME cleanup needed for ret<0 ? 
2799 goto fail; 2800 if(!ret) 2801 break; 2802 2803 ret= s->oformat->write_packet(s, &pkt); 2804 2805 av_free_packet(&pkt); 2806 2807 if(ret<0) 2808 goto fail; 2809 if(url_ferror(s->pb)) 2810 goto fail; 2811 } 2812 2813 if(s->oformat->write_trailer) 2814 ret = s->oformat->write_trailer(s); 2815fail: 2816 if(ret == 0) 2817 ret=url_ferror(s->pb); 2818 for(i=0;i<s->nb_streams;i++) 2819 av_freep(&s->streams[i]->priv_data); 2820 av_freep(&s->priv_data); 2821 return ret; 2822} 2823 2824void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx) 2825{ 2826 int i, j; 2827 AVProgram *program=NULL; 2828 void *tmp; 2829 2830 for(i=0; i<ac->nb_programs; i++){ 2831 if(ac->programs[i]->id != progid) 2832 continue; 2833 program = ac->programs[i]; 2834 for(j=0; j<program->nb_stream_indexes; j++) 2835 if(program->stream_index[j] == idx) 2836 return; 2837 2838 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1)); 2839 if(!tmp) 2840 return; 2841 program->stream_index = tmp; 2842 program->stream_index[program->nb_stream_indexes++] = idx; 2843 return; 2844 } 2845} 2846 2847static void print_fps(double d, const char *postfix){ 2848 uint64_t v= lrintf(d*100); 2849 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix); 2850 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix); 2851 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix); 2852} 2853 2854/* "user interface" functions */ 2855static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) 2856{ 2857 char buf[256]; 2858 int flags = (is_output ? 
ic->oformat->flags : ic->iformat->flags); 2859 AVStream *st = ic->streams[i]; 2860 int g = av_gcd(st->time_base.num, st->time_base.den); 2861 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0); 2862 avcodec_string(buf, sizeof(buf), st->codec, is_output); 2863 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i); 2864 /* the pid is an important information, so we display it */ 2865 /* XXX: add a generic system */ 2866 if (flags & AVFMT_SHOW_IDS) 2867 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id); 2868 if (lang) 2869 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value); 2870 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g); 2871 av_log(NULL, AV_LOG_INFO, ": %s", buf); 2872 if (st->sample_aspect_ratio.num && // default 2873 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) { 2874 AVRational display_aspect_ratio; 2875 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, 2876 st->codec->width*st->sample_aspect_ratio.num, 2877 st->codec->height*st->sample_aspect_ratio.den, 2878 1024*1024); 2879 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d", 2880 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den, 2881 display_aspect_ratio.num, display_aspect_ratio.den); 2882 } 2883 if(st->codec->codec_type == CODEC_TYPE_VIDEO){ 2884 if(st->r_frame_rate.den && st->r_frame_rate.num) 2885 print_fps(av_q2d(st->r_frame_rate), "tbr"); 2886 if(st->time_base.den && st->time_base.num) 2887 print_fps(1/av_q2d(st->time_base), "tbn"); 2888 if(st->codec->time_base.den && st->codec->time_base.num) 2889 print_fps(1/av_q2d(st->codec->time_base), "tbc"); 2890 } 2891 av_log(NULL, AV_LOG_INFO, "\n"); 2892} 2893 2894void dump_format(AVFormatContext *ic, 2895 int index, 2896 const char *url, 2897 int is_output) 2898{ 2899 int i; 2900 2901 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n", 2902 is_output ? "Output" : "Input", 2903 index, 2904 is_output ? ic->oformat->name : ic->iformat->name, 2905 is_output ? 
"to" : "from", url); 2906 if (!is_output) { 2907 av_log(NULL, AV_LOG_INFO, " Duration: "); 2908 if (ic->duration != AV_NOPTS_VALUE) { 2909 int hours, mins, secs, us; 2910 secs = ic->duration / AV_TIME_BASE; 2911 us = ic->duration % AV_TIME_BASE; 2912 mins = secs / 60; 2913 secs %= 60; 2914 hours = mins / 60; 2915 mins %= 60; 2916 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs, 2917 (100 * us) / AV_TIME_BASE); 2918 } else { 2919 av_log(NULL, AV_LOG_INFO, "N/A"); 2920 } 2921 if (ic->start_time != AV_NOPTS_VALUE) { 2922 int secs, us; 2923 av_log(NULL, AV_LOG_INFO, ", start: "); 2924 secs = ic->start_time / AV_TIME_BASE; 2925 us = ic->start_time % AV_TIME_BASE; 2926 av_log(NULL, AV_LOG_INFO, "%d.%06d", 2927 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE)); 2928 } 2929 av_log(NULL, AV_LOG_INFO, ", bitrate: "); 2930 if (ic->bit_rate) { 2931 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000); 2932 } else { 2933 av_log(NULL, AV_LOG_INFO, "N/A"); 2934 } 2935 av_log(NULL, AV_LOG_INFO, "\n"); 2936 } 2937 if(ic->nb_programs) { 2938 int j, k; 2939 for(j=0; j<ic->nb_programs; j++) { 2940 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata, 2941 "name", NULL, 0); 2942 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id, 2943 name ? 
name->value : ""); 2944 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) 2945 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output); 2946 } 2947 } else 2948 for(i=0;i<ic->nb_streams;i++) 2949 dump_stream_format(ic, i, index, is_output); 2950} 2951 2952#if LIBAVFORMAT_VERSION_MAJOR < 53 2953int parse_image_size(int *width_ptr, int *height_ptr, const char *str) 2954{ 2955 return av_parse_video_frame_size(width_ptr, height_ptr, str); 2956} 2957 2958int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg) 2959{ 2960 AVRational frame_rate; 2961 int ret = av_parse_video_frame_rate(&frame_rate, arg); 2962 *frame_rate_num= frame_rate.num; 2963 *frame_rate_den= frame_rate.den; 2964 return ret; 2965} 2966#endif 2967 2968int64_t av_gettime(void) 2969{ 2970 struct timeval tv; 2971 gettimeofday(&tv,NULL); 2972 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec; 2973} 2974 2975int64_t parse_date(const char *datestr, int duration) 2976{ 2977 const char *p; 2978 int64_t t; 2979 struct tm dt; 2980 int i; 2981 static const char * const date_fmt[] = { 2982 "%Y-%m-%d", 2983 "%Y%m%d", 2984 }; 2985 static const char * const time_fmt[] = { 2986 "%H:%M:%S", 2987 "%H%M%S", 2988 }; 2989 const char *q; 2990 int is_utc, len; 2991 char lastch; 2992 int negative = 0; 2993 2994#undef time 2995 time_t now = time(0); 2996 2997 len = strlen(datestr); 2998 if (len > 0) 2999 lastch = datestr[len - 1]; 3000 else 3001 lastch = '\0'; 3002 is_utc = (lastch == 'z' || lastch == 'Z'); 3003 3004 memset(&dt, 0, sizeof(dt)); 3005 3006 p = datestr; 3007 q = NULL; 3008 if (!duration) { 3009 if (!strncasecmp(datestr, "now", len)) 3010 return (int64_t) now * 1000000; 3011 3012 /* parse the year-month-day part */ 3013 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) { 3014 q = small_strptime(p, date_fmt[i], &dt); 3015 if (q) { 3016 break; 3017 } 3018 } 3019 3020 /* if the year-month-day part is missing, then take the 3021 * current year-month-day time */ 3022 if (!q) { 
3023 if (is_utc) { 3024 dt = *gmtime(&now); 3025 } else { 3026 dt = *localtime(&now); 3027 } 3028 dt.tm_hour = dt.tm_min = dt.tm_sec = 0; 3029 } else { 3030 p = q; 3031 } 3032 3033 if (*p == 'T' || *p == 't' || *p == ' ') 3034 p++; 3035 3036 /* parse the hour-minute-second part */ 3037 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) { 3038 q = small_strptime(p, time_fmt[i], &dt); 3039 if (q) { 3040 break; 3041 } 3042 } 3043 } else { 3044 /* parse datestr as a duration */ 3045 if (p[0] == '-') { 3046 negative = 1; 3047 ++p; 3048 } 3049 /* parse datestr as HH:MM:SS */ 3050 q = small_strptime(p, time_fmt[0], &dt); 3051 if (!q) { 3052 /* parse datestr as S+ */ 3053 dt.tm_sec = strtol(p, (char **)&q, 10); 3054 if (q == p) 3055 /* the parsing didn't succeed */ 3056 return INT64_MIN; 3057 dt.tm_min = 0; 3058 dt.tm_hour = 0; 3059 } 3060 } 3061 3062 /* Now we have all the fields that we can get */ 3063 if (!q) { 3064 return INT64_MIN; 3065 } 3066 3067 if (duration) { 3068 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec; 3069 } else { 3070 dt.tm_isdst = -1; /* unknown */ 3071 if (is_utc) { 3072 t = mktimegm(&dt); 3073 } else { 3074 t = mktime(&dt); 3075 } 3076 } 3077 3078 t *= 1000000; 3079 3080 /* parse the .m... part */ 3081 if (*q == '.') { 3082 int val, n; 3083 q++; 3084 for (val = 0, n = 100000; n >= 1; n /= 10, q++) { 3085 if (!isdigit(*q)) 3086 break; 3087 val += n * (*q - '0'); 3088 } 3089 t += val; 3090 } 3091 return negative ? 
-t : t; 3092} 3093 3094int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info) 3095{ 3096 const char *p; 3097 char tag[128], *q; 3098 3099 p = info; 3100 if (*p == '?') 3101 p++; 3102 for(;;) { 3103 q = tag; 3104 while (*p != '\0' && *p != '=' && *p != '&') { 3105 if ((q - tag) < sizeof(tag) - 1) 3106 *q++ = *p; 3107 p++; 3108 } 3109 *q = '\0'; 3110 q = arg; 3111 if (*p == '=') { 3112 p++; 3113 while (*p != '&' && *p != '\0') { 3114 if ((q - arg) < arg_size - 1) { 3115 if (*p == '+') 3116 *q++ = ' '; 3117 else 3118 *q++ = *p; 3119 } 3120 p++; 3121 } 3122 *q = '\0'; 3123 } 3124 if (!strcmp(tag, tag1)) 3125 return 1; 3126 if (*p != '&') 3127 break; 3128 p++; 3129 } 3130 return 0; 3131} 3132 3133int av_get_frame_filename(char *buf, int buf_size, 3134 const char *path, int number) 3135{ 3136 const char *p; 3137 char *q, buf1[20], c; 3138 int nd, len, percentd_found; 3139 3140 q = buf; 3141 p = path; 3142 percentd_found = 0; 3143 for(;;) { 3144 c = *p++; 3145 if (c == '\0') 3146 break; 3147 if (c == '%') { 3148 do { 3149 nd = 0; 3150 while (isdigit(*p)) { 3151 nd = nd * 10 + *p++ - '0'; 3152 } 3153 c = *p++; 3154 } while (isdigit(c)); 3155 3156 switch(c) { 3157 case '%': 3158 goto addchar; 3159 case 'd': 3160 if (percentd_found) 3161 goto fail; 3162 percentd_found = 1; 3163 snprintf(buf1, sizeof(buf1), "%0*d", nd, number); 3164 len = strlen(buf1); 3165 if ((q - buf + len) > buf_size - 1) 3166 goto fail; 3167 memcpy(q, buf1, len); 3168 q += len; 3169 break; 3170 default: 3171 goto fail; 3172 } 3173 } else { 3174 addchar: 3175 if ((q - buf) < buf_size - 1) 3176 *q++ = c; 3177 } 3178 } 3179 if (!percentd_found) 3180 goto fail; 3181 *q = '\0'; 3182 return 0; 3183 fail: 3184 *q = '\0'; 3185 return -1; 3186} 3187 3188static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size) 3189{ 3190 int len, i, j, c; 3191#define PRINT(...) 
do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0) 3192 3193 for(i=0;i<size;i+=16) { 3194 len = size - i; 3195 if (len > 16) 3196 len = 16; 3197 PRINT("%08x ", i); 3198 for(j=0;j<16;j++) { 3199 if (j < len) 3200 PRINT(" %02x", buf[i+j]); 3201 else 3202 PRINT(" "); 3203 } 3204 PRINT(" "); 3205 for(j=0;j<len;j++) { 3206 c = buf[i+j]; 3207 if (c < ' ' || c > '~') 3208 c = '.'; 3209 PRINT("%c", c); 3210 } 3211 PRINT("\n"); 3212 } 3213#undef PRINT 3214} 3215 3216void av_hex_dump(FILE *f, uint8_t *buf, int size) 3217{ 3218 hex_dump_internal(NULL, f, 0, buf, size); 3219} 3220 3221void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size) 3222{ 3223 hex_dump_internal(avcl, NULL, level, buf, size); 3224} 3225 3226 //FIXME needs to know the time_base 3227static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload) 3228{ 3229#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0) 3230 PRINT("stream #%d:\n", pkt->stream_index); 3231 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0)); 3232 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE); 3233 /* DTS is _always_ valid after av_read_frame() */ 3234 PRINT(" dts="); 3235 if (pkt->dts == AV_NOPTS_VALUE) 3236 PRINT("N/A"); 3237 else 3238 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE); 3239 /* PTS may not be known if B-frames are present. 
*/ 3240 PRINT(" pts="); 3241 if (pkt->pts == AV_NOPTS_VALUE) 3242 PRINT("N/A"); 3243 else 3244 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE); 3245 PRINT("\n"); 3246 PRINT(" size=%d\n", pkt->size); 3247#undef PRINT 3248 if (dump_payload) 3249 av_hex_dump(f, pkt->data, pkt->size); 3250} 3251 3252void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload) 3253{ 3254 pkt_dump_internal(NULL, f, 0, pkt, dump_payload); 3255} 3256 3257void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload) 3258{ 3259 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload); 3260} 3261 3262void url_split(char *proto, int proto_size, 3263 char *authorization, int authorization_size, 3264 char *hostname, int hostname_size, 3265 int *port_ptr, 3266 char *path, int path_size, 3267 const char *url) 3268{ 3269 const char *p, *ls, *at, *col, *brk; 3270 3271 if (port_ptr) *port_ptr = -1; 3272 if (proto_size > 0) proto[0] = 0; 3273 if (authorization_size > 0) authorization[0] = 0; 3274 if (hostname_size > 0) hostname[0] = 0; 3275 if (path_size > 0) path[0] = 0; 3276 3277 /* parse protocol */ 3278 if ((p = strchr(url, ':'))) { 3279 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url)); 3280 p++; /* skip ':' */ 3281 if (*p == '/') p++; 3282 if (*p == '/') p++; 3283 } else { 3284 /* no protocol means plain filename */ 3285 av_strlcpy(path, url, path_size); 3286 return; 3287 } 3288 3289 /* separate path from hostname */ 3290 ls = strchr(p, '/'); 3291 if(!ls) 3292 ls = strchr(p, '?'); 3293 if(ls) 3294 av_strlcpy(path, ls, path_size); 3295 else 3296 ls = &p[strlen(p)]; // XXX 3297 3298 /* the rest is hostname, use that to parse auth/port */ 3299 if (ls != p) { 3300 /* authorization (user[:pass]@hostname) */ 3301 if ((at = strchr(p, '@')) && at < ls) { 3302 av_strlcpy(authorization, p, 3303 FFMIN(authorization_size, at + 1 - p)); 3304 p = at + 1; /* skip '@' */ 3305 } 3306 3307 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) { 3308 /* [host]:port */ 3309 av_strlcpy(hostname, p + 
1, 3310 FFMIN(hostname_size, brk - p)); 3311 if (brk[1] == ':' && port_ptr) 3312 *port_ptr = atoi(brk + 2); 3313 } else if ((col = strchr(p, ':')) && col < ls) { 3314 av_strlcpy(hostname, p, 3315 FFMIN(col + 1 - p, hostname_size)); 3316 if (port_ptr) *port_ptr = atoi(col + 1); 3317 } else 3318 av_strlcpy(hostname, p, 3319 FFMIN(ls + 1 - p, hostname_size)); 3320 } 3321} 3322 3323char *ff_data_to_hex(char *buff, const uint8_t *src, int s) 3324{ 3325 int i; 3326 static const char hex_table[16] = { '0', '1', '2', '3', 3327 '4', '5', '6', '7', 3328 '8', '9', 'A', 'B', 3329 'C', 'D', 'E', 'F' }; 3330 3331 for(i = 0; i < s; i++) { 3332 buff[i * 2] = hex_table[src[i] >> 4]; 3333 buff[i * 2 + 1] = hex_table[src[i] & 0xF]; 3334 } 3335 3336 return buff; 3337} 3338 3339void av_set_pts_info(AVStream *s, int pts_wrap_bits, 3340 unsigned int pts_num, unsigned int pts_den) 3341{ 3342 unsigned int gcd= av_gcd(pts_num, pts_den); 3343 s->pts_wrap_bits = pts_wrap_bits; 3344 s->time_base.num = pts_num/gcd; 3345 s->time_base.den = pts_den/gcd; 3346 3347 if(gcd>1) 3348 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd); 3349} 3350