/*
 * various utility functions for use within Libav
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* #define DEBUG */

#include "avformat.h"
#include "avio_internal.h"
#include "internal.h"
#include "libavcodec/internal.h"
#include "libavcodec/bytestream.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavutil/pixdesc.h"
#include "metadata.h"
#include "id3v2.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
#include "riff.h"
#include "audiointerleave.h"
#include "url.h"
#include <sys/time.h>
#include <time.h>
#include <stdarg.h>
#if CONFIG_NETWORK
#include "network.h"
#endif

/* keep assert() active in this file even in release builds */
#undef NDEBUG
#include <assert.h>

/**
 * @file
 * various utility functions for use within Libav
 */

/** Return the LIBAVFORMAT_VERSION_INT the library was compiled with. */
unsigned avformat_version(void)
{
    return LIBAVFORMAT_VERSION_INT;
}

/** Return the build-time configuration string of libavformat. */
const char *avformat_configuration(void)
{
    return LIBAV_CONFIGURATION;
}

/** Return the license string, skipping the "libavformat license: " prefix. */
const char *avformat_license(void)
{
#define LICENSE_PREFIX "libavformat license: "
    return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}

/*
fraction handling */ 73 74/** 75 * f = val + (num / den) + 0.5. 76 * 77 * 'num' is normalized so that it is such as 0 <= num < den. 78 * 79 * @param f fractional number 80 * @param val integer value 81 * @param num must be >= 0 82 * @param den must be >= 1 83 */ 84static void frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den) 85{ 86 num += (den >> 1); 87 if (num >= den) { 88 val += num / den; 89 num = num % den; 90 } 91 f->val = val; 92 f->num = num; 93 f->den = den; 94} 95 96/** 97 * Fractional addition to f: f = f + (incr / f->den). 98 * 99 * @param f fractional number 100 * @param incr increment, can be positive or negative 101 */ 102static void frac_add(AVFrac *f, int64_t incr) 103{ 104 int64_t num, den; 105 106 num = f->num + incr; 107 den = f->den; 108 if (num < 0) { 109 f->val += num / den; 110 num = num % den; 111 if (num < 0) { 112 num += den; 113 f->val--; 114 } 115 } else if (num >= den) { 116 f->val += num / den; 117 num = num % den; 118 } 119 f->num = num; 120} 121 122/** head of registered input format linked list */ 123static AVInputFormat *first_iformat = NULL; 124/** head of registered output format linked list */ 125static AVOutputFormat *first_oformat = NULL; 126 127AVInputFormat *av_iformat_next(AVInputFormat *f) 128{ 129 if(f) return f->next; 130 else return first_iformat; 131} 132 133AVOutputFormat *av_oformat_next(AVOutputFormat *f) 134{ 135 if(f) return f->next; 136 else return first_oformat; 137} 138 139void av_register_input_format(AVInputFormat *format) 140{ 141 AVInputFormat **p; 142 p = &first_iformat; 143 while (*p != NULL) p = &(*p)->next; 144 *p = format; 145 format->next = NULL; 146} 147 148void av_register_output_format(AVOutputFormat *format) 149{ 150 AVOutputFormat **p; 151 p = &first_oformat; 152 while (*p != NULL) p = &(*p)->next; 153 *p = format; 154 format->next = NULL; 155} 156 157int av_match_ext(const char *filename, const char *extensions) 158{ 159 const char *ext, *p; 160 char ext1[32], *q; 161 162 if(!filename) 
163 return 0; 164 165 ext = strrchr(filename, '.'); 166 if (ext) { 167 ext++; 168 p = extensions; 169 for(;;) { 170 q = ext1; 171 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1) 172 *q++ = *p++; 173 *q = '\0'; 174 if (!av_strcasecmp(ext1, ext)) 175 return 1; 176 if (*p == '\0') 177 break; 178 p++; 179 } 180 } 181 return 0; 182} 183 184static int match_format(const char *name, const char *names) 185{ 186 const char *p; 187 int len, namelen; 188 189 if (!name || !names) 190 return 0; 191 192 namelen = strlen(name); 193 while ((p = strchr(names, ','))) { 194 len = FFMAX(p - names, namelen); 195 if (!av_strncasecmp(name, names, len)) 196 return 1; 197 names = p+1; 198 } 199 return !av_strcasecmp(name, names); 200} 201 202AVOutputFormat *av_guess_format(const char *short_name, const char *filename, 203 const char *mime_type) 204{ 205 AVOutputFormat *fmt = NULL, *fmt_found; 206 int score_max, score; 207 208 /* specific test for image sequences */ 209#if CONFIG_IMAGE2_MUXER 210 if (!short_name && filename && 211 av_filename_number_test(filename) && 212 ff_guess_image2_codec(filename) != CODEC_ID_NONE) { 213 return av_guess_format("image2", NULL, NULL); 214 } 215#endif 216 /* Find the proper file type. 
*/ 217 fmt_found = NULL; 218 score_max = 0; 219 while ((fmt = av_oformat_next(fmt))) { 220 score = 0; 221 if (fmt->name && short_name && !strcmp(fmt->name, short_name)) 222 score += 100; 223 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type)) 224 score += 10; 225 if (filename && fmt->extensions && 226 av_match_ext(filename, fmt->extensions)) { 227 score += 5; 228 } 229 if (score > score_max) { 230 score_max = score; 231 fmt_found = fmt; 232 } 233 } 234 return fmt_found; 235} 236 237enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, 238 const char *filename, const char *mime_type, enum AVMediaType type){ 239 if(type == AVMEDIA_TYPE_VIDEO){ 240 enum CodecID codec_id= CODEC_ID_NONE; 241 242#if CONFIG_IMAGE2_MUXER 243 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){ 244 codec_id= ff_guess_image2_codec(filename); 245 } 246#endif 247 if(codec_id == CODEC_ID_NONE) 248 codec_id= fmt->video_codec; 249 return codec_id; 250 }else if(type == AVMEDIA_TYPE_AUDIO) 251 return fmt->audio_codec; 252 else if (type == AVMEDIA_TYPE_SUBTITLE) 253 return fmt->subtitle_codec; 254 else 255 return CODEC_ID_NONE; 256} 257 258AVInputFormat *av_find_input_format(const char *short_name) 259{ 260 AVInputFormat *fmt = NULL; 261 while ((fmt = av_iformat_next(fmt))) { 262 if (match_format(short_name, fmt->name)) 263 return fmt; 264 } 265 return NULL; 266} 267 268 269int av_get_packet(AVIOContext *s, AVPacket *pkt, int size) 270{ 271 int ret= av_new_packet(pkt, size); 272 273 if(ret<0) 274 return ret; 275 276 pkt->pos= avio_tell(s); 277 278 ret= avio_read(s, pkt->data, size); 279 if(ret<=0) 280 av_free_packet(pkt); 281 else 282 av_shrink_packet(pkt, ret); 283 284 return ret; 285} 286 287int av_append_packet(AVIOContext *s, AVPacket *pkt, int size) 288{ 289 int ret; 290 int old_size; 291 if (!pkt->size) 292 return av_get_packet(s, pkt, size); 293 old_size = pkt->size; 294 ret = av_grow_packet(pkt, size); 295 if (ret < 0) 296 return ret; 
297 ret = avio_read(s, pkt->data + old_size, size); 298 av_shrink_packet(pkt, old_size + FFMAX(ret, 0)); 299 return ret; 300} 301 302 303int av_filename_number_test(const char *filename) 304{ 305 char buf[1024]; 306 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0); 307} 308 309AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max) 310{ 311 AVProbeData lpd = *pd; 312 AVInputFormat *fmt1 = NULL, *fmt; 313 int score, id3 = 0; 314 315 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) { 316 int id3len = ff_id3v2_tag_len(lpd.buf); 317 if (lpd.buf_size > id3len + 16) { 318 lpd.buf += id3len; 319 lpd.buf_size -= id3len; 320 } 321 id3 = 1; 322 } 323 324 fmt = NULL; 325 while ((fmt1 = av_iformat_next(fmt1))) { 326 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE)) 327 continue; 328 score = 0; 329 if (fmt1->read_probe) { 330 score = fmt1->read_probe(&lpd); 331 } else if (fmt1->extensions) { 332 if (av_match_ext(lpd.filename, fmt1->extensions)) { 333 score = 50; 334 } 335 } 336 if (score > *score_max) { 337 *score_max = score; 338 fmt = fmt1; 339 }else if (score == *score_max) 340 fmt = NULL; 341 } 342 343 /* a hack for files with huge id3v2 tags -- try to guess by file extension. 
*/ 344 if (!fmt && is_opened && *score_max < AVPROBE_SCORE_MAX/4) { 345 while ((fmt = av_iformat_next(fmt))) 346 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) { 347 *score_max = AVPROBE_SCORE_MAX/4; 348 break; 349 } 350 } 351 352 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4-1) { 353 while ((fmt = av_iformat_next(fmt))) 354 if (fmt->extensions && av_match_ext("mp3", fmt->extensions)) { 355 *score_max = AVPROBE_SCORE_MAX/4-1; 356 break; 357 } 358 } 359 360 return fmt; 361} 362 363AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){ 364 int score=0; 365 return av_probe_input_format2(pd, is_opened, &score); 366} 367 368static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score) 369{ 370 static const struct { 371 const char *name; enum CodecID id; enum AVMediaType type; 372 } fmt_id_type[] = { 373 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO }, 374 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO }, 375 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO }, 376 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO }, 377 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO }, 378 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO }, 379 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO }, 380 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO }, 381 { 0 } 382 }; 383 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score); 384 385 if (fmt) { 386 int i; 387 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n", 388 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score); 389 for (i = 0; fmt_id_type[i].name; i++) { 390 if (!strcmp(fmt->name, fmt_id_type[i].name)) { 391 st->codec->codec_id = fmt_id_type[i].id; 392 st->codec->codec_type = fmt_id_type[i].type; 393 break; 394 } 395 } 396 } 397 return !!fmt; 398} 399 400/************************************************************/ 401/* input media file */ 402 403#if FF_API_FORMAT_PARAMETERS 404static AVDictionary 
/* (return type "static AVDictionary" is on the preceding line)
 * Translate the fields of the deprecated AVFormatParameters struct into the
 * equivalent demuxer option dictionary. Returns a newly allocated dictionary
 * the caller must free with av_dict_free(), or NULL if ap is NULL or empty. */
*convert_format_parameters(AVFormatParameters *ap)
{
    char buf[1024];
    AVDictionary *opts = NULL;

    if (!ap)
        return NULL;

    /* only fields that were explicitly set are forwarded as options */
    if (ap->time_base.num) {
        /* note: the option is a frame rate, i.e. the inverse of the time base */
        snprintf(buf, sizeof(buf), "%d/%d", ap->time_base.den, ap->time_base.num);
        av_dict_set(&opts, "framerate", buf, 0);
    }
    if (ap->sample_rate) {
        snprintf(buf, sizeof(buf), "%d", ap->sample_rate);
        av_dict_set(&opts, "sample_rate", buf, 0);
    }
    if (ap->channels) {
        snprintf(buf, sizeof(buf), "%d", ap->channels);
        av_dict_set(&opts, "channels", buf, 0);
    }
    if (ap->width || ap->height) {
        snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height);
        av_dict_set(&opts, "video_size", buf, 0);
    }
    if (ap->pix_fmt != PIX_FMT_NONE) {
        av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0);
    }
    if (ap->channel) {
        snprintf(buf, sizeof(buf), "%d", ap->channel);
        av_dict_set(&opts, "channel", buf, 0);
    }
    if (ap->standard) {
        av_dict_set(&opts, "standard", ap->standard, 0);
    }
    if (ap->mpeg2ts_compute_pcr) {
        av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0);
    }
    if (ap->initial_pause) {
        av_dict_set(&opts, "initial_pause", "1", 0);
    }
    return opts;
}

/**
 * Open a media file from an IO stream. 'fmt' must be specified.
 *
 * Deprecated compatibility wrapper around avformat_open_input(): converts
 * 'ap' to an option dictionary and passes 'pb' as the custom IO context.
 */
int av_open_input_stream(AVFormatContext **ic_ptr,
                         AVIOContext *pb, const char *filename,
                         AVInputFormat *fmt, AVFormatParameters *ap)
{
    int err;
    AVDictionary *opts;
    AVFormatContext *ic;
    AVFormatParameters default_ap;

    if(!ap){
        /* no parameters supplied: use a zeroed local copy */
        ap=&default_ap;
        memset(ap, 0, sizeof(default_ap));
    }
    opts = convert_format_parameters(ap);

    if(!ap->prealloced_context)
        ic = avformat_alloc_context();
    else
        ic = *ic_ptr;
    if (!ic) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    if (pb && fmt && fmt->flags & AVFMT_NOFILE)
        av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
                                   "will be ignored with AVFMT_NOFILE format.\n");
    else
        ic->pb = pb;

    if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0)
        goto fail;
    ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above

fail:
    /* on failure avformat_open_input() has freed ic and set it to NULL */
    *ic_ptr = ic;
    av_dict_free(&opts);
    return err;
}
#endif

/** size of probe buffer, for guessing file type from file contents */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)

/* Probe the input by reading increasingly large chunks (doubling from
 * PROBE_BUF_MIN up to max_probe_size) until a format is detected. */
int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
                          const char *filename, void *logctx,
                          unsigned int offset, unsigned int max_probe_size)
{
    AVProbeData pd = { filename ? filename : "", NULL, -offset };
    unsigned char *buf = NULL;
    int ret = 0, probe_size;

    if (!max_probe_size) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size > PROBE_BUF_MAX) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size < PROBE_BUF_MIN) {
        return AVERROR(EINVAL);
    }

    if (offset >= max_probe_size) {
        return AVERROR(EINVAL);
    }

    for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
        probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
        /* require a decisive score until the last, largest attempt */
        int score = probe_size < max_probe_size ?
AVPROBE_SCORE_MAX/4 : 0; 517 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1; 518 519 if (probe_size < offset) { 520 continue; 521 } 522 523 /* read probe data */ 524 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE); 525 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) { 526 /* fail if error was not end of file, otherwise, lower score */ 527 if (ret != AVERROR_EOF) { 528 av_free(buf); 529 return ret; 530 } 531 score = 0; 532 ret = 0; /* error was end of file, nothing read */ 533 } 534 pd.buf_size += ret; 535 pd.buf = &buf[offset]; 536 537 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE); 538 539 /* guess file format */ 540 *fmt = av_probe_input_format2(&pd, 1, &score); 541 if(*fmt){ 542 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration 543 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score); 544 }else 545 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score); 546 } 547 } 548 549 if (!*fmt) { 550 av_free(buf); 551 return AVERROR_INVALIDDATA; 552 } 553 554 /* rewind. 
reuse probe buffer to avoid seeking */ 555 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0) 556 av_free(buf); 557 558 return ret; 559} 560 561#if FF_API_FORMAT_PARAMETERS 562int av_open_input_file(AVFormatContext **ic_ptr, const char *filename, 563 AVInputFormat *fmt, 564 int buf_size, 565 AVFormatParameters *ap) 566{ 567 int err; 568 AVDictionary *opts = convert_format_parameters(ap); 569 570 if (!ap || !ap->prealloced_context) 571 *ic_ptr = NULL; 572 573 err = avformat_open_input(ic_ptr, filename, fmt, &opts); 574 575 av_dict_free(&opts); 576 return err; 577} 578#endif 579 580/* open input file and probe the format if necessary */ 581static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options) 582{ 583 int ret; 584 AVProbeData pd = {filename, NULL, 0}; 585 586 if (s->pb) { 587 s->flags |= AVFMT_FLAG_CUSTOM_IO; 588 if (!s->iformat) 589 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0); 590 else if (s->iformat->flags & AVFMT_NOFILE) 591 return AVERROR(EINVAL); 592 return 0; 593 } 594 595 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) || 596 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0)))) 597 return 0; 598 599 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ, 600 &s->interrupt_callback, options)) < 0) 601 return ret; 602 if (s->iformat) 603 return 0; 604 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0); 605} 606 607int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options) 608{ 609 AVFormatContext *s = *ps; 610 int ret = 0; 611 AVFormatParameters ap = { { 0 } }; 612 AVDictionary *tmp = NULL; 613 614 if (!s && !(s = avformat_alloc_context())) 615 return AVERROR(ENOMEM); 616 if (fmt) 617 s->iformat = fmt; 618 619 if (options) 620 av_dict_copy(&tmp, *options, 0); 621 622 if ((ret = av_opt_set_dict(s, &tmp)) < 0) 623 goto fail; 624 625 if ((ret = init_input(s, filename, &tmp)) < 0) 626 goto fail; 627 628 /* 
check filename in case an image number is expected */ 629 if (s->iformat->flags & AVFMT_NEEDNUMBER) { 630 if (!av_filename_number_test(filename)) { 631 ret = AVERROR(EINVAL); 632 goto fail; 633 } 634 } 635 636 s->duration = s->start_time = AV_NOPTS_VALUE; 637 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename)); 638 639 /* allocate private data */ 640 if (s->iformat->priv_data_size > 0) { 641 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) { 642 ret = AVERROR(ENOMEM); 643 goto fail; 644 } 645 if (s->iformat->priv_class) { 646 *(const AVClass**)s->priv_data = s->iformat->priv_class; 647 av_opt_set_defaults(s->priv_data); 648 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0) 649 goto fail; 650 } 651 } 652 653 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */ 654 if (s->pb) 655 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC); 656 657 if (s->iformat->read_header) 658 if ((ret = s->iformat->read_header(s, &ap)) < 0) 659 goto fail; 660 661 if (s->pb && !s->data_offset) 662 s->data_offset = avio_tell(s->pb); 663 664 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE; 665 666 if (options) { 667 av_dict_free(options); 668 *options = tmp; 669 } 670 *ps = s; 671 return 0; 672 673fail: 674 av_dict_free(&tmp); 675 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO)) 676 avio_close(s->pb); 677 avformat_free_context(s); 678 *ps = NULL; 679 return ret; 680} 681 682/*******************************************************/ 683 684static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt, 685 AVPacketList **plast_pktl){ 686 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList)); 687 if (!pktl) 688 return NULL; 689 690 if (*packet_buffer) 691 (*plast_pktl)->next = pktl; 692 else 693 *packet_buffer = pktl; 694 695 /* add the packet in the buffered packet list */ 696 *plast_pktl = pktl; 697 pktl->pkt= *pkt; 698 return &pktl->pkt; 699} 700 701int av_read_packet(AVFormatContext *s, AVPacket *pkt) 702{ 703 int ret, i; 704 AVStream 
*st; 705 706 for(;;){ 707 AVPacketList *pktl = s->raw_packet_buffer; 708 709 if (pktl) { 710 *pkt = pktl->pkt; 711 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE || 712 !s->streams[pkt->stream_index]->probe_packets || 713 s->raw_packet_buffer_remaining_size < pkt->size){ 714 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data; 715 av_freep(&pd->buf); 716 pd->buf_size = 0; 717 s->raw_packet_buffer = pktl->next; 718 s->raw_packet_buffer_remaining_size += pkt->size; 719 av_free(pktl); 720 return 0; 721 } 722 } 723 724 av_init_packet(pkt); 725 ret= s->iformat->read_packet(s, pkt); 726 if (ret < 0) { 727 if (!pktl || ret == AVERROR(EAGAIN)) 728 return ret; 729 for (i = 0; i < s->nb_streams; i++) 730 s->streams[i]->probe_packets = 0; 731 continue; 732 } 733 734 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) && 735 (pkt->flags & AV_PKT_FLAG_CORRUPT)) { 736 av_log(s, AV_LOG_WARNING, 737 "Dropped corrupted packet (stream = %d)\n", 738 pkt->stream_index); 739 av_free_packet(pkt); 740 continue; 741 } 742 743 st= s->streams[pkt->stream_index]; 744 745 switch(st->codec->codec_type){ 746 case AVMEDIA_TYPE_VIDEO: 747 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id; 748 break; 749 case AVMEDIA_TYPE_AUDIO: 750 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id; 751 break; 752 case AVMEDIA_TYPE_SUBTITLE: 753 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id; 754 break; 755 } 756 757 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE || 758 !st->probe_packets)) 759 return ret; 760 761 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end); 762 s->raw_packet_buffer_remaining_size -= pkt->size; 763 764 if(st->codec->codec_id == CODEC_ID_PROBE){ 765 AVProbeData *pd = &st->probe_data; 766 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index); 767 --st->probe_packets; 768 769 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE); 770 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size); 
771 pd->buf_size += pkt->size; 772 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE); 773 774 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){ 775 //FIXME we do not reduce score to 0 for the case of running out of buffer space in bytes 776 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0); 777 if(st->codec->codec_id != CODEC_ID_PROBE){ 778 pd->buf_size=0; 779 av_freep(&pd->buf); 780 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index); 781 } 782 } 783 } 784 } 785} 786 787/**********************************************************/ 788 789/** 790 * Get the number of samples of an audio frame. Return -1 on error. 791 */ 792static int get_audio_frame_size(AVCodecContext *enc, int size) 793{ 794 int frame_size; 795 796 if(enc->codec_id == CODEC_ID_VORBIS) 797 return -1; 798 799 if (enc->frame_size <= 1) { 800 int bits_per_sample = av_get_bits_per_sample(enc->codec_id); 801 802 if (bits_per_sample) { 803 if (enc->channels == 0) 804 return -1; 805 frame_size = (size << 3) / (bits_per_sample * enc->channels); 806 } else { 807 /* used for example by ADPCM codecs */ 808 if (enc->bit_rate == 0) 809 return -1; 810 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate; 811 } 812 } else { 813 frame_size = enc->frame_size; 814 } 815 return frame_size; 816} 817 818 819/** 820 * Return the frame duration in seconds. Return 0 if not available. 
*/
/* Sets *pnum / *pden so that the duration of one frame of 'pkt' equals
 * num/den seconds; both are set to 0 when the duration cannot be derived. */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
{
    int frame_size;

    *pnum = 0;
    *pden = 0;
    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (st->r_frame_rate.num) {
            *pnum = st->r_frame_rate.den;
            *pden = st->r_frame_rate.num;
        } else if(st->time_base.num*1000LL > st->time_base.den) {
            /* stream time base looks like a sane frame duration (>1ms) */
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            if (pc && pc->repeat_pict) {
                /* scale by (1 + repeat_pict); divide the denominator instead
                 * when multiplying the numerator would overflow an int */
                if (*pnum > INT_MAX / (1 + pc->repeat_pict))
                    *pden /= 1 + pc->repeat_pict;
                else
                    *pnum *= 1 + pc->repeat_pict;
            }
            //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
            //Thus if we have no parser in such case leave duration undefined.
            if(st->codec->ticks_per_frame>1 && !pc){
                *pnum = *pden = 0;
            }
        }
        break;
    case AVMEDIA_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);
        if (frame_size <= 0 || st->codec->sample_rate <= 0)
            break;
        *pnum = frame_size;
        *pden = st->codec->sample_rate;
        break;
    default:
        break;
    }
}

/* Return 1 if the codec emits only intra (key) frames, 0 otherwise. */
static int is_intra_only(AVCodecContext *enc){
    if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
        return 1;
    }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
        switch(enc->codec_id){
        case CODEC_ID_MJPEG:
        case CODEC_ID_MJPEGB:
        case CODEC_ID_LJPEG:
        case CODEC_ID_PRORES:
        case CODEC_ID_RAWVIDEO:
        case CODEC_ID_DVVIDEO:
        case CODEC_ID_HUFFYUV:
        case CODEC_ID_FFVHUFF:
        case CODEC_ID_ASV1:
        case CODEC_ID_ASV2:
        case CODEC_ID_VCR1:
        case CODEC_ID_DNXHD:
        case CODEC_ID_JPEG2000:
            return 1;
        default: break;
        }
    }
    return 0;
}

/* Record the stream's first DTS once it becomes known and shift the
 * timestamps of already-buffered packets of this stream accordingly. */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
{
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->packet_buffer;

    /* only act the first time a valid dts shows up */
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
        return;

    st->first_dts= dts - st->cur_dts;
    st->cur_dts= dts;

    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != stream_index)
            continue;
        //FIXME think more about this check
        if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;

        if(pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;
    }
    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
}

/* Back-fill dts/pts/duration of buffered packets of pkt's stream once a
 * packet duration becomes known (e.g. after the first parsed frame). */
static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    AVPacketList *pktl= s->packet_buffer;
    int64_t cur_dts= 0;

    if(st->first_dts != AV_NOPTS_VALUE){
        /* walk back from first_dts over the timestamp-less leading packets */
        cur_dts= st->first_dts;
        for(; pktl; pktl= pktl->next){
            if(pktl->pkt.stream_index == pkt->stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                    break;
                cur_dts -= pkt->duration;
            }
        }
        pktl= s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts)
        return;

    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != pkt->stream_index)
            continue;
        if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
            cur_dts += pkt->duration;
            pktl->pkt.duration= pkt->duration;
        }else
            break;
    }
    if(st->first_dts == AV_NOPTS_VALUE)
        st->cur_dts= cur_dts;
}

/* Fill in missing pkt fields (pts/dts/duration/key flag) using the parser
 * state, stream time bases and heuristics. Order of the steps matters. */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
{
    int num, den, presentation_delayed, delay, i;
    int64_t offset;

    if (s->flags & AVFMT_FLAG_NOFILLIN)
        return;

    if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts= AV_NOPTS_VALUE;

    if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
        //FIXME Set low_delay = 0 when has_b_frames = 1
        st->codec->has_b_frames = 1;

    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;

    /* XXX: need has_b_frame, but cannot get it if the codec is
       not initialized */
    if (delay &&
        pc && pc->pict_type != AV_PICTURE_TYPE_B)
        presentation_delayed = 1;

    /* dts > pts with delayed streams usually means the dts wrapped around */
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
       /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
        pkt->dts -= 1LL<<st->pts_wrap_bits;
    }

    // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
    // we take the conservative approach and discard both
    // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
    if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
        av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
        pkt->dts= pkt->pts= AV_NOPTS_VALUE;
    }

    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
        if (den && num) {
            /* rescale num/den seconds into stream time base units */
            pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);

            if(pkt->duration != 0 && s->packet_buffer)
                update_initial_durations(s, st, pkt);
        }
    }

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;
        if(pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
    }

    if (pc && pc->dts_sync_point >= 0) {
        // we have synchronization info from the parser
        int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
        if (den > 0) {
            int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
            if (pkt->dts != AV_NOPTS_VALUE) {
                // got DTS from the stream, update reference timestamp
                st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            } else if (st->reference_dts != AV_NOPTS_VALUE) {
                // compute DTS based on reference timestamp
                pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
                pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
            }
            if (pc->dts_sync_point > 0)
                st->reference_dts = pkt->dts; // new reference
        }
    }

    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;

//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    /* interpolate PTS and DTS if they are not present */
    //We skip H264 currently because delay and has_b_frames are not reliably set
    if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;

            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
            if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
                /* detect one-frame pts jitter and snap pts to cur_dts grid */
                int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
                    pkt->pts += pkt->duration;
//                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
                }
            }

            /* presentation is not delayed : PTS and DTS are the same */
            if(pkt->pts ==
AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            pkt->dts = pkt->pts;
            if(pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + pkt->duration;
        }
    }

    if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
        /* keep a sorted window of recent pts values; the smallest one is the
         * dts candidate for streams with B-frame reordering */
        st->pts_buffer[0]= pkt->pts;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == CODEC_ID_H264){ // we skipped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        }
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
    }

//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);

    /* update flags */
    if(is_intra_only(st->codec))
        pkt->flags |= AV_PKT_FLAG_KEY;
    else if (pc) {
        pkt->flags = 0;
        /* keyframe computation */
        if (pc->key_frame == 1)
            pkt->flags |= AV_PKT_FLAG_KEY;
        else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I)
            pkt->flags |= AV_PKT_FLAG_KEY;
    }
    if (pc)
        pkt->convergence_duration = pc->convergence_duration;
}


/* Core demux loop: pull raw packets via av_read_packet(), run them through
 * the stream's parser when one is needed, and emit one parsed frame per
 * call with its timestamp fields filled in by compute_pkt_fields(). */
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st;
    int len, ret, i;

    av_init_packet(pkt);

    for(;;) {
        /* select current input stream component */
        st = s->cur_st;
        if (st) {
            if (!st->need_parsing || !st->parser) {
                /* no parsing needed: we just output the packet as is */
                /* raw data support */
                *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
                compute_pkt_fields(s, st, NULL, pkt);
                s->cur_st = NULL;
                if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                    (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                    ff_reduce_index(s, st->index);
                    av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
                }
                break;
            } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
                len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
                                       st->cur_ptr, st->cur_len,
                                       st->cur_pkt.pts, st->cur_pkt.dts,
                                       st->cur_pkt.pos);
                /* consume the timestamps so they are only fed to the parser once */
                st->cur_pkt.pts = AV_NOPTS_VALUE;
                st->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
                st->cur_ptr += len;
                st->cur_len -= len;

                /* return packet if any */
                if (pkt->size) {
                got_packet:
                    pkt->duration = 0;
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    pkt->pos = st->parser->pos;
                    if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
                        /* parser returned the input buffer unchanged: hand
                         * ownership of the original packet data to pkt */
                        s->cur_st = NULL;
                        pkt->destruct= st->cur_pkt.destruct;
                        st->cur_pkt.destruct= NULL;
                        st->cur_pkt.data = NULL;
                        assert(st->cur_len == 0);
                    }else{
                        /* data points into the parser's buffer; do not free */
                        pkt->destruct = NULL;
                    }
                    compute_pkt_fields(s, st, st->parser, pkt);

                    if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
                        ff_reduce_index(s, st->index);
                        av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                                           0, 0, AVINDEX_KEYFRAME);
                    }

                    break;
                }
            } else {
                /* free packet */
                av_free_packet(&st->cur_pkt);
                s->cur_st = NULL;
            }
        } else {
            AVPacket cur_pkt;
            /* read next packet */
            ret = av_read_packet(s, &cur_pkt);
            if (ret < 0) {
                if (ret == AVERROR(EAGAIN))
                    return ret;
                /* return the last frames, if any */
                for(i = 0; i < s->nb_streams; i++) {
                    st = s->streams[i];
                    if (st->parser && st->need_parsing) {
                        /* flush the parser with a NULL/0 input */
                        av_parser_parse2(st->parser, st->codec,
                                         &pkt->data, &pkt->size,
                                         NULL, 0,
                                         AV_NOPTS_VALUE, AV_NOPTS_VALUE,
                                         AV_NOPTS_VALUE);
                        if (pkt->size)
                            goto got_packet;
                    }
                }
                /* no more packets: really terminate parsing */
                return ret;
            }
            st = s->streams[cur_pkt.stream_index];
            st->cur_pkt= cur_pkt;

            if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
               st->cur_pkt.dts != AV_NOPTS_VALUE &&
               st->cur_pkt.pts < st->cur_pkt.dts){
                av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                       st->cur_pkt.stream_index,
                       st->cur_pkt.pts,
                       st->cur_pkt.dts,
                       st->cur_pkt.size);
//                av_free_packet(&st->cur_pkt);
//                return -1;
            }

            if(s->debug & FF_FDEBUG_TS)
                av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
                       st->cur_pkt.stream_index,
                       st->cur_pkt.pts,
                       st->cur_pkt.dts,
                       st->cur_pkt.size,
                       st->cur_pkt.duration,
                       st->cur_pkt.flags);

            s->cur_st = st;
            st->cur_ptr = st->cur_pkt.data;
            st->cur_len = st->cur_pkt.size;
            /* lazily create the parser on the first packet of the stream */
            if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
                st->parser = av_parser_init(st->codec->codec_id);
                if (!st->parser) {
                    /* no parser available: just output the raw packets */
                    st->need_parsing = AVSTREAM_PARSE_NONE;
                }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
                    st->parser->flags |= PARSER_FLAG_ONCE;
                }
            }
        }
    }
    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
               pkt->stream_index,
               pkt->pts,
               pkt->dts,
               pkt->size,
               pkt->duration,
               pkt->flags);

    return 0;
}

static int read_from_packet_buffer(AVFormatContext *s, AVPacket *pkt)
{
    AVPacketList *pktl = s->packet_buffer;
    av_assert0(pktl);
    *pkt = pktl->pkt;
    s->packet_buffer = pktl->next;
    av_freep(&pktl);
    return 0;
}

int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    int eof = 0;

    /* without GENPTS, just drain the buffer or read directly */
    if (!genpts)
        return s->packet_buffer ? read_from_packet_buffer(s, pkt) :
                                  read_frame_internal(s, pkt);

    for (;;) {
        int ret;
        AVPacketList *pktl = s->packet_buffer;

        if (pktl) {
            AVPacket *next_pkt = &pktl->pkt;

            /* try to fill in a missing PTS from the DTS of a later packet
             * of the same stream (valid for non-B-frames); comparisons are
             * done modulo 2^pts_wrap_bits to survive timestamp wraparound */
            if (next_pkt->dts != AV_NOPTS_VALUE) {
                int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
                while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
                    if (pktl->pkt.stream_index == next_pkt->stream_index &&
                        (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
                        av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
                        next_pkt->pts = pktl->pkt.dts;
                    }
                    pktl = pktl->next;
                }
                pktl = s->packet_buffer;
            }

            /* read packet from packet buffer, if there is data */
            if (!(next_pkt->pts == AV_NOPTS_VALUE &&
                  next_pkt->dts != AV_NOPTS_VALUE && !eof))
                return read_from_packet_buffer(s, pkt);
        }

        ret = read_frame_internal(s, pkt);
        if (ret < 0) {
            /* on EOF, loop once more to flush buffered packets whose PTS
             * could not be generated */
            if (pktl && ret != AVERROR(EAGAIN)) {
                eof = 1;
                continue;
            } else
                return ret;
        }

        if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                        &s->packet_buffer_end)) < 0)
            return AVERROR(ENOMEM);
    }
}

/* XXX: suppress the packet queue */
/** Free every packet queued in both the parsed and the raw packet buffers. */
static void flush_packet_queue(AVFormatContext *s)
{
    AVPacketList *pktl;

    for(;;) {
        pktl = s->packet_buffer;
        if (!pktl)
            break;
        s->packet_buffer = pktl->next;
        av_free_packet(&pktl->pkt);
        av_free(pktl);
    }
    while(s->raw_packet_buffer){
        pktl = s->raw_packet_buffer;
        s->raw_packet_buffer = pktl->next;
        av_free_packet(&pktl->pkt);
        av_free(pktl);
    }
    s->packet_buffer_end=
    s->raw_packet_buffer_end= NULL;
    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
}

/*******************************************************/
/* seek support */

int av_find_default_stream_index(AVFormatContext *s)
{
    int first_audio_index = -1;
    int i;
    AVStream *st;

    if (s->nb_streams <= 0)
        return -1;
    /* prefer the first video stream; fall back to the first audio stream,
     * then to stream 0 */
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            return i;
        }
        if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            first_audio_index = i;
    }
    return first_audio_index >= 0 ? first_audio_index : 0;
}

/**
 * Flush the frame reader: drop queued packets and reset all per-stream
 * parsing/timestamp state. Called before seeking.
 */
void ff_read_frame_flush(AVFormatContext *s)
{
    AVStream *st;
    int i, j;

    flush_packet_queue(s);

    s->cur_st = NULL;

    /* for each stream, reset read state */
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];

        if (st->parser) {
            av_parser_close(st->parser);
            st->parser = NULL;
            av_free_packet(&st->cur_pkt);
        }
        st->last_IP_pts = AV_NOPTS_VALUE;
        st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
        st->reference_dts = AV_NOPTS_VALUE;
        /* fail safe */
        st->cur_ptr = NULL;
        st->cur_len = 0;

        st->probe_packets = MAX_PROBE_PACKETS;

        for(j=0; j<MAX_REORDER_DELAY+1; j++)
            st->pts_buffer[j]= AV_NOPTS_VALUE;
    }
}

#if FF_API_SEEK_PUBLIC
/** Deprecated public wrapper around ff_update_cur_dts(). */
void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
{
    ff_update_cur_dts(s, ref_st, timestamp);
}
#endif

/**
 * Set cur_dts on every stream to @p timestamp (given in ref_st's time base),
 * rescaled into each stream's own time base.
 */
void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
{
    int i;

    for(i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

        st->cur_dts = av_rescale(timestamp,
                                 st->time_base.den * (int64_t)ref_st->time_base.num,
                                 st->time_base.num * (int64_t)ref_st->time_base.den);
    }
}

/**
 * Halve the index of a stream if it has grown past the limit implied by
 * s->max_index_size, keeping every second entry.
 */
void ff_reduce_index(AVFormatContext *s, int stream_index)
{
    AVStream *st= s->streams[stream_index];
    unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);

    if((unsigned)st->nb_index_entries >= max_entries){
        int i;
        for(i=0; 2*i<st->nb_index_entries; i++)
            st->index_entries[i]= st->index_entries[2*i];
        st->nb_index_entries= i;
    }
}

/**
 * Insert an index entry, keeping the array sorted by timestamp.
 * An existing entry with the same timestamp is overwritten.
 *
 * @return the index of the (new or updated) entry, or -1 on failure
 *         (allocation failure, overflow, or a conflicting earlier entry).
 */
int ff_add_index_entry(AVIndexEntry **index_entries,
                       int *nb_index_entries,
                       unsigned int *index_entries_allocated_size,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
{
    AVIndexEntry *entries, *ie;
    int index;

    /* guard against overflow of the byte size computed below */
    if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
        return -1;

    entries = av_fast_realloc(*index_entries,
                              index_entries_allocated_size,
                              (*nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    if(!entries)
        return -1;

    *index_entries= entries;

    index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);

    if(index<0){
        /* timestamp is larger than everything present: append */
        index= (*nb_index_entries)++;
        ie= &entries[index];
        assert(index==0 || ie[-1].timestamp < timestamp);
    }else{
        ie= &entries[index];
        if(ie->timestamp != timestamp){
            if(ie->timestamp <= timestamp)
                return -1;
            /* shift the tail up to make room at `index` */
            memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
            (*nb_index_entries)++;
        }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
            distance= ie->min_distance;
    }

    ie->pos = pos;
    ie->timestamp = timestamp;
    ie->min_distance= distance;
    ie->size= size;
    ie->flags = flags;

    return index;
}

int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int size, int distance, int flags)
{
    return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
                              &st->index_entries_allocated_size, pos,
                              timestamp, size, distance, flags);
}

/**
 * Binary-search the sorted index for wanted_timestamp.
 * AVSEEK_FLAG_BACKWARD selects the entry at or before the timestamp,
 * otherwise the entry at or after it; without AVSEEK_FLAG_ANY the result
 * is additionally moved to the nearest keyframe entry in that direction.
 *
 * @return the entry index, or -1 if no suitable entry exists
 */
int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
                              int64_t wanted_timestamp, int flags)
{
    int a, b, m;
    int64_t timestamp;

    a = - 1;
    b = nb_entries;

    //optimize appending index entries at the end
    if(b && entries[b-1].timestamp < wanted_timestamp)
        a= b-1;

    while (b - a > 1) {
        m = (a + b) >> 1;
        timestamp = entries[m].timestamp;
        if(timestamp >= wanted_timestamp)
            b = m;
        if(timestamp <= wanted_timestamp)
            a = m;
    }
    m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;

    if(!(flags & AVSEEK_FLAG_ANY)){
        /* walk to the nearest keyframe entry in the seek direction */
        while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
            m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
        }
    }

    if(m == nb_entries)
        return -1;
    return m;
}

int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
                              int flags)
{
    return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
                                     wanted_timestamp, flags);
}

#if FF_API_SEEK_PUBLIC
/** Deprecated public wrapper around ff_seek_frame_binary(). */
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
    return ff_seek_frame_binary(s, stream_index, target_ts, flags);
}
#endif

/**
 * Seek to target_ts (in the stream's time base) using the format's
 * read_timestamp() callback and a binary search, seeded with any cached
 * index entries bracketing the target.
 *
 * @return 0 on success, negative on failure
 */
int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
{
    AVInputFormat *avif= s->iformat;
    int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    int index;
    int64_t ret;
    AVStream *st;

    if (stream_index < 0)
        return -1;

    av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);

    ts_max=
    ts_min= AV_NOPTS_VALUE;
    pos_limit= -1; //gcc falsely says it may be uninitialized

    st= s->streams[stream_index];
    if(st->index_entries){
        AVIndexEntry *e;

        /* seed the search interval with cached index entries on either side
         * of the target */
        index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
        index= FFMAX(index, 0);
        e= &st->index_entries[index];

        if(e->timestamp <= target_ts || e->pos == e->min_distance){
            pos_min= e->pos;
            ts_min= e->timestamp;
            av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
                    pos_min,ts_min);
        }else{
            assert(index==0);
        }

        index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
        assert(index < st->nb_index_entries);
        if(index >= 0){
            e= &st->index_entries[index];
            assert(e->timestamp >= target_ts);
            pos_max= e->pos;
            ts_max= e->timestamp;
            pos_limit= pos_max - e->min_distance;
            av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
                    pos_max,pos_limit, ts_max);
        }
    }

    pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
    if(pos<0)
        return -1;

    /* do the seek */
    if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
        return ret;

    ff_update_cur_dts(s, st, ts);

    return 0;
}

#if FF_API_SEEK_PUBLIC
/** Deprecated public wrapper around ff_gen_search(). */
int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
                      int64_t pos_min, int64_t pos_max, int64_t pos_limit,
                      int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
                      int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
{
    return ff_gen_search(s, stream_index, target_ts, pos_min, pos_max,
                         pos_limit, ts_min, ts_max, flags, ts_ret,
                         read_timestamp);
}
#endif

/**
 * Generic timestamp-based seek: narrow [pos_min, pos_max] around target_ts
 * using interpolation first, then bisection, then linear scan, probing the
 * file with read_timestamp(). AV_NOPTS_VALUE bounds are discovered by
 * probing the start/end of the file.
 *
 * @param ts_ret receives the timestamp actually found at the returned pos
 * @return the byte position to seek to, or -1 on failure
 */
int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
                      int64_t pos_min, int64_t pos_max, int64_t pos_limit,
                      int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
                      int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
{
    int64_t pos, ts;
    int64_t start_pos, filesize;
    int no_change;

    av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);

    if(ts_min == AV_NOPTS_VALUE){
        /* lower bound unknown: take the first timestamp in the file */
        pos_min = s->data_offset;
        ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
        if (ts_min == AV_NOPTS_VALUE)
            return -1;
    }

    if(ts_max == AV_NOPTS_VALUE){
        /* upper bound unknown: probe backwards from EOF with doubling steps
         * until a timestamp is found, then advance to the last one */
        int step= 1024;
        filesize = avio_size(s->pb);
        pos_max = filesize - 1;
        do{
            pos_max -= step;
            ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
            step += step;
        }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
        if (ts_max == AV_NOPTS_VALUE)
            return -1;

        for(;;){
            int64_t tmp_pos= pos_max + 1;
            int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
            if(tmp_ts == AV_NOPTS_VALUE)
                break;
            ts_max= tmp_ts;
            pos_max= tmp_pos;
            if(tmp_pos >= filesize)
                break;
        }
        pos_limit= pos_max;
    }

    if(ts_min > ts_max){
        return -1;
    }else if(ts_min == ts_max){
        pos_limit= pos_min;
    }

    no_change=0;
    while (pos_min < pos_limit) {
        av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
                pos_min, pos_max, ts_min, ts_max);
        assert(pos_limit <= pos_max);

        if(no_change==0){
            int64_t approximate_keyframe_distance= pos_max - pos_limit;
            // interpolate position (better than dichotomy)
            pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
                + pos_min - approximate_keyframe_distance;
        }else if(no_change==1){
            // bisection, if interpolation failed to change min or max pos last time
            pos = (pos_min + pos_limit)>>1;
        }else{
            /* linear search if bisection failed, can only happen if there
               are very few or no keyframes between min/max */
            pos=pos_min;
        }
        if(pos <= pos_min)
            pos= pos_min + 1;
        else if(pos > pos_limit)
            pos= pos_limit;
        start_pos= pos;

        ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
        if(pos == pos_max)
            no_change++;
        else
            no_change=0;
        av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
                pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
                pos_limit, start_pos, no_change);
        if(ts == AV_NOPTS_VALUE){
            av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
            return -1;
        }
        assert(ts != AV_NOPTS_VALUE);
        /* shrink whichever side of the interval the probe landed on;
         * an exact hit shrinks both */
        if (target_ts <= ts) {
            pos_limit = start_pos - 1;
            pos_max = pos;
            ts_max = ts;
        }
        if (target_ts >= ts) {
            pos_min = pos;
            ts_min = ts;
        }
    }

    pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
    ts  = (flags & AVSEEK_FLAG_BACKWARD) ?
ts_min : ts_max;
    /* debug re-read of the bracketing timestamps around the chosen pos */
    pos_min = pos;
    ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    pos_min++;
    ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
            pos, ts_min, target_ts, ts_max);
    *ts_ret= ts;
    return pos;
}

/** Seek to a byte position, clamped to [data_offset, filesize-1]. */
static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
    int64_t pos_min, pos_max;
#if 0
    AVStream *st;

    if (stream_index < 0)
        return -1;

    st= s->streams[stream_index];
#endif

    pos_min = s->data_offset;
    pos_max = avio_size(s->pb) - 1;

    if     (pos < pos_min) pos= pos_min;
    else if(pos > pos_max) pos= pos_max;

    avio_seek(s->pb, pos, SEEK_SET);

#if 0
    av_update_cur_dts(s, st, ts);
#endif
    return 0;
}

/**
 * Seek using the stream's index; if the target lies beyond the indexed
 * range, read packets forward (letting AVFMT_GENERIC_INDEX formats grow the
 * index) until the target is covered, then retry the index lookup.
 */
static int seek_frame_generic(AVFormatContext *s,
                              int stream_index, int64_t timestamp, int flags)
{
    int index;
    int64_t ret;
    AVStream *st;
    AVIndexEntry *ie;

    st = s->streams[stream_index];

    index = av_index_search_timestamp(st, timestamp, flags);

    if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
        return -1;

    if(index < 0 || index==st->nb_index_entries-1){
        AVPacket pkt;

        if(st->nb_index_entries){
            assert(st->index_entries);
            ie= &st->index_entries[st->nb_index_entries-1];
            if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
                return ret;
            ff_update_cur_dts(s, st, ie->timestamp);
        }else{
            if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
                return ret;
        }
        /* read forward until a keyframe past the target extends the index */
        for (;;) {
            int read_status;
            do{
                read_status = av_read_frame(s, &pkt);
            } while (read_status == AVERROR(EAGAIN));
            if (read_status < 0)
                break;
            av_free_packet(&pkt);
            if(stream_index == pkt.stream_index){
                if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
                    break;
            }
        }
        index = av_index_search_timestamp(st, timestamp, flags);
    }
    if (index < 0)
        return -1;

    ff_read_frame_flush(s);
    if (s->iformat->read_seek){
        if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
            return 0;
    }
    ie = &st->index_entries[index];
    if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
        return ret;
    ff_update_cur_dts(s, st, ie->timestamp);

    return 0;
}

int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    int ret;
    AVStream *st;

    if (flags & AVSEEK_FLAG_BYTE) {
        if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
            return -1;
        ff_read_frame_flush(s);
        return seek_frame_byte(s, stream_index, timestamp, flags);
    }

    if(stream_index < 0){
        stream_index= av_find_default_stream_index(s);
        if(stream_index < 0)
            return -1;

        st= s->streams[stream_index];
        /* timestamp for default must be expressed in AV_TIME_BASE units */
        timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
    }

    /* first, we try the format specific seek */
    if (s->iformat->read_seek) {
        ff_read_frame_flush(s);
        ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
    } else
        ret = -1;
    if (ret >= 0) {
        return 0;
    }

    /* then binary search via read_timestamp(), then the index-based seek */
    if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
        ff_read_frame_flush(s);
        return ff_seek_frame_binary(s, stream_index, timestamp, flags);
    } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
        ff_read_frame_flush(s);
        return seek_frame_generic(s, stream_index, timestamp, flags);
    }
    else
        return -1;
}

int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
    if(min_ts > ts || max_ts < ts)
        return -1;

    if (s->iformat->read_seek2) {
        ff_read_frame_flush(s);
        return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
    }

    if(s->iformat->read_timestamp){
        //try to seek via read_timestamp()
    }

    //Fallback to old API if new is not implemented but old is
    //Note the old has somewhat different semantics
    if(s->iformat->read_seek || 1)
        return av_seek_frame(s, stream_index, ts, flags | ((uint64_t)ts - min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0));

    // try some generic seek like seek_frame_generic() but with new ts semantics
}

/*******************************************************/

/**
 * Return TRUE if the stream has accurate duration in any stream.
 *
 * @return TRUE if the stream has accurate duration for at least one component.
 */
static int has_duration(AVFormatContext *ic)
{
    int i;
    AVStream *st;

    for(i = 0;i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->duration != AV_NOPTS_VALUE)
            return 1;
    }
    return 0;
}

/**
 * Estimate the stream timings from the one of each components.
 *
 * Also computes the global bitrate if possible.
 */
static void update_stream_timings(AVFormatContext *ic)
{
    int64_t start_time, start_time1, end_time, end_time1;
    int64_t duration, duration1, filesize;
    int i;
    AVStream *st;

    start_time = INT64_MAX;
    end_time = INT64_MIN;
    duration = INT64_MIN;
    for(i = 0;i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
            start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
            start_time = FFMIN(start_time, start_time1);
            if (st->duration != AV_NOPTS_VALUE) {
                end_time1 = start_time1
                          + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
                end_time = FFMAX(end_time, end_time1);
            }
        }
        if (st->duration != AV_NOPTS_VALUE) {
            duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
            duration = FFMAX(duration, duration1);
        }
    }
    if (start_time != INT64_MAX) {
        ic->start_time = start_time;
        if (end_time != INT64_MIN)
            duration = FFMAX(duration, end_time - start_time);
    }
    if (duration != INT64_MIN) {
        ic->duration = duration;
        if (ic->pb && (filesize = avio_size(ic->pb)) > 0) {
            /* compute the bitrate */
            ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
                (double)ic->duration;
        }
    }
}

/**
 * Fill in missing per-stream start_time/duration from the container-level
 * values computed by update_stream_timings().
 */
static void fill_all_stream_timings(AVFormatContext *ic)
{
    int i;
    AVStream *st;

    update_stream_timings(ic);
    for(i = 0;i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time == AV_NOPTS_VALUE) {
            if(ic->start_time != AV_NOPTS_VALUE)
                st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
            if(ic->duration != AV_NOPTS_VALUE)
                st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
        }
    }
}

/**
 * Estimate durations from the total bitrate (sum of the per-stream codec
 * bitrates) and the file size. Least accurate fallback.
 */
static void estimate_timings_from_bit_rate(AVFormatContext *ic)
{
    int64_t filesize, duration;
    int bit_rate, i;
    AVStream *st;

    /* if bit_rate is already set, we believe it */
    if (ic->bit_rate <= 0) {
        bit_rate = 0;
        for(i=0;i<ic->nb_streams;i++) {
            st = ic->streams[i];
            if (st->codec->bit_rate > 0)
                bit_rate += st->codec->bit_rate;
        }
        ic->bit_rate = bit_rate;
    }

    /* if duration is already set, we believe it */
    if (ic->duration == AV_NOPTS_VALUE &&
        ic->bit_rate != 0) {
        filesize = ic->pb ? avio_size(ic->pb) : 0;
        if (filesize > 0) {
            for(i = 0; i < ic->nb_streams; i++) {
                st = ic->streams[i];
                duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
                if (st->duration == AV_NOPTS_VALUE)
                    st->duration = duration;
            }
        }
    }
}

#define DURATION_MAX_READ_SIZE 250000
#define DURATION_MAX_RETRY 3

/* only usable for MPEG-PS streams */
/**
 * Estimate durations by reading packets from near the end of the file and
 * taking the highest PTS seen, retrying with a larger tail window up to
 * DURATION_MAX_RETRY times. Restores the file position to old_offset.
 */
static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
{
    AVPacket pkt1, *pkt = &pkt1;
    AVStream *st;
    int read_size, i, ret;
    int64_t end_time;
    int64_t filesize, offset, duration;
    int retry=0;

    ic->cur_st = NULL;

    /* flush packet queue */
    flush_packet_queue(ic);

    for (i=0; i<ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
            av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");

        if (st->parser) {
            av_parser_close(st->parser);
            st->parser= NULL;
            av_free_packet(&st->cur_pkt);
        }
    }

    /* estimate the end time (duration) */
    /* XXX: may need to support wrapping */
    filesize = ic->pb ? avio_size(ic->pb) : 0;
    end_time = AV_NOPTS_VALUE;
    do{
        offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
        if (offset < 0)
            offset = 0;

        avio_seek(ic->pb, offset, SEEK_SET);
        read_size = 0;
        for(;;) {
            if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
                break;

            do {
                ret = av_read_packet(ic, pkt);
            } while(ret == AVERROR(EAGAIN));
            if (ret != 0)
                break;
            read_size += pkt->size;
            st = ic->streams[pkt->stream_index];
            if (pkt->pts != AV_NOPTS_VALUE &&
                (st->start_time != AV_NOPTS_VALUE ||
                 st->first_dts  != AV_NOPTS_VALUE)) {
                duration = end_time = pkt->pts;
                if (st->start_time != AV_NOPTS_VALUE)
                    duration -= st->start_time;
                else
                    duration -= st->first_dts;
                /* compensate a single timestamp wraparound */
                if (duration < 0)
                    duration += 1LL<<st->pts_wrap_bits;
                if (duration > 0) {
                    if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
                        st->duration = duration;
                }
            }
            av_free_packet(pkt);
        }
    }while(   end_time==AV_NOPTS_VALUE
           && filesize > (DURATION_MAX_READ_SIZE<<retry)
           && ++retry <= DURATION_MAX_RETRY);

    fill_all_stream_timings(ic);

    avio_seek(ic->pb, old_offset, SEEK_SET);
    /* reset per-stream timestamp state disturbed by the tail reads */
    for (i=0; i<ic->nb_streams; i++) {
        st= ic->streams[i];
        st->cur_dts= st->first_dts;
        st->last_IP_pts = AV_NOPTS_VALUE;
        st->reference_dts = AV_NOPTS_VALUE;
    }
}

/** Pick the best available duration-estimation strategy and apply it. */
static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
{
    int64_t file_size;

    /* get the file size, if possible */
    if (ic->iformat->flags & AVFMT_NOFILE) {
        file_size = 0;
    } else {
        file_size = avio_size(ic->pb);
        file_size = FFMAX(0, file_size);
    }

    if ((!strcmp(ic->iformat->name, "mpeg") ||
         !strcmp(ic->iformat->name, "mpegts")) &&
        file_size && ic->pb->seekable) {
        /* get accurate estimate from the PTSes */
        estimate_timings_from_pts(ic, old_offset);
    } else if
(has_duration(ic)) { 2079 /* at least one component has timings - we use them for all 2080 the components */ 2081 fill_all_stream_timings(ic); 2082 } else { 2083 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n"); 2084 /* less precise: use bitrate info */ 2085 estimate_timings_from_bit_rate(ic); 2086 } 2087 update_stream_timings(ic); 2088 2089 { 2090 int i; 2091 AVStream av_unused *st; 2092 for(i = 0;i < ic->nb_streams; i++) { 2093 st = ic->streams[i]; 2094 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i, 2095 (double) st->start_time / AV_TIME_BASE, 2096 (double) st->duration / AV_TIME_BASE); 2097 } 2098 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n", 2099 (double) ic->start_time / AV_TIME_BASE, 2100 (double) ic->duration / AV_TIME_BASE, 2101 ic->bit_rate / 1000); 2102 } 2103} 2104 2105static int has_codec_parameters(AVCodecContext *avctx) 2106{ 2107 int val; 2108 switch (avctx->codec_type) { 2109 case AVMEDIA_TYPE_AUDIO: 2110 val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE; 2111 if (!avctx->frame_size && 2112 (avctx->codec_id == CODEC_ID_VORBIS || 2113 avctx->codec_id == CODEC_ID_AAC || 2114 avctx->codec_id == CODEC_ID_MP1 || 2115 avctx->codec_id == CODEC_ID_MP2 || 2116 avctx->codec_id == CODEC_ID_MP3 || 2117 avctx->codec_id == CODEC_ID_CELT)) 2118 return 0; 2119 break; 2120 case AVMEDIA_TYPE_VIDEO: 2121 val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE; 2122 break; 2123 default: 2124 val = 1; 2125 break; 2126 } 2127 return avctx->codec_id != CODEC_ID_NONE && val != 0; 2128} 2129 2130static int has_decode_delay_been_guessed(AVStream *st) 2131{ 2132 return st->codec->codec_id != CODEC_ID_H264 || 2133 st->info->nb_decoded_frames >= 6; 2134} 2135 2136/* returns 1 or 0 if or if not decoded data was returned, or a negative error */ 2137static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options) 2138{ 2139 AVCodec *codec; 2140 int 
got_picture = 1, ret = 0;
    AVFrame picture;
    AVPacket pkt = *avpkt; /* local copy; data/size are advanced as we consume */

    if (!avcodec_is_open(st->codec)) {
        AVDictionary *thread_opt = NULL;

        codec = st->codec->codec ? st->codec->codec :
                avcodec_find_decoder(st->codec->codec_id);

        if (!codec)
            return -1;

        /* force thread count to 1 since the h264 decoder will not extract SPS
         * and PPS to extradata during multi-threaded decoding */
        av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
        ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
        if (!options)
            av_dict_free(&thread_opt);
        if (ret < 0)
            return ret;
    }

    /* keep decoding until the stream parameters and the decode delay are
       known; a NULL-data packet (flush) is re-fed while frames keep coming */
    while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
           ret >= 0 &&
           (!has_codec_parameters(st->codec) ||
            !has_decode_delay_been_guessed(st) ||
            (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
        got_picture = 0;
        avcodec_get_frame_defaults(&picture);
        switch(st->codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            ret = avcodec_decode_video2(st->codec, &picture,
                                        &got_picture, &pkt);
            break;
        case AVMEDIA_TYPE_AUDIO:
            ret = avcodec_decode_audio4(st->codec, &picture, &got_picture, &pkt);
            break;
        default:
            break;
        }
        if (ret >= 0) {
            if (got_picture)
                st->info->nb_decoded_frames++;
            pkt.data += ret;
            pkt.size -= ret;
            ret = got_picture;
        }
    }
    return ret;
}

/** Look up the container tag for a codec id in a single tag table; 0 if absent. */
unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
{
    while (tags->id != CODEC_ID_NONE) {
        if (tags->id == id)
            return tags->tag;
        tags++;
    }
    return 0;
}

/**
 * Look up the codec id for a container tag in a single tag table.
 * A case-sensitive match is preferred; a second pass compares the
 * uppercased fourccs so e.g. "h264" also matches "H264".
 */
enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
{
    int i;
    for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
        if(tag == tags[i].tag)
            return tags[i].id;
    }
    for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
        if
(avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
            return tags[i].id;
    }
    return CODEC_ID_NONE;
}

/** Search a NULL-terminated list of tag tables for the tag of a codec id. */
unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
{
    int i;
    for(i=0; tags && tags[i]; i++){
        int tag= ff_codec_get_tag(tags[i], id);
        if(tag) return tag;
    }
    return 0;
}

/** Search a NULL-terminated list of tag tables for the codec id of a tag. */
enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
{
    int i;
    for(i=0; tags && tags[i]; i++){
        enum CodecID id= ff_codec_get_id(tags[i], tag);
        if(id!=CODEC_ID_NONE) return id;
    }
    return CODEC_ID_NONE;
}

/**
 * Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the nearest following chapter, or at the end of the file
 * (start_time + duration) if none follows.
 */
static void compute_chapters_end(AVFormatContext *s)
{
    unsigned int i, j;
    int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);

    for (i = 0; i < s->nb_chapters; i++)
        if (s->chapters[i]->end == AV_NOPTS_VALUE) {
            AVChapter *ch = s->chapters[i];
            int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
                                   : INT64_MAX;

            for (j = 0; j < s->nb_chapters; j++) {
                AVChapter *ch1 = s->chapters[j];
                int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
                if (j != i && next_start > ch->start && next_start < end)
                    end = next_start;
            }
            /* no later chapter and no known file end: collapse to zero length */
            ch->end = (end == INT64_MAX) ? ch->start : end;
        }
}

/* map index i to a candidate standard framerate, scaled by 1001*12 so that
   both 1001-based (NTSC) and 1000-based rates are exact integers */
static int get_std_framerate(int i){
    if(i<60*12) return i*1001;
    else        return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
}

/*
 * Is the time base unreliable.
 * This is a heuristic to balance between quick acceptance of the values in
 * the headers vs. some extra checks.
 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
 * MPEG-2 commonly misuses field repeat flags to store different framerates.
 * And there are "variable" fps files this needs to detect as well.
 */
static int tb_unreliable(AVCodecContext *c){
    if(   c->time_base.den >= 101L*c->time_base.num
       || c->time_base.den <    5L*c->time_base.num
/*       || c->codec_tag == AV_RL32("DIVX")
       || c->codec_tag == AV_RL32("XVID")*/
       || c->codec_id == CODEC_ID_MPEG2VIDEO
       || c->codec_id == CODEC_ID_H264
       )
        return 1;
    return 0;
}

#if FF_API_FORMAT_PARAMETERS
/** Deprecated wrapper kept for ABI compatibility; use avformat_find_stream_info(). */
int av_find_stream_info(AVFormatContext *ic)
{
    return avformat_find_stream_info(ic, NULL);
}
#endif

/**
 * Probe the input by reading and (when necessary) decoding packets until
 * every stream has usable codec parameters, or a probe limit
 * (ic->probesize / ic->max_analyze_duration) is hit.
 *
 * @param ic      demuxer context with the header already read
 * @param options per-stream dictionaries passed to the decoders that are
 *                opened during probing; may be NULL
 * @return >=0 on success, a negative error code on failure
 */
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
{
    int i, count, ret, read_size, j;
    AVStream *st;
    AVPacket pkt1, *pkt;
    int64_t old_offset = avio_tell(ic->pb);
    int orig_nb_streams = ic->nb_streams;        // new streams might appear, no options for those

    /* pass 1: set up parsers and try to open decoders per stream */
    for(i=0;i<ic->nb_streams;i++) {
        AVCodec *codec;
        AVDictionary *thread_opt = NULL;
        st = ic->streams[i];

        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
            st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
/*            if(!st->time_base.num)
                st->time_base= */
            if(!st->codec->time_base.num)
                st->codec->time_base= st->time_base;
        }
        //only for the split stuff
        if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
            if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            }
        }
        codec = st->codec->codec ? st->codec->codec :
                avcodec_find_decoder(st->codec->codec_id);

        /* force thread count to 1 since the h264 decoder will not extract SPS
         * and PPS to extradata during multi-threaded decoding */
        av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);

        /* Ensure that subtitle_header is properly set. */
        if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
            && codec && !st->codec->codec)
            avcodec_open2(st->codec, codec, options ? &options[i]
                          : &thread_opt);

        //try to just open decoders, in case this is enough to get parameters
        if(!has_codec_parameters(st->codec)){
            if (codec && !st->codec->codec)
                avcodec_open2(st->codec, codec, options ? &options[i]
                              : &thread_opt);
        }
        if (!options)
            av_dict_free(&thread_opt);
    }

    for (i=0; i<ic->nb_streams; i++) {
        ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
    }

    count = 0;
    read_size = 0;
    /* pass 2: main probe loop - read packets until all info is found or a
       limit is reached */
    for(;;) {
        if (ff_check_interrupt(&ic->interrupt_callback)){
            ret= AVERROR_EXIT;
            av_log(ic, AV_LOG_DEBUG, "interrupted\n");
            break;
        }

        /* check if one codec still needs to be handled */
        for(i=0;i<ic->nb_streams;i++) {
            int fps_analyze_framecount = 20;

            st = ic->streams[i];
            if (!has_codec_parameters(st->codec))
                break;
            /* if the timebase is coarse (like the usual millisecond precision
               of mkv), we need to analyze more frames to reliably arrive at
               the correct fps */
            if (av_q2d(st->time_base) > 0.0005)
                fps_analyze_framecount *= 2;
            if (ic->fps_probe_size >= 0)
                fps_analyze_framecount = ic->fps_probe_size;
            /* variable fps and no guess at the real fps */
            if(   tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
               && st->info->duration_count < fps_analyze_framecount
               && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
                break;
            if(st->parser && st->parser->parser->split && !st->codec->extradata)
                break;
            if(st->first_dts == AV_NOPTS_VALUE)
                break;
        }
        if (i == ic->nb_streams) {
            /* NOTE: if the format has no header, then we need to read
               some packets to get most of the streams, so we cannot
               stop here */
            if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
                /* if we found the info for all the codecs, we can stop */
                ret = count;
                av_log(ic, AV_LOG_DEBUG, "All info found\n");
                break;
            }
        }
        /* we did not get all the codec info, but we read too much data */
        if (read_size >= ic->probesize) {
            ret = count;
            av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
            break;
        }

        /* NOTE: a new stream can be added there if no header in file
           (AVFMTCTX_NOHEADER) */
        ret = read_frame_internal(ic, &pkt1);
        if (ret == AVERROR(EAGAIN))
            continue;

        if (ret < 0) {
            /* EOF or error*/
            AVPacket empty_pkt = { 0 };
            int err;
            av_init_packet(&empty_pkt);

            ret = -1; /* we could not have all the codec parameters before EOF */
            for(i=0;i<ic->nb_streams;i++) {
                st = ic->streams[i];

                /* flush the decoders */
                do {
                    err = try_decode_frame(st, &empty_pkt,
                                           (options && i < orig_nb_streams) ?
                                           &options[i] : NULL);
                } while (err > 0 && !has_codec_parameters(st->codec));

                if (err < 0) {
                    av_log(ic, AV_LOG_WARNING,
                           "decoding for stream %d failed\n", st->index);
                } else if (!has_codec_parameters(st->codec)){
                    char buf[256];
                    avcodec_string(buf, sizeof(buf), st->codec, 0);
                    av_log(ic, AV_LOG_WARNING,
                           "Could not find codec parameters (%s)\n", buf);
                } else {
                    ret = 0;
                }
            }
            break;
        }

        /* buffer the packet so a later av_read_frame() can return it */
        pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
        if ((ret = av_dup_packet(pkt)) < 0)
            goto find_stream_info_err;

        read_size += pkt->size;

        st = ic->streams[pkt->stream_index];
        if (st->codec_info_nb_frames>1) {
            if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
                av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
                break;
            }
            st->info->codec_info_duration += pkt->duration;
        }
        /* collect dts-delta statistics used below to guess the framerate */
        {
            int64_t last = st->info->last_dts;

            if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){
                int64_t duration= pkt->dts - last;
                double dur= duration * av_q2d(st->time_base);

//                if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
//                    av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
                if (st->info->duration_count < 2)
                    memset(st->info->duration_error, 0, sizeof(st->info->duration_error));
                for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) {
                    int framerate= get_std_framerate(i);
                    int ticks= lrintf(dur*framerate/(1001*12));
                    double error = dur - (double)ticks*1001*12 / framerate;
                    st->info->duration_error[i] += error*error;
                }
                st->info->duration_count++;
                // ignore the first 4 values, they might have some random jitter
                if (st->info->duration_count > 3)
                    st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
            }
            if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1)
                st->info->last_dts = pkt->dts;
        }
        if(st->parser && st->parser->parser->split && !st->codec->extradata){
            int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
            if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
                st->codec->extradata_size= i;
                st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                /* NOTE(review): this early return skips the
                   find_stream_info_err cleanup below (thread_count reset and
                   st->info release) - confirm this is intended */
                if (!st->codec->extradata)
                    return AVERROR(ENOMEM);
                memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
                memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
            }
        }

        /* if still no information, we try to open the codec and to
           decompress the frame. We try to avoid that in most cases as
           it takes longer and uses more memory. For MPEG-4, we need to
           decompress for QuickTime.

           If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
           least one frame of codec data, this makes sure the codec initializes
           the channel configuration and does not only trust the values from the container.
        */
        try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);

        st->codec_info_nb_frames++;
        count++;
    }

    // close codecs which were opened in try_decode_frame()
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        avcodec_close(st->codec);
    }
    /* pass 3: derive avg_frame_rate / r_frame_rate and per-type defaults */
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
            av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
                      (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
                      st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            // the check for tb_unreliable() is not completely correct, since this is not about handling
            // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
            // ipmovie.c produces.
            if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num)
                av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
            if (st->info->duration_count && !st->r_frame_rate.num
               && tb_unreliable(st->codec) /*&&
               //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
               st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){
                /* pick the standard framerate with the smallest accumulated
                   dts-delta error */
                int num = 0;
                double best_error= 2*av_q2d(st->time_base);
                best_error = best_error*best_error*st->info->duration_count*1000*12*30;

                for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) {
                    double error = st->info->duration_error[j] * get_std_framerate(j);
//                    if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
//                        av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
                    if(error < best_error){
                        best_error= error;
                        num = get_std_framerate(j);
                    }
                }
                // do not increase frame rate by more than 1 % in order to match a standard rate.
                if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
                    av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
            }

            if (!st->r_frame_rate.num){
                /* fall back to the coarser of codec time base (per-frame,
                   accounting for ticks_per_frame) and stream time base */
                if(    st->codec->time_base.den * (int64_t)st->time_base.num
                    <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
                    st->r_frame_rate.num = st->codec->time_base.den;
                    st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
                }else{
                    st->r_frame_rate.num = st->time_base.den;
                    st->r_frame_rate.den = st->time_base.num;
                }
            }
        }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if(!st->codec->bits_per_coded_sample)
                st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
            // set stream disposition based on audio service type
            switch (st->codec->audio_service_type) {
            case AV_AUDIO_SERVICE_TYPE_EFFECTS:
                st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;    break;
            case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
                st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;  break;
            case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
                st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
            case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
                st->disposition = AV_DISPOSITION_COMMENT;          break;
            case AV_AUDIO_SERVICE_TYPE_KARAOKE:
                st->disposition = AV_DISPOSITION_KARAOKE;          break;
            }
        }
    }

    estimate_timings(ic, old_offset);

    compute_chapters_end(ic);

#if 0
    /* correct DTS for B-frame streams with no timestamps */
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if(b-frames){
                ppktl = &ic->packet_buffer;
                while(ppkt1){
                    if(ppkt1->stream_index != i)
                        continue;
                    if(ppkt1->pkt->dts < 0)
                        break;
                    if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
                        break;
                    ppkt1->pkt->dts -= delta;
                    ppkt1= ppkt1->next;
                }
                if(ppkt1)
                    continue;
                st->cur_dts -= delta;
            }
        }
    }
#endif

 find_stream_info_err:
    for (i=0; i < ic->nb_streams; i++) {
        if (ic->streams[i]->codec)
            ic->streams[i]->codec->thread_count = 0;
        av_freep(&ic->streams[i]->info);
    }
    return ret;
}

/** Return the program that contains stream index s, or NULL if none does. */
static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
{
    int i, j;

    for (i = 0; i < ic->nb_programs; i++)
        for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
            if (ic->programs[i]->stream_index[j] == s)
                return ic->programs[i];
    return NULL;
}

int av_find_best_stream(AVFormatContext *ic,
                        enum AVMediaType type,
                        int wanted_stream_nb,
                        int related_stream,
                        AVCodec **decoder_ret,
                        int flags)
{
    int i, nb_streams = ic->nb_streams;
    int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
    unsigned *program = NULL;
    AVCodec *decoder = NULL, *best_decoder = NULL;

    /* restrict the search to the related stream's program, if any */
    if (related_stream >= 0 && wanted_stream_nb < 0) {
        AVProgram *p = find_program_from_stream(ic, related_stream);
        if (p) {
            program = p->stream_index;
            nb_streams = p->nb_stream_indexes;
        }
    }
    for (i = 0; i <
nb_streams; i++) {
        int real_stream_index = program ? program[i] : i;
        AVStream *st = ic->streams[real_stream_index];
        AVCodecContext *avctx = st->codec;
        if (avctx->codec_type != type)
            continue;
        if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
            continue;
        /* accessibility variants are never auto-selected as "best" */
        if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
            continue;
        if (decoder_ret) {
            decoder = avcodec_find_decoder(st->codec->codec_id);
            if (!decoder) {
                if (ret < 0)
                    ret = AVERROR_DECODER_NOT_FOUND;
                continue;
            }
        }
        /* prefer the stream for which probing decoded the most frames */
        if (best_count >= st->codec_info_nb_frames)
            continue;
        best_count = st->codec_info_nb_frames;
        ret = real_stream_index;
        best_decoder = decoder;
        if (program && i == nb_streams - 1 && ret < 0) {
            program = NULL;
            nb_streams = ic->nb_streams;
            i = 0; /* no related stream found, try again with everything */
        }
    }
    if (decoder_ret)
        *decoder_ret = best_decoder;
    return ret;
}

/*******************************************************/

/** Resume a paused network stream; ENOSYS if the protocol cannot do it. */
int av_read_play(AVFormatContext *s)
{
    if (s->iformat->read_play)
        return s->iformat->read_play(s);
    if (s->pb)
        return avio_pause(s->pb, 0);
    return AVERROR(ENOSYS);
}

/** Pause a network stream; ENOSYS if the protocol cannot do it. */
int av_read_pause(AVFormatContext *s)
{
    if (s->iformat->read_pause)
        return s->iformat->read_pause(s);
    if (s->pb)
        return avio_pause(s->pb, 1);
    return AVERROR(ENOSYS);
}

#if FF_API_FORMAT_PARAMETERS
/** Deprecated: close the demuxer without closing the AVIOContext. */
void av_close_input_stream(AVFormatContext *s)
{
    flush_packet_queue(s);
    if (s->iformat->read_close)
        s->iformat->read_close(s);
    avformat_free_context(s);
}
#endif

/**
 * Free an AVFormatContext and everything it owns: streams (parser, codec
 * context, extradata, metadata, ...), programs, chapters, private data
 * and the context itself. Does not touch any AVIOContext.
 */
void avformat_free_context(AVFormatContext *s)
{
    int i;
    AVStream *st;

    av_opt_free(s);
    if (s->iformat && s->iformat->priv_class && s->priv_data)
        av_opt_free(s->priv_data);

    for(i=0;i<s->nb_streams;i++) {
        /* free all data in a stream component */
        st = s->streams[i];
        if (st->parser) {
            av_parser_close(st->parser);
            av_free_packet(&st->cur_pkt);
        }
        av_dict_free(&st->metadata);
        av_freep(&st->probe_data.buf);
        av_free(st->index_entries);
        av_free(st->codec->extradata);
        av_free(st->codec->subtitle_header);
        av_free(st->codec);
        av_free(st->priv_data);
        av_free(st->info);
        av_free(st);
    }
    for(i=s->nb_programs-1; i>=0; i--) {
        av_dict_free(&s->programs[i]->metadata);
        av_freep(&s->programs[i]->stream_index);
        av_freep(&s->programs[i]);
    }
    av_freep(&s->programs);
    av_freep(&s->priv_data);
    while(s->nb_chapters--) {
        av_dict_free(&s->chapters[s->nb_chapters]->metadata);
        av_free(s->chapters[s->nb_chapters]);
    }
    av_freep(&s->chapters);
    av_dict_free(&s->metadata);
    av_freep(&s->streams);
    av_free(s);
}

#if FF_API_CLOSE_INPUT_FILE
/** Deprecated wrapper around avformat_close_input(). */
void av_close_input_file(AVFormatContext *s)
{
    avformat_close_input(&s);
}
#endif

/**
 * Close a demuxer and free the context; *ps is set to NULL.
 * The AVIOContext is closed only if it is owned by lavf (i.e. neither
 * AVFMT_NOFILE nor AVFMT_FLAG_CUSTOM_IO); pb is saved before the context
 * is freed because avformat_free_context() releases s itself.
 */
void avformat_close_input(AVFormatContext **ps)
{
    AVFormatContext *s = *ps;
    AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ?
                      NULL : s->pb;
    flush_packet_queue(s);
    if (s->iformat->read_close)
        s->iformat->read_close(s);
    avformat_free_context(s);
    *ps = NULL;
    if (pb)
        avio_close(pb);
}

#if FF_API_NEW_STREAM
/** Deprecated: avformat_new_stream() plus explicit id assignment. */
AVStream *av_new_stream(AVFormatContext *s, int id)
{
    AVStream *st = avformat_new_stream(s, NULL);
    if (st)
        st->id = id;
    return st;
}
#endif

/**
 * Allocate a new AVStream, append it to s->streams and initialize its
 * defaults (no timestamps known, 90 kHz MPEG-like pts base, empty
 * reorder buffer). Returns NULL on allocation failure.
 */
AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
{
    AVStream *st;
    int i;
    AVStream **streams;

    if (s->nb_streams >= INT_MAX/sizeof(*streams))
        return NULL;
    streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
    if (!streams)
        return NULL;
    s->streams = streams;

    st = av_mallocz(sizeof(AVStream));
    if (!st)
        return NULL;
    if (!(st->info = av_mallocz(sizeof(*st->info)))) {
        av_free(st);
        return NULL;
    }

    /* NOTE(review): avcodec_alloc_context3() can return NULL; st->codec is
       dereferenced below without a check - confirm callers tolerate this */
    st->codec = avcodec_alloc_context3(c);
    if (s->iformat) {
        /* no default bitrate if decoding */
        st->codec->bit_rate = 0;
    }
    st->index = s->nb_streams;
    st->start_time = AV_NOPTS_VALUE;
    st->duration = AV_NOPTS_VALUE;
    /* we set the current DTS to 0 so that formats without any timestamps
       but durations get some timestamps, formats with some unknown
       timestamps have their first few packets buffered and the
       timestamps corrected before they are returned to the user */
    st->cur_dts = 0;
    st->first_dts = AV_NOPTS_VALUE;
    st->probe_packets = MAX_PROBE_PACKETS;

    /* default pts setting is MPEG-like */
    avpriv_set_pts_info(st, 33, 1, 90000);
    st->last_IP_pts = AV_NOPTS_VALUE;
    for(i=0; i<MAX_REORDER_DELAY+1; i++)
        st->pts_buffer[i]= AV_NOPTS_VALUE;
    st->reference_dts = AV_NOPTS_VALUE;

    st->sample_aspect_ratio = (AVRational){0,1};

    s->streams[s->nb_streams++] = st;
    return st;
}

AVProgram *av_new_program(AVFormatContext *ac, int
 id)
{
    AVProgram *program=NULL;
    int i;

    av_dlog(ac, "new_program: id=0x%04x\n", id);

    /* return the existing program with this id if there is one */
    for(i=0; i<ac->nb_programs; i++)
        if(ac->programs[i]->id == id)
            program = ac->programs[i];

    if(!program){
        program = av_mallocz(sizeof(AVProgram));
        if (!program)
            return NULL;
        dynarray_add(&ac->programs, &ac->nb_programs, program);
        program->discard = AVDISCARD_NONE;
    }
    program->id = id;

    return program;
}

/**
 * Get or create the chapter with the given id and (re)set its title,
 * time base and start/end times. Returns NULL on allocation failure.
 */
AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
{
    AVChapter *chapter = NULL;
    int i;

    for(i=0; i<s->nb_chapters; i++)
        if(s->chapters[i]->id == id)
            chapter = s->chapters[i];

    if(!chapter){
        chapter= av_mallocz(sizeof(AVChapter));
        if(!chapter)
            return NULL;
        dynarray_add(&s->chapters, &s->nb_chapters, chapter);
    }
    av_dict_set(&chapter->metadata, "title", title, 0);
    chapter->id    = id;
    chapter->time_base= time_base;
    chapter->start = start;
    chapter->end   = end;

    return chapter;
}

/************************************************************/
/* output media file */

#if FF_API_FORMAT_PARAMETERS
/** Deprecated muxer setup path; superseded by avformat_write_header(). */
int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
{
    int ret;

    if (s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data)
            return AVERROR(ENOMEM);
        if (s->oformat->priv_class) {
            *(const AVClass**)s->priv_data= s->oformat->priv_class;
            av_opt_set_defaults(s->priv_data);
        }
    } else
        s->priv_data = NULL;

    if (s->oformat->set_parameters) {
        ret = s->oformat->set_parameters(s, ap);
        if (ret < 0)
            return ret;
    }
    return 0;
}
#endif

/** Validate st->codec->codec_tag against the muxer's tag tables. */
static int validate_codec_tag(AVFormatContext *s, AVStream *st)
{
    const AVCodecTag *avctag;
    int n;
    enum CodecID id = CODEC_ID_NONE;
    unsigned int tag = 0;

    /**
     * Check that tag + id is in the table
     * If neither is in the table -> OK
     * If tag is in the table with another id -> FAIL
     * If id is in the table with another tag -> FAIL unless strict < normal
     */
    for (n = 0; s->oformat->codec_tag[n]; n++) {
        avctag = s->oformat->codec_tag[n];
        while (avctag->id != CODEC_ID_NONE) {
            if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codec->codec_tag)) {
                id = avctag->id;
                if (id == st->codec->codec_id)
                    return 1;
            }
            if (avctag->id == st->codec->codec_id)
                tag = avctag->tag;
            avctag++;
        }
    }
    if (id != CODEC_ID_NONE)
        return 0;
    if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
        return 0;
    return 1;
}

#if FF_API_FORMAT_PARAMETERS
/** Deprecated wrapper kept for ABI compatibility; use avformat_write_header(). */
int av_write_header(AVFormatContext *s)
{
    return avformat_write_header(s, NULL);
}
#endif

/**
 * Sanity-check the streams, fill in codec tags, allocate muxer private
 * data, call the muxer's write_header and initialize per-stream pts
 * generation.
 *
 * @param options on success, unrecognized entries are returned in *options;
 *                may be NULL
 * @return 0 on success, negative error code on failure
 */
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0, i;
    AVStream *st;
    AVDictionary *tmp = NULL;

    /* work on a copy so *options stays untouched until success */
    if (options)
        av_dict_copy(&tmp, *options, 0);
    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;

    // some sanity checks
    if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) {
        av_log(s, AV_LOG_ERROR, "no streams\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    for(i=0;i<s->nb_streams;i++) {
        st = s->streams[i];

        switch (st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if(st->codec->sample_rate<=0){
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if(!st->codec->block_align)
                st->codec->block_align = st->codec->channels *
                    av_get_bits_per_sample(st->codec->codec_id) >> 3;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
                av_log(s, AV_LOG_ERROR, "time base not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
                av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            break;
        }

        if(s->oformat->codec_tag){
            if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
                //the current rawvideo encoding system ends up setting the wrong codec_tag for avi, we override it here
                st->codec->codec_tag= 0;
            }
            if(st->codec->codec_tag){
                if (!validate_codec_tag(s, st)) {
                    char tagbuf[32];
                    av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
                    av_log(s, AV_LOG_ERROR,
                           "Tag %s/0x%08x incompatible with output codec id '%d'\n",
                           tagbuf, st->codec->codec_tag, st->codec->codec_id);
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
            }else
                st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
        }

        if(s->oformat->flags & AVFMT_GLOBALHEADER &&
          !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
          av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
    }

    if (!s->priv_data && s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (s->oformat->priv_class) {
            *(const AVClass**)s->priv_data= s->oformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
                goto fail;
        }
    }

    /* set muxer identification string */
    if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
        av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
    }

    if(s->oformat->write_header){
        ret = s->oformat->write_header(s);
        if (ret < 0)
            goto fail;
    }

    /* init PTS generation */
    for(i=0;i<s->nb_streams;i++) {
        int64_t den = AV_NOPTS_VALUE;
        st = s->streams[i];

        switch (st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            den = (int64_t)st->time_base.num * st->codec->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            den = (int64_t)st->time_base.num * st->codec->time_base.den;
            break;
        default:
            break;
        }
        if (den != AV_NOPTS_VALUE) {
            if (den <= 0) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            frac_init(&st->pts, 0, 0, den);
        }
    }

    if (options) {
        av_dict_free(options);
        *options = tmp;
    }
    return 0;
fail:
    av_dict_free(&tmp);
    return ret;
}

//FIXME merge with compute_pkt_fields
static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
    int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
    int num, den, frame_size, i;

    av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n",
            pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);

/*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
        return AVERROR(EINVAL);*/

    /* duration field */
    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, NULL, pkt);
        if (den && num) {
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
        }
3088 } 3089 3090 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0) 3091 pkt->pts= pkt->dts; 3092 3093 //XXX/FIXME this is a temporary hack until all encoders output pts 3094 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){ 3095 pkt->dts= 3096// pkt->pts= st->cur_dts; 3097 pkt->pts= st->pts.val; 3098 } 3099 3100 //calculate dts from pts 3101 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){ 3102 st->pts_buffer[0]= pkt->pts; 3103 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++) 3104 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration; 3105 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++) 3106 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); 3107 3108 pkt->dts= st->pts_buffer[0]; 3109 } 3110 3111 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){ 3112 av_log(s, AV_LOG_ERROR, 3113 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n", 3114 st->index, st->cur_dts, pkt->dts); 3115 return AVERROR(EINVAL); 3116 } 3117 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){ 3118 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index); 3119 return AVERROR(EINVAL); 3120 } 3121 3122// av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts); 3123 st->cur_dts= pkt->dts; 3124 st->pts.val= pkt->dts; 3125 3126 /* update pts */ 3127 switch (st->codec->codec_type) { 3128 case AVMEDIA_TYPE_AUDIO: 3129 frame_size = get_audio_frame_size(st->codec, pkt->size); 3130 3131 /* HACK/FIXME, we skip the initial 0 size packets as they are most 3132 likely equal to the encoder delay, but it would be better if we 3133 had the real timestamps from the encoder */ 3134 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) { 3135 frac_add(&st->pts, 
(int64_t)st->time_base.den * frame_size); 3136 } 3137 break; 3138 case AVMEDIA_TYPE_VIDEO: 3139 frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num); 3140 break; 3141 default: 3142 break; 3143 } 3144 return 0; 3145} 3146 3147int av_write_frame(AVFormatContext *s, AVPacket *pkt) 3148{ 3149 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt); 3150 3151 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 3152 return ret; 3153 3154 ret= s->oformat->write_packet(s, pkt); 3155 3156 if (ret >= 0) 3157 s->streams[pkt->stream_index]->nb_frames++; 3158 return ret; 3159} 3160 3161void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt, 3162 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *)) 3163{ 3164 AVPacketList **next_point, *this_pktl; 3165 3166 this_pktl = av_mallocz(sizeof(AVPacketList)); 3167 this_pktl->pkt= *pkt; 3168 pkt->destruct= NULL; // do not free original but only the copy 3169 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory 3170 3171 if(s->streams[pkt->stream_index]->last_in_packet_buffer){ 3172 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next); 3173 }else 3174 next_point = &s->packet_buffer; 3175 3176 if(*next_point){ 3177 if(compare(s, &s->packet_buffer_end->pkt, pkt)){ 3178 while(!compare(s, &(*next_point)->pkt, pkt)){ 3179 next_point= &(*next_point)->next; 3180 } 3181 goto next_non_null; 3182 }else{ 3183 next_point = &(s->packet_buffer_end->next); 3184 } 3185 } 3186 assert(!*next_point); 3187 3188 s->packet_buffer_end= this_pktl; 3189next_non_null: 3190 3191 this_pktl->next= *next_point; 3192 3193 s->streams[pkt->stream_index]->last_in_packet_buffer= 3194 *next_point= this_pktl; 3195} 3196 3197static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt) 3198{ 3199 AVStream *st = s->streams[ pkt ->stream_index]; 3200 AVStream *st2= s->streams[ next->stream_index]; 3201 int comp = av_compare_ts(next->dts, 
st2->time_base, pkt->dts, 3202 st->time_base); 3203 3204 if (comp == 0) 3205 return pkt->stream_index < next->stream_index; 3206 return comp > 0; 3207} 3208 3209int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){ 3210 AVPacketList *pktl; 3211 int stream_count=0; 3212 int i; 3213 3214 if(pkt){ 3215 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts); 3216 } 3217 3218 for(i=0; i < s->nb_streams; i++) 3219 stream_count+= !!s->streams[i]->last_in_packet_buffer; 3220 3221 if(stream_count && (s->nb_streams == stream_count || flush)){ 3222 pktl= s->packet_buffer; 3223 *out= pktl->pkt; 3224 3225 s->packet_buffer= pktl->next; 3226 if(!s->packet_buffer) 3227 s->packet_buffer_end= NULL; 3228 3229 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl) 3230 s->streams[out->stream_index]->last_in_packet_buffer= NULL; 3231 av_freep(&pktl); 3232 return 1; 3233 }else{ 3234 av_init_packet(out); 3235 return 0; 3236 } 3237} 3238 3239/** 3240 * Interleave an AVPacket correctly so it can be muxed. 
 * @param out the interleaved packet will be output here
 * @param in the input packet
 * @param flush 1 if no further packets are available as input and all
 *              remaining packets should be output
 * @return 1 if a packet was output, 0 if no packet could be output,
 *         < 0 if an error occurred
 */
static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
    if (s->oformat->interleave_packet) {
        /* the muxer's own interleaver consumes in; release our reference */
        int ret = s->oformat->interleave_packet(s, out, in, flush);
        if (in)
            av_free_packet(in);
        return ret;
    } else
        return av_interleave_packet_per_dts(s, out, in, flush);
}

int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
    AVStream *st= s->streams[ pkt->stream_index];
    int ret;

    //FIXME/XXX/HACK drop zero sized packets
    if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
        return 0;

    av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
            pkt->size, pkt->dts, pkt->pts);
    if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
        return ret;

    if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
        return AVERROR(EINVAL);

    /* drain every packet the interleaver is ready to emit; pkt itself is
     * handed over on the first iteration only */
    for(;;){
        AVPacket opkt;
        int ret= interleave_packet(s, &opkt, pkt, 0);
        if(ret<=0) //FIXME cleanup needed for ret<0 ?
            return ret;

        ret= s->oformat->write_packet(s, &opkt);
        if (ret >= 0)
            s->streams[opkt.stream_index]->nb_frames++;

        av_free_packet(&opkt);
        pkt= NULL;

        if(ret<0)
            return ret;
    }
}

int av_write_trailer(AVFormatContext *s)
{
    int ret, i;

    /* flush all remaining buffered packets before writing the trailer */
    for(;;){
        AVPacket pkt;
        ret= interleave_packet(s, &pkt, NULL, 1);
        if(ret<0) //FIXME cleanup needed for ret<0 ?
3300 goto fail; 3301 if(!ret) 3302 break; 3303 3304 ret= s->oformat->write_packet(s, &pkt); 3305 if (ret >= 0) 3306 s->streams[pkt.stream_index]->nb_frames++; 3307 3308 av_free_packet(&pkt); 3309 3310 if(ret<0) 3311 goto fail; 3312 } 3313 3314 if(s->oformat->write_trailer) 3315 ret = s->oformat->write_trailer(s); 3316fail: 3317 for(i=0;i<s->nb_streams;i++) { 3318 av_freep(&s->streams[i]->priv_data); 3319 av_freep(&s->streams[i]->index_entries); 3320 } 3321 if (s->oformat->priv_class) 3322 av_opt_free(s->priv_data); 3323 av_freep(&s->priv_data); 3324 return ret; 3325} 3326 3327void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx) 3328{ 3329 int i, j; 3330 AVProgram *program=NULL; 3331 void *tmp; 3332 3333 if (idx >= ac->nb_streams) { 3334 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx); 3335 return; 3336 } 3337 3338 for(i=0; i<ac->nb_programs; i++){ 3339 if(ac->programs[i]->id != progid) 3340 continue; 3341 program = ac->programs[i]; 3342 for(j=0; j<program->nb_stream_indexes; j++) 3343 if(program->stream_index[j] == idx) 3344 return; 3345 3346 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1)); 3347 if(!tmp) 3348 return; 3349 program->stream_index = tmp; 3350 program->stream_index[program->nb_stream_indexes++] = idx; 3351 return; 3352 } 3353} 3354 3355static void print_fps(double d, const char *postfix){ 3356 uint64_t v= lrintf(d*100); 3357 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix); 3358 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix); 3359 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix); 3360} 3361 3362static void dump_metadata(void *ctx, AVDictionary *m, const char *indent) 3363{ 3364 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){ 3365 AVDictionaryEntry *tag=NULL; 3366 3367 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent); 3368 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) { 3369 
            if(strcmp("language", tag->key))
                av_log(ctx, AV_LOG_INFO, "%s  %-16s: %s\n", indent, tag->key, tag->value);
        }
    }
}

/* "user interface" functions */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
{
    char buf[256];
    int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
    AVStream *st = ic->streams[i];
    /* NOTE(review): g can be 0 when the time base is unset (0/0); the
     * divisions below would then trap -- confirm callers always set it */
    int g = av_gcd(st->time_base.num, st->time_base.den);
    AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
    avcodec_string(buf, sizeof(buf), st->codec, is_output);
    av_log(NULL, AV_LOG_INFO, "    Stream #%d.%d", index, i);
    /* the pid is an important information, so we display it */
    /* XXX: add a generic system */
    if (flags & AVFMT_SHOW_IDS)
        av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
    if (lang)
        av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
    av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
    av_log(NULL, AV_LOG_INFO, ": %s", buf);
    /* show the sample aspect ratio when it differs from the codec's */
    if (st->sample_aspect_ratio.num && // default
        av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
        AVRational display_aspect_ratio;
        av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                  st->codec->width*st->sample_aspect_ratio.num,
                  st->codec->height*st->sample_aspect_ratio.den,
                  1024*1024);
        av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
               st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
               display_aspect_ratio.num, display_aspect_ratio.den);
    }
    if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
        if(st->avg_frame_rate.den && st->avg_frame_rate.num)
            print_fps(av_q2d(st->avg_frame_rate), "fps");
        if(st->r_frame_rate.den && st->r_frame_rate.num)
            print_fps(av_q2d(st->r_frame_rate), "tbr");
        if(st->time_base.den && st->time_base.num)
            print_fps(1/av_q2d(st->time_base), "tbn");
        if(st->codec->time_base.den && st->codec->time_base.num)
            print_fps(1/av_q2d(st->codec->time_base), "tbc");
    }
    /* disposition flags */
    if (st->disposition & AV_DISPOSITION_DEFAULT)
        av_log(NULL, AV_LOG_INFO, " (default)");
    if (st->disposition & AV_DISPOSITION_DUB)
        av_log(NULL, AV_LOG_INFO, " (dub)");
    if (st->disposition & AV_DISPOSITION_ORIGINAL)
        av_log(NULL, AV_LOG_INFO, " (original)");
    if (st->disposition & AV_DISPOSITION_COMMENT)
        av_log(NULL, AV_LOG_INFO, " (comment)");
    if (st->disposition & AV_DISPOSITION_LYRICS)
        av_log(NULL, AV_LOG_INFO, " (lyrics)");
    if (st->disposition & AV_DISPOSITION_KARAOKE)
        av_log(NULL, AV_LOG_INFO, " (karaoke)");
    if (st->disposition & AV_DISPOSITION_FORCED)
        av_log(NULL, AV_LOG_INFO, " (forced)");
    if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
    if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (visual impaired)");
    if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
        av_log(NULL, AV_LOG_INFO, " (clean effects)");
    av_log(NULL, AV_LOG_INFO, "\n");
    dump_metadata(NULL, st->metadata, "    ");
}

#if FF_API_DUMP_FORMAT
/* Deprecated, use av_dump_format() instead. */
void dump_format(AVFormatContext *ic,
                 int index,
                 const char *url,
                 int is_output)
{
    av_dump_format(ic, index, url, is_output);
}
#endif

void av_dump_format(AVFormatContext *ic,
                    int index,
                    const char *url,
                    int is_output)
{
    int i;
    /* marks streams already shown as part of a program */
    uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
    if (ic->nb_streams && !printed)
        return;

    av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
           is_output ? "Output" : "Input",
           index,
           is_output ? ic->oformat->name : ic->iformat->name,
           is_output ? "to" : "from", url);
    dump_metadata(NULL, ic->metadata, "  ");
    if (!is_output) {
        av_log(NULL, AV_LOG_INFO, "  Duration: ");
        if (ic->duration != AV_NOPTS_VALUE) {
            int hours, mins, secs, us;
            secs = ic->duration / AV_TIME_BASE;
            us = ic->duration % AV_TIME_BASE;
            mins = secs / 60;
            secs %= 60;
            hours = mins / 60;
            mins %= 60;
            av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
                   (100 * us) / AV_TIME_BASE);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        if (ic->start_time != AV_NOPTS_VALUE) {
            int secs, us;
            av_log(NULL, AV_LOG_INFO, ", start: ");
            secs = ic->start_time / AV_TIME_BASE;
            us = abs(ic->start_time % AV_TIME_BASE);
            av_log(NULL, AV_LOG_INFO, "%d.%06d",
                   secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
        }
        av_log(NULL, AV_LOG_INFO, ", bitrate: ");
        if (ic->bit_rate) {
            av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }
    for (i = 0; i < ic->nb_chapters; i++) {
        AVChapter *ch = ic->chapters[i];
        av_log(NULL, AV_LOG_INFO, "    Chapter #%d.%d: ", index, i);
        av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
        av_log(NULL, AV_LOG_INFO, "end %f\n",   ch->end   * av_q2d(ch->time_base));

        dump_metadata(NULL, ch->metadata, "    ");
    }
    if(ic->nb_programs) {
        int j, k, total = 0;
        for(j=0; j<ic->nb_programs; j++) {
            AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
                                                  "name", NULL, 0);
            av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
                   name ?
name->value : ""); 3510 dump_metadata(NULL, ic->programs[j]->metadata, " "); 3511 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) { 3512 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output); 3513 printed[ic->programs[j]->stream_index[k]] = 1; 3514 } 3515 total += ic->programs[j]->nb_stream_indexes; 3516 } 3517 if (total < ic->nb_streams) 3518 av_log(NULL, AV_LOG_INFO, " No Program\n"); 3519 } 3520 for(i=0;i<ic->nb_streams;i++) 3521 if (!printed[i]) 3522 dump_stream_format(ic, i, index, is_output); 3523 3524 av_free(printed); 3525} 3526 3527int64_t av_gettime(void) 3528{ 3529 struct timeval tv; 3530 gettimeofday(&tv,NULL); 3531 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec; 3532} 3533 3534uint64_t ff_ntp_time(void) 3535{ 3536 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US; 3537} 3538 3539#if FF_API_PARSE_DATE 3540#include "libavutil/parseutils.h" 3541 3542int64_t parse_date(const char *timestr, int duration) 3543{ 3544 int64_t timeval; 3545 av_parse_time(&timeval, timestr, duration); 3546 return timeval; 3547} 3548#endif 3549 3550#if FF_API_FIND_INFO_TAG 3551#include "libavutil/parseutils.h" 3552 3553int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info) 3554{ 3555 return av_find_info_tag(arg, arg_size, tag1, info); 3556} 3557#endif 3558 3559int av_get_frame_filename(char *buf, int buf_size, 3560 const char *path, int number) 3561{ 3562 const char *p; 3563 char *q, buf1[20], c; 3564 int nd, len, percentd_found; 3565 3566 q = buf; 3567 p = path; 3568 percentd_found = 0; 3569 for(;;) { 3570 c = *p++; 3571 if (c == '\0') 3572 break; 3573 if (c == '%') { 3574 do { 3575 nd = 0; 3576 while (isdigit(*p)) { 3577 nd = nd * 10 + *p++ - '0'; 3578 } 3579 c = *p++; 3580 } while (isdigit(c)); 3581 3582 switch(c) { 3583 case '%': 3584 goto addchar; 3585 case 'd': 3586 if (percentd_found) 3587 goto fail; 3588 percentd_found = 1; 3589 snprintf(buf1, sizeof(buf1), "%0*d", nd, number); 3590 len = strlen(buf1); 3591 if ((q - buf 
 + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
        addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
 fail:
    *q = '\0';
    return -1;
}

/* Hex dump, 16 bytes per row: offset, hex column, printable-ASCII column. */
static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
{
    int len, i, j, c;
#undef fprintf
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)

    for(i=0;i<size;i+=16) {
        len = size - i;
        if (len > 16)
            len = 16;
        PRINT("%08x ", i);
        for(j=0;j<16;j++) {
            if (j < len)
                PRINT(" %02x", buf[i+j]);
            else
                PRINT("   ");
        }
        PRINT(" ");
        for(j=0;j<len;j++) {
            c = buf[i+j];
            /* replace non-printable bytes with '.' */
            if (c < ' ' || c > '~')
                c = '.';
            PRINT("%c", c);
        }
        PRINT("\n");
    }
#undef PRINT
}

void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}

void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
{
    hex_dump_internal(avcl, NULL, level, buf, size);
}

/* Dump packet timing/size (and optionally payload) to a stream or the log. */
static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
{
#undef fprintf
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
    PRINT("stream #%d:\n", pkt->stream_index);
    PRINT("  keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
    PRINT("  duration=%0.3f\n", pkt->duration * av_q2d(time_base));
    /* DTS is _always_ valid after av_read_frame() */
    PRINT("  dts=");
    if (pkt->dts == AV_NOPTS_VALUE)
        PRINT("N/A");
    else
        PRINT("%0.3f", pkt->dts * av_q2d(time_base));
    /* PTS may not be known if B-frames are present.
     */
    PRINT("  pts=");
    if (pkt->pts == AV_NOPTS_VALUE)
        PRINT("N/A");
    else
        PRINT("%0.3f", pkt->pts * av_q2d(time_base));
    PRINT("\n");
    PRINT("  size=%d\n", pkt->size);
#undef PRINT
    if (dump_payload)
        av_hex_dump(f, pkt->data, pkt->size);
}

#if FF_API_PKT_DUMP
/* Deprecated, use av_pkt_dump2() instead. */
void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
{
    AVRational tb = { 1, AV_TIME_BASE };
    pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
}
#endif

void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
{
    pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
}

#if FF_API_PKT_DUMP
/* Deprecated, use av_pkt_dump_log2() instead. */
void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
{
    AVRational tb = { 1, AV_TIME_BASE };
    pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
}
#endif

void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
                      AVStream *st)
{
    pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
}

/* Split url into protocol, authorization, hostname, port and path;
 * each output may be a zero-sized buffer to skip that component. */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr,
                  char *path, int path_size,
                  const char *url)
{
    const char *p, *ls, *at, *col, *brk;

    if (port_ptr)               *port_ptr = -1;
    if (proto_size > 0)         proto[0] = 0;
    if (authorization_size > 0) authorization[0] = 0;
    if (hostname_size > 0)      hostname[0] = 0;
    if (path_size > 0)          path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        if (*p == '/') p++;
        if (*p == '/') p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname */
    ls = strchr(p, '/');
    if(!ls)
        ls =
            strchr(p, '?');
    if(ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr) *port_ptr = atoi(col + 1);
        } else
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
    }
}

/* Write the hex representation of src (s bytes) into buff (2*s chars,
 * not NUL-terminated here) and return buff. */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    int i;
    static const char hex_table_uc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'A', 'B',
                                           'C', 'D', 'E', 'F' };
    static const char hex_table_lc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'a', 'b',
                                           'c', 'd', 'e', 'f' };
    const char *hex_table = lowercase ?
 hex_table_lc : hex_table_uc;

    for(i = 0; i < s; i++) {
        buff[i * 2]     = hex_table[src[i] >> 4];
        buff[i * 2 + 1] = hex_table[src[i] & 0xF];
    }

    return buff;
}

/* Decode hex digits from p into data (NULL to only count), skipping
 * whitespace and stopping at the first non-hex character; returns the
 * number of complete bytes decoded. */
int ff_hex_to_data(uint8_t *data, const char *p)
{
    int c, len, v;

    len = 0;
    v = 1;  /* sentinel: bit 0x100 set once two nibbles are accumulated */
    for (;;) {
        p += strspn(p, SPACE_CHARS);
        if (*p == '\0')
            break;
        c = toupper((unsigned char) *p++);
        if (c >= '0' && c <= '9')
            c = c - '0';
        else if (c >= 'A' && c <= 'F')
            c = c - 'A' + 10;
        else
            break;
        v = (v << 4) | c;
        if (v & 0x100) {
            if (data)
                data[len] = v;
            len++;
            v = 1;
        }
    }
    return len;
}

#if FF_API_SET_PTS_INFO
/* Deprecated, use avpriv_set_pts_info() instead. */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     unsigned int pts_num, unsigned int pts_den)
{
    avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
}
#endif

/* Set the stream time base to pts_num/pts_den reduced to lowest terms;
 * a non-positive reduced time base is rejected with an error log. */
void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
                         unsigned int pts_num, unsigned int pts_den)
{
    AVRational new_tb;
    if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
        if(new_tb.num != pts_num)
            av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
    }else
        av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);

    if(new_tb.num <= 0 || new_tb.den <= 0) {
        av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase for st:%d\n", s->index);
        return;
    }
    s->time_base = new_tb;
    s->pts_wrap_bits = pts_wrap_bits;
}

int ff_url_join(char *str, int size, const char *proto,
                const char *authorization, const char *hostname,
                int port, const char *fmt, ...)
{
#if CONFIG_NETWORK
    struct addrinfo hints, *ai;
#endif

    str[0] = '\0';
    if (proto)
        av_strlcatf(str, size, "%s://", proto);
    if (authorization && authorization[0])
        av_strlcatf(str, size, "%s@", authorization);
#if CONFIG_NETWORK && defined(AF_INET6)
    /* Determine if hostname is a numerical IPv6 address,
     * properly escape it within [] in that case. */
    memset(&hints, 0, sizeof(hints));
    hints.ai_flags = AI_NUMERICHOST;
    if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
        if (ai->ai_family == AF_INET6) {
            av_strlcat(str, "[", size);
            av_strlcat(str, hostname, size);
            av_strlcat(str, "]", size);
        } else {
            av_strlcat(str, hostname, size);
        }
        freeaddrinfo(ai);
    } else
#endif
        /* Not an IPv6 address, just output the plain string. */
        av_strlcat(str, hostname, size);

    if (port >= 0)
        av_strlcatf(str, size, ":%d", port);
    if (fmt) {
        /* append the printf-style suffix, truncating safely on overflow */
        va_list vl;
        int len = strlen(str);

        va_start(vl, fmt);
        vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
        va_end(vl);
    }
    return strlen(str);
}

/* Rescale pkt's timestamps from src's stream time base to dst's and
 * write it (non-interleaved) to dst as stream dst_stream. */
int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
                     AVFormatContext *src)
{
    AVPacket local_pkt;

    local_pkt = *pkt;
    local_pkt.stream_index = dst_stream;
    if (pkt->pts != AV_NOPTS_VALUE)
        local_pkt.pts = av_rescale_q(pkt->pts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    if (pkt->dts != AV_NOPTS_VALUE)
        local_pkt.dts = av_rescale_q(pkt->dts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    return av_write_frame(dst, &local_pkt);
}

void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
                        void *context)
{
    const char *ptr = str;

    /* Parse key=value pairs.
*/ 3911 for (;;) { 3912 const char *key; 3913 char *dest = NULL, *dest_end; 3914 int key_len, dest_len = 0; 3915 3916 /* Skip whitespace and potential commas. */ 3917 while (*ptr && (isspace(*ptr) || *ptr == ',')) 3918 ptr++; 3919 if (!*ptr) 3920 break; 3921 3922 key = ptr; 3923 3924 if (!(ptr = strchr(key, '='))) 3925 break; 3926 ptr++; 3927 key_len = ptr - key; 3928 3929 callback_get_buf(context, key, key_len, &dest, &dest_len); 3930 dest_end = dest + dest_len - 1; 3931 3932 if (*ptr == '\"') { 3933 ptr++; 3934 while (*ptr && *ptr != '\"') { 3935 if (*ptr == '\\') { 3936 if (!ptr[1]) 3937 break; 3938 if (dest && dest < dest_end) 3939 *dest++ = ptr[1]; 3940 ptr += 2; 3941 } else { 3942 if (dest && dest < dest_end) 3943 *dest++ = *ptr; 3944 ptr++; 3945 } 3946 } 3947 if (*ptr == '\"') 3948 ptr++; 3949 } else { 3950 for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++) 3951 if (dest && dest < dest_end) 3952 *dest++ = *ptr; 3953 } 3954 if (dest) 3955 *dest = 0; 3956 } 3957} 3958 3959int ff_find_stream_index(AVFormatContext *s, int id) 3960{ 3961 int i; 3962 for (i = 0; i < s->nb_streams; i++) { 3963 if (s->streams[i]->id == id) 3964 return i; 3965 } 3966 return -1; 3967} 3968 3969void ff_make_absolute_url(char *buf, int size, const char *base, 3970 const char *rel) 3971{ 3972 char *sep; 3973 /* Absolute path, relative to the current server */ 3974 if (base && strstr(base, "://") && rel[0] == '/') { 3975 if (base != buf) 3976 av_strlcpy(buf, base, size); 3977 sep = strstr(buf, "://"); 3978 if (sep) { 3979 sep += 3; 3980 sep = strchr(sep, '/'); 3981 if (sep) 3982 *sep = '\0'; 3983 } 3984 av_strlcat(buf, rel, size); 3985 return; 3986 } 3987 /* If rel actually is an absolute url, just copy it */ 3988 if (!base || strstr(rel, "://") || rel[0] == '/') { 3989 av_strlcpy(buf, rel, size); 3990 return; 3991 } 3992 if (base != buf) 3993 av_strlcpy(buf, base, size); 3994 /* Remove the file name from the base url */ 3995 sep = strrchr(buf, '/'); 3996 if (sep) 3997 sep[1] = '\0'; 
    else
        buf[0] = '\0';
    /* pop one base path component for each leading "../" in rel */
    while (av_strstart(rel, "../", NULL) && sep) {
        /* Remove the path delimiter at the end */
        sep[0] = '\0';
        sep = strrchr(buf, '/');
        /* If the next directory name to pop off is "..", break here */
        if (!strcmp(sep ? &sep[1] : buf, "..")) {
            /* Readd the slash we just removed */
            av_strlcat(buf, "/", size);
            break;
        }
        /* Cut off the directory name */
        if (sep)
            sep[1] = '\0';
        else
            buf[0] = '\0';
        rel += 3;
    }
    av_strlcat(buf, rel, size);
}

/* Parse an ISO 8601 date/time string (space- or 'T'-separated variant)
 * into seconds since the Unix epoch, interpreted as UTC. */
int64_t ff_iso8601_to_unix_time(const char *datestr)
{
#if HAVE_STRPTIME
    struct tm time1 = {0}, time2 = {0};
    char *ret1, *ret2;
    /* try both accepted separators between date and time */
    ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
    ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
    if (ret2 && !ret1)
        return av_timegm(&time2);
    else
        return av_timegm(&time1);
#else
    av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
           "the date string.\n");
    return 0;
#endif
}

int avformat_query_codec(AVOutputFormat *ofmt, enum CodecID codec_id, int std_compliance)
{
    if (ofmt) {
        if (ofmt->query_codec)
            return ofmt->query_codec(codec_id, std_compliance);
        else if (ofmt->codec_tag)
            return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
        else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
                 codec_id == ofmt->subtitle_codec)
            return 1;
    }
    return AVERROR_PATCHWELCOME;
}

int avformat_network_init(void)
{
#if CONFIG_NETWORK
    int ret;
    ff_network_inited_globally = 1;
    if ((ret = ff_network_init()) < 0)
        return ret;
    ff_tls_init();
#endif
    return 0;
}

int avformat_network_deinit(void)
{
#if CONFIG_NETWORK
    ff_network_close();
    ff_tls_deinit();
#endif
    return 0;
}

int ff_add_param_change(AVPacket *pkt, int32_t
                        channels,
                        uint64_t channel_layout, int32_t sample_rate,
                        int32_t width, int32_t height)
{
    /* Serialize the requested parameter changes as AV_PKT_DATA_PARAM_CHANGE
     * side data: a little-endian flags word followed by one field per flag. */
    uint32_t flags = 0;
    int size = 4;
    uint8_t *data;
    if (!pkt)
        return AVERROR(EINVAL);
    if (channels) {
        size += 4;
        flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
    }
    if (channel_layout) {
        size += 8;
        flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
    }
    if (sample_rate) {
        size += 4;
        flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
    }
    if (width || height) {
        size += 8;
        flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
    }
    data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
    if (!data)
        return AVERROR(ENOMEM);
    bytestream_put_le32(&data, flags);
    if (channels)
        bytestream_put_le32(&data, channels);
    if (channel_layout)
        bytestream_put_le64(&data, channel_layout);
    if (sample_rate)
        bytestream_put_le32(&data, sample_rate);
    if (width || height) {
        bytestream_put_le32(&data, width);
        bytestream_put_le32(&data, height);
    }
    return 0;
}

/* RIFF/AVI codec tag tables, exposed for applications. */
const struct AVCodecTag *avformat_get_riff_video_tags(void)
{
    return ff_codec_bmp_tags;
}
const struct AVCodecTag *avformat_get_riff_audio_tags(void)
{
    return ff_codec_wav_tags;
}