1/* 2 * avconv main 3 * Copyright (c) 2000-2011 The libav developers. 4 * 5 * This file is part of Libav. 6 * 7 * Libav is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2.1 of the License, or (at your option) any later version. 11 * 12 * Libav is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * You should have received a copy of the GNU Lesser General Public 18 * License along with Libav; if not, write to the Free Software 19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 20 */ 21 22#include "config.h" 23#include <ctype.h> 24#include <string.h> 25#include <math.h> 26#include <stdlib.h> 27#include <errno.h> 28#include <signal.h> 29#include <limits.h> 30#include <unistd.h> 31#include "libavformat/avformat.h" 32#include "libavdevice/avdevice.h" 33#include "libswscale/swscale.h" 34#include "libavutil/opt.h" 35#include "libavcodec/audioconvert.h" 36#include "libavutil/audioconvert.h" 37#include "libavutil/parseutils.h" 38#include "libavutil/samplefmt.h" 39#include "libavutil/colorspace.h" 40#include "libavutil/fifo.h" 41#include "libavutil/intreadwrite.h" 42#include "libavutil/dict.h" 43#include "libavutil/mathematics.h" 44#include "libavutil/pixdesc.h" 45#include "libavutil/avstring.h" 46#include "libavutil/libm.h" 47#include "libavutil/imgutils.h" 48#include "libavformat/os_support.h" 49 50#if CONFIG_AVFILTER 51# include "libavfilter/avfilter.h" 52# include "libavfilter/avfiltergraph.h" 53# include "libavfilter/buffersrc.h" 54# include "libavfilter/vsrc_buffer.h" 55#endif 56 57#if HAVE_SYS_RESOURCE_H 58#include <sys/types.h> 59#include <sys/time.h> 60#include <sys/resource.h> 61#elif 
HAVE_GETPROCESSTIMES 62#include <windows.h> 63#endif 64#if HAVE_GETPROCESSMEMORYINFO 65#include <windows.h> 66#include <psapi.h> 67#endif 68 69#if HAVE_SYS_SELECT_H 70#include <sys/select.h> 71#endif 72 73#include <time.h> 74 75#include "cmdutils.h" 76 77#include "libavutil/avassert.h" 78 79#define VSYNC_AUTO -1 80#define VSYNC_PASSTHROUGH 0 81#define VSYNC_CFR 1 82#define VSYNC_VFR 2 83 84const char program_name[] = "avconv"; 85const int program_birth_year = 2000; 86 87/* select an input stream for an output stream */ 88typedef struct StreamMap { 89 int disabled; /** 1 is this mapping is disabled by a negative map */ 90 int file_index; 91 int stream_index; 92 int sync_file_index; 93 int sync_stream_index; 94} StreamMap; 95 96/** 97 * select an input file for an output file 98 */ 99typedef struct MetadataMap { 100 int file; ///< file index 101 char type; ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram 102 int index; ///< stream/chapter/program number 103} MetadataMap; 104 105static const OptionDef options[]; 106 107static int video_discard = 0; 108static int same_quant = 0; 109static int do_deinterlace = 0; 110static int intra_dc_precision = 8; 111static int qp_hist = 0; 112 113static int file_overwrite = 0; 114static int do_benchmark = 0; 115static int do_hex_dump = 0; 116static int do_pkt_dump = 0; 117static int do_pass = 0; 118static char *pass_logfilename_prefix = NULL; 119static int video_sync_method = VSYNC_AUTO; 120static int audio_sync_method = 0; 121static float audio_drift_threshold = 0.1; 122static int copy_ts = 0; 123static int copy_tb = 1; 124static int opt_shortest = 0; 125static char *vstats_filename; 126static FILE *vstats_file; 127 128static int audio_volume = 256; 129 130static int exit_on_error = 0; 131static int using_stdin = 0; 132static int64_t video_size = 0; 133static int64_t audio_size = 0; 134static int64_t extra_size = 0; 135static int nb_frames_dup = 0; 136static int nb_frames_drop = 0; 137static int 
input_sync; 138 139static float dts_delta_threshold = 10; 140 141static int print_stats = 1; 142 143static uint8_t *audio_buf; 144static unsigned int allocated_audio_buf_size; 145 146#define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass" 147 148typedef struct FrameBuffer { 149 uint8_t *base[4]; 150 uint8_t *data[4]; 151 int linesize[4]; 152 153 int h, w; 154 enum PixelFormat pix_fmt; 155 156 int refcount; 157 struct InputStream *ist; 158 struct FrameBuffer *next; 159} FrameBuffer; 160 161typedef struct InputStream { 162 int file_index; 163 AVStream *st; 164 int discard; /* true if stream data should be discarded */ 165 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */ 166 AVCodec *dec; 167 AVFrame *decoded_frame; 168 AVFrame *filtered_frame; 169 170 int64_t start; /* time when read started */ 171 int64_t next_pts; /* synthetic pts for cases where pkt.pts 172 is not defined */ 173 int64_t pts; /* current pts */ 174 PtsCorrectionContext pts_ctx; 175 double ts_scale; 176 int is_start; /* is 1 at the start and after a discontinuity */ 177 int showed_multi_packet_warning; 178 AVDictionary *opts; 179 180 /* a pool of free buffers for decoded data */ 181 FrameBuffer *buffer_pool; 182} InputStream; 183 184typedef struct InputFile { 185 AVFormatContext *ctx; 186 int eof_reached; /* true if eof reached */ 187 int ist_index; /* index of first stream in ist_table */ 188 int buffer_size; /* current total buffer size */ 189 int64_t ts_offset; 190 int nb_streams; /* number of stream that avconv is aware of; may be different 191 from ctx.nb_streams if new streams appear during av_read_frame() */ 192 int rate_emu; 193} InputFile; 194 195typedef struct OutputStream { 196 int file_index; /* file index */ 197 int index; /* stream index in the output file */ 198 int source_index; /* InputStream index */ 199 AVStream *st; /* stream in the output file */ 200 int encoding_needed; /* true if encoding needed for this stream */ 201 int frame_number; 202 /* input pts and 
corresponding output pts 203 for A/V sync */ 204 // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */ 205 struct InputStream *sync_ist; /* input stream to sync against */ 206 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number 207 AVBitStreamFilterContext *bitstream_filters; 208 AVCodec *enc; 209 int64_t max_frames; 210 AVFrame *output_frame; 211 212 /* video only */ 213 int video_resample; 214 AVFrame pict_tmp; /* temporary image for resampling */ 215 struct SwsContext *img_resample_ctx; /* for image resampling */ 216 int resample_height; 217 int resample_width; 218 int resample_pix_fmt; 219 AVRational frame_rate; 220 int force_fps; 221 int top_field_first; 222 223 float frame_aspect_ratio; 224 225 /* forced key frames */ 226 int64_t *forced_kf_pts; 227 int forced_kf_count; 228 int forced_kf_index; 229 char *forced_keyframes; 230 231 /* audio only */ 232 int audio_resample; 233 ReSampleContext *resample; /* for audio resampling */ 234 int resample_sample_fmt; 235 int resample_channels; 236 int resample_sample_rate; 237 int reformat_pair; 238 AVAudioConvert *reformat_ctx; 239 AVFifoBuffer *fifo; /* for compression: one audio fifo per codec */ 240 FILE *logfile; 241 242#if CONFIG_AVFILTER 243 AVFilterContext *output_video_filter; 244 AVFilterContext *input_video_filter; 245 AVFilterBufferRef *picref; 246 char *avfilter; 247 AVFilterGraph *graph; 248#endif 249 250 int64_t sws_flags; 251 AVDictionary *opts; 252 int is_past_recording_time; 253 int stream_copy; 254 const char *attachment_filename; 255 int copy_initial_nonkeyframes; 256} OutputStream; 257 258 259typedef struct OutputFile { 260 AVFormatContext *ctx; 261 AVDictionary *opts; 262 int ost_index; /* index of the first stream in output_streams */ 263 int64_t recording_time; /* desired length of the resulting file in microseconds */ 264 int64_t start_time; /* start time in microseconds */ 265 uint64_t limit_filesize; 266} 
OutputFile; 267 268static InputStream *input_streams = NULL; 269static int nb_input_streams = 0; 270static InputFile *input_files = NULL; 271static int nb_input_files = 0; 272 273static OutputStream *output_streams = NULL; 274static int nb_output_streams = 0; 275static OutputFile *output_files = NULL; 276static int nb_output_files = 0; 277 278typedef struct OptionsContext { 279 /* input/output options */ 280 int64_t start_time; 281 const char *format; 282 283 SpecifierOpt *codec_names; 284 int nb_codec_names; 285 SpecifierOpt *audio_channels; 286 int nb_audio_channels; 287 SpecifierOpt *audio_sample_rate; 288 int nb_audio_sample_rate; 289 SpecifierOpt *frame_rates; 290 int nb_frame_rates; 291 SpecifierOpt *frame_sizes; 292 int nb_frame_sizes; 293 SpecifierOpt *frame_pix_fmts; 294 int nb_frame_pix_fmts; 295 296 /* input options */ 297 int64_t input_ts_offset; 298 int rate_emu; 299 300 SpecifierOpt *ts_scale; 301 int nb_ts_scale; 302 SpecifierOpt *dump_attachment; 303 int nb_dump_attachment; 304 305 /* output options */ 306 StreamMap *stream_maps; 307 int nb_stream_maps; 308 /* first item specifies output metadata, second is input */ 309 MetadataMap (*meta_data_maps)[2]; 310 int nb_meta_data_maps; 311 int metadata_global_manual; 312 int metadata_streams_manual; 313 int metadata_chapters_manual; 314 const char **attachments; 315 int nb_attachments; 316 317 int chapters_input_file; 318 319 int64_t recording_time; 320 uint64_t limit_filesize; 321 float mux_preload; 322 float mux_max_delay; 323 324 int video_disable; 325 int audio_disable; 326 int subtitle_disable; 327 int data_disable; 328 329 /* indexed by output file stream index */ 330 int *streamid_map; 331 int nb_streamid_map; 332 333 SpecifierOpt *metadata; 334 int nb_metadata; 335 SpecifierOpt *max_frames; 336 int nb_max_frames; 337 SpecifierOpt *bitstream_filters; 338 int nb_bitstream_filters; 339 SpecifierOpt *codec_tags; 340 int nb_codec_tags; 341 SpecifierOpt *sample_fmts; 342 int nb_sample_fmts; 343 
SpecifierOpt *qscale; 344 int nb_qscale; 345 SpecifierOpt *forced_key_frames; 346 int nb_forced_key_frames; 347 SpecifierOpt *force_fps; 348 int nb_force_fps; 349 SpecifierOpt *frame_aspect_ratios; 350 int nb_frame_aspect_ratios; 351 SpecifierOpt *rc_overrides; 352 int nb_rc_overrides; 353 SpecifierOpt *intra_matrices; 354 int nb_intra_matrices; 355 SpecifierOpt *inter_matrices; 356 int nb_inter_matrices; 357 SpecifierOpt *top_field_first; 358 int nb_top_field_first; 359 SpecifierOpt *metadata_map; 360 int nb_metadata_map; 361 SpecifierOpt *presets; 362 int nb_presets; 363 SpecifierOpt *copy_initial_nonkeyframes; 364 int nb_copy_initial_nonkeyframes; 365#if CONFIG_AVFILTER 366 SpecifierOpt *filters; 367 int nb_filters; 368#endif 369} OptionsContext; 370 371#define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\ 372{\ 373 int i, ret;\ 374 for (i = 0; i < o->nb_ ## name; i++) {\ 375 char *spec = o->name[i].specifier;\ 376 if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\ 377 outvar = o->name[i].u.type;\ 378 else if (ret < 0)\ 379 exit_program(1);\ 380 }\ 381} 382 383static void reset_options(OptionsContext *o) 384{ 385 const OptionDef *po = options; 386 387 /* all OPT_SPEC and OPT_STRING can be freed in generic way */ 388 while (po->name) { 389 void *dst = (uint8_t*)o + po->u.off; 390 391 if (po->flags & OPT_SPEC) { 392 SpecifierOpt **so = dst; 393 int i, *count = (int*)(so + 1); 394 for (i = 0; i < *count; i++) { 395 av_freep(&(*so)[i].specifier); 396 if (po->flags & OPT_STRING) 397 av_freep(&(*so)[i].u.str); 398 } 399 av_freep(so); 400 *count = 0; 401 } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING) 402 av_freep(dst); 403 po++; 404 } 405 406 av_freep(&o->stream_maps); 407 av_freep(&o->meta_data_maps); 408 av_freep(&o->streamid_map); 409 410 memset(o, 0, sizeof(*o)); 411 412 o->mux_max_delay = 0.7; 413 o->recording_time = INT64_MAX; 414 o->limit_filesize = UINT64_MAX; 415 o->chapters_input_file = INT_MAX; 416 417 uninit_opts(); 418 
init_opts(); 419} 420 421static int alloc_buffer(InputStream *ist, FrameBuffer **pbuf) 422{ 423 AVCodecContext *s = ist->st->codec; 424 FrameBuffer *buf = av_mallocz(sizeof(*buf)); 425 int ret; 426 const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1; 427 int h_chroma_shift, v_chroma_shift; 428 int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1 429 int w = s->width, h = s->height; 430 431 if (!buf) 432 return AVERROR(ENOMEM); 433 434 if (!(s->flags & CODEC_FLAG_EMU_EDGE)) { 435 w += 2*edge; 436 h += 2*edge; 437 } 438 439 avcodec_align_dimensions(s, &w, &h); 440 if ((ret = av_image_alloc(buf->base, buf->linesize, w, h, 441 s->pix_fmt, 32)) < 0) { 442 av_freep(&buf); 443 return ret; 444 } 445 /* XXX this shouldn't be needed, but some tests break without this line 446 * those decoders are buggy and need to be fixed. 447 * the following tests fail: 448 * bethsoft-vid, cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit 449 */ 450 memset(buf->base[0], 128, ret); 451 452 avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift); 453 for (int i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) { 454 const int h_shift = i==0 ? 0 : h_chroma_shift; 455 const int v_shift = i==0 ? 
0 : v_chroma_shift; 456 if (s->flags & CODEC_FLAG_EMU_EDGE) 457 buf->data[i] = buf->base[i]; 458 else if (buf->base[i]) 459 buf->data[i] = buf->base[i] + 460 FFALIGN((buf->linesize[i]*edge >> v_shift) + 461 (pixel_size*edge >> h_shift), 32); 462 } 463 buf->w = s->width; 464 buf->h = s->height; 465 buf->pix_fmt = s->pix_fmt; 466 buf->ist = ist; 467 468 *pbuf = buf; 469 return 0; 470} 471 472static void free_buffer_pool(InputStream *ist) 473{ 474 FrameBuffer *buf = ist->buffer_pool; 475 while (buf) { 476 ist->buffer_pool = buf->next; 477 av_freep(&buf->base[0]); 478 av_free(buf); 479 buf = ist->buffer_pool; 480 } 481} 482 483static void unref_buffer(InputStream *ist, FrameBuffer *buf) 484{ 485 av_assert0(buf->refcount); 486 buf->refcount--; 487 if (!buf->refcount) { 488 buf->next = ist->buffer_pool; 489 ist->buffer_pool = buf; 490 } 491} 492 493static int codec_get_buffer(AVCodecContext *s, AVFrame *frame) 494{ 495 InputStream *ist = s->opaque; 496 FrameBuffer *buf; 497 int ret, i; 498 499 if (!ist->buffer_pool && (ret = alloc_buffer(ist, &ist->buffer_pool)) < 0) 500 return ret; 501 502 buf = ist->buffer_pool; 503 ist->buffer_pool = buf->next; 504 buf->next = NULL; 505 if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) { 506 av_freep(&buf->base[0]); 507 av_free(buf); 508 if ((ret = alloc_buffer(ist, &buf)) < 0) 509 return ret; 510 } 511 buf->refcount++; 512 513 frame->opaque = buf; 514 frame->type = FF_BUFFER_TYPE_USER; 515 frame->extended_data = frame->data; 516 frame->pkt_pts = s->pkt ? 
s->pkt->pts : AV_NOPTS_VALUE; 517 518 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) { 519 frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't 520 frame->data[i] = buf->data[i]; 521 frame->linesize[i] = buf->linesize[i]; 522 } 523 524 return 0; 525} 526 527static void codec_release_buffer(AVCodecContext *s, AVFrame *frame) 528{ 529 InputStream *ist = s->opaque; 530 FrameBuffer *buf = frame->opaque; 531 int i; 532 533 for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++) 534 frame->data[i] = NULL; 535 536 unref_buffer(ist, buf); 537} 538 539static void filter_release_buffer(AVFilterBuffer *fb) 540{ 541 FrameBuffer *buf = fb->priv; 542 av_free(fb); 543 unref_buffer(buf->ist, buf); 544} 545 546#if CONFIG_AVFILTER 547 548static int configure_video_filters(InputStream *ist, OutputStream *ost) 549{ 550 AVFilterContext *last_filter, *filter; 551 /** filter graph containing all filters including input & output */ 552 AVCodecContext *codec = ost->st->codec; 553 AVCodecContext *icodec = ist->st->codec; 554 AVSinkContext avsink_ctx = { .pix_fmt = codec->pix_fmt }; 555 AVRational sample_aspect_ratio; 556 char args[255]; 557 int ret; 558 559 ost->graph = avfilter_graph_alloc(); 560 561 if (ist->st->sample_aspect_ratio.num) { 562 sample_aspect_ratio = ist->st->sample_aspect_ratio; 563 } else 564 sample_aspect_ratio = ist->st->codec->sample_aspect_ratio; 565 566 snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width, 567 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE, 568 sample_aspect_ratio.num, sample_aspect_ratio.den); 569 570 ret = avfilter_graph_create_filter(&ost->input_video_filter, avfilter_get_by_name("buffer"), 571 "src", args, NULL, ost->graph); 572 if (ret < 0) 573 return ret; 574 ret = avfilter_graph_create_filter(&ost->output_video_filter, &avsink, 575 "out", NULL, &avsink_ctx, ost->graph); 576 if (ret < 0) 577 return ret; 578 last_filter = ost->input_video_filter; 579 580 if (codec->width != icodec->width || 
codec->height != icodec->height) { 581 snprintf(args, 255, "%d:%d:flags=0x%X", 582 codec->width, 583 codec->height, 584 (unsigned)ost->sws_flags); 585 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"), 586 NULL, args, NULL, ost->graph)) < 0) 587 return ret; 588 if ((ret = avfilter_link(last_filter, 0, filter, 0)) < 0) 589 return ret; 590 last_filter = filter; 591 } 592 593 snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags); 594 ost->graph->scale_sws_opts = av_strdup(args); 595 596 if (ost->avfilter) { 597 AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut)); 598 AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut)); 599 600 outputs->name = av_strdup("in"); 601 outputs->filter_ctx = last_filter; 602 outputs->pad_idx = 0; 603 outputs->next = NULL; 604 605 inputs->name = av_strdup("out"); 606 inputs->filter_ctx = ost->output_video_filter; 607 inputs->pad_idx = 0; 608 inputs->next = NULL; 609 610 if ((ret = avfilter_graph_parse(ost->graph, ost->avfilter, inputs, outputs, NULL)) < 0) 611 return ret; 612 } else { 613 if ((ret = avfilter_link(last_filter, 0, ost->output_video_filter, 0)) < 0) 614 return ret; 615 } 616 617 if ((ret = avfilter_graph_config(ost->graph, NULL)) < 0) 618 return ret; 619 620 codec->width = ost->output_video_filter->inputs[0]->w; 621 codec->height = ost->output_video_filter->inputs[0]->h; 622 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = 623 ost->frame_aspect_ratio ? 
// overridden by the -aspect cli option 624 av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) : 625 ost->output_video_filter->inputs[0]->sample_aspect_ratio; 626 627 return 0; 628} 629#endif /* CONFIG_AVFILTER */ 630 631static void term_exit(void) 632{ 633 av_log(NULL, AV_LOG_QUIET, ""); 634} 635 636static volatile int received_sigterm = 0; 637static volatile int received_nb_signals = 0; 638 639static void 640sigterm_handler(int sig) 641{ 642 received_sigterm = sig; 643 received_nb_signals++; 644 term_exit(); 645} 646 647static void term_init(void) 648{ 649 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */ 650 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */ 651#ifdef SIGXCPU 652 signal(SIGXCPU, sigterm_handler); 653#endif 654} 655 656static int decode_interrupt_cb(void *ctx) 657{ 658 return received_nb_signals > 1; 659} 660 661static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL }; 662 663void exit_program(int ret) 664{ 665 int i; 666 667 /* close files */ 668 for (i = 0; i < nb_output_files; i++) { 669 AVFormatContext *s = output_files[i].ctx; 670 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb) 671 avio_close(s->pb); 672 avformat_free_context(s); 673 av_dict_free(&output_files[i].opts); 674 } 675 for (i = 0; i < nb_output_streams; i++) { 676 AVBitStreamFilterContext *bsfc = output_streams[i].bitstream_filters; 677 while (bsfc) { 678 AVBitStreamFilterContext *next = bsfc->next; 679 av_bitstream_filter_close(bsfc); 680 bsfc = next; 681 } 682 output_streams[i].bitstream_filters = NULL; 683 684 if (output_streams[i].output_frame) { 685 AVFrame *frame = output_streams[i].output_frame; 686 if (frame->extended_data != frame->data) 687 av_freep(&frame->extended_data); 688 av_freep(&frame); 689 } 690 691 av_freep(&output_streams[i].forced_keyframes); 692#if CONFIG_AVFILTER 693 av_freep(&output_streams[i].avfilter); 694#endif 695 } 696 for (i = 0; i < nb_input_files; i++) { 697 avformat_close_input(&input_files[i].ctx); 698 
} 699 for (i = 0; i < nb_input_streams; i++) { 700 av_freep(&input_streams[i].decoded_frame); 701 av_freep(&input_streams[i].filtered_frame); 702 av_dict_free(&input_streams[i].opts); 703 free_buffer_pool(&input_streams[i]); 704 } 705 706 if (vstats_file) 707 fclose(vstats_file); 708 av_free(vstats_filename); 709 710 av_freep(&input_streams); 711 av_freep(&input_files); 712 av_freep(&output_streams); 713 av_freep(&output_files); 714 715 uninit_opts(); 716 av_free(audio_buf); 717 allocated_audio_buf_size = 0; 718 719#if CONFIG_AVFILTER 720 avfilter_uninit(); 721#endif 722 avformat_network_deinit(); 723 724 if (received_sigterm) { 725 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n", 726 (int) received_sigterm); 727 exit (255); 728 } 729 730 exit(ret); 731} 732 733static void assert_avoptions(AVDictionary *m) 734{ 735 AVDictionaryEntry *t; 736 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) { 737 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key); 738 exit_program(1); 739 } 740} 741 742static void assert_codec_experimental(AVCodecContext *c, int encoder) 743{ 744 const char *codec_string = encoder ? "encoder" : "decoder"; 745 AVCodec *codec; 746 if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL && 747 c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { 748 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad " 749 "results.\nAdd '-strict experimental' if you want to use it.\n", 750 codec_string, c->codec->name); 751 codec = encoder ? 
avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id); 752 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL)) 753 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n", 754 codec_string, codec->name); 755 exit_program(1); 756 } 757} 758 759static void choose_sample_fmt(AVStream *st, AVCodec *codec) 760{ 761 if (codec && codec->sample_fmts) { 762 const enum AVSampleFormat *p = codec->sample_fmts; 763 for (; *p != -1; p++) { 764 if (*p == st->codec->sample_fmt) 765 break; 766 } 767 if (*p == -1) { 768 av_log(NULL, AV_LOG_WARNING, 769 "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n", 770 av_get_sample_fmt_name(st->codec->sample_fmt), 771 codec->name, 772 av_get_sample_fmt_name(codec->sample_fmts[0])); 773 st->codec->sample_fmt = codec->sample_fmts[0]; 774 } 775 } 776} 777 778/** 779 * Update the requested input sample format based on the output sample format. 780 * This is currently only used to request float output from decoders which 781 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT. 782 * Ideally this will be removed in the future when decoders do not do format 783 * conversion and only output in their native format. 
784 */ 785static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec, 786 AVCodecContext *enc) 787{ 788 /* if sample formats match or a decoder sample format has already been 789 requested, just return */ 790 if (enc->sample_fmt == dec->sample_fmt || 791 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE) 792 return; 793 794 /* if decoder supports more than one output format */ 795 if (dec_codec && dec_codec->sample_fmts && 796 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE && 797 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) { 798 const enum AVSampleFormat *p; 799 int min_dec = -1, min_inc = -1; 800 801 /* find a matching sample format in the encoder */ 802 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) { 803 if (*p == enc->sample_fmt) { 804 dec->request_sample_fmt = *p; 805 return; 806 } else if (*p > enc->sample_fmt) { 807 min_inc = FFMIN(min_inc, *p - enc->sample_fmt); 808 } else 809 min_dec = FFMIN(min_dec, enc->sample_fmt - *p); 810 } 811 812 /* if none match, provide the one that matches quality closest */ 813 dec->request_sample_fmt = min_inc > 0 ? 
enc->sample_fmt + min_inc : 814 enc->sample_fmt - min_dec; 815 } 816} 817 818static void choose_sample_rate(AVStream *st, AVCodec *codec) 819{ 820 if (codec && codec->supported_samplerates) { 821 const int *p = codec->supported_samplerates; 822 int best = 0; 823 int best_dist = INT_MAX; 824 for (; *p; p++) { 825 int dist = abs(st->codec->sample_rate - *p); 826 if (dist < best_dist) { 827 best_dist = dist; 828 best = *p; 829 } 830 } 831 if (best_dist) { 832 av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best); 833 } 834 st->codec->sample_rate = best; 835 } 836} 837 838static void choose_pixel_fmt(AVStream *st, AVCodec *codec) 839{ 840 if (codec && codec->pix_fmts) { 841 const enum PixelFormat *p = codec->pix_fmts; 842 if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) { 843 if (st->codec->codec_id == CODEC_ID_MJPEG) { 844 p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE }; 845 } else if (st->codec->codec_id == CODEC_ID_LJPEG) { 846 p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P, 847 PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE }; 848 } 849 } 850 for (; *p != PIX_FMT_NONE; p++) { 851 if (*p == st->codec->pix_fmt) 852 break; 853 } 854 if (*p == PIX_FMT_NONE) { 855 if (st->codec->pix_fmt != PIX_FMT_NONE) 856 av_log(NULL, AV_LOG_WARNING, 857 "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n", 858 av_pix_fmt_descriptors[st->codec->pix_fmt].name, 859 codec->name, 860 av_pix_fmt_descriptors[codec->pix_fmts[0]].name); 861 st->codec->pix_fmt = codec->pix_fmts[0]; 862 } 863 } 864} 865 866static double 867get_sync_ipts(const OutputStream *ost) 868{ 869 const InputStream *ist = ost->sync_ist; 870 OutputFile *of = &output_files[ost->file_index]; 871 return (double)(ist->pts - of->start_time) / AV_TIME_BASE; 872} 873 874static void 
write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost) 875{ 876 AVBitStreamFilterContext *bsfc = ost->bitstream_filters; 877 AVCodecContext *avctx = ost->st->codec; 878 int ret; 879 880 /* 881 * Audio encoders may split the packets -- #frames in != #packets out. 882 * But there is no reordering, so we can limit the number of output packets 883 * by simply dropping them here. 884 * Counting encoded video frames needs to be done separately because of 885 * reordering, see do_video_out() 886 */ 887 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) { 888 if (ost->frame_number >= ost->max_frames) 889 return; 890 ost->frame_number++; 891 } 892 893 while (bsfc) { 894 AVPacket new_pkt = *pkt; 895 int a = av_bitstream_filter_filter(bsfc, avctx, NULL, 896 &new_pkt.data, &new_pkt.size, 897 pkt->data, pkt->size, 898 pkt->flags & AV_PKT_FLAG_KEY); 899 if (a > 0) { 900 av_free_packet(pkt); 901 new_pkt.destruct = av_destruct_packet; 902 } else if (a < 0) { 903 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s", 904 bsfc->filter->name, pkt->stream_index, 905 avctx->codec ? 
avctx->codec->name : "copy"); 906 print_error("", a); 907 if (exit_on_error) 908 exit_program(1); 909 } 910 *pkt = new_pkt; 911 912 bsfc = bsfc->next; 913 } 914 915 ret = av_interleaved_write_frame(s, pkt); 916 if (ret < 0) { 917 print_error("av_interleaved_write_frame()", ret); 918 exit_program(1); 919 } 920} 921 922static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size) 923{ 924 int fill_char = 0x00; 925 if (sample_fmt == AV_SAMPLE_FMT_U8) 926 fill_char = 0x80; 927 memset(buf, fill_char, size); 928} 929 930static int encode_audio_frame(AVFormatContext *s, OutputStream *ost, 931 const uint8_t *buf, int buf_size) 932{ 933 AVCodecContext *enc = ost->st->codec; 934 AVFrame *frame = NULL; 935 AVPacket pkt; 936 int ret, got_packet; 937 938 av_init_packet(&pkt); 939 pkt.data = NULL; 940 pkt.size = 0; 941 942 if (buf) { 943 if (!ost->output_frame) { 944 ost->output_frame = avcodec_alloc_frame(); 945 if (!ost->output_frame) { 946 av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n"); 947 exit_program(1); 948 } 949 } 950 frame = ost->output_frame; 951 if (frame->extended_data != frame->data) 952 av_freep(&frame->extended_data); 953 avcodec_get_frame_defaults(frame); 954 955 frame->nb_samples = buf_size / 956 (enc->channels * av_get_bytes_per_sample(enc->sample_fmt)); 957 if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt, 958 buf, buf_size, 1)) < 0) { 959 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n"); 960 exit_program(1); 961 } 962 } 963 964 got_packet = 0; 965 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) { 966 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n"); 967 exit_program(1); 968 } 969 970 if (got_packet) { 971 pkt.stream_index = ost->index; 972 if (pkt.pts != AV_NOPTS_VALUE) 973 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base); 974 if (pkt.duration > 0) 975 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base); 976 977 
        write_frame(s, &pkt, ost);

        audio_size += pkt.size;
    }

    if (frame)
        ost->sync_opts += frame->nb_samples;

    return pkt.size;
}

/**
 * Resample/convert one decoded audio frame to the encoder's format and feed it
 * to encode_audio_frame(), buffering through ost->fifo for fixed-frame-size
 * codecs. Uses the deprecated audio_resample/av_audio_convert APIs and the
 * file-global audio_buf/allocated_audio_buf_size scratch buffer.
 */
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                         InputStream *ist, AVFrame *decoded_frame)
{
    uint8_t *buftmp;
    int64_t audio_buf_size;

    int size_out, frame_bytes, resample_changed;
    AVCodecContext *enc = ost->st->codec;
    AVCodecContext *dec = ist->st->codec;
    int osize = av_get_bytes_per_sample(enc->sample_fmt);
    int isize = av_get_bytes_per_sample(dec->sample_fmt);
    uint8_t *buf = decoded_frame->data[0];
    int size     = decoded_frame->nb_samples * dec->channels * isize;
    /* may grow below when silence is prepended for drift compensation */
    int64_t allocated_for_size = size;

need_realloc:
    /* worst-case output size: input samples scaled by the rate ratio, doubled
       plus slack, but never less than one full encoder frame */
    audio_buf_size  = (allocated_for_size + isize * dec->channels - 1) / (isize * dec->channels);
    audio_buf_size  = (audio_buf_size * enc->sample_rate + dec->sample_rate) / dec->sample_rate;
    audio_buf_size  = audio_buf_size * 2 + 10000; // safety factors for the deprecated resampling API
    audio_buf_size  = FFMAX(audio_buf_size, enc->frame_size);
    audio_buf_size *= osize * enc->channels;

    if (audio_buf_size > INT_MAX) {
        av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n");
        exit_program(1);
    }

    av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
    if (!audio_buf) {
        av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
        exit_program(1);
    }

    if (enc->channels != dec->channels || enc->sample_rate != dec->sample_rate)
        ost->audio_resample = 1;

    /* detect mid-stream parameter changes so the resampler can be rebuilt */
    resample_changed = ost->resample_sample_fmt  != dec->sample_fmt ||
                       ost->resample_channels    != dec->channels   ||
                       ost->resample_sample_rate != dec->sample_rate;

    if ((ost->audio_resample && !ost->resample) || resample_changed) {
        if (resample_changed) {
            av_log(NULL, AV_LOG_INFO, "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d to rate:%d fmt:%s ch:%d\n",
                   ist->file_index, ist->st->index,
                   ost->resample_sample_rate, av_get_sample_fmt_name(ost->resample_sample_fmt), ost->resample_channels,
                   dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt), dec->channels);
            ost->resample_sample_fmt  = dec->sample_fmt;
            ost->resample_channels    = dec->channels;
            ost->resample_sample_rate = dec->sample_rate;
            if (ost->resample)
                audio_resample_close(ost->resample);
        }
        /* if audio_sync_method is >1 the resampler is needed for audio drift compensation */
        if (audio_sync_method <= 1 &&
            ost->resample_sample_fmt  == enc->sample_fmt &&
            ost->resample_channels    == enc->channels   &&
            ost->resample_sample_rate == enc->sample_rate) {
            ost->resample = NULL;
            ost->audio_resample = 0;
        } else if (ost->audio_resample) {
            if (dec->sample_fmt != AV_SAMPLE_FMT_S16)
                av_log(NULL, AV_LOG_WARNING, "Using s16 intermediate sample format for resampling\n");
            ost->resample = av_audio_resample_init(enc->channels,    dec->channels,
                                                   enc->sample_rate, dec->sample_rate,
                                                   enc->sample_fmt,  dec->sample_fmt,
                                                   16, 10, 0, 0.8);
            if (!ost->resample) {
                av_log(NULL, AV_LOG_FATAL, "Can not resample %d channels @ %d Hz to %d channels @ %d Hz\n",
                       dec->channels, dec->sample_rate,
                       enc->channels, enc->sample_rate);
                exit_program(1);
            }
        }
    }

#define MAKE_SFMT_PAIR(a,b) ((a)+AV_SAMPLE_FMT_NB*(b))
    /* pure sample-format conversion path (no rate/channel change) */
    if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt &&
        MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt) != ost->reformat_pair) {
        if (ost->reformat_ctx)
            av_audio_convert_free(ost->reformat_ctx);
        ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1,
                                                   dec->sample_fmt, 1, NULL, 0);
        if (!ost->reformat_ctx) {
            av_log(NULL, AV_LOG_FATAL, "Cannot convert %s sample format to %s sample format\n",
                   av_get_sample_fmt_name(dec->sample_fmt),
                   av_get_sample_fmt_name(enc->sample_fmt));
            exit_program(1);
        }
        ost->reformat_pair = MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
    }

    if (audio_sync_method) {
        /* drift in output samples between where this frame should land and
           where the encoder (sync_opts + fifo backlog) actually is */
        double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts -
                       av_fifo_size(ost->fifo) / (enc->channels * osize);
        int idelta = delta * dec->sample_rate / enc->sample_rate;
        int byte_delta = idelta * isize * dec->channels;

        // FIXME resample delay
        if (fabs(delta) > 50) {
            if (ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate) {
                if (byte_delta < 0) {
                    /* output is ahead: drop leading input samples */
                    byte_delta = FFMAX(byte_delta, -size);
                    size += byte_delta;
                    buf  -= byte_delta;
                    av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
                           -byte_delta / (isize * dec->channels));
                    if (!size)
                        return;
                    ist->is_start = 0;
                } else {
                    /* output is behind: prepend silence */
                    static uint8_t *input_tmp = NULL;
                    /* NOTE(review): av_realloc result is not checked (NULL deref
                       on OOM, old buffer leaked), and it runs even when the
                       goto below re-enters need_realloc — confirm intent */
                    input_tmp = av_realloc(input_tmp, byte_delta + size);

                    if (byte_delta > allocated_for_size - size) {
                        allocated_for_size = byte_delta + (int64_t)size;
                        goto need_realloc;
                    }
                    ist->is_start = 0;

                    generate_silence(input_tmp, dec->sample_fmt, byte_delta);
                    memcpy(input_tmp + byte_delta, buf, size);
                    buf = input_tmp;
                    size += byte_delta;
                    av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
                }
            } else if (audio_sync_method > 1) {
                /* gentle correction: let the resampler squeeze/stretch */
                int comp = av_clip(delta, -audio_sync_method, audio_sync_method);
                av_assert0(ost->audio_resample);
                av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n",
                       delta, comp, enc->sample_rate);
//                fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
                av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
            }
        }
    } else
        /* no sync: derive sync_opts directly from the input timestamp */
        ost->sync_opts = lrintf(get_sync_ipts(ost) * enc->sample_rate) -
                         av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong

    if (ost->audio_resample) {
        buftmp = audio_buf;
        size_out = audio_resample(ost->resample,
                                  (short *)buftmp, (short *)buf,
                                  size / (dec->channels * isize));
        size_out = size_out * enc->channels * osize;
    } else {
        buftmp = buf;
        size_out = size;
    }

    if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt) {
        /* format-only conversion of the (possibly untouched) samples */
        const void *ibuf[6] = { buftmp };
        void *obuf[6]  = { audio_buf };
        int istride[6] = { isize };
        int ostride[6] = { osize };
        int len = size_out / istride[0];
        if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len) < 0) {
            printf("av_audio_convert() failed\n");
            if (exit_on_error)
                exit_program(1);
            return;
        }
        buftmp = audio_buf;
        size_out = len * osize;
    }

    /* now encode as many frames as possible */
    if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
        /* output resampled raw samples */
        if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
            av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n");
            exit_program(1);
        }
        av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);

        frame_bytes = enc->frame_size * osize * enc->channels;

        /* drain the fifo in exact encoder-frame-sized chunks */
        while (av_fifo_size(ost->fifo) >= frame_bytes) {
            av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
            encode_audio_frame(s, ost, audio_buf, frame_bytes);
        }
    } else {
        encode_audio_frame(s, ost, buftmp, size_out);
    }
}

/**
 * Optionally deinterlace a decoded picture in place (when do_deinterlace is
 * set). On success *bufp holds a newly allocated buffer the caller must free;
 * on failure or when deinterlacing is off, *bufp is NULL and the picture is
 * left untouched.
 */
static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
{
    AVCodecContext *dec;
    AVPicture *picture2;
    AVPicture picture_tmp;
    uint8_t *buf = 0;

    dec = ist->st->codec;

    /* deinterlace : must be done before any resize */
    if (do_deinterlace) {
        int
size;

        /* create temporary picture */
        size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
        buf  = av_malloc(size);
        if (!buf)
            return;

        picture2 = &picture_tmp;
        avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);

        if (avpicture_deinterlace(picture2, picture,
                                  dec->pix_fmt, dec->width, dec->height) < 0) {
            /* if error, do not deinterlace */
            av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
            av_free(buf);
            buf = NULL;
            picture2 = picture;
        }
    } else {
        picture2 = picture;
    }

    /* copy the deinterlaced result back over the caller's picture */
    if (picture != picture2)
        *picture = *picture2;
    *bufp = buf;
}

/**
 * Encode one AVSubtitle and mux the resulting packet(s).
 * DVB subtitles are emitted twice: one packet to draw, one to clear.
 * pts is in the input stream's time base; AV_NOPTS_VALUE is an error.
 */
static void do_subtitle_out(AVFormatContext *s,
                            OutputStream *ost,
                            InputStream *ist,
                            AVSubtitle *sub,
                            int64_t pts)
{
    static uint8_t *subtitle_out = NULL;  /* lazily allocated, reused across calls */
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;

    if (pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->st->codec;

    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    for (i = 0; i < nb; i++) {
        sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
        // start_display_time is required to be 0
        sub->pts              += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.stream_index = ost->index;
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += 90 * sub->start_display_time;   /* 90 kHz ticks */
            else
                pkt.pts += 90 * sub->end_display_time;
        }
        write_frame(s, &pkt, ost);
    }
}

/* scratch buffer shared by the video encoding paths */
static int bit_buffer_size = 1024 * 256;
static uint8_t *bit_buffer = NULL;

#if !CONFIG_AVFILTER
/**
 * Non-avfilter scaling path: if the input picture's size/format differs from
 * the output, (re)create a swscale context and scale into ost->pict_tmp.
 * *out_picture points either at in_picture (no scaling) or at ost->pict_tmp.
 */
static void do_video_resample(OutputStream *ost,
                              InputStream *ist,
                              AVFrame *in_picture,
                              AVFrame **out_picture)
{
    int resample_changed = 0;
    *out_picture = in_picture;

    resample_changed = ost->resample_width   != in_picture->width  ||
                       ost->resample_height  != in_picture->height ||
                       ost->resample_pix_fmt != in_picture->format;

    if (resample_changed) {
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               ist->file_index, ist->st->index,
               ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
               in_picture->width, in_picture->height, av_get_pix_fmt_name(in_picture->format));
        if (!ost->video_resample)
            ost->video_resample = 1;
    }

    if (ost->video_resample) {
        *out_picture = &ost->pict_tmp;
        if (resample_changed) {
            /* initialize a new scaler context */
            sws_freeContext(ost->img_resample_ctx);
            ost->img_resample_ctx = sws_getContext(
                ist->st->codec->width,
                ist->st->codec->height,
                ist->st->codec->pix_fmt,
                ost->st->codec->width,
                ost->st->codec->height,
                ost->st->codec->pix_fmt,
                ost->sws_flags, NULL, NULL, NULL);
            if (ost->img_resample_ctx == NULL) {
                av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n");
                exit_program(1);
            }
        }
        sws_scale(ost->img_resample_ctx, in_picture->data, in_picture->linesize,
                  0, ost->resample_height, (*out_picture)->data, (*out_picture)->linesize);
    }
    /* remember the geometry we just adapted to */
    if (resample_changed) {
        ost->resample_width   = in_picture->width;
        ost->resample_height  = in_picture->height;
        ost->resample_pix_fmt = in_picture->format;
    }
}
#endif


/**
 * Encode one decoded video frame, applying frame-rate conversion
 * (drop/duplicate according to video_sync_method) and forced keyframes.
 * *frame_size receives the encoded size of the last packet (0 if none).
 */
static void do_video_out(AVFormatContext *s,
                         OutputStream *ost,
                         InputStream *ist,
                         AVFrame *in_picture,
                         int *frame_size, float quality)
{
    int nb_frames, i, ret, format_video_sync;
    AVFrame *final_picture;
    AVCodecContext *enc;
    double sync_ipts;

    enc = ost->st->codec;

    /* input pts expressed in encoder time-base units */
    sync_ipts = get_sync_ipts(ost) / av_q2d(enc->time_base);

    /* by default, we output a single frame */
    nb_frames = 1;

    *frame_size = 0;

    format_video_sync = video_sync_method;
    if (format_video_sync == VSYNC_AUTO)
        format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
                            (s->oformat->flags & AVFMT_VARIABLE_FPS) ?
VSYNC_VFR : VSYNC_CFR;

    if (format_video_sync != VSYNC_PASSTHROUGH) {
        /* distance between where this frame should land and the encoder clock */
        double vdelta = sync_ipts - ost->sync_opts;
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (vdelta < -1.1)
            nb_frames = 0;          /* frame is late: drop */
        else if (format_video_sync == VSYNC_VFR) {
            if (vdelta <= -0.6) {
                nb_frames = 0;
            } else if (vdelta > 0.6)
                ost->sync_opts = lrintf(sync_ipts);
        } else if (vdelta > 1.1)
            nb_frames = lrintf(vdelta);   /* CFR: duplicate to fill the gap */
//fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames);
        if (nb_frames == 0) {
            ++nb_frames_drop;
            av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
        } else if (nb_frames > 1) {
            nb_frames_dup += nb_frames - 1;
            av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
        }
    } else
        ost->sync_opts = lrintf(sync_ipts);

    /* respect -frames limit before anything enters the encoder */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    if (nb_frames <= 0)
        return;

#if !CONFIG_AVFILTER
    do_video_resample(ost, ist, in_picture, &final_picture);
#else
    final_picture = in_picture;
#endif

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.stream_index = ost->index;

        if (s->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               method. */
            enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
            enc->coded_frame->top_field_first  = in_picture->top_field_first;
            pkt.data   = (uint8_t *)final_picture;
            pkt.size   = sizeof(AVPicture);
            pkt.pts    = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;

            write_frame(s, &pkt, ost);
        } else {
            AVFrame big_picture;

            big_picture = *final_picture;
            /* better than nothing: use input picture interlaced
               settings */
            big_picture.interlaced_frame = in_picture->interlaced_frame;
            if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
                if (ost->top_field_first == -1)
                    big_picture.top_field_first = in_picture->top_field_first;
                else
                    big_picture.top_field_first = !!ost->top_field_first;
            }

            /* handles same_quant here. This is not correct because it may
               not be a global option */
            big_picture.quality = quality;
            if (!enc->me_threshold)
                big_picture.pict_type = 0;  /* let the encoder pick the type */
//            big_picture.pts = AV_NOPTS_VALUE;
            big_picture.pts = ost->sync_opts;
//            big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
//            av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
            /* honour -force_key_frames timestamps */
            if (ost->forced_kf_index < ost->forced_kf_count &&
                big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                big_picture.pict_type = AV_PICTURE_TYPE_I;
                ost->forced_kf_index++;
            }
            ret = avcodec_encode_video(enc,
                                       bit_buffer, bit_buffer_size,
                                       &big_picture);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
                exit_program(1);
            }

            if (ret > 0) {
                pkt.data = bit_buffer;
                pkt.size = ret;
                if (enc->coded_frame->pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
/*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n",
   pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
   pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/

                if (enc->coded_frame->key_frame)
                    pkt.flags |= AV_PKT_FLAG_KEY;
                write_frame(s, &pkt, ost);
                *frame_size = ret;
                video_size += ret;
                // fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
                //         enc->frame_number-1, ret, enc->pict_type);
                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
            }
        }
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;
    }
}

/* mean-squared-error ratio -> PSNR in dB */
static double psnr(double d)
{
    return -10.0 * log(d) / log(10.0);
}

/**
 * Append one line of per-frame statistics (quality, PSNR, sizes, bitrates)
 * for a video stream to the -vstats file, opening it on first use.
 */
static void do_video_stats(AVFormatContext *os, OutputStream *ost,
                           int frame_size)
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->st->codec;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->frame_number;
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
        if (enc->flags&CODEC_FLAG_PSNR)
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = ost->sync_opts *
av_q2d(enc->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01;   /* avoid division by ~0 on the first frames */

        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)video_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
    }
}

/**
 * Print the periodic progress line (frame/fps/q/size/bitrate, and a summary
 * on the last call). Throttled to one update per 0.5 s via static last_time.
 */
static void print_report(OutputFile *output_files,
                         OutputStream *ost_table, int nb_ostreams,
                         int is_last_report, int64_t timer_start)
{
    char buf[1024];
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate, ti1, pts;
    static int64_t last_time = -1;
    static int qp_histogram[52];

    if (!print_stats && !is_last_report)
        return;

    if (!is_last_report) {
        int64_t cur_time;
        /* display the report every 0.5 seconds */
        cur_time = av_gettime();
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }


    oc = output_files[0].ctx;

    total_size = avio_size(oc->pb);
    if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    buf[0] = '\0';
    ti1 = 1e10;
    vid = 0;
    for (i = 0; i < nb_ostreams; i++) {
        float q = -1;
        ost = &ost_table[i];
        enc = ost->st->codec;
        if (!ost->stream_copy && enc->coded_frame)
            q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
        /* only the first video stream gets the full frame/fps report */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float t = (av_gettime() - timer_start) / 1000000.0;

            frame_number = ost->frame_number;
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
                     frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* log2-compressed histogram, one hex digit per QP bucket */
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
            }
            if (enc->flags&CODEC_FLAG_PSNR) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = enc->coded_frame->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4;   /* chroma planes are quarter size */
                    error_sum += error;
                    scale_sum += scale;
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
                }
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
            }
            vid = 1;
        }
        /* compute min output value */
        pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
        if ((pts < ti1) && (pts > 0))
            ti1 = pts;
    }
    if (ti1 < 0.01)
        ti1 = 0.01;

    bitrate = (double)(total_size * 8) / ti1 / 1000.0;

    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
             (double)total_size / 1024, ti1, bitrate);

    if (nb_frames_dup || nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                 nb_frames_dup, nb_frames_drop);

    av_log(NULL, AV_LOG_INFO, "%s    \r", buf);

    fflush(stderr);

    if (is_last_report) {
        int64_t raw= audio_size + video_size + extra_size;
        av_log(NULL, AV_LOG_INFO, "\n");
        av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
               video_size / 1024.0,
               audio_size / 1024.0,
               extra_size / 1024.0,
               100.0 * (total_size - raw) / raw
        );
    }
}

/**
 * Drain every encoder at end of stream: flush remaining fifo audio (padding
 * the last frame with silence when the codec needs full frames) and pull
 * delayed video packets until each encoder signals it is done.
 */
static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
{
    int i, ret;

    for (i = 0; i < nb_ostreams; i++) {
        OutputStream   *ost = &ost_table[i];
        AVCodecContext *enc = ost->st->codec;
        AVFormatContext *os = output_files[ost->file_index].ctx;
        int stop_encoding = 0;

        if (!ost->encoding_needed)
            continue;

        /* nothing buffered for sample-at-a-time audio or raw video */
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
            continue;

        for (;;) {
            AVPacket pkt;
            int fifo_bytes;
            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;

            switch (ost->st->codec->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                fifo_bytes = av_fifo_size(ost->fifo);
                if (fifo_bytes > 0) {
                    /* encode any samples remaining in fifo */
                    int frame_bytes = fifo_bytes;

                    av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);

                    /* pad last frame with silence if needed */
                    if (!(enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)) {
                        frame_bytes = enc->frame_size * enc->channels *
                                      av_get_bytes_per_sample(enc->sample_fmt);
                        if (allocated_audio_buf_size < frame_bytes)
                            exit_program(1);
                        generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
                    }
                    encode_audio_frame(os, ost, audio_buf, frame_bytes);
                } else {
                    /* flush encoder with NULL frames until it is done
                       returning packets */
                    if (encode_audio_frame(os, ost, NULL, 0) == 0) {
                        stop_encoding = 1;
                        break;
                    }
                }
                break;
            case AVMEDIA_TYPE_VIDEO:
                ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
                    exit_program(1);
                }
                video_size += ret;
                if (enc->coded_frame && enc->coded_frame->key_frame)
                    pkt.flags |= AV_PKT_FLAG_KEY;
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
                if (ret <= 0) {
                    /* encoder has no more delayed frames */
                    stop_encoding = 1;
                    break;
                }
                pkt.stream_index = ost->index;
                pkt.data = bit_buffer;
                pkt.size = ret;
                if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
                write_frame(os, &pkt, ost);
                break;
            default:
                stop_encoding = 1;
            }
            if (stop_encoding)
                break;
        }
    }
}

/*
 * Check whether a packet from ist should be written into ost at this time
 */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
{
    OutputFile *of = &output_files[ost->file_index];
    int ist_index  = ist - input_streams;

    if (ost->source_index != ist_index)
        return 0;

    /* skip packets before -ss */
    if (of->start_time && ist->pts < of->start_time)
        return 0;

    /* past -t: mark the stream finished */
    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
                      (AVRational){ 1, 1000000 }) >= 0) {
        ost->is_past_recording_time = 1;
        return 0;
    }

    return 1;
}

/**
 * Copy one input packet to an output stream without re-encoding, rescaling
 * its timestamps into the output time base and shifting by -ss.
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = &output_files[ost->file_index];
    int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
    AVPacket opkt;

    av_init_packet(&opkt);
    /* don't start a copied stream on a non-keyframe unless asked to */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
        return;

    /* force the input stream PTS */
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        audio_size += pkt->size;
    else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        video_size += pkt->size;
        ost->sync_opts++;
    }

    opkt.stream_index = ost->index;
    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    /* fall back to the tracked input pts when the packet has no dts */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags    = pkt->flags;

    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codec->codec_id != CODEC_ID_H264
       && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
       && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
       ) {
        if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
            opkt.destruct = av_destruct_packet;
    } else {
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }

    write_frame(of->ctx, &opkt, ost);
    ost->st->codec->frame_number++;
    av_free_packet(&opkt);
}

/* -re: sleep so input is consumed no faster than real time */
static void rate_emu_sleep(InputStream *ist)
{
    if (input_files[ist->file_index].rate_emu) {
        int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
        int64_t now = av_gettime() - ist->start;
        if (pts > now)
            usleep(pts - now);
    }
}

/**
 * Decode one audio packet, maintain ist->next_pts, apply -vol volume scaling
 * in place, and feed the decoded frame to every matching output stream.
 * Returns the number of bytes consumed from pkt, or a negative error code.
 */
static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->st->codec;
    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
    int i, ret;

    /* reuse one AVFrame per input stream */
    if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
        return AVERROR(ENOMEM);
    else
        avcodec_get_frame_defaults(ist->decoded_frame);
    decoded_frame = ist->decoded_frame;

    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    if (ret < 0) {
        return ret;
    }

    if (!*got_output) {
        /* no audio frame */
        return ret;
    }

    /* if the decoder provides a pts, use it instead of the last packet pts.
       the decoder could be delaying output by a packet or more. */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        ist->next_pts = decoded_frame->pts;

    /* increment next_pts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;

    // preprocess audio (volume)
    if (audio_volume != 256) {
        int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
        void *samples = decoded_frame->data[0];
        /* audio_volume is a fixed-point factor with 256 == 1.0 */
        switch (avctx->sample_fmt) {
        case AV_SAMPLE_FMT_U8:
        {
            uint8_t *volp = samples;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
                *volp++ = av_clip_uint8(v);
            }
            break;
        }
        case AV_SAMPLE_FMT_S16:
        {
            int16_t *volp = samples;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                int v = ((*volp) * audio_volume + 128) >> 8;
                *volp++ = av_clip_int16(v);
            }
            break;
        }
        case AV_SAMPLE_FMT_S32:
        {
            int32_t *volp = samples;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
                *volp++ = av_clipl_int32(v);
            }
            break;
        }
        case AV_SAMPLE_FMT_FLT:
        {
            float *volp = samples;
            float scale = audio_volume / 256.f;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                *volp++ *= scale;
            }
            break;
        }
        case AV_SAMPLE_FMT_DBL:
        {
            double *volp = samples;
            double scale = audio_volume / 256.;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                *volp++ *= scale;
            }
            break;
        }
        default:
            av_log(NULL, AV_LOG_FATAL,
                   "Audio volume adjustment on sample format %s is not supported.\n",
                   av_get_sample_fmt_name(ist->st->codec->sample_fmt));
            exit_program(1);
        }
    }

    rate_emu_sleep(ist);

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = &output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
            continue;
        do_audio_out(output_files[ost->file_index].ctx, ost, ist, decoded_frame);
    }

    return ret;
}

/**
 * Decode one video packet, update ist->pts/next_pts, optionally deinterlace,
 * push the frame through each output's filter graph (when CONFIG_AVFILTER)
 * and encode every filtered frame via do_video_out().
 * Returns bytes consumed from pkt, or a negative error code.
 */
static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
{
    AVFrame *decoded_frame, *filtered_frame = NULL;
    void *buffer_to_free = NULL;
    int i, ret = 0;
    float quality;
#if CONFIG_AVFILTER
    int frame_available = 1;
#endif

    if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
        return AVERROR(ENOMEM);
    else
        avcodec_get_frame_defaults(ist->decoded_frame);
    decoded_frame = ist->decoded_frame;
    pkt->pts  = *pkt_pts;
    pkt->dts  = ist->pts;
    *pkt_pts  = AV_NOPTS_VALUE;

    ret = avcodec_decode_video2(ist->st->codec,
                                decoded_frame, got_output, pkt);
    if (ret < 0)
        return ret;

    quality = same_quant ? decoded_frame->quality : 0;
    if (!*got_output) {
        /* no picture yet */
        return ret;
    }
    ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
                                                 decoded_frame->pkt_dts);
    /* advance next_pts by the frame duration, preferring the packet's own */
    if (pkt->duration)
        ist->next_pts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
    else if (ist->st->codec->time_base.num != 0) {
        int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
                                      ist->st->codec->ticks_per_frame;
        ist->next_pts += ((int64_t)AV_TIME_BASE *
                          ist->st->codec->time_base.num * ticks) /
                          ist->st->codec->time_base.den;
    }
    pkt->size = 0;
    pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);

    rate_emu_sleep(ist);

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = &output_streams[i];
        int frame_size, resample_changed;

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
            continue;

#if CONFIG_AVFILTER
        /* rebuild the filter graph on mid-stream geometry/format changes */
        resample_changed = ost->resample_width   != decoded_frame->width  ||
                           ost->resample_height  != decoded_frame->height ||
                           ost->resample_pix_fmt != decoded_frame->format;
        if (resample_changed) {
            av_log(NULL, AV_LOG_INFO,
                   "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
                   ist->file_index, ist->st->index,
                   ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
                   decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));

            avfilter_graph_free(&ost->graph);
            if (configure_video_filters(ist, ost)) {
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                exit_program(1);
            }

            ost->resample_width   = decoded_frame->width;
            ost->resample_height  = decoded_frame->height;
            ost->resample_pix_fmt = decoded_frame->format;
        }

        if (ist->st->sample_aspect_ratio.num)
            decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
        if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
            /* zero-copy: wrap the decoder's buffer for the filter graph */
            FrameBuffer      *buf = decoded_frame->opaque;
            AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
                                        decoded_frame->data, decoded_frame->linesize,
                                        AV_PERM_READ | AV_PERM_PRESERVE,
                                        ist->st->codec->width, ist->st->codec->height,
                                        ist->st->codec->pix_fmt);

            avfilter_copy_frame_props(fb, decoded_frame);
            fb->pts                 = ist->pts;
            fb->buf->priv           = buf;
            fb->buf->free           = filter_release_buffer;

            buf->refcount++;
            av_buffersrc_buffer(ost->input_video_filter, fb);
        } else
            av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame,
                                     ist->pts, decoded_frame->sample_aspect_ratio);

        if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
            av_free(buffer_to_free);
            return AVERROR(ENOMEM);
        } else
            avcodec_get_frame_defaults(ist->filtered_frame);
        filtered_frame = ist->filtered_frame;

        /* drain every frame the graph has ready */
        frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
        while (frame_available) {
            AVRational ist_pts_tb;
            if (ost->output_video_filter)
                get_filtered_video_frame(ost->output_video_filter, filtered_frame, &ost->picref, &ist_pts_tb);
            if (ost->picref)
                ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
            if (ost->picref->video && !ost->frame_aspect_ratio)
                ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
#else
            filtered_frame = decoded_frame;
#endif

            do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size,
                         same_quant ? quality : ost->st->codec->global_quality);
            if (vstats_filename && frame_size)
                do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
#if CONFIG_AVFILTER
            frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
            if (ost->picref)
                avfilter_unref_buffer(ost->picref);
        }
#endif
    }

    av_free(buffer_to_free);
    return ret;
}

/**
 * Decode one subtitle packet and send it to every matching output stream.
 * Returns bytes consumed from pkt, or a negative error code.
 */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVSubtitle subtitle;
    int i, ret = avcodec_decode_subtitle2(ist->st->codec,
                                          &subtitle, got_output, pkt);
    if (ret < 0)
        return ret;
    if (!*got_output)
        return ret;

    rate_emu_sleep(ist);

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = &output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
            continue;

        do_subtitle_out(output_files[ost->file_index].ctx, ost, ist, &subtitle, pkt->pts);
    }

    avsubtitle_free(&subtitle);
    return ret;
}

/* pkt = NULL means EOF (needed to flush decoder buffers) */
static int output_packet(InputStream *ist,
                         OutputStream *ost_table, int nb_ostreams,
                         const AVPacket *pkt)
{
    int i;
    int got_output;
    int64_t pkt_pts = AV_NOPTS_VALUE;
    AVPacket avpkt;

    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (pkt == NULL) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
        goto handle_eof;
    } else {
        avpkt = *pkt;
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);

    // while we have more to decode or while the decoder did output something on EOF
/* pkt = NULL means EOF (needed to flush decoder buffers) */
static int output_packet(InputStream *ist,
                         OutputStream *ost_table, int nb_ostreams,
                         const AVPacket *pkt)
{
    int i;
    int got_output;
    int64_t pkt_pts = AV_NOPTS_VALUE;
    AVPacket avpkt;

    /* first packet on this stream: seed next_pts from the stream start pts */
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (pkt == NULL) {
        /* EOF handling: enter the decode loop once with an empty packet so
         * the decoders can flush buffered frames; the jump skips the loop
         * condition, which would otherwise read got_output uninitialized */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
        goto handle_eof;
    } else {
        avpkt = *pkt;
    }

    /* packet timestamps drive the input clock, rescaled to AV_TIME_BASE */
    if (pkt->dts != AV_NOPTS_VALUE)
        ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);

    // while we have more to decode or while the decoder did output something on EOF
    /* NOTE: the short-circuit order matters — got_output is only read after
     * at least one decode iteration has run (the !pkt case comes through
     * handle_eof above), so it is never used uninitialized */
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
        int ret = 0;
    handle_eof:

        ist->pts = ist->next_pts;

        /* a decoder consuming less than the whole packet means several
         * frames share one packet; warn once per stream */
        if (avpkt.size && avpkt.size != pkt->size) {
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
            ist->showed_multi_packet_warning = 1;
        }

        switch (ist->st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = transcode_audio    (ist, &avpkt, &got_output);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = transcode_video    (ist, &avpkt, &got_output, &pkt_pts);
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = transcode_subtitles(ist, &avpkt, &got_output);
            break;
        default:
            return -1;
        }

        if (ret < 0)
            return ret;
        // touch data and size only if not EOF
        if (pkt) {
            /* advance past the bytes the decoder consumed */
            avpkt.data += ret;
            avpkt.size -= ret;
        }
        if (!got_output) {
            continue;
        }
    }

    /* handle stream copy: no decoding happened, so advance the input clock
     * from the declared frame durations instead of decoded frames */
    if (!ist->decoding_needed) {
        rate_emu_sleep(ist);
        ist->pts = ist->next_pts;
        switch (ist->st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
                             ist->st->codec->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->st->codec->time_base.num != 0) {
                /* prefer the parser's repeat count when available */
                int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
                                              ist->st->codec->ticks_per_frame;
                ist->next_pts += ((int64_t)AV_TIME_BASE *
                                  ist->st->codec->time_base.num * ticks) /
                                  ist->st->codec->time_base.den;
            }
            break;
        }
    }
    /* forward the original packet to every stream-copy output (pkt is NULL
     * at EOF, in which case there is nothing to copy) */
    for (i = 0; pkt && i < nb_ostreams; i++) {
        OutputStream *ost = &ost_table[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return 0;
}
ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame; 2150 ist->next_pts += ((int64_t)AV_TIME_BASE * 2151 ist->st->codec->time_base.num * ticks) / 2152 ist->st->codec->time_base.den; 2153 } 2154 break; 2155 } 2156 } 2157 for (i = 0; pkt && i < nb_ostreams; i++) { 2158 OutputStream *ost = &ost_table[i]; 2159 2160 if (!check_output_constraints(ist, ost) || ost->encoding_needed) 2161 continue; 2162 2163 do_streamcopy(ist, ost, pkt); 2164 } 2165 2166 return 0; 2167} 2168 2169static void print_sdp(OutputFile *output_files, int n) 2170{ 2171 char sdp[2048]; 2172 int i; 2173 AVFormatContext **avc = av_malloc(sizeof(*avc) * n); 2174 2175 if (!avc) 2176 exit_program(1); 2177 for (i = 0; i < n; i++) 2178 avc[i] = output_files[i].ctx; 2179 2180 av_sdp_create(avc, n, sdp, sizeof(sdp)); 2181 printf("SDP:\n%s\n", sdp); 2182 fflush(stdout); 2183 av_freep(&avc); 2184} 2185 2186static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams, 2187 char *error, int error_len) 2188{ 2189 int i; 2190 InputStream *ist = &input_streams[ist_index]; 2191 if (ist->decoding_needed) { 2192 AVCodec *codec = ist->dec; 2193 if (!codec) { 2194 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d", 2195 ist->st->codec->codec_id, ist->file_index, ist->st->index); 2196 return AVERROR(EINVAL); 2197 } 2198 2199 /* update requested sample format for the decoder based on the 2200 corresponding encoder sample format */ 2201 for (i = 0; i < nb_output_streams; i++) { 2202 OutputStream *ost = &output_streams[i]; 2203 if (ost->source_index == ist_index) { 2204 update_sample_fmt(ist->st->codec, codec, ost->st->codec); 2205 break; 2206 } 2207 } 2208 2209 if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) { 2210 ist->st->codec->get_buffer = codec_get_buffer; 2211 ist->st->codec->release_buffer = codec_release_buffer; 2212 ist->st->codec->opaque = ist; 2213 } 2214 2215 if (!av_dict_get(ist->opts, "threads", 
NULL, 0)) 2216 av_dict_set(&ist->opts, "threads", "auto", 0); 2217 if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { 2218 snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d", 2219 ist->file_index, ist->st->index); 2220 return AVERROR(EINVAL); 2221 } 2222 assert_codec_experimental(ist->st->codec, 0); 2223 assert_avoptions(ist->opts); 2224 } 2225 2226 ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0; 2227 ist->next_pts = AV_NOPTS_VALUE; 2228 init_pts_correction(&ist->pts_ctx); 2229 ist->is_start = 1; 2230 2231 return 0; 2232} 2233 2234static void parse_forced_key_frames(char *kf, OutputStream *ost, 2235 AVCodecContext *avctx) 2236{ 2237 char *p; 2238 int n = 1, i; 2239 int64_t t; 2240 2241 for (p = kf; *p; p++) 2242 if (*p == ',') 2243 n++; 2244 ost->forced_kf_count = n; 2245 ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n); 2246 if (!ost->forced_kf_pts) { 2247 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n"); 2248 exit_program(1); 2249 } 2250 2251 p = kf; 2252 for (i = 0; i < n; i++) { 2253 char *next = strchr(p, ','); 2254 2255 if (next) 2256 *next++ = 0; 2257 2258 t = parse_time_or_die("force_key_frames", p, 1); 2259 ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base); 2260 2261 p = next; 2262 } 2263} 2264 2265static int transcode_init(OutputFile *output_files, 2266 int nb_output_files, 2267 InputFile *input_files, 2268 int nb_input_files) 2269{ 2270 int ret = 0, i, j, k; 2271 AVFormatContext *oc; 2272 AVCodecContext *codec, *icodec; 2273 OutputStream *ost; 2274 InputStream *ist; 2275 char error[1024]; 2276 int want_sdp = 1; 2277 2278 /* init framerate emulation */ 2279 for (i = 0; i < nb_input_files; i++) { 2280 InputFile *ifile = &input_files[i]; 2281 if (ifile->rate_emu) 2282 for (j = 0; j < ifile->nb_streams; j++) 2283 input_streams[j + ifile->ist_index].start = av_gettime(); 2284 } 
2285 2286 /* output stream init */ 2287 for (i = 0; i < nb_output_files; i++) { 2288 oc = output_files[i].ctx; 2289 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) { 2290 av_dump_format(oc, i, oc->filename, 1); 2291 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i); 2292 return AVERROR(EINVAL); 2293 } 2294 } 2295 2296 /* for each output stream, we compute the right encoding parameters */ 2297 for (i = 0; i < nb_output_streams; i++) { 2298 ost = &output_streams[i]; 2299 oc = output_files[ost->file_index].ctx; 2300 ist = &input_streams[ost->source_index]; 2301 2302 if (ost->attachment_filename) 2303 continue; 2304 2305 codec = ost->st->codec; 2306 icodec = ist->st->codec; 2307 2308 ost->st->disposition = ist->st->disposition; 2309 codec->bits_per_raw_sample = icodec->bits_per_raw_sample; 2310 codec->chroma_sample_location = icodec->chroma_sample_location; 2311 2312 if (ost->stream_copy) { 2313 uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE; 2314 2315 if (extra_size > INT_MAX) { 2316 return AVERROR(EINVAL); 2317 } 2318 2319 /* if stream_copy is selected, no need to decode or encode */ 2320 codec->codec_id = icodec->codec_id; 2321 codec->codec_type = icodec->codec_type; 2322 2323 if (!codec->codec_tag) { 2324 if (!oc->oformat->codec_tag || 2325 av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id || 2326 av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0) 2327 codec->codec_tag = icodec->codec_tag; 2328 } 2329 2330 codec->bit_rate = icodec->bit_rate; 2331 codec->rc_max_rate = icodec->rc_max_rate; 2332 codec->rc_buffer_size = icodec->rc_buffer_size; 2333 codec->field_order = icodec->field_order; 2334 codec->extradata = av_mallocz(extra_size); 2335 if (!codec->extradata) { 2336 return AVERROR(ENOMEM); 2337 } 2338 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); 2339 codec->extradata_size = icodec->extradata_size; 2340 if (!copy_tb) 
/**
 * Negotiate encoding/copy parameters for every output stream, open all
 * decoders and encoders, write all output file headers, and print the
 * stream mapping. Returns 0 on success or a negative AVERROR code.
 */
static int transcode_init(OutputFile *output_files,
                          int nb_output_files,
                          InputFile *input_files,
                          int nb_input_files)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    AVCodecContext *codec, *icodec;
    OutputStream *ost;
    InputStream *ist;
    char error[1024];
    int want_sdp = 1;

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = &input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index].start = av_gettime();
    }

    /* output stream init */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i].ctx;
        if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
            av_dump_format(oc, i, oc->filename, 1);
            av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
            return AVERROR(EINVAL);
        }
    }

    /* for each output stream, we compute the right encoding parameters */
    for (i = 0; i < nb_output_streams; i++) {
        ost = &output_streams[i];
        oc  = output_files[ost->file_index].ctx;
        ist = &input_streams[ost->source_index];

        /* attachments are written verbatim; nothing to negotiate */
        if (ost->attachment_filename)
            continue;

        codec  = ost->st->codec;
        icodec = ist->st->codec;

        ost->st->disposition          = ist->st->disposition;
        codec->bits_per_raw_sample    = icodec->bits_per_raw_sample;
        codec->chroma_sample_location = icodec->chroma_sample_location;

        if (ost->stream_copy) {
            /* pad the copied extradata as the decoder APIs require */
            uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;

            if (extra_size > INT_MAX) {
                return AVERROR(EINVAL);
            }

            /* if stream_copy is selected, no need to decode or encode */
            codec->codec_id   = icodec->codec_id;
            codec->codec_type = icodec->codec_type;

            if (!codec->codec_tag) {
                /* keep the input tag only if the output container either has
                   no tag table, maps this tag back to the same codec id, or
                   has no tag of its own for this codec */
                if (!oc->oformat->codec_tag ||
                     av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
                     av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
                    codec->codec_tag = icodec->codec_tag;
            }

            codec->bit_rate       = icodec->bit_rate;
            codec->rc_max_rate    = icodec->rc_max_rate;
            codec->rc_buffer_size = icodec->rc_buffer_size;
            codec->field_order    = icodec->field_order;
            codec->extradata      = av_mallocz(extra_size);
            if (!codec->extradata) {
                return AVERROR(ENOMEM);
            }
            memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
            codec->extradata_size = icodec->extradata_size;
            if (!copy_tb) {
                /* derive the time base from the codec, folding in
                   ticks_per_frame (e.g. field-based codecs) */
                codec->time_base      = icodec->time_base;
                codec->time_base.num *= icodec->ticks_per_frame;
                av_reduce(&codec->time_base.num, &codec->time_base.den,
                          codec->time_base.num, codec->time_base.den, INT_MAX);
            } else
                codec->time_base = ist->st->time_base;

            switch (codec->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (audio_volume != 256) {
                    av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
                    exit_program(1);
                }
                codec->channel_layout     = icodec->channel_layout;
                codec->sample_rate        = icodec->sample_rate;
                codec->channels           = icodec->channels;
                codec->frame_size         = icodec->frame_size;
                codec->audio_service_type = icodec->audio_service_type;
                codec->block_align        = icodec->block_align;
                break;
            case AVMEDIA_TYPE_VIDEO:
                codec->pix_fmt      = icodec->pix_fmt;
                codec->width        = icodec->width;
                codec->height       = icodec->height;
                codec->has_b_frames = icodec->has_b_frames;
                if (!codec->sample_aspect_ratio.num) {
                    /* pick the SAR in order of preference: container-level,
                       codec-level, else unknown (0/1) */
                    codec->sample_aspect_ratio   =
                    ost->st->sample_aspect_ratio =
                        ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
                        ist->st->codec->sample_aspect_ratio.num ?
                        ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
                }
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                codec->width  = icodec->width;
                codec->height = icodec->height;
                break;
            case AVMEDIA_TYPE_DATA:
            case AVMEDIA_TYPE_ATTACHMENT:
                break;
            default:
                abort();
            }
        } else {
            if (!ost->enc)
                ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);

            ist->decoding_needed = 1;
            ost->encoding_needed = 1;

            switch (codec->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                ost->fifo = av_fifo_alloc(1024);
                if (!ost->fifo) {
                    return AVERROR(ENOMEM);
                }
                ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE, AV_SAMPLE_FMT_NONE);

                /* inherit unset parameters from the input, then clamp to
                   what the encoder supports */
                if (!codec->sample_rate)
                    codec->sample_rate = icodec->sample_rate;
                choose_sample_rate(ost->st, ost->enc);
                codec->time_base = (AVRational){ 1, codec->sample_rate };

                if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
                    codec->sample_fmt = icodec->sample_fmt;
                choose_sample_fmt(ost->st, ost->enc);

                if (!codec->channels)
                    codec->channels = icodec->channels;
                codec->channel_layout = icodec->channel_layout;
                /* drop a layout that contradicts the channel count */
                if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels)
                    codec->channel_layout = 0;

                ost->audio_resample       = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1;
                icodec->request_channels  = codec->channels;
                ost->resample_sample_fmt  = icodec->sample_fmt;
                ost->resample_sample_rate = icodec->sample_rate;
                ost->resample_channels    = icodec->channels;
                break;
            case AVMEDIA_TYPE_VIDEO:
                if (codec->pix_fmt == PIX_FMT_NONE)
                    codec->pix_fmt = icodec->pix_fmt;
                choose_pixel_fmt(ost->st, ost->enc);

                if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
                    av_log(NULL, AV_LOG_FATAL, "Video pixel format is unknown, stream cannot be encoded\n");
                    exit_program(1);
                }

                if (!codec->width || !codec->height) {
                    codec->width  = icodec->width;
                    codec->height = icodec->height;
                }

                ost->video_resample = codec->width   != icodec->width  ||
                                      codec->height  != icodec->height ||
                                      codec->pix_fmt != icodec->pix_fmt;
                if (ost->video_resample) {
#if !CONFIG_AVFILTER
                    /* without libavfilter, scale/convert through swscale
                       into a temporary picture */
                    avcodec_get_frame_defaults(&ost->pict_tmp);
                    if (avpicture_alloc((AVPicture *)&ost->pict_tmp, codec->pix_fmt,
                                        codec->width, codec->height)) {
                        av_log(NULL, AV_LOG_FATAL, "Cannot allocate temp picture, check pix fmt\n");
                        exit_program(1);
                    }
                    ost->img_resample_ctx = sws_getContext(
                        icodec->width,
                        icodec->height,
                        icodec->pix_fmt,
                        codec->width,
                        codec->height,
                        codec->pix_fmt,
                        ost->sws_flags, NULL, NULL, NULL);
                    if (ost->img_resample_ctx == NULL) {
                        av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n");
                        exit_program(1);
                    }
#endif
                    /* scaling invalidates the input's raw-sample depth */
                    codec->bits_per_raw_sample = 0;
                }

                ost->resample_height  = icodec->height;
                ost->resample_width   = icodec->width;
                ost->resample_pix_fmt = icodec->pix_fmt;

                if (!ost->frame_rate.num)
                    ost->frame_rate = ist->st->r_frame_rate.num ?
                        ist->st->r_frame_rate : (AVRational) { 25, 1 };
                /* snap to the nearest frame rate the encoder supports,
                   unless the user forced an exact rate */
                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                    ost->frame_rate = ost->enc->supported_framerates[idx];
                }
                codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};

#if CONFIG_AVFILTER
                if (configure_video_filters(ist, ost)) {
                    av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
                    exit(1);
                }
#endif
                if (ost->forced_keyframes)
                    parse_forced_key_frames(ost->forced_keyframes, ost,
                                            ost->st->codec);
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                break;
            default:
                abort();
                break;
            }
            /* two pass mode */
            if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
                char logfilename[1024];
                FILE *f;

                snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
                         pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
                         i);
                if (!strcmp(ost->enc->name, "libx264")) {
                    /* libx264 manages its own stats file via a private option */
                    av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
                } else {
                    if (codec->flags & CODEC_FLAG_PASS1) {
                        f = fopen(logfilename, "wb");
                        if (!f) {
                            av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
                                   logfilename, strerror(errno));
                            exit_program(1);
                        }
                        ost->logfile = f;
                    } else {
                        /* pass 2: feed the pass-1 stats back to the encoder */
                        char  *logbuffer;
                        size_t logbuffer_size;
                        if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
                            av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
                                   logfilename);
                            exit_program(1);
                        }
                        codec->stats_in = logbuffer;
                    }
                }
            }
        }
        if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* grow the shared encode buffer to fit the largest frame */
            int size = codec->width * codec->height;
            bit_buffer_size = FFMAX(bit_buffer_size, 6 * size + 200);
        }
    }

    if (!bit_buffer)
        bit_buffer = av_malloc(bit_buffer_size);
    if (!bit_buffer) {
        av_log(NULL, AV_LOG_ERROR, "Cannot allocate %d bytes output buffer\n",
               bit_buffer_size);
        return AVERROR(ENOMEM);
    }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = &output_streams[i];
        if (ost->encoding_needed) {
            AVCodec      *codec = ost->enc;
            AVCodecContext *dec = input_streams[ost->source_index].st->codec;
            if (!codec) {
                snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d:%d",
                         ost->st->codec->codec_id, ost->file_index, ost->index);
                ret = AVERROR(EINVAL);
                goto dump_format;
            }
            if (dec->subtitle_header) {
                /* text subtitle encoders need the decoder's header copied over */
                ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
                if (!ost->st->codec->subtitle_header) {
                    ret = AVERROR(ENOMEM);
                    goto dump_format;
                }
                memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
                ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
            }
            if (!av_dict_get(ost->opts, "threads", NULL, 0))
                av_dict_set(&ost->opts, "threads", "auto", 0);
            if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
                snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
                        ost->file_index, ost->index);
                ret = AVERROR(EINVAL);
                goto dump_format;
            }
            assert_codec_experimental(ost->st->codec, 1);
            assert_avoptions(ost->opts);
            if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
                av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                             "It takes bits/s as argument, not kbits/s\n");
            extra_size += ost->st->codec->extradata_size;

            if (ost->st->codec->me_threshold)
                input_streams[ost->source_index].st->codec->debug |= FF_DEBUG_MV;
        }
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, output_streams, nb_output_streams, error, sizeof(error))) < 0)
            goto dump_format;

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = &input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard  = AVDISCARD_ALL;

            /* keep the program if any of its streams is actually used */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]].discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* open files and write file headers */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i].ctx;
        oc->interrupt_callback = int_cb;
        if (avformat_write_header(oc, &output_files[i].opts) < 0) {
            snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
            ret = AVERROR(EINVAL);
            goto dump_format;
        }
        assert_avoptions(output_files[i].opts);
        /* an SDP is printed only when every output is RTP */
        if (strcmp(oc->oformat->name, "rtp")) {
            want_sdp = 0;
        }
    }

 dump_format:
    /* dump the file output parameters - cannot be done before in case
       of stream copy */
    for (i = 0; i < nb_output_files; i++) {
        av_dump_format(output_files[i].ctx, i, output_files[i].ctx->filename, 1);
    }

    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_output_streams; i++) {
        ost = &output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }
        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index].file_index,
               input_streams[ost->source_index].st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != &input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else
            av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index].dec ?
                   input_streams[ost->source_index].dec->name : "?",
                   ost->enc ? ost->enc->name : "?");
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    if (want_sdp) {
        print_sdp(output_files, nb_output_files);
    }

    return 0;
}
/*
 * The following code is the main loop of the file converter
 */
static int transcode(OutputFile *output_files,
                     int nb_output_files,
                     InputFile *input_files,
                     int nb_input_files)
{
    int ret, i;
    AVFormatContext *is, *os;
    OutputStream *ost;
    InputStream *ist;
    uint8_t *no_packet;       /* per-input-file flag: last read was EAGAIN */
    int no_packet_count = 0;
    int64_t timer_start;

    if (!(no_packet = av_mallocz(nb_input_files)))
        exit_program(1);

    ret = transcode_init(output_files, nb_output_files, input_files, nb_input_files);
    if (ret < 0)
        goto fail;

    av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
    term_init();

    timer_start = av_gettime();

    /* main demux/decode/encode loop, until SIGTERM/SIGINT or all inputs end */
    for (; received_sigterm == 0;) {
        int file_index, ist_index;
        AVPacket pkt;
        int64_t ipts_min;
        double opts_min;

        ipts_min = INT64_MAX;
        opts_min = 1e100;

        /* select the stream that we must read now by looking at the
           smallest output pts */
        file_index = -1;
        for (i = 0; i < nb_output_streams; i++) {
            OutputFile *of;
            int64_t ipts;
            double  opts;
            ost = &output_streams[i];
            of  = &output_files[ost->file_index];
            if (ost->source_index < 0)
                continue;
            os  = output_files[ost->file_index].ctx;
            ist = &input_streams[ost->source_index];
            if (ost->is_past_recording_time || no_packet[ist->file_index] ||
                (os->pb && avio_tell(os->pb) >= of->limit_filesize))
                continue;
            opts = ost->st->pts.val * av_q2d(ost->st->time_base);
            ipts = ist->pts;
            if (!input_files[ist->file_index].eof_reached) {
                /* input_sync selects by input pts, otherwise by output pts */
                if (ipts < ipts_min) {
                    ipts_min = ipts;
                    if (input_sync)
                        file_index = ist->file_index;
                }
                if (opts < opts_min) {
                    opts_min = opts;
                    if (!input_sync)
                        file_index = ist->file_index;
                }
            }
            /* frame limit reached: stop all streams of this output file */
            if (ost->frame_number >= ost->max_frames) {
                int j;
                for (j = 0; j < of->ctx->nb_streams; j++)
                    output_streams[of->ost_index + j].is_past_recording_time = 1;
                continue;
            }
        }
        /* if none, if is finished */
        if (file_index < 0) {
            if (no_packet_count) {
                /* all inputs returned EAGAIN: clear the flags and retry
                   after a short sleep */
                no_packet_count = 0;
                memset(no_packet, 0, nb_input_files);
                usleep(10000);
                continue;
            }
            break;
        }

        /* read a frame from it and output it in the fifo */
        is  = input_files[file_index].ctx;
        ret = av_read_frame(is, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            no_packet[file_index] = 1;
            no_packet_count++;
            continue;
        }
        if (ret < 0) {
            input_files[file_index].eof_reached = 1;
            if (opt_shortest)
                break;
            else
                continue;
        }

        no_packet_count = 0;
        memset(no_packet, 0, nb_input_files);

        if (do_pkt_dump) {
            av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
                             is->streams[pkt.stream_index]);
        }
        /* the following test is needed in case new streams appear
           dynamically in stream : we ignore them */
        if (pkt.stream_index >= input_files[file_index].nb_streams)
            goto discard_packet;
        ist_index = input_files[file_index].ist_index + pkt.stream_index;
        ist       = &input_streams[ist_index];
        if (ist->discard)
            goto discard_packet;

        /* apply the accumulated timestamp offset, then the user ts scale */
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts *= ist->ts_scale;
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts *= ist->ts_scale;

        //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
        //        ist->next_pts,
        //        pkt.dts, input_files[ist->file_index].ts_offset,
        //        ist->st->codec->codec_type);
        /* compensate timestamp discontinuities for formats that allow them
           (e.g. MPEG-TS wraparound) by adjusting the per-file ts_offset */
        if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
            int64_t delta   = pkt_dts - ist->next_pts;
            if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->pts) && !copy_ts) {
                input_files[ist->file_index].ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                       delta, input_files[ist->file_index].ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        }

        // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
        if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) {

            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
                   ist->file_index, ist->st->index);
            if (exit_on_error)
                exit_program(1);
            av_free_packet(&pkt);
            continue;
        }

    discard_packet:
        av_free_packet(&pkt);

        /* dump report by using the output first video and audio streams */
        print_report(output_files, output_streams, nb_output_streams, 0, timer_start);
    }

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = &input_streams[i];
        if (ist->decoding_needed) {
            output_packet(ist, output_streams, nb_output_streams, NULL);
        }
    }
    flush_encoders(output_streams, nb_output_streams);

    term_exit();

    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i].ctx;
        av_write_trailer(os);
    }

    /* dump report by using the first video and audio streams */
    print_report(output_files, output_streams, nb_output_streams, 1, timer_start);

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = &output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->st->codec->stats_in);
            avcodec_close(ost->st->codec);
        }
#if CONFIG_AVFILTER
        avfilter_graph_free(&ost->graph);
#endif
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = &input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->st->codec);
        }
    }

    /* finished ! */
    ret = 0;

 fail:
    av_freep(&bit_buffer);
    av_freep(&no_packet);

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = &output_streams[i];
            if (ost) {
                if (ost->stream_copy)
                    av_freep(&ost->st->codec->extradata);
                if (ost->logfile) {
                    fclose(ost->logfile);
                    ost->logfile = NULL;
                }
                av_fifo_free(ost->fifo); /* works even if fifo is not
                                            initialized but set to zero */
                av_freep(&ost->st->codec->subtitle_header);
                av_free(ost->pict_tmp.data[0]);
                av_free(ost->forced_kf_pts);
                if (ost->video_resample)
                    sws_freeContext(ost->img_resample_ctx);
                if (ost->resample)
                    audio_resample_close(ost->resample);
                if (ost->reformat_ctx)
                    av_audio_convert_free(ost->reformat_ctx);
                av_dict_free(&ost->opts);
            }
        }
    }
    return ret;
}
*/ 2865 ret = 0; 2866 2867 fail: 2868 av_freep(&bit_buffer); 2869 av_freep(&no_packet); 2870 2871 if (output_streams) { 2872 for (i = 0; i < nb_output_streams; i++) { 2873 ost = &output_streams[i]; 2874 if (ost) { 2875 if (ost->stream_copy) 2876 av_freep(&ost->st->codec->extradata); 2877 if (ost->logfile) { 2878 fclose(ost->logfile); 2879 ost->logfile = NULL; 2880 } 2881 av_fifo_free(ost->fifo); /* works even if fifo is not 2882 initialized but set to zero */ 2883 av_freep(&ost->st->codec->subtitle_header); 2884 av_free(ost->pict_tmp.data[0]); 2885 av_free(ost->forced_kf_pts); 2886 if (ost->video_resample) 2887 sws_freeContext(ost->img_resample_ctx); 2888 if (ost->resample) 2889 audio_resample_close(ost->resample); 2890 if (ost->reformat_ctx) 2891 av_audio_convert_free(ost->reformat_ctx); 2892 av_dict_free(&ost->opts); 2893 } 2894 } 2895 } 2896 return ret; 2897} 2898 2899static double parse_frame_aspect_ratio(const char *arg) 2900{ 2901 int x = 0, y = 0; 2902 double ar = 0; 2903 const char *p; 2904 char *end; 2905 2906 p = strchr(arg, ':'); 2907 if (p) { 2908 x = strtol(arg, &end, 10); 2909 if (end == p) 2910 y = strtol(end + 1, &end, 10); 2911 if (x > 0 && y > 0) 2912 ar = (double)x / (double)y; 2913 } else 2914 ar = strtod(arg, NULL); 2915 2916 if (!ar) { 2917 av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n"); 2918 exit_program(1); 2919 } 2920 return ar; 2921} 2922 2923static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg) 2924{ 2925 return parse_option(o, "codec:a", arg, options); 2926} 2927 2928static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg) 2929{ 2930 return parse_option(o, "codec:v", arg, options); 2931} 2932 2933static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg) 2934{ 2935 return parse_option(o, "codec:s", arg, options); 2936} 2937 2938static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg) 2939{ 2940 return parse_option(o, 
/**
 * Parse a -map argument of the form [-]file_idx[:stream_spec][,sync_spec]
 * and append the matching stream maps to the options context. A leading
 * '-' disables previously created maps instead of adding new ones.
 */
static int opt_map(OptionsContext *o, const char *opt, const char *arg)
{
    StreamMap *m = NULL;
    int i, negative = 0, file_idx;
    int sync_file_idx = -1, sync_stream_idx;
    char *p, *sync;
    char *map;

    /* leading '-' marks a negative (disabling) map */
    if (*arg == '-') {
        negative = 1;
        arg++;
    }
    map = av_strdup(arg);

    /* parse sync stream first, just pick first matching stream */
    if (sync = strchr(map, ',')) {  /* assignment intended */
        *sync = 0;
        sync_file_idx = strtol(sync + 1, &sync, 0);
        if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
            av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
            exit_program(1);
        }
        if (*sync)
            sync++;   /* skip the ':' before the stream specifier */
        for (i = 0; i < input_files[sync_file_idx].nb_streams; i++)
            if (check_stream_specifier(input_files[sync_file_idx].ctx,
                                       input_files[sync_file_idx].ctx->streams[i], sync) == 1) {
                sync_stream_idx = i;
                break;
            }
        if (i == input_files[sync_file_idx].nb_streams) {
            av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
                                       "match any streams.\n", arg);
            exit_program(1);
        }
    }


    file_idx = strtol(map, &p, 0);
    if (file_idx >= nb_input_files || file_idx < 0) {
        av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
        exit_program(1);
    }
    if (negative)
        /* disable some already defined maps */
        for (i = 0; i < o->nb_stream_maps; i++) {
            m = &o->stream_maps[i];
            if (file_idx == m->file_index &&
                check_stream_specifier(input_files[m->file_index].ctx,
                                       input_files[m->file_index].ctx->streams[m->stream_index],
                                       *p == ':' ? p + 1 : p) > 0)
                m->disabled = 1;
        }
    else
        /* add a map for every stream of the file matching the specifier */
        for (i = 0; i < input_files[file_idx].nb_streams; i++) {
            if (check_stream_specifier(input_files[file_idx].ctx, input_files[file_idx].ctx->streams[i],
                                       *p == ':' ? p + 1 : p) <= 0)
                continue;
            o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
                                        &o->nb_stream_maps, o->nb_stream_maps + 1);
            m = &o->stream_maps[o->nb_stream_maps - 1];

            m->file_index   = file_idx;
            m->stream_index = i;

            /* without an explicit sync spec, a stream syncs to itself */
            if (sync_file_idx >= 0) {
                m->sync_file_index   = sync_file_idx;
                m->sync_stream_index = sync_stream_idx;
            } else {
                m->sync_file_index   = file_idx;
                m->sync_stream_index = i;
            }
        }

    /* m is still NULL only if no stream matched (and no map was touched) */
    if (!m) {
        av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
        exit_program(1);
    }

    av_freep(&map);
    return 0;
}
p + 1 : p) <= 0) 3000 continue; 3001 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps), 3002 &o->nb_stream_maps, o->nb_stream_maps + 1); 3003 m = &o->stream_maps[o->nb_stream_maps - 1]; 3004 3005 m->file_index = file_idx; 3006 m->stream_index = i; 3007 3008 if (sync_file_idx >= 0) { 3009 m->sync_file_index = sync_file_idx; 3010 m->sync_stream_index = sync_stream_idx; 3011 } else { 3012 m->sync_file_index = file_idx; 3013 m->sync_stream_index = i; 3014 } 3015 } 3016 3017 if (!m) { 3018 av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg); 3019 exit_program(1); 3020 } 3021 3022 av_freep(&map); 3023 return 0; 3024} 3025 3026static int opt_attach(OptionsContext *o, const char *opt, const char *arg) 3027{ 3028 o->attachments = grow_array(o->attachments, sizeof(*o->attachments), 3029 &o->nb_attachments, o->nb_attachments + 1); 3030 o->attachments[o->nb_attachments - 1] = arg; 3031 return 0; 3032} 3033 3034/** 3035 * Parse a metadata specifier in arg. 3036 * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram) 3037 * @param index for type c/p, chapter/program index is written here 3038 * @param stream_spec for type s, the stream specifier is written here 3039 */ 3040static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec) 3041{ 3042 if (*arg) { 3043 *type = *arg; 3044 switch (*arg) { 3045 case 'g': 3046 break; 3047 case 's': 3048 if (*(++arg) && *arg != ':') { 3049 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg); 3050 exit_program(1); 3051 } 3052 *stream_spec = *arg == ':' ? 
arg + 1 : "";
            break;
        case 'c':
        case 'p':
            /* optional ':index' selects the chapter/program */
            if (*(++arg) == ':')
                *index = strtol(++arg, NULL, 0);
            break;
        default:
            av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
            exit_program(1);
        }
    } else
        /* empty specifier means global metadata */
        *type = 'g';
}

/*
 * Implement -map_metadata: copy a metadata dictionary from the input
 * context ic into the output context oc. outspec/inspec select the
 * global/stream/chapter/program dictionary on each side. Marks the
 * corresponding *_manual flags in o so defaults are not applied later.
 */
static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
{
    AVDictionary **meta_in = NULL;
    AVDictionary **meta_out;
    int i, ret = 0;
    char type_in, type_out;
    const char *istream_spec = NULL, *ostream_spec = NULL;
    int idx_in = 0, idx_out = 0;

    parse_meta_type(inspec,  &type_in,  &idx_in,  &istream_spec);
    parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);

    /* an explicit map on either side disables the automatic default copy */
    if (type_in == 'g' || type_out == 'g')
        o->metadata_global_manual = 1;
    if (type_in == 's' || type_out == 's')
        o->metadata_streams_manual = 1;
    if (type_in == 'c' || type_out == 'c')
        o->metadata_chapters_manual = 1;

/* abort with a fatal error when a chapter/program index is out of range */
#define METADATA_CHECK_INDEX(index, nb_elems, desc)\
    if ((index) < 0 || (index) >= (nb_elems)) {\
        av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
               (desc), (index));\
        exit_program(1);\
    }

/* point meta at the selected dictionary of context; 's' is resolved below */
#define SET_DICT(type, meta, context, index)\
    switch (type) {\
    case 'g':\
        meta = &context->metadata;\
        break;\
    case 'c':\
        METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
        meta = &context->chapters[index]->metadata;\
        break;\
    case 'p':\
        METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
        meta = &context->programs[index]->metadata;\
        break;\
    case 's':\
        break; /* handled separately below */ \
    }\

    SET_DICT(type_in, meta_in, ic, idx_in);
    SET_DICT(type_out, meta_out, oc, idx_out);

    /* for input streams choose first matching stream */
    if (type_in == 's') {
        for (i = 0; i < ic->nb_streams; i++) {
            if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
                meta_in = &ic->streams[i]->metadata;
                break;
            } else if (ret < 0)
                exit_program(1);
        }
        if (!meta_in) {
            av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
            exit_program(1);
        }
    }

    /* for output streams copy into every matching stream */
    if (type_out == 's') {
        for (i = 0; i < oc->nb_streams; i++) {
            if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
                meta_out = &oc->streams[i]->metadata;
                av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
            } else if (ret < 0)
                exit_program(1);
        }
    } else
        av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);

    return 0;
}

/*
 * Look up an encoder/decoder by name and verify its media type;
 * exits the program on failure (never returns NULL).
 */
static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
{
    const char *codec_string = encoder ? "encoder" : "decoder";
    AVCodec *codec;

    codec = encoder ?
        avcodec_find_encoder_by_name(name) :
        avcodec_find_decoder_by_name(name);
    if (!codec) {
        av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
        exit_program(1);
    }
    if (codec->type != type) {
        av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
        exit_program(1);
    }
    return codec;
}

/*
 * Pick the decoder for an input stream: the one forced with -c/-codec
 * for this stream if any, otherwise the default decoder for its codec id.
 */
static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
{
    char *codec_name = NULL;

    MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
    if (codec_name) {
        AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
        st->codec->codec_id = codec->id;
        return codec;
    } else
        return avcodec_find_decoder(st->codec->codec_id);
}

/**
 * Add all the streams from the given input file to the global
 * list of input streams.
3177 */
static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
{
    int i;

    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];
        AVCodecContext *dec = st->codec;
        InputStream *ist;

        input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
        ist = &input_streams[nb_input_streams - 1];
        ist->st = st;
        ist->file_index = nb_input_files;
        /* discarded until some output stream maps it */
        ist->discard = 1;
        ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);

        ist->ts_scale = 1.0;
        MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);

        ist->dec = choose_decoder(o, ic, st);

        switch (dec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (o->audio_disable)
                st->discard = AVDISCARD_ALL;
            break;
        case AVMEDIA_TYPE_VIDEO:
            /* -lowres decoding: halve dimensions per lowres level */
            if (dec->lowres) {
                dec->flags |= CODEC_FLAG_EMU_EDGE;
                dec->height >>= dec->lowres;
                dec->width >>= dec->lowres;
            }

            if (o->video_disable)
                st->discard = AVDISCARD_ALL;
            else if (video_discard)
                st->discard = video_discard;
            break;
        case AVMEDIA_TYPE_DATA:
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (o->subtitle_disable)
                st->discard = AVDISCARD_ALL;
            break;
        case AVMEDIA_TYPE_ATTACHMENT:
        case AVMEDIA_TYPE_UNKNOWN:
            break;
        default:
            abort();
        }
    }
}

/*
 * Refuse to clobber an existing output file unless -y was given;
 * prompts on the terminal when stdin is free, otherwise exits.
 * Only local paths are checked (no protocol prefix, a drive letter,
 * or an explicit "file:" prefix).
 */
static void assert_file_overwrite(const char *filename)
{
    if (!file_overwrite &&
        (strchr(filename, ':') == NULL || filename[1] == ':' ||
         av_strstart(filename, "file:", NULL))) {
        if (avio_check(filename, 0) == 0) {
            if (!using_stdin) {
                fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
                fflush(stderr);
                if (!read_yesno()) {
                    fprintf(stderr, "Not overwriting - exiting\n");
                    exit_program(1);
                }
            }
            else {
                fprintf(stderr,"File '%s' already exists. 
Exiting.\n", filename); 3247 exit_program(1); 3248 } 3249 } 3250 } 3251} 3252 3253static void dump_attachment(AVStream *st, const char *filename) 3254{ 3255 int ret; 3256 AVIOContext *out = NULL; 3257 AVDictionaryEntry *e; 3258 3259 if (!st->codec->extradata_size) { 3260 av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n", 3261 nb_input_files - 1, st->index); 3262 return; 3263 } 3264 if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0))) 3265 filename = e->value; 3266 if (!*filename) { 3267 av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag" 3268 "in stream #%d:%d.\n", nb_input_files - 1, st->index); 3269 exit_program(1); 3270 } 3271 3272 assert_file_overwrite(filename); 3273 3274 if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) { 3275 av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n", 3276 filename); 3277 exit_program(1); 3278 } 3279 3280 avio_write(out, st->codec->extradata, st->codec->extradata_size); 3281 avio_flush(out); 3282 avio_close(out); 3283} 3284 3285static int opt_input_file(OptionsContext *o, const char *opt, const char *filename) 3286{ 3287 AVFormatContext *ic; 3288 AVInputFormat *file_iformat = NULL; 3289 int err, i, ret; 3290 int64_t timestamp; 3291 uint8_t buf[128]; 3292 AVDictionary **opts; 3293 int orig_nb_streams; // number of streams before avformat_find_stream_info 3294 3295 if (o->format) { 3296 if (!(file_iformat = av_find_input_format(o->format))) { 3297 av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format); 3298 exit_program(1); 3299 } 3300 } 3301 3302 if (!strcmp(filename, "-")) 3303 filename = "pipe:"; 3304 3305 using_stdin |= !strncmp(filename, "pipe:", 5) || 3306 !strcmp(filename, "/dev/stdin"); 3307 3308 /* get default parameters from command line */ 3309 ic = avformat_alloc_context(); 3310 if (!ic) { 3311 print_error(filename, AVERROR(ENOMEM)); 3312 exit_program(1); 3313 } 3314 if (o->nb_audio_sample_rate) { 3315 
snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
        av_dict_set(&format_opts, "sample_rate", buf, 0);
    }
    if (o->nb_audio_channels) {
        snprintf(buf, sizeof(buf), "%d", o->audio_channels[o->nb_audio_channels - 1].u.i);
        av_dict_set(&format_opts, "channels", buf, 0);
    }
    if (o->nb_frame_rates) {
        av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
    }
    if (o->nb_frame_sizes) {
        av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
    }
    if (o->nb_frame_pix_fmts)
        av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);

    ic->flags |= AVFMT_FLAG_NONBLOCK;
    ic->interrupt_callback = int_cb;

    /* open the input file with generic libav function */
    err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
    if (err < 0) {
        print_error(filename, err);
        exit_program(1);
    }
    /* any option left in format_opts was not consumed -> fatal */
    assert_avoptions(format_opts);

    /* apply forced codec ids */
    for (i = 0; i < ic->nb_streams; i++)
        choose_decoder(o, ic, ic->streams[i]);

    /* Set AVCodecContext options for avformat_find_stream_info */
    opts = setup_find_stream_info_opts(ic, codec_opts);
    orig_nb_streams = ic->nb_streams;

    /* If not enough info to get the stream parameters, we decode the
       first frames to get it. 
(used in mpeg case for example) */
    ret = avformat_find_stream_info(ic, opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
        avformat_close_input(&ic);
        exit_program(1);
    }

    timestamp = o->start_time;
    /* add the stream start time */
    if (ic->start_time != AV_NOPTS_VALUE)
        timestamp += ic->start_time;

    /* if seeking requested, we execute it */
    if (o->start_time != 0) {
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            /* non-fatal: just warn and continue from the file start */
            av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
                   filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* update the current parameters so that they match the one of the input stream */
    add_input_streams(o, ic);

    /* dump the file content */
    av_dump_format(ic, nb_input_files, filename, 0);

    /* register the file in the global input_files array */
    input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
    input_files[nb_input_files - 1].ctx        = ic;
    input_files[nb_input_files - 1].ist_index  = nb_input_streams - ic->nb_streams;
    input_files[nb_input_files - 1].ts_offset  = o->input_ts_offset - (copy_ts ? 
0 : timestamp);
    input_files[nb_input_files - 1].nb_streams = ic->nb_streams;
    input_files[nb_input_files - 1].rate_emu   = o->rate_emu;

    /* dump requested attachments from matching streams */
    for (i = 0; i < o->nb_dump_attachment; i++) {
        int j;

        for (j = 0; j < ic->nb_streams; j++) {
            AVStream *st = ic->streams[j];

            if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
                dump_attachment(st, o->dump_attachment[i].u.str);
        }
    }

    /* free the per-stream option dicts created for find_stream_info */
    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

    reset_options(o);
    return 0;
}

/*
 * Read one '\n'- (or NUL-) terminated line from s into a freshly
 * allocated, NUL-terminated buffer. Caller owns the returned buffer.
 */
static uint8_t *get_line(AVIOContext *s)
{
    AVIOContext *line;
    uint8_t *buf;
    char c;

    if (avio_open_dyn_buf(&line) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
        exit_program(1);
    }

    while ((c = avio_r8(s)) && c != '\n')
        avio_w8(line, c);
    avio_w8(line, 0);
    avio_close_dyn_buf(line, &buf);

    return buf;
}

/*
 * Locate and open a preset file, searching $AVCONV_DATADIR, $HOME/.avconv
 * and the compiled-in AVCONV_DATADIR. Tries "<codec>-<preset>.avpreset"
 * first when codec_name is set, then "<preset>.avpreset".
 * Returns 0 on success with *s opened for reading, non-zero otherwise.
 */
static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
{
    int i, ret = 1;
    char filename[1000];
    const char *base[3] = { getenv("AVCONV_DATADIR"),
                            getenv("HOME"),
                            AVCONV_DATADIR,
    };

    for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
        if (!base[i])
            continue;
        if (codec_name) {
            snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
                     i != 1 ? "" : "/.avconv", codec_name, preset_name);
            ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
        }
        if (ret) {
            snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
                     i != 1 ? 
"" : "/.avconv", preset_name);
            ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
        }
    }
    return ret;
}

/*
 * Pick the encoder for an output stream: the format default when no
 * codec was forced, stream copy for "copy", otherwise the named encoder.
 */
static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
{
    char *codec_name = NULL;

    MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
    if (!codec_name) {
        ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
                                                  NULL, ost->st->codec->codec_type);
        ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
    } else if (!strcmp(codec_name, "copy"))
        ost->stream_copy = 1;
    else {
        ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
        ost->st->codec->codec_id = ost->enc->id;
    }
}

/*
 * Create a new output stream of the given media type in oc and register
 * it in the global output_streams array: chooses the encoder, applies
 * preset files, bitstream filters, codec tag and qscale options.
 * Exits the program on any failure.
 */
static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
{
    OutputStream *ost;
    AVStream *st = avformat_new_stream(oc, NULL);
    int idx      = oc->nb_streams - 1, ret = 0;
    char *bsf = NULL, *next, *codec_tag = NULL;
    AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
    double qscale = -1;

    if (!st) {
        av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
        exit_program(1);
    }

    /* apply a -streamid mapping for this output stream index, if any */
    if (oc->nb_streams - 1 < o->nb_streamid_map)
        st->id = o->streamid_map[oc->nb_streams - 1];

    output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
                                nb_output_streams + 1);
    ost = &output_streams[nb_output_streams - 1];
    ost->file_index = nb_output_files;
    ost->index      = idx;
    ost->st         = st;
    st->codec->codec_type = type;
    choose_encoder(o, oc, ost);
    if (ost->enc) {
        AVIOContext *s = NULL;
        char *buf = NULL, *arg = NULL, *preset = NULL;

        ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st);

        /* load "key=value" lines from a -pre preset file into ost->opts */
        MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
        if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
            do {
                buf = get_line(s);
                /* skip blank lines and '#' comments */
                if (!buf[0] || buf[0] == '#') {
                    av_free(buf);
                    continue;
                }
                if (!(arg = strchr(buf, '='))) {
                    av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
                    exit_program(1);
                }
                *arg++ = 0;
                av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
                av_free(buf);
            } while (!s->eof_reached);
            avio_close(s);
        }
        if (ret) {
            av_log(NULL, AV_LOG_FATAL,
                   "Preset %s specified for stream %d:%d, but could not be opened.\n",
                   preset, ost->file_index, ost->index);
            exit_program(1);
        }
    }

    avcodec_get_context_defaults3(st->codec, ost->enc);
    st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy

    ost->max_frames = INT64_MAX;
    MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);

    /* build the chain of bitstream filters from a comma-separated list */
    MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
    while (bsf) {
        if (next = strchr(bsf, ','))
            *next++ = 0;
        if (!(bsfc = av_bitstream_filter_init(bsf))) {
            av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
            exit_program(1);
        }
        if (bsfc_prev)
            bsfc_prev->next = bsfc;
        else
            ost->bitstream_filters = bsfc;

        bsfc_prev = bsfc;
        bsf       = next;
    }

    /* codec tag: numeric value, or the literal fourcc characters */
    MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
    if (codec_tag) {
        uint32_t tag = strtol(codec_tag, &next, 0);
        if (*next)
            tag = AV_RL32(codec_tag);
        st->codec->codec_tag = tag;
    }

    MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
    if (qscale >= 0 || same_quant) {
        st->codec->flags |= CODEC_FLAG_QSCALE;
        st->codec->global_quality = FF_QP2LAMBDA * qscale;
    }

    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
    return ost;
}

static void 
parse_matrix_coeffs(uint16_t *dest, const char *str)
{
    /* Parse exactly 64 comma-separated integer coefficients from str
     * into dest (an 8x8 quantization matrix); fatal error on short input. */
    int i;
    const char *p = str;
    for (i = 0;; i++) {
        dest[i] = atoi(p);
        if (i == 63)
            break;
        p = strchr(p, ',');
        if (!p) {
            av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
            exit_program(1);
        }
        p++;
    }
}

/*
 * Create a video output stream and apply all video-specific options
 * (frame rate/size/aspect/pixel format, quant matrices, rc overrides,
 * two-pass flags, forced keyframes, filters).
 */
static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
{
    AVStream *st;
    OutputStream *ost;
    AVCodecContext *video_enc;

    ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
    st  = ost->st;
    video_enc = st->codec;

    if (!ost->stream_copy) {
        const char *p = NULL;
        char *frame_rate = NULL, *frame_size = NULL;
        char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
        char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
        int i;

        MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
        if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
            av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
            exit_program(1);
        }

        MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
        if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
            av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
            exit_program(1);
        }

        MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
        if (frame_aspect_ratio)
            ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);

        MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
        if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
            av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
            exit_program(1);
        }
        st->sample_aspect_ratio = video_enc->sample_aspect_ratio;

        /* custom intra/inter quantization matrices (-intra_matrix/-inter_matrix) */
        MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
        if (intra_matrix) {
            if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
                av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
                exit_program(1);
            }
            parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
        }
        MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
        if (inter_matrix) {
            if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
                av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
                exit_program(1);
            }
            parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
        }

        /* -rc_override: "start,end,q" triplets separated by '/' */
        MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
        for (i = 0; p; i++) {
            int start, end, q;
            int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
            if (e != 3) {
                av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
                exit_program(1);
            }
            video_enc->rc_override =
                av_realloc(video_enc->rc_override,
                           sizeof(RcOverride) * (i + 1));
            video_enc->rc_override[i].start_frame = start;
            video_enc->rc_override[i].end_frame   = end;
            if (q > 0) {
                video_enc->rc_override[i].qscale         = q;
                video_enc->rc_override[i].quality_factor = 1.0;
            }
            else {
                /* negative q encodes a quality factor in percent */
                video_enc->rc_override[i].qscale         = 0;
                video_enc->rc_override[i].quality_factor = -q/100.0;
            }
            p = strchr(p, '/');
            if (p) p++;
        }
        video_enc->rc_override_count = i;
        if (!video_enc->rc_initial_buffer_occupancy)
            video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
        video_enc->intra_dc_precision = intra_dc_precision - 8;

        /* two pass mode */
        if (do_pass) {
            if (do_pass == 1) {
                video_enc->flags |= CODEC_FLAG_PASS1;
            } else {
                video_enc->flags |= CODEC_FLAG_PASS2;
            }
        }

        MATCH_PER_STREAM_OPT(forced_key_frames, str, 
ost->forced_keyframes, oc, st);
        /* duplicate: the matched string is owned by the options context */
        if (ost->forced_keyframes)
            ost->forced_keyframes = av_strdup(ost->forced_keyframes);

        MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);

        ost->top_field_first = -1;
        MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);

#if CONFIG_AVFILTER
        MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
        if (filters)
            ost->avfilter = av_strdup(filters);
#endif
    } else {
        MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
    }

    return ost;
}

/*
 * Create an audio output stream and apply channel count, sample format
 * and sample rate options (encoding only; no-op extras for stream copy).
 */
static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
{
    AVStream *st;
    OutputStream *ost;
    AVCodecContext *audio_enc;

    ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
    st  = ost->st;

    audio_enc = st->codec;
    audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;

    if (!ost->stream_copy) {
        char *sample_fmt = NULL;

        MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);

        MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
        if (sample_fmt &&
            (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
            av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
            exit_program(1);
        }

        MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
    }

    return ost;
}

/* Create a data output stream; only stream copy is supported. */
static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
{
    OutputStream *ost;

    ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
    if (!ost->stream_copy) {
        av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
        exit_program(1);
    }

    return ost;
}

/* Create an attachment output stream (always stream copy). */
static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
{
    OutputStream *ost = new_output_stream(o, 
oc, AVMEDIA_TYPE_ATTACHMENT);
    ost->stream_copy = 1;
    return ost;
}

/* Create a subtitle output stream. */
static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
{
    AVStream *st;
    OutputStream *ost;
    AVCodecContext *subtitle_enc;

    ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
    st  = ost->st;
    subtitle_enc = st->codec;

    subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;

    return ost;
}

/* arg format is "output-stream-index:streamid-value". */
static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
{
    int idx;
    char *p;
    char idx_str[16];

    av_strlcpy(idx_str, arg, sizeof(idx_str));
    p = strchr(idx_str, ':');
    if (!p) {
        av_log(NULL, AV_LOG_FATAL,
               "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
               arg, opt);
        exit_program(1);
    }
    *p++ = '\0';
    idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
    /* grow the map up to idx so sparse assignments are allowed */
    o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
    o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
    return 0;
}

/*
 * Copy the chapters of ifile into ofile, shifted by the output start
 * time and clipped to the recording time; optionally copies chapter
 * metadata as well. Returns 0 or AVERROR(ENOMEM).
 */
static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
{
    AVFormatContext *is = ifile->ctx;
    AVFormatContext *os = ofile->ctx;
    int i;

    for (i = 0; i < is->nb_chapters; i++) {
        AVChapter *in_ch = is->chapters[i], *out_ch;
        /* output start time expressed in this chapter's time base */
        int64_t ts_off   = av_rescale_q(ofile->start_time - ifile->ts_offset,
                                        AV_TIME_BASE_Q, in_ch->time_base);
        int64_t rt       = (ofile->recording_time == INT64_MAX) ? 
INT64_MAX :
                           av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);


        /* skip chapters entirely before the output start */
        if (in_ch->end < ts_off)
            continue;
        /* chapters are ordered; stop once past the recording window */
        if (rt != INT64_MAX && in_ch->start > rt + ts_off)
            break;

        out_ch = av_mallocz(sizeof(AVChapter));
        if (!out_ch)
            return AVERROR(ENOMEM);

        out_ch->id        = in_ch->id;
        out_ch->time_base = in_ch->time_base;
        out_ch->start     = FFMAX(0,  in_ch->start - ts_off);
        out_ch->end       = FFMIN(rt, in_ch->end   - ts_off);

        if (copy_metadata)
            av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);

        os->nb_chapters++;
        /* NOTE(review): element size looks like it should be
         * sizeof(*os->chapters) (a pointer); sizeof(AVChapter) merely
         * over-allocates -- confirm before changing */
        os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
        if (!os->chapters)
            return AVERROR(ENOMEM);
        os->chapters[os->nb_chapters - 1] = out_ch;
    }
    return 0;
}

/*
 * Handle an output filename argument: create the output format context,
 * set up its streams (from -map or automatically), attachments, metadata
 * and chapters, and open the output file.
 */
static void opt_output_file(void *optctx, const char *filename)
{
    OptionsContext *o = optctx;
    AVFormatContext *oc;
    int i, err;
    AVOutputFormat *file_oformat;
    OutputStream *ost;
    InputStream *ist;

    if (!strcmp(filename, "-"))
        filename = "pipe:";

    oc = avformat_alloc_context();
    if (!oc) {
        print_error(filename, AVERROR(ENOMEM));
        exit_program(1);
    }

    /* output format: forced with -f, otherwise guessed from the filename */
    if (o->format) {
        file_oformat = av_guess_format(o->format, NULL, NULL);
        if (!file_oformat) {
            av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
            exit_program(1);
        }
    } else {
        file_oformat = av_guess_format(NULL, filename, NULL);
        if (!file_oformat) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
                   filename);
            exit_program(1);
        }
    }

    oc->oformat = file_oformat;
    oc->interrupt_callback = int_cb;
    av_strlcpy(oc->filename, filename, sizeof(oc->filename));

    if (!o->nb_stream_maps) {
        /* pick the "best" stream of each type */
#define NEW_STREAM(type, index)\
3868    if (index >= 0) {\
        ost = new_ ## type ## _stream(o, oc);\
        ost->source_index = index;\
        ost->sync_ist     = &input_streams[index];\
        input_streams[index].discard = 0;\
    }

        /* video: highest resolution */
        if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
            int area = 0, idx = -1;
            for (i = 0; i < nb_input_streams; i++) {
                ist = &input_streams[i];
                if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                    ist->st->codec->width * ist->st->codec->height > area) {
                    area = ist->st->codec->width * ist->st->codec->height;
                    idx = i;
                }
            }
            NEW_STREAM(video, idx);
        }

        /* audio: most channels */
        if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
            int channels = 0, idx = -1;
            for (i = 0; i < nb_input_streams; i++) {
                ist = &input_streams[i];
                if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                    ist->st->codec->channels > channels) {
                    channels = ist->st->codec->channels;
                    idx = i;
                }
            }
            NEW_STREAM(audio, idx);
        }

        /* subtitles: pick first */
        if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
            for (i = 0; i < nb_input_streams; i++)
                if (input_streams[i].st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                    NEW_STREAM(subtitle, i);
                    break;
                }
        }
        /* do something with data? 
*/
    } else {
        /* explicit -map: create one output stream per surviving map entry */
        for (i = 0; i < o->nb_stream_maps; i++) {
            StreamMap *map = &o->stream_maps[i];

            if (map->disabled)
                continue;

            ist = &input_streams[input_files[map->file_index].ist_index + map->stream_index];
            switch (ist->st->codec->codec_type) {
            case AVMEDIA_TYPE_VIDEO:      ost = new_video_stream(o, oc);      break;
            case AVMEDIA_TYPE_AUDIO:      ost = new_audio_stream(o, oc);      break;
            case AVMEDIA_TYPE_SUBTITLE:   ost = new_subtitle_stream(o, oc);   break;
            case AVMEDIA_TYPE_DATA:       ost = new_data_stream(o, oc);       break;
            case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
            default:
                av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
                       map->file_index, map->stream_index);
                exit_program(1);
            }

            ost->source_index = input_files[map->file_index].ist_index + map->stream_index;
            ost->sync_ist     = &input_streams[input_files[map->sync_file_index].ist_index +
                                               map->sync_stream_index];
            ist->discard = 0;
        }
    }

    /* handle attached files */
    for (i = 0; i < o->nb_attachments; i++) {
        AVIOContext *pb;
        uint8_t *attachment;
        const char *p;
        int64_t len;

        if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
            av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
                   o->attachments[i]);
            exit_program(1);
        }
        if ((len = avio_size(pb)) <= 0) {
            av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
                   o->attachments[i]);
            exit_program(1);
        }
        if (!(attachment = av_malloc(len))) {
            av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
                   o->attachments[i]);
            exit_program(1);
        }
        avio_read(pb, attachment, len);

        /* the attachment payload is carried as the stream's extradata */
        ost = new_attachment_stream(o, oc);
        ost->stream_copy               = 0;
        ost->source_index              = -1;
        ost->attachment_filename       = o->attachments[i];
        ost->st->codec->extradata      = attachment;
        ost->st->codec->extradata_size = len;

        /* record the basename as the "filename" metadata tag */
        p = strrchr(o->attachments[i], '/');
        av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
        avio_close(pb);
    }

    /* register the file in the global output_files array */
    output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
    output_files[nb_output_files - 1].ctx            = oc;
    output_files[nb_output_files - 1].ost_index      = nb_output_streams - oc->nb_streams;
    output_files[nb_output_files - 1].recording_time = o->recording_time;
    output_files[nb_output_files - 1].start_time     = o->start_time;
    output_files[nb_output_files - 1].limit_filesize = o->limit_filesize;
    av_dict_copy(&output_files[nb_output_files - 1].opts, format_opts, 0);

    /* check filename in case of an image number is expected */
    if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(oc->filename)) {
            print_error(oc->filename, AVERROR(EINVAL));
            exit_program(1);
        }
    }

    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        /* test if it already exists to avoid losing precious files */
        assert_file_overwrite(filename);

        /* open the file */
        if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
                              &oc->interrupt_callback,
                              &output_files[nb_output_files - 1].opts)) < 0) {
            print_error(filename, err);
            exit_program(1);
        }
    }

    if (o->mux_preload) {
        uint8_t buf[64];
        snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
        av_dict_set(&output_files[nb_output_files - 1].opts, "preload", buf, 0);
    }
    oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
    oc->flags |= AVFMT_FLAG_NONBLOCK;

    /* copy metadata */
    for (i = 0; i < o->nb_metadata_map; i++) {
        char *p;
        int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);

        if (in_file_index < 0)
            continue;
        if (in_file_index >= nb_input_files) {
            av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
            exit_program(1);
        }
        copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index].ctx, o);
    }

    /* copy chapters */
    if (o->chapters_input_file >= nb_input_files) {
        if (o->chapters_input_file == INT_MAX) {
            /* copy chapters from the first input file that has them*/
            o->chapters_input_file = -1;
            for (i = 0; i < nb_input_files; i++)
                if (input_files[i].ctx->nb_chapters) {
                    o->chapters_input_file = i;
                    break;
                }
        } else {
            av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
                   o->chapters_input_file);
            exit_program(1);
        }
    }
    if (o->chapters_input_file >= 0)
        copy_chapters(&input_files[o->chapters_input_file], &output_files[nb_output_files - 1],
                      !o->metadata_chapters_manual);

    /* copy global metadata by default */
    if (!o->metadata_global_manual && nb_input_files)
        av_dict_copy(&oc->metadata, input_files[0].ctx->metadata,
                     AV_DICT_DONT_OVERWRITE);
    if (!o->metadata_streams_manual)
        for (i = output_files[nb_output_files - 1].ost_index; i < nb_output_streams; i++) {
            InputStream *ist;
            if (output_streams[i].source_index < 0)         /* this is true e.g. 
for attached files */ 4054 continue; 4055 ist = &input_streams[output_streams[i].source_index]; 4056 av_dict_copy(&output_streams[i].st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE); 4057 } 4058 4059 /* process manually set metadata */ 4060 for (i = 0; i < o->nb_metadata; i++) { 4061 AVDictionary **m; 4062 char type, *val; 4063 const char *stream_spec; 4064 int index = 0, j, ret; 4065 4066 val = strchr(o->metadata[i].u.str, '='); 4067 if (!val) { 4068 av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n", 4069 o->metadata[i].u.str); 4070 exit_program(1); 4071 } 4072 *val++ = 0; 4073 4074 parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec); 4075 if (type == 's') { 4076 for (j = 0; j < oc->nb_streams; j++) { 4077 if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) { 4078 av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0); 4079 } else if (ret < 0) 4080 exit_program(1); 4081 } 4082 printf("ret %d, stream_spec %s\n", ret, stream_spec); 4083 } 4084 else { 4085 switch (type) { 4086 case 'g': 4087 m = &oc->metadata; 4088 break; 4089 case 'c': 4090 if (index < 0 || index >= oc->nb_chapters) { 4091 av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index); 4092 exit_program(1); 4093 } 4094 m = &oc->chapters[index]->metadata; 4095 break; 4096 default: 4097 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier); 4098 exit_program(1); 4099 } 4100 av_dict_set(m, o->metadata[i].u.str, *val ? 
                        val : NULL, 0);
        }
    }

    reset_options(o);
}

/* same option as mencoder */
/* Parse -pass: select two-pass encoding pass number (1 or 2). */
static int opt_pass(const char *opt, const char *arg)
{
    do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
    return 0;
}

/* User CPU time consumed so far, in microseconds.  Falls back to
 * av_gettime() (presumably wall-clock — not comparable to CPU time)
 * when no per-process counters are available. */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME is in 100ns units; /10 converts to microseconds */
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    return av_gettime();
#endif
}

/* Peak memory usage of this process in bytes, or 0 when not measurable. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    return (int64_t)rusage.ru_maxrss * 1024;   /* ru_maxrss is in kilobytes */
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}

/* -aq is an alias for -q:a (audio quality scale). */
static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "q:a", arg, options);
}

/* Print the one-line usage summary. */
static void show_usage(void)
{
    printf("Hyper fast Audio and Video encoder\n");
    printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
    printf("\n");
}

/* Implement -h: dump all option groups plus codec/format/scaler AVOptions. */
static void show_help(void)
{
    int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
                      OPT_EXPERT);
    show_help_options(options, "\nVideo options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
                      OPT_VIDEO);
    show_help_options(options, "\nAdvanced Video options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
                      OPT_VIDEO | OPT_EXPERT);
    show_help_options(options, "\nAudio options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
                      OPT_AUDIO);
    show_help_options(options, "\nAdvanced Audio options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
                      OPT_AUDIO | OPT_EXPERT);
    show_help_options(options, "\nSubtitle options:\n",
                      OPT_SUBTITLE | OPT_GRAB,
                      OPT_SUBTITLE);
    show_help_options(options, "\nAudio/Video grab options:\n",
                      OPT_GRAB,
                      OPT_GRAB);
    printf("\n");
    show_help_children(avcodec_get_class(), flags);
    show_help_children(avformat_get_class(), flags);
    show_help_children(sws_get_class(), flags);
}

/* Implement -target: preset a full set of options for VCD/SVCD/DVD/DV
 * output.  The target may be prefixed with "pal-", "ntsc-" or "film-";
 * otherwise the norm is guessed from the input streams' frame rates. */
static int opt_target(OptionsContext *o, const char *opt, const char *arg)
{
    enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
    /* indexed by the norm enum above */
    static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };

    if (!strncmp(arg, "pal-", 4)) {
        norm = PAL;
        arg += 4;
    } else if (!strncmp(arg, "ntsc-", 5)) {
        norm = NTSC;
        arg += 5;
    } else if (!strncmp(arg, "film-", 5)) {
        norm = FILM;
        arg += 5;
    } else {
        /* Try to determine PAL/NTSC by peeking in the input files */
        if (nb_input_files) {
            int i, j, fr;
            for (j = 0; j < nb_input_files; j++) {
                for (i = 0; i < input_files[j].nb_streams; i++) {
                    AVCodecContext *c = input_files[j].ctx->streams[i]->codec;
                    if (c->codec_type != AVMEDIA_TYPE_VIDEO)
                        continue;
                    /* frame rate in millihertz, from the codec time base */
                    fr = c->time_base.den * 1000 / c->time_base.num;
                    if (fr == 25000) {
                        norm = PAL;
                        break;
                    } else if ((fr == 29970) || (fr == 23976)) {
                        norm = NTSC;
                        break;
                    }
                }
                if (norm != UNKNOWN)
                    break;
            }
        }
        if (norm != UNKNOWN)
            av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
    }

    if (norm == UNKNOWN) {
        av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
        av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
        av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
        exit_program(1);
    }

    if (!strcmp(arg, "vcd")) {
        opt_video_codec(o, "c:v", "mpeg1video");
        opt_audio_codec(o, "c:a", "mp2");
        parse_option(o, "f", "vcd", options);

        parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
        parse_option(o, "r", frame_rates[norm], options);
        opt_default("g", norm == PAL ? "15" : "18");

        opt_default("b", "1150000");
        opt_default("maxrate", "1150000");
        opt_default("minrate", "1150000");
        opt_default("bufsize", "327680"); // 40*1024*8;

        opt_default("b:a", "224000");
        parse_option(o, "ar", "44100", options);
        parse_option(o, "ac", "2", options);

        opt_default("packetsize", "2324");
        opt_default("muxrate", "1411200"); // 2352 * 75 * 8;

        /* We have to offset the PTS, so that it is consistent with the SCR.
           SCR starts at 36000, but the first two packs contain only padding
           and the first pack from the other stream, respectively, may also have
           been written before.
           So the real data starts at SCR 36000+3*1200. */
        o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
    } else if (!strcmp(arg, "svcd")) {

        opt_video_codec(o, "c:v", "mpeg2video");
        opt_audio_codec(o, "c:a", "mp2");
        parse_option(o, "f", "svcd", options);

        parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
        parse_option(o, "r", frame_rates[norm], options);
        opt_default("g", norm == PAL ? "15" : "18");

        opt_default("b", "2040000");
        opt_default("maxrate", "2516000");
        opt_default("minrate", "0"); // 1145000;
        opt_default("bufsize", "1835008"); // 224*1024*8;
        opt_default("flags", "+scan_offset");


        opt_default("b:a", "224000");
        parse_option(o, "ar", "44100", options);

        opt_default("packetsize", "2324");

    } else if (!strcmp(arg, "dvd")) {

        opt_video_codec(o, "c:v", "mpeg2video");
        opt_audio_codec(o, "c:a", "ac3");
        parse_option(o, "f", "dvd", options);

        parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
        parse_option(o, "r", frame_rates[norm], options);
        opt_default("g", norm == PAL ? "15" : "18");

        opt_default("b", "6000000");
        opt_default("maxrate", "9000000");
        opt_default("minrate", "0"); // 1500000;
        opt_default("bufsize", "1835008"); // 224*1024*8;

        opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
        opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8

        opt_default("b:a", "448000");
        parse_option(o, "ar", "48000", options);

    } else if (!strncmp(arg, "dv", 2)) {
        /* handles both "dv" and "dv50" (any prefix was stripped above) */

        parse_option(o, "f", "dv", options);

        parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
        parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
                     norm == PAL ? "yuv420p" : "yuv411p", options);
        parse_option(o, "r", frame_rates[norm], options);

        parse_option(o, "ar", "48000", options);
        parse_option(o, "ac", "2", options);

    } else {
        av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
        return AVERROR(EINVAL);
    }
    return 0;
}

/* Set the filename used for -vstats output (takes ownership of a copy). */
static int opt_vstats_file(const char *opt, const char *arg)
{
    av_free (vstats_filename);
    vstats_filename = av_strdup (arg);
    return 0;
}

/* -vstats: derive a timestamped default stats filename.
 * NOTE(review): localtime() may return NULL (e.g. bogus clock); the
 * result is dereferenced unchecked here — confirm acceptable. */
static int opt_vstats(const char *opt, const char *arg)
{
    char filename[40];
    time_t today2 = time(NULL);
    struct tm *today = localtime(&today2);

    snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
             today->tm_sec);
    return opt_vstats_file(opt, filename);
}

/* The following are per-media-type aliases layered on parse_option(). */

static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "frames:v", arg, options);
}

static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "frames:a", arg, options);
}

static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "frames:d", arg, options);
}

static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "tag:v", arg, options);
}

static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "tag:a", arg, options);
}

static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "tag:s", arg, options);
}

static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "filter:v", arg, options);
}

/* -vsync: accept symbolic names or a bare number in [VSYNC_AUTO, VSYNC_VFR]. */
static int opt_vsync(const char *opt, const char *arg)
{
    if
(!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR; 4388 else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR; 4389 else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH; 4390 4391 if (video_sync_method == VSYNC_AUTO) 4392 video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR); 4393 return 0; 4394} 4395 4396#define OFFSET(x) offsetof(OptionsContext, x) 4397static const OptionDef options[] = { 4398 /* main options */ 4399#include "cmdutils_common_opts.h" 4400 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" }, 4401 { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" }, 4402 { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" }, 4403 { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" }, 4404 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" }, 4405 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" }, 4406 { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" }, 4407 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile", 4408 "outfile[,metadata]:infile[,metadata]" }, 4409 { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" }, 4410 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" }, 4411 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, // 4412 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time 
offset", "time_off" }, 4413 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" }, 4414 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" }, 4415 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" }, 4416 { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" }, 4417 { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark}, 4418 "add timings for benchmarking" }, 4419 { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" }, 4420 { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump}, 4421 "dump each input packet" }, 4422 { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump}, 4423 "when dumping packets, also dump the payload" }, 4424 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" }, 4425 { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" }, 4426 { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" }, 4427 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" }, 4428 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" }, 4429 { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)©_ts}, "copy timestamps" }, 4430 { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)©_tb}, "copy input stream time base when stream copying" }, 4431 { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, // 4432 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" }, 4433 { "xerror", 
OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" }, 4434 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" }, 4435 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" }, 4436 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" }, 4437 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" }, 4438 { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" }, 4439#if CONFIG_AVFILTER 4440 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" }, 4441#endif 4442 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", }, 4443 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" }, 4444 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" }, 4445 4446 /* video options */ 4447 { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" }, 4448 { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" }, 4449 { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" }, 4450 { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" }, 4451 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" }, 4452 { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = 
OFFSET(video_disable)}, "disable video" }, 4453 { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" }, 4454 { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" }, 4455 { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" }, 4456 { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant}, 4457 "use same quantizer as source (implies VBR)" }, 4458 { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" }, 4459 { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" }, 4460 { "deinterlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_deinterlace}, 4461 "deinterlace pictures" }, 4462 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" }, 4463 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" }, 4464#if CONFIG_AVFILTER 4465 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" }, 4466#endif 4467 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" }, 4468 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" }, 4469 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" }, 4470 { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" }, 4471 { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video 
tag/fourcc", "fourcc/tag" }, 4472 { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" }, 4473 { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" }, 4474 { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" }, 4475 { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" }, 4476 4477 /* audio options */ 4478 { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" }, 4479 { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", }, 4480 { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" }, 4481 { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" }, 4482 { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" }, 4483 { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" }, 4484 { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" }, 4485 { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, // 4486 { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" }, 4487 4488 /* subtitle options */ 4489 { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" }, 4490 { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, 
{(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" }, 4491 { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" }, 4492 4493 /* grab options */ 4494 { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" }, 4495 4496 /* muxer options */ 4497 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" }, 4498 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" }, 4499 4500 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" }, 4501 4502 /* data codec support */ 4503 { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" }, 4504 4505 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" }, 4506 { NULL, }, 4507}; 4508 4509int main(int argc, char **argv) 4510{ 4511 OptionsContext o = { 0 }; 4512 int64_t ti; 4513 4514 reset_options(&o); 4515 4516 av_log_set_flags(AV_LOG_SKIP_REPEATED); 4517 parse_loglevel(argc, argv, options); 4518 4519 avcodec_register_all(); 4520#if CONFIG_AVDEVICE 4521 avdevice_register_all(); 4522#endif 4523#if CONFIG_AVFILTER 4524 avfilter_register_all(); 4525#endif 4526 av_register_all(); 4527 avformat_network_init(); 4528 4529 show_banner(); 4530 4531 /* parse options */ 4532 parse_options(&o, argc, argv, options, opt_output_file); 4533 4534 if (nb_output_files <= 0 && nb_input_files == 0) { 4535 show_usage(); 4536 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name); 4537 exit_program(1); 4538 } 4539 4540 /* file converter / grab */ 4541 if (nb_output_files <= 0) { 4542 
fprintf(stderr, "At least one output file must be specified\n"); 4543 exit_program(1); 4544 } 4545 4546 if (nb_input_files == 0) { 4547 av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n"); 4548 exit_program(1); 4549 } 4550 4551 ti = getutime(); 4552 if (transcode(output_files, nb_output_files, input_files, nb_input_files) < 0) 4553 exit_program(1); 4554 ti = getutime() - ti; 4555 if (do_benchmark) { 4556 int maxrss = getmaxrss() / 1024; 4557 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss); 4558 } 4559 4560 exit_program(0); 4561 return 0; 4562} 4563