/netgear-R7000-V1.0.7.12_1.2.5/ap/gpl/minidlna/ffmpeg-2.3.4/doc/examples/

Lines matching defs:ost (definitions and uses of the OutputStream pointer ost in this FFmpeg 2.3.4 example program)

90 static void add_stream(OutputStream *ost, AVFormatContext *oc,
104 ost->st = avformat_new_stream(oc, *codec);
105 if (!ost->st) {
109 ost->st->id = oc->nb_streams-1;
110 c = ost->st->codec;
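
Lines 90-110 above are from add_stream(), which creates one output stream and hands back its codec context for configuration. A minimal sketch of the same pattern, written against the FFmpeg 2.3-era API this example uses (the codec context embedded in the stream via st->codec, deprecated in later releases), could look like the following; the OutputStream fields are the ones referenced throughout this listing, and error handling is reduced to exit(1) as in the example:

    #include <stdio.h>
    #include <stdlib.h>
    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>

    /* Per-stream state, reduced to the fields this listing shows. */
    typedef struct OutputStream {
        AVStream *st;
        AVFrame *frame, *tmp_frame;
        int64_t next_pts;
    } OutputStream;

    static void add_stream(OutputStream *ost, AVFormatContext *oc,
                           AVCodec **codec, enum AVCodecID codec_id)
    {
        AVCodecContext *c;

        *codec = avcodec_find_encoder(codec_id);
        if (!*codec) {
            fprintf(stderr, "Could not find encoder for '%s'\n",
                    avcodec_get_name(codec_id));
            exit(1);
        }

        ost->st = avformat_new_stream(oc, *codec);
        if (!ost->st) {
            fprintf(stderr, "Could not allocate stream\n");
            exit(1);
        }
        /* Stream ids must be unique; derive them from the stream count. */
        ost->st->id = oc->nb_streams - 1;
        c = ost->st->codec;   /* pre-3.0 API: the codec context lives in the stream */

        /* The caller would now fill c->sample_rate, c->width, ... per media type. */
        (void)c;
    }
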
165 static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
170 c = ost->st->codec;
180 ost->t = 0;
181 ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
183 ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
185 ost->frame = av_frame_alloc();
186 if (!ost->frame)
189 ost->frame->sample_rate = c->sample_rate;
190 ost->frame->format = AV_SAMPLE_FMT_S16;
191 ost->frame->channel_layout = c->channel_layout;
194 ost->frame->nb_samples = 10000;
196 ost->frame->nb_samples = c->frame_size;
198 ost->tmp_frame = av_frame_alloc();
199 if (!ost->tmp_frame)
202 ost->tmp_frame->sample_rate = c->sample_rate;
203 ost->tmp_frame->format = c->sample_fmt;
204 ost->tmp_frame->channel_layout = c->channel_layout;
205 ost->tmp_frame->nb_samples = ost->frame->nb_samples;
230 ret = av_frame_get_buffer(ost->frame, 0);
235 ret = av_frame_get_buffer(ost->tmp_frame, 0);
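
Lines 165-235 are from open_audio(). It seeds the sine-tone generator (tincr = 2 * M_PI * 110.0 / sample_rate is the per-sample phase step of a 110 Hz tone, and tincr2 makes that step grow so the pitch sweeps slowly upward), then allocates two frames: ost->frame holds the generated AV_SAMPLE_FMT_S16 samples and ost->tmp_frame matches the encoder's native sample_fmt; when the encoder has no fixed frame_size, 10000 samples per frame are used instead. The allocation done inline at lines 185-235 can be factored into a small helper, sketched below under an illustrative name (alloc_audio_frame is not part of this listing):

    #include <stdint.h>
    #include <libavutil/frame.h>

    /* Allocate an audio AVFrame plus its data buffer; illustrative helper
       factoring out the inline allocation shown at lines 185-235. */
    static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                      uint64_t channel_layout,
                                      int sample_rate, int nb_samples)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return NULL;

        frame->format         = sample_fmt;
        frame->channel_layout = channel_layout;
        frame->sample_rate    = sample_rate;
        frame->nb_samples     = nb_samples;

        /* av_frame_get_buffer() allocates the sample planes once the
           format / channel_layout / nb_samples fields above are set. */
        if (nb_samples && av_frame_get_buffer(frame, 0) < 0) {
            av_frame_free(&frame);
            return NULL;
        }
        return frame;
    }
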
244 static AVFrame *get_audio_frame(OutputStream *ost)
247 int16_t *q = (int16_t*)ost->frame->data[0];
250 if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
258 ret = av_frame_make_writable(ost->frame);
262 for (j = 0; j < ost->frame->nb_samples; j++) {
263 v = (int)(sin(ost->t) * 10000);
264 for (i = 0; i < ost->st->codec->channels; i++)
266 ost->t += ost->tincr;
267 ost->tincr += ost->tincr2;
270 ost->frame->pts = ost->next_pts;
271 ost->next_pts += ost->frame->nb_samples;
273 return ost->frame;
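
Lines 244-273 are from get_audio_frame(), the tone generator itself. A self-contained sketch follows; STREAM_DURATION and the (AVRational){1, 1} comparison base are assumptions, since the second half of the av_compare_ts() call at line 250 falls outside the match list:

    #include <math.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <libavformat/avformat.h>
    #include <libavutil/mathematics.h>

    #define STREAM_DURATION 10   /* seconds; assumed, the real constant is not shown here */

    typedef struct OutputStream {
        AVStream *st;
        AVFrame  *frame;
        int64_t   next_pts;          /* pts of the next frame to generate */
        float     t, tincr, tincr2;  /* sine phase, phase step, step increment */
    } OutputStream;

    /* Return one frame of a slowly sweeping sine tone, or NULL once the
       requested duration has been produced. */
    static AVFrame *get_audio_frame(OutputStream *ost)
    {
        AVFrame *frame = ost->frame;
        int16_t *q;
        int i, j, v;

        /* Stop once next_pts (codec time base) passes STREAM_DURATION seconds. */
        if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
                          STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
            return NULL;

        /* The previous frame may still be referenced by the encoder. */
        if (av_frame_make_writable(frame) < 0)
            exit(1);

        /* Take the data pointer only after the frame is known to be writable. */
        q = (int16_t *)frame->data[0];

        for (j = 0; j < frame->nb_samples; j++) {
            v = (int)(sin(ost->t) * 10000);
            for (i = 0; i < ost->st->codec->channels; i++)
                *q++ = v;                 /* same sample on every channel */
            ost->t     += ost->tincr;     /* advance the phase */
            ost->tincr += ost->tincr2;    /* sweep the frequency upward */
        }

        frame->pts = ost->next_pts;
        ost->next_pts += frame->nb_samples;   /* audio pts counts samples */
        return frame;
    }
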
280 static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
290 c = ost->st->codec;
292 frame = get_audio_frame(ost);
304 ost->tmp_frame->data, dst_nb_samples,
310 frame = ost->tmp_frame;
326 ret = write_frame(oc, &c->time_base, ost->st, &pkt);
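
Lines 280-326 are from write_audio_frame(), which pulls a frame from the generator, converts it into ost->tmp_frame when the encoder wants a different sample format (the destination visible at line 304), encodes it and muxes the packet. The sketch below assumes a libswresample converter (swr_ctx) and a running samples_count, neither of which appears in this match list, and it calls av_interleaved_write_frame() directly instead of the example's write_frame() helper:

    #include <stdint.h>
    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/mathematics.h>
    #include <libswresample/swresample.h>

    typedef struct OutputStream {
        AVStream *st;
        AVFrame *frame, *tmp_frame;
        struct SwrContext *swr_ctx;   /* assumed converter, not part of the match list */
        int samples_count;            /* running sample counter for pts (assumed) */
    } OutputStream;

    /* Sketch of the convert / encode / mux flow of lines 280-326. */
    static int write_audio_frame_sketch(AVFormatContext *oc, OutputStream *ost,
                                        AVFrame *src)
    {
        AVCodecContext *c = ost->st->codec;   /* pre-3.0 API */
        AVFrame *frame = src;
        AVPacket pkt = { 0 };
        int got_packet, ret;

        av_init_packet(&pkt);

        if (frame && ost->swr_ctx) {
            /* Account for samples buffered inside the resampler. */
            int dst_nb_samples = av_rescale_rnd(
                    swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                    c->sample_rate, c->sample_rate, AV_ROUND_UP);

            ret = av_frame_make_writable(ost->tmp_frame);
            if (ret < 0)
                return ret;

            /* Resample the S16 tone into the encoder's native format. */
            ret = swr_convert(ost->swr_ctx,
                              ost->tmp_frame->data, dst_nb_samples,
                              (const uint8_t **)frame->data, frame->nb_samples);
            if (ret < 0)
                return ret;
            frame = ost->tmp_frame;

            /* Give the converted frame a pts in the encoder time base. */
            frame->pts = av_rescale_q(ost->samples_count,
                                      (AVRational){ 1, c->sample_rate }, c->time_base);
            ost->samples_count += dst_nb_samples;
        }

        ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
        if (ret < 0)
            return ret;
        if (!got_packet)
            return 0;                         /* encoder is still buffering */

        /* Codec time base -> stream time base, mirroring the
           write_frame(oc, &c->time_base, ost->st, &pkt) call at line 326. */
        av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
        pkt.stream_index = ost->st->index;
        return av_interleaved_write_frame(oc, &pkt);
    }
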
363 static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
366 AVCodecContext *c = ost->st->codec;
376 ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
377 if (!ost->frame) {
385 ost->tmp_frame = NULL;
387 ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
388 if (!ost->tmp_frame) {
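
Lines 363-388 are from open_video(): ost->frame is allocated in the encoder's pixel format, and a second YUV420P ost->tmp_frame is allocated only when the encoder's pix_fmt is not YUV420P, so the generated test pattern has somewhere to live before conversion. The body of alloc_picture() is outside this match list; a sketch of such a helper:

    #include <libavutil/frame.h>

    /* Allocate a video AVFrame plus its image planes; a sketch of the
       alloc_picture() helper called at lines 376 and 387. */
    static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
    {
        AVFrame *picture = av_frame_alloc();
        if (!picture)
            return NULL;

        picture->format = pix_fmt;
        picture->width  = width;
        picture->height = height;

        /* Allocate the buffers; 32-byte alignment suits SIMD-optimized code. */
        if (av_frame_get_buffer(picture, 32) < 0) {
            av_frame_free(&picture);
            return NULL;
        }
        return picture;
    }
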
425 static AVFrame *get_video_frame(OutputStream *ost)
427 AVCodecContext *c = ost->st->codec;
430 if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
437 if (!ost->sws_ctx) {
438 ost->sws_ctx = sws_getContext(c->width, c->height,
443 if (!ost->sws_ctx) {
449 fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
450 sws_scale(ost->sws_ctx,
451 (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
452 0, c->height, ost->frame->data, ost->frame->linesize);
454 fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
457 ost->frame->pts = ost->next_pts++;
459 return ost->frame;
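
Lines 425-459 are from get_video_frame(). It stops after the wanted duration, lazily creates a swscale context the first time a pixel-format conversion is needed, paints the test pattern (into ost->tmp_frame on the conversion path, into ost->frame otherwise) and stamps the result with a pts that simply counts frames. A compilable sketch follows; STREAM_DURATION is an assumed constant, and fill_yuv_image() here paints a plain gradient rather than the example's exact pattern:

    #include <stdint.h>
    #include <libavformat/avformat.h>
    #include <libavutil/mathematics.h>
    #include <libswscale/swscale.h>

    #define STREAM_DURATION 10   /* seconds; assumed */

    typedef struct OutputStream {
        AVStream *st;
        AVFrame *frame, *tmp_frame;
        int64_t next_pts;             /* counts produced video frames */
        struct SwsContext *sws_ctx;
    } OutputStream;

    /* Paint a simple moving gradient into a YUV420P frame. */
    static void fill_yuv_image(AVFrame *pict, int64_t i, int width, int height)
    {
        int x, y;
        for (y = 0; y < height; y++)
            for (x = 0; x < width; x++)
                pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
        for (y = 0; y < height / 2; y++)
            for (x = 0; x < width / 2; x++) {
                pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
                pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
            }
    }

    static AVFrame *get_video_frame(OutputStream *ost)
    {
        AVCodecContext *c = ost->st->codec;   /* pre-3.0 API */

        /* Stop once the requested duration has been reached. */
        if (av_compare_ts(ost->next_pts, c->time_base,
                          STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
            return NULL;

        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* Generate in YUV420P, then convert to the encoder's format. */
            if (!ost->sws_ctx) {
                ost->sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                              c->width, c->height, c->pix_fmt,
                                              SWS_BICUBIC, NULL, NULL, NULL);
                if (!ost->sws_ctx)
                    return NULL;
            }
            fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
            sws_scale(ost->sws_ctx,
                      (const uint8_t * const *)ost->tmp_frame->data,
                      ost->tmp_frame->linesize, 0, c->height,
                      ost->frame->data, ost->frame->linesize);
        } else {
            fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
        }

        /* Video pts counts frames in c->time_base (set to 1/frame_rate elsewhere). */
        ost->frame->pts = ost->next_pts++;
        return ost->frame;
    }
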
466 static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
473 c = ost->st->codec;
475 frame = get_video_frame(ost);
486 pkt.stream_index = ost->st->index;
491 av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
506 ret = write_frame(oc, &c->time_base, ost->st, &pkt);
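
Lines 466-506 are from write_video_frame(): encode the picture, rescale the packet timestamps from the codec time base to the stream time base (line 491), tag the stream index and hand the packet to the muxer. The sketch below uses the 2.3-era avcodec_encode_video2() API and calls av_interleaved_write_frame() directly, leaving out the example's write_frame() helper and its raw-video shortcut:

    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>

    typedef struct OutputStream { AVStream *st; } OutputStream;  /* subset used here */

    /* Encode one video frame and mux the resulting packet.
       frame == NULL drains the encoder's delayed frames at end of stream. */
    static int encode_and_mux_video(AVFormatContext *oc, OutputStream *ost,
                                    AVFrame *frame)
    {
        AVCodecContext *c = ost->st->codec;   /* pre-3.0 API */
        AVPacket pkt = { 0 };
        int got_packet, ret;

        av_init_packet(&pkt);

        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        if (ret < 0)
            return ret;
        if (!got_packet)
            return 0;                         /* encoder is buffering (B-frames etc.) */

        /* Codec time base -> stream time base, as at line 491. */
        av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
        pkt.stream_index = ost->st->index;

        return av_interleaved_write_frame(oc, &pkt);
    }
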
520 static void close_stream(AVFormatContext *oc, OutputStream *ost)
522 avcodec_close(ost->st->codec);
523 av_frame_free(&ost->frame);
524 av_frame_free(&ost->tmp_frame);
525 sws_freeContext(ost->sws_ctx);
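
Lines 520-525 are from close_stream(), the teardown path. Both av_frame_free() and sws_freeContext() accept NULL, so fields unused by a given stream type need no special-casing; a self-contained sketch:

    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>

    typedef struct OutputStream {
        AVStream *st;
        AVFrame *frame, *tmp_frame;
        struct SwsContext *sws_ctx;
    } OutputStream;

    /* Release everything a stream owns; the codec context itself is freed
       together with the stream when avformat_free_context() runs later. */
    static void close_stream(AVFormatContext *oc, OutputStream *ost)
    {
        avcodec_close(ost->st->codec);   /* pre-3.0 API */
        av_frame_free(&ost->frame);
        av_frame_free(&ost->tmp_frame);
        sws_freeContext(ost->sws_ctx);
        (void)oc;                        /* kept only to mirror the example's signature */
    }
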