1 | /* |
---|
2 | * Copyright (c) 2003 Fabrice Bellard |
---|
3 | * |
---|
4 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
---|
5 | * of this software and associated documentation files (the "Software"), to deal |
---|
6 | * in the Software without restriction, including without limitation the rights |
---|
7 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
---|
8 | * copies of the Software, and to permit persons to whom the Software is |
---|
9 | * furnished to do so, subject to the following conditions: |
---|
10 | * |
---|
11 | * The above copyright notice and this permission notice shall be included in |
---|
12 | * all copies or substantial portions of the Software. |
---|
13 | * |
---|
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
---|
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
---|
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
---|
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
---|
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
---|
19 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
---|
20 | * THE SOFTWARE. |
---|
21 | */ |
---|
22 | /** |
---|
23 | * @file |
---|
24 | * libavformat API example. |
---|
25 | * |
---|
26 | * Output a media file in any supported libavformat format. |
---|
27 | * The default codecs are used. |
---|
28 | * @example doc/examples/muxing.c |
---|
29 | */ |
---|
30 | #include <stdlib.h> |
---|
31 | #include <stdio.h> |
---|
32 | #include <string.h> |
---|
33 | #include <math.h> |
---|
34 | #include <libavutil/opt.h> |
---|
35 | #include <libavutil/mathematics.h> |
---|
36 | #include <libavformat/avformat.h> |
---|
37 | #include <libswscale/swscale.h> |
---|
38 | #include <libswresample/swresample.h> |
---|
/* Total stream duration in seconds.
 * NOTE(review): the original comment said "5 seconds" but the value is 200.0 —
 * comment corrected to match the code. */
#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */

/* Scaler flags passed to sws_getContext() for pixel-format conversion. */
static int sws_flags = SWS_BICUBIC;
---|
45 | /* Add an output stream. */ |
---|
46 | static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, |
---|
47 | enum AVCodecID codec_id) |
---|
48 | { |
---|
49 | AVCodecContext *c; |
---|
50 | AVStream *st; |
---|
51 | /* find the encoder */ |
---|
52 | *codec = avcodec_find_encoder(codec_id); |
---|
53 | if (!(*codec)) { |
---|
54 | fprintf(stderr, "Could not find encoder for '%s'\n", |
---|
55 | avcodec_get_name(codec_id)); |
---|
56 | exit(1); |
---|
57 | } |
---|
58 | st = avformat_new_stream(oc, *codec); |
---|
59 | if (!st) { |
---|
60 | fprintf(stderr, "Could not allocate stream\n"); |
---|
61 | exit(1); |
---|
62 | } |
---|
63 | st->id = oc->nb_streams-1; |
---|
64 | c = st->codec; |
---|
65 | switch ((*codec)->type) { |
---|
66 | case AVMEDIA_TYPE_AUDIO: |
---|
67 | c->sample_fmt = AV_SAMPLE_FMT_FLTP; |
---|
68 | c->bit_rate = 64000; |
---|
69 | c->sample_rate = 44100; |
---|
70 | c->channels = 2; |
---|
71 | break; |
---|
72 | case AVMEDIA_TYPE_VIDEO: |
---|
73 | c->codec_id = codec_id; |
---|
74 | c->bit_rate = 400000; |
---|
75 | /* Resolution must be a multiple of two. */ |
---|
76 | c->width = 352; |
---|
77 | c->height = 288; |
---|
78 | /* timebase: This is the fundamental unit of time (in seconds) in terms |
---|
79 | * of which frame timestamps are represented. For fixed-fps content, |
---|
80 | * timebase should be 1/framerate and timestamp increments should be |
---|
81 | * identical to 1. */ |
---|
82 | c->time_base.den = STREAM_FRAME_RATE; |
---|
83 | c->time_base.num = 1; |
---|
84 | c->gop_size = 12; /* emit one intra frame every twelve frames at most */ |
---|
85 | c->pix_fmt = STREAM_PIX_FMT; |
---|
86 | if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) { |
---|
87 | /* just for testing, we also add B frames */ |
---|
88 | c->max_b_frames = 2; |
---|
89 | } |
---|
90 | if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) { |
---|
91 | /* Needed to avoid using macroblocks in which some coeffs overflow. |
---|
92 | * This does not happen with normal video, it just happens here as |
---|
93 | * the motion of the chroma plane does not match the luma plane. */ |
---|
94 | c->mb_decision = 2; |
---|
95 | } |
---|
96 | break; |
---|
97 | default: |
---|
98 | break; |
---|
99 | } |
---|
100 | /* Some formats want stream headers to be separate. */ |
---|
101 | if (oc->oformat->flags & AVFMT_GLOBALHEADER) |
---|
102 | c->flags |= CODEC_FLAG_GLOBAL_HEADER; |
---|
103 | return st; |
---|
104 | } |
---|
105 | /**************************************************************/ |
---|
106 | /* audio output */ |
---|
107 | static float t, tincr, tincr2; |
---|
108 | static uint8_t **src_samples_data; |
---|
109 | static int src_samples_linesize; |
---|
110 | static int src_nb_samples; |
---|
111 | static int max_dst_nb_samples; |
---|
112 | uint8_t **dst_samples_data; |
---|
113 | int dst_samples_linesize; |
---|
114 | int dst_samples_size; |
---|
115 | struct SwrContext *swr_ctx = NULL; |
---|
116 | static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st) |
---|
117 | { |
---|
118 | AVCodecContext *c; |
---|
119 | int ret; |
---|
120 | c = st->codec; |
---|
121 | /* open it */ |
---|
122 | ret = avcodec_open2(c, codec, NULL); |
---|
123 | if (ret < 0) { |
---|
124 | fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret)); |
---|
125 | exit(1); |
---|
126 | } |
---|
127 | /* init signal generator */ |
---|
128 | t = 0; |
---|
129 | tincr = 2 * M_PI * 110.0 / c->sample_rate; |
---|
130 | /* increment frequency by 110 Hz per second */ |
---|
131 | tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate; |
---|
132 | src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ? |
---|
133 | 10000 : c->frame_size; |
---|
134 | ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels, |
---|
135 | src_nb_samples, c->sample_fmt, 0); |
---|
136 | if (ret < 0) { |
---|
137 | fprintf(stderr, "Could not allocate source samples\n"); |
---|
138 | exit(1); |
---|
139 | } |
---|
140 | /* create resampler context */ |
---|
141 | if (c->sample_fmt != AV_SAMPLE_FMT_S16) { |
---|
142 | swr_ctx = swr_alloc(); |
---|
143 | if (!swr_ctx) { |
---|
144 | fprintf(stderr, "Could not allocate resampler context\n"); |
---|
145 | exit(1); |
---|
146 | } |
---|
147 | /* set options */ |
---|
148 | av_opt_set_int (swr_ctx, "in_channel_count", c->channels, 0); |
---|
149 | av_opt_set_int (swr_ctx, "in_sample_rate", c->sample_rate, 0); |
---|
150 | av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); |
---|
151 | av_opt_set_int (swr_ctx, "out_channel_count", c->channels, 0); |
---|
152 | av_opt_set_int (swr_ctx, "out_sample_rate", c->sample_rate, 0); |
---|
153 | av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", c->sample_fmt, 0); |
---|
154 | /* initialize the resampling context */ |
---|
155 | if ((ret = swr_init(swr_ctx)) < 0) { |
---|
156 | fprintf(stderr, "Failed to initialize the resampling context\n"); |
---|
157 | exit(1); |
---|
158 | } |
---|
159 | } |
---|
160 | /* compute the number of converted samples: buffering is avoided |
---|
161 | * ensuring that the output buffer will contain at least all the |
---|
162 | * converted input samples */ |
---|
163 | max_dst_nb_samples = src_nb_samples; |
---|
164 | ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels, |
---|
165 | max_dst_nb_samples, c->sample_fmt, 0); |
---|
166 | if (ret < 0) { |
---|
167 | fprintf(stderr, "Could not allocate destination samples\n"); |
---|
168 | exit(1); |
---|
169 | } |
---|
170 | dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples, |
---|
171 | c->sample_fmt, 0); |
---|
172 | } |
---|
173 | /* Prepare a 16 bit dummy audio frame of 'frame_size' samples and |
---|
174 | * 'nb_channels' channels. */ |
---|
175 | static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels) |
---|
176 | { |
---|
177 | int j, i, v; |
---|
178 | int16_t *q; |
---|
179 | q = samples; |
---|
180 | for (j = 0; j < frame_size; j++) { |
---|
181 | v = (int)(sin(t) * 10000); |
---|
182 | for (i = 0; i < nb_channels; i++) |
---|
183 | *q++ = v; |
---|
184 | t += tincr; |
---|
185 | tincr += tincr2; |
---|
186 | } |
---|
187 | } |
---|
188 | static void write_audio_frame(AVFormatContext *oc, AVStream *st) |
---|
189 | { |
---|
190 | AVCodecContext *c; |
---|
191 | AVPacket pkt = { 0 }; // data and size must be 0; |
---|
192 | AVFrame *frame = avcodec_alloc_frame(); |
---|
193 | int got_packet, ret, dst_nb_samples; |
---|
194 | av_init_packet(&pkt); |
---|
195 | c = st->codec; |
---|
196 | get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels); |
---|
197 | /* convert samples from native format to destination codec format, using the resampler */ |
---|
198 | if (swr_ctx) { |
---|
199 | /* compute destination number of samples */ |
---|
200 | dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples, |
---|
201 | c->sample_rate, c->sample_rate, AV_ROUND_UP); |
---|
202 | if (dst_nb_samples > max_dst_nb_samples) { |
---|
203 | av_free(dst_samples_data[0]); |
---|
204 | ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels, |
---|
205 | dst_nb_samples, c->sample_fmt, 0); |
---|
206 | if (ret < 0) |
---|
207 | exit(1); |
---|
208 | max_dst_nb_samples = dst_nb_samples; |
---|
209 | dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples, |
---|
210 | c->sample_fmt, 0); |
---|
211 | } |
---|
212 | /* convert to destination format */ |
---|
213 | ret = swr_convert(swr_ctx, |
---|
214 | dst_samples_data, dst_nb_samples, |
---|
215 | (const uint8_t **)src_samples_data, src_nb_samples); |
---|
216 | if (ret < 0) { |
---|
217 | fprintf(stderr, "Error while converting\n"); |
---|
218 | exit(1); |
---|
219 | } |
---|
220 | } else { |
---|
221 | dst_samples_data[0] = src_samples_data[0]; |
---|
222 | dst_nb_samples = src_nb_samples; |
---|
223 | } |
---|
224 | frame->nb_samples = dst_nb_samples; |
---|
225 | avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, |
---|
226 | dst_samples_data[0], dst_samples_size, 0); |
---|
227 | ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet); |
---|
228 | if (ret < 0) { |
---|
229 | fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret)); |
---|
230 | exit(1); |
---|
231 | } |
---|
232 | if (!got_packet) |
---|
233 | return; |
---|
234 | pkt.stream_index = st->index; |
---|
235 | /* Write the compressed frame to the media file. */ |
---|
236 | ret = av_interleaved_write_frame(oc, &pkt); |
---|
237 | if (ret != 0) { |
---|
238 | fprintf(stderr, "Error while writing audio frame: %s\n", |
---|
239 | av_err2str(ret)); |
---|
240 | exit(1); |
---|
241 | } |
---|
242 | avcodec_free_frame(&frame); |
---|
243 | } |
---|
244 | static void close_audio(AVFormatContext *oc, AVStream *st) |
---|
245 | { |
---|
246 | avcodec_close(st->codec); |
---|
247 | av_free(src_samples_data[0]); |
---|
248 | av_free(dst_samples_data[0]); |
---|
249 | } |
---|
250 | /**************************************************************/ |
---|
251 | /* video output */ |
---|
252 | static AVFrame *frame; |
---|
253 | static AVPicture src_picture, dst_picture; |
---|
254 | static int frame_count; |
---|
255 | static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st) |
---|
256 | { |
---|
257 | int ret; |
---|
258 | AVCodecContext *c = st->codec; |
---|
259 | /* open the codec */ |
---|
260 | ret = avcodec_open2(c, codec, NULL); |
---|
261 | if (ret < 0) { |
---|
262 | fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret)); |
---|
263 | exit(1); |
---|
264 | } |
---|
265 | /* allocate and init a re-usable frame */ |
---|
266 | frame = avcodec_alloc_frame(); |
---|
267 | if (!frame) { |
---|
268 | fprintf(stderr, "Could not allocate video frame\n"); |
---|
269 | exit(1); |
---|
270 | } |
---|
271 | /* Allocate the encoded raw picture. */ |
---|
272 | ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height); |
---|
273 | if (ret < 0) { |
---|
274 | fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret)); |
---|
275 | exit(1); |
---|
276 | } |
---|
277 | /* If the output format is not YUV420P, then a temporary YUV420P |
---|
278 | * picture is needed too. It is then converted to the required |
---|
279 | * output format. */ |
---|
280 | if (c->pix_fmt != AV_PIX_FMT_YUV420P) { |
---|
281 | ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height); |
---|
282 | if (ret < 0) { |
---|
283 | fprintf(stderr, "Could not allocate temporary picture: %s\n", |
---|
284 | av_err2str(ret)); |
---|
285 | exit(1); |
---|
286 | } |
---|
287 | } |
---|
288 | /* copy data and linesize picture pointers to frame */ |
---|
289 | *((AVPicture *)frame) = dst_picture; |
---|
290 | } |
---|
291 | /* Prepare a dummy image. */ |
---|
292 | static void fill_yuv_image(AVPicture *pict, int frame_index, |
---|
293 | int width, int height) |
---|
294 | { |
---|
295 | int x, y, i; |
---|
296 | i = frame_index; |
---|
297 | /* Y */ |
---|
298 | for (y = 0; y < height; y++) |
---|
299 | for (x = 0; x < width; x++) |
---|
300 | pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3; |
---|
301 | /* Cb and Cr */ |
---|
302 | for (y = 0; y < height / 2; y++) { |
---|
303 | for (x = 0; x < width / 2; x++) { |
---|
304 | pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2; |
---|
305 | pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5; |
---|
306 | } |
---|
307 | } |
---|
308 | } |
---|
309 | static void write_video_frame(AVFormatContext *oc, AVStream *st) |
---|
310 | { |
---|
311 | int ret; |
---|
312 | static struct SwsContext *sws_ctx; |
---|
313 | AVCodecContext *c = st->codec; |
---|
314 | if (frame_count >= STREAM_NB_FRAMES) { |
---|
315 | /* No more frames to compress. The codec has a latency of a few |
---|
316 | * frames if using B-frames, so we get the last frames by |
---|
317 | * passing the same picture again. */ |
---|
318 | } else { |
---|
319 | if (c->pix_fmt != AV_PIX_FMT_YUV420P) { |
---|
320 | /* as we only generate a YUV420P picture, we must convert it |
---|
321 | * to the codec pixel format if needed */ |
---|
322 | if (!sws_ctx) { |
---|
323 | sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P, |
---|
324 | c->width, c->height, c->pix_fmt, |
---|
325 | sws_flags, NULL, NULL, NULL); |
---|
326 | if (!sws_ctx) { |
---|
327 | fprintf(stderr, |
---|
328 | "Could not initialize the conversion context\n"); |
---|
329 | exit(1); |
---|
330 | } |
---|
331 | } |
---|
332 | fill_yuv_image(&src_picture, frame_count, c->width, c->height); |
---|
333 | sws_scale(sws_ctx, |
---|
334 | (const uint8_t * const *)src_picture.data, src_picture.linesize, |
---|
335 | 0, c->height, dst_picture.data, dst_picture.linesize); |
---|
336 | } else { |
---|
337 | fill_yuv_image(&dst_picture, frame_count, c->width, c->height); |
---|
338 | } |
---|
339 | } |
---|
340 | if (oc->oformat->flags & AVFMT_RAWPICTURE) { |
---|
341 | /* Raw video case - directly store the picture in the packet */ |
---|
342 | AVPacket pkt; |
---|
343 | av_init_packet(&pkt); |
---|
344 | pkt.flags |= AV_PKT_FLAG_KEY; |
---|
345 | pkt.stream_index = st->index; |
---|
346 | pkt.data = dst_picture.data[0]; |
---|
347 | pkt.size = sizeof(AVPicture); |
---|
348 | ret = av_interleaved_write_frame(oc, &pkt); |
---|
349 | } else { |
---|
350 | AVPacket pkt = { 0 }; |
---|
351 | int got_packet; |
---|
352 | av_init_packet(&pkt); |
---|
353 | /* encode the image */ |
---|
354 | ret = avcodec_encode_video2(c, &pkt, frame, &got_packet); |
---|
355 | if (ret < 0) { |
---|
356 | fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret)); |
---|
357 | exit(1); |
---|
358 | } |
---|
359 | /* If size is zero, it means the image was buffered. */ |
---|
360 | if (!ret && got_packet && pkt.size) { |
---|
361 | pkt.stream_index = st->index; |
---|
362 | /* Write the compressed frame to the media file. */ |
---|
363 | ret = av_interleaved_write_frame(oc, &pkt); |
---|
364 | } else { |
---|
365 | ret = 0; |
---|
366 | } |
---|
367 | } |
---|
368 | if (ret != 0) { |
---|
369 | fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret)); |
---|
370 | exit(1); |
---|
371 | } |
---|
372 | frame_count++; |
---|
373 | } |
---|
374 | static void close_video(AVFormatContext *oc, AVStream *st) |
---|
375 | { |
---|
376 | avcodec_close(st->codec); |
---|
377 | av_free(src_picture.data[0]); |
---|
378 | av_free(dst_picture.data[0]); |
---|
379 | av_free(frame); |
---|
380 | } |
---|
381 | /**************************************************************/ |
---|
382 | /* media file output */ |
---|
383 | int main(int argc, char **argv) |
---|
384 | { |
---|
385 | const char *filename; |
---|
386 | AVOutputFormat *fmt; |
---|
387 | AVFormatContext *oc; |
---|
388 | AVStream *audio_st, *video_st; |
---|
389 | AVCodec *audio_codec, *video_codec; |
---|
390 | double audio_time, video_time; |
---|
391 | int ret; |
---|
392 | /* Initialize libavcodec, and register all codecs and formats. */ |
---|
393 | av_register_all(); |
---|
394 | if (argc != 2) { |
---|
395 | printf("usage: %s output_file\n" |
---|
396 | "API example program to output a media file with libavformat.\n" |
---|
397 | "This program generates a synthetic audio and video stream, encodes and\n" |
---|
398 | "muxes them into a file named output_file.\n" |
---|
399 | "The output format is automatically guessed according to the file extension.\n" |
---|
400 | "Raw images can also be output by using '%%d' in the filename.\n" |
---|
401 | "\n", argv[0]); |
---|
402 | return 1; |
---|
403 | } |
---|
404 | filename = argv[1]; |
---|
405 | /* allocate the output media context */ |
---|
406 | avformat_alloc_output_context2(&oc, NULL, NULL, filename); |
---|
407 | if (!oc) { |
---|
408 | printf("Could not deduce output format from file extension: using MPEG.\n"); |
---|
409 | avformat_alloc_output_context2(&oc, NULL, "mpeg", filename); |
---|
410 | } |
---|
411 | if (!oc) { |
---|
412 | return 1; |
---|
413 | } |
---|
414 | fmt = oc->oformat; |
---|
415 | /* Add the audio and video streams using the default format codecs |
---|
416 | * and initialize the codecs. */ |
---|
417 | video_st = NULL; |
---|
418 | audio_st = NULL; |
---|
419 | /** |
---|
420 | if (fmt->video_codec != AV_CODEC_ID_NONE) { |
---|
421 | video_st = add_stream(oc, &video_codec, fmt->video_codec); |
---|
422 | } |
---|
423 | **/ |
---|
424 | video_st = add_stream(oc, &video_codec, AV_CODEC_ID_FFV1); |
---|
425 | if (fmt->audio_codec != AV_CODEC_ID_NONE) { |
---|
426 | audio_st = add_stream(oc, &audio_codec, fmt->audio_codec); |
---|
427 | } |
---|
428 | /* Now that all the parameters are set, we can open the audio and |
---|
429 | * video codecs and allocate the necessary encode buffers. */ |
---|
430 | if (video_st) |
---|
431 | open_video(oc, video_codec, video_st); |
---|
432 | if (audio_st) |
---|
433 | open_audio(oc, audio_codec, audio_st); |
---|
434 | av_dump_format(oc, 0, filename, 1); |
---|
435 | /* open the output file, if needed */ |
---|
436 | if (!(fmt->flags & AVFMT_NOFILE)) { |
---|
437 | ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE); |
---|
438 | if (ret < 0) { |
---|
439 | fprintf(stderr, "Could not open '%s': %s\n", filename, |
---|
440 | av_err2str(ret)); |
---|
441 | return 1; |
---|
442 | } |
---|
443 | } |
---|
444 | /* Write the stream header, if any. */ |
---|
445 | ret = avformat_write_header(oc, NULL); |
---|
446 | if (ret < 0) { |
---|
447 | fprintf(stderr, "Error occurred when opening output file: %s\n", |
---|
448 | av_err2str(ret)); |
---|
449 | return 1; |
---|
450 | } |
---|
451 | if (frame) |
---|
452 | frame->pts = 0; |
---|
453 | for (;;) { |
---|
454 | /* Compute current audio and video time. */ |
---|
455 | audio_time = audio_st ? audio_st->pts.val * av_q2d(audio_st->time_base) : 0.0; |
---|
456 | video_time = video_st ? video_st->pts.val * av_q2d(video_st->time_base) : 0.0; |
---|
457 | if ((!audio_st || audio_time >= STREAM_DURATION) && |
---|
458 | (!video_st || video_time >= STREAM_DURATION)) |
---|
459 | break; |
---|
460 | /* write interleaved audio and video frames */ |
---|
461 | if (!video_st || (video_st && audio_st && audio_time < video_time)) { |
---|
462 | write_audio_frame(oc, audio_st); |
---|
463 | } else { |
---|
464 | write_video_frame(oc, video_st); |
---|
465 | frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base); |
---|
466 | } |
---|
467 | } |
---|
468 | /* Write the trailer, if any. The trailer must be written before you |
---|
469 | * close the CodecContexts open when you wrote the header; otherwise |
---|
470 | * av_write_trailer() may try to use memory that was freed on |
---|
471 | * av_codec_close(). */ |
---|
472 | av_write_trailer(oc); |
---|
473 | /* Close each codec. */ |
---|
474 | if (video_st) |
---|
475 | close_video(oc, video_st); |
---|
476 | if (audio_st) |
---|
477 | close_audio(oc, audio_st); |
---|
478 | if (!(fmt->flags & AVFMT_NOFILE)) |
---|
479 | /* Close the output file. */ |
---|
480 | avio_close(oc->pb); |
---|
481 | /* free the stream */ |
---|
482 | avformat_free_context(oc); |
---|
483 | return 0; |
---|
484 | } |
---|