/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format.
 * The default codecs are used.
 * @example doc/examples/muxing.c
 */
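
/* A typical build command, assuming pkg-config metadata for the FFmpeg
 * libraries is installed (package names and flags may differ on your system):
 *
 *     gcc -o muxing muxing.c $(pkg-config --cflags --libs \
 *         libavformat libavcodec libswscale libavutil) -lm
 */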

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/mathematics.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

/* 200 seconds stream duration */
#define STREAM_DURATION   200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                            enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    st->id = oc->nb_streams - 1;
    c = st->codec;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        st->id = 1;
        c->sample_fmt  = AV_SAMPLE_FMT_S16;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        c->channels    = 2;
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width    = 352;
        c->height   = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
        c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt       = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

/**************************************************************/
/* audio output */

static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;

static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    AVCodecContext *c;
    int ret;

    c = st->codec;

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    t     = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
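
    /* Codecs that advertise CODEC_CAP_VARIABLE_FRAME_SIZE accept any number of
     * samples per call; otherwise the encoder fixes frame_size (e.g. 1152
     * samples per frame for MP2) and must be fed exactly that many samples. */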
    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        audio_input_frame_size = 10000;
    else
        audio_input_frame_size = c->frame_size;

    samples = av_malloc(audio_input_frame_size *
                        av_get_bytes_per_sample(c->sample_fmt) *
                        c->channels);
    if (!samples) {
        fprintf(stderr, "Could not allocate audio samples buffer\n");
        exit(1);
    }
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for (j = 0; j < frame_size; j++) {
        v = (int)(sin(t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        t     += tincr;
        tincr += tincr2;
    }
}

static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = avcodec_alloc_frame();
    int got_packet, ret;

    av_init_packet(&pkt);
    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);
    frame->nb_samples = audio_input_frame_size;
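    /* avcodec_fill_audio_frame() points the frame's data pointers at the
     * existing 'samples' buffer; the samples are not copied. */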
    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                             (uint8_t *)samples,
                             audio_input_frame_size *
                             av_get_bytes_per_sample(c->sample_fmt) *
                             c->channels, 1);

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }
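
    /* Encoders with delay may buffer input internally and produce no packet
     * for a given input frame. */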
    if (!got_packet)
        return;

    pkt.stream_index = st->index;

    /* Write the compressed frame to the media file. */
    ret = av_interleaved_write_frame(oc, &pkt);
    if (ret != 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
        exit(1);
    }
    avcodec_free_frame(&frame);
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
}

/**************************************************************/
/* video output */

static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;

static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* Allocate the encoded raw picture. */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate temporary picture: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }

    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;
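
    /* Animate a simple test pattern: luma varies with x + y, while the
     * half-resolution Cb/Cr planes (YUV420P chroma is subsampled by 2 in each
     * direction) drift with the frame index. */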

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = st->index;
        pkt.data          = dst_picture.data[0];
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        AVPacket pkt;
        int got_output;

        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }

        /* If size is zero, it means the image was buffered. */
        if (got_output) {
            if (c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;

            pkt.stream_index = st->index;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(src_picture.data[0]);
    av_free(dst_picture.data[0]);
    av_free(frame);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_pts, video_pts;
    int ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc) {
        return 1;
    }
    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE)
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    if (fmt->audio_codec != AV_CODEC_ID_NONE)
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);
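
    /* Print a summary of the output format and its streams to the log. */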
    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }
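
    /* Note: avformat_write_header() may have adjusted each stream's time_base
     * to whatever the muxer requires; that is why video timestamps are
     * rescaled with av_rescale_q() in the loop below. */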

    if (frame)
        frame->pts = 0;
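
    /* Main loop: keep feeding frames to whichever stream lags behind until
     * both streams (when present) have reached STREAM_DURATION seconds. */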
    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
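            /* Advance the video timestamp by one frame: convert one tick of
             * the codec time base (1/STREAM_FRAME_RATE) into stream time base
             * units. */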
            frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts that were open when you wrote the header;
     * otherwise av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}