ref/output_example.d @ 0:3425707ddbf6

Initial import (hopefully this mercurial stuff works...)
author fraserofthenight
date Mon, 06 Jul 2009 08:06:28 -0700
/**
 * Mime on Fire (mime) -- Simple UPnP server for XBOX360
 * Copyright (C) 2009 Robert Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
module mime.app.main;

// Imports for whole program (just link to them)
import tango.stdc.stdarg; // Must be linked in to prevent strange linker errors
debug import tango.core.stacktrace.TraceExceptions;
import NONE = mime.codec.libav.mingw;

import Math = tango.math.Math;
import tango.stdc.stdlib;
import tango.stdc.stdio;
import tango.stdc.stringz;
import tango.core.Array;

import mime.codec.libav.avutil;
import mime.codec.libav.avcodec;
import mime.codec.libav.avformat;
import mime.codec.libav.swscale;

/* 5 seconds stream duration */
const STREAM_DURATION = 5.0;
const STREAM_FRAME_RATE = 25; /* 25 images/s */
const STREAM_NB_FRAMES = (cast(int)(STREAM_DURATION * STREAM_FRAME_RATE));
const STREAM_PIX_FMT = PIX_FMT_YUV420P; /* default pix_fmt */
static int sws_flags = SWS_BICUBIC;
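// Derived value: STREAM_NB_FRAMES = 5.0 s * 25 frames/s = 125 frames in total.
// sws_flags (bicubic) is the scaling algorithm handed to sws_getContext() below.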


/// Returns true if the zero-terminated haystack contains needle
/// (tango.core.Array.find returns the haystack length when there is no match).
private bool containsString(char* haystack, char[] needle)
{
    char[] sHaystack = fromStringz(haystack);
    return sHaystack.find(needle) < sHaystack.length;
}

/**************************************************************/
/* audio output */


float t, tincr, tincr2;     // sine generator state: phase, phase increment, sweep rate
short *samples;             // raw PCM buffer handed to the encoder
char *audio_outbuf;         // encoded audio output buffer
int audio_outbuf_size;
int audio_input_frame_size; // samples per channel fed to the encoder per frame

/*
 * add an audio output stream
 */
static AVStream *add_audio_stream(AVFormatContext *oc, int codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 1);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st.codec;
    c.codec_id = codec_id;
    c.codec_type = CODEC_TYPE_AUDIO;

    /* put sample parameters */
    c.bit_rate = 64000;
    c.sample_rate = 44100;
    c.channels = 2;
    return st;
}

static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVCodec *codec;

    c = st.codec;

    /* find the audio encoder */
    codec = avcodec_find_encoder(c.codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    t = 0;
    tincr = 2 * Math.PI * 110.0 / c.sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * Math.PI * 110.0 / c.sample_rate / c.sample_rate;

    audio_outbuf_size = 10000;
    audio_outbuf = cast(char*) av_malloc(audio_outbuf_size);

    /* ugly hack for PCM codecs (will be removed ASAP with new PCM
       support) to compute the input frame size in samples */
    if (c.frame_size <= 1) {
        audio_input_frame_size = audio_outbuf_size / c.channels;
        switch(st.codec.codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            audio_input_frame_size >>= 1;
            break;
        default:
            break;
        }
    } else {
        audio_input_frame_size = c.frame_size;
    }

    if(containsString(oc.oformat.name, "asf"))
        c.flags |= CODEC_FLAG_GLOBAL_HEADER;

    samples = cast(short*) av_malloc(audio_input_frame_size * 2 * c.channels);
}
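
/* Note on the frame-size fallback above: PCM codecs report frame_size <= 1, so the
   frame size in samples is derived from the 10000-byte output buffer instead. For the
   16-bit PCM codec IDs each sample occupies 2 bytes, hence the extra >> 1. The samples
   buffer is then sized as frame_size * 2 bytes * channels, exactly one such frame. */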

/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
   'nb_channels' channels */
static void get_audio_frame(short *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    short *q;

    q = samples;
    for(j=0;j<frame_size;j++) {
        v = cast(int)(Math.sin(t) * 10000);
        for(i = 0; i < nb_channels; i++)
            *q++ = v;
        t += tincr;
        tincr += tincr2;
    }
}
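
/* The generator above is a linear frequency sweep: tincr is the per-sample phase step
   (2*PI*110/Fs, i.e. a 110 Hz tone at first) and tincr2 grows that step by another
   2*PI*110/Fs over every Fs samples, so the pitch rises by roughly 110 Hz each second.
   A minimal sketch of that arithmetic, kept in a disabled block like the listings in
   main() below (sweep_frequency_after is a hypothetical helper, not used elsewhere): */
version(none)
{
    double sweep_frequency_after(double seconds, double sampleRate = 44100.0)
    {
        double phaseStep  = 2 * Math.PI * 110.0 / sampleRate; // tincr at t = 0
        double phaseAccel = phaseStep / sampleRate;           // tincr2
        // After `seconds` of audio, sampleRate * seconds samples have elapsed,
        // so the per-sample phase step has grown accordingly.
        double stepNow = phaseStep + phaseAccel * sampleRate * seconds;
        return stepNow * sampleRate / (2 * Math.PI);          // back to Hz
    }

    void print_sweep_example()
    {
        // Expect roughly 110 Hz, 220 Hz and 660 Hz.
        printf("%f %f %f\n", sweep_frequency_after(0.0),
               sweep_frequency_after(1.0), sweep_frequency_after(5.0));
    }
}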

static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt;
    av_init_packet(&pkt);

    c = st.codec;

    get_audio_frame(samples, audio_input_frame_size, c.channels);

    pkt.size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);

    pkt.pts = av_rescale_q(c.coded_frame.pts, c.time_base, st.time_base);
    pkt.flags |= PKT_FLAG_KEY;
    pkt.stream_index = st.index;
    pkt.data = audio_outbuf;

    /* write the compressed frame in the media file */
    if (av_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}
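
/* The pts handoff above uses av_rescale_q(a, bq, cq), which rescales the tick count a
   from time base bq to time base cq (conceptually a * bq / cq with rounding). The
   encoder stamps coded_frame.pts in the codec time base, while the muxer expects
   packet timestamps in the stream's own time base, which may differ, hence the
   conversion here and again in write_video_frame() below. */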

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st.codec);

    av_free(samples);
    av_free(audio_outbuf);
}

/**************************************************************/
/* video output */

AVFrame *picture, tmp_picture;
char *video_outbuf;
int frame_count, video_outbuf_size;

/* add a video output stream */
static AVStream *add_video_stream(AVFormatContext *oc, int codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 0);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st.codec;
    c.codec_id = codec_id;
    c.codec_type = CODEC_TYPE_VIDEO;

    /* put sample parameters */
    c.bit_rate = 400000;
    /* resolution must be a multiple of two */
    c.width = 352;
    c.height = 288;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. For fixed-fps content,
       the time base should be 1/framerate and timestamp increments should be
       identically 1. */
    c.time_base.den = STREAM_FRAME_RATE;
    c.time_base.num = 1;
    c.gop_size = 12; /* emit one intra frame every twelve frames at most */
    c.pix_fmt = STREAM_PIX_FMT;
    if (c.codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c.max_b_frames = 2;
    }
    if (c.codec_id == CODEC_ID_MPEG1VIDEO) {
        /* needed to avoid using macroblocks in which some coeffs overflow;
           this doesn't happen with normal video, it just happens here as the
           motion of the chroma plane doesn't match the luma plane */
        c.mb_decision = 2;
    }
    // some formats want stream headers to be separate
    if(containsString(oc.oformat.name, "mp4") ||
       containsString(oc.oformat.name, "mpeg4") ||
       containsString(oc.oformat.name, "asf") ||
       containsString(oc.oformat.name, "mov") ||
       containsString(oc.oformat.name, "3gp"))
        c.flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
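
/* Worked example for the time base above: with time_base = 1/25, one tick is 40 ms,
   so a frame presented n frames into the stream carries pts = n in codec ticks
   (n * 40 ms of wall-clock time). The muxer may store packets in a different stream
   time base, which is what the av_rescale_q() calls take care of. */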

static AVFrame *alloc_picture(int pix_fmt, int width, int height)
{
    AVFrame *picture;
    char *picture_buf;
    int size;

    picture = avcodec_alloc_frame();
    if (!picture)
        return null;
    size = avpicture_get_size(pix_fmt, width, height);
    picture_buf = cast(char*) av_malloc(size);
    if (!picture_buf) {
        av_free(picture);
        return null;
    }
    avpicture_fill(cast(AVPicture *)picture, picture_buf,
                   pix_fmt, width, height);
    return picture;
}

static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st.codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c.codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = null;
    if (!(oc.oformat.flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        video_outbuf_size = 200000;
        video_outbuf = cast(char*) av_malloc(video_outbuf_size);
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c.pix_fmt, c.width, c.height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = null;
    if (c.pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c.width, c.height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}

/* prepare a dummy image */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for(y=0;y<height;y++) {
        for(x=0;x<width;x++) {
            pict.data[0][y * pict.linesize[0] + x] = x + y + i * 3;
        }
    }

    /* Cb and Cr */
    for(y=0;y<height/2;y++) {
        for(x=0;x<width/2;x++) {
            pict.data[1][y * pict.linesize[1] + x] = 128 + y + i * 2;
            pict.data[2][y * pict.linesize[2] + x] = 64 + x + i * 5;
        }
    }
}
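
/* In YUV420P the two chroma planes (data[1] = Cb, data[2] = Cr) are subsampled by two
   in both directions, which is why the loops above only cover height/2 x width/2. For
   the 352x288 frames used here that means 352*288 = 101376 luma bytes plus two
   176*144 = 25344 byte chroma planes, i.e. width * height * 3 / 2 = 152064 bytes per
   picture (ignoring any per-row padding reflected in linesize). */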

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static SwsContext *img_convert_ctx;

    c = st.codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frames to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c.pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == null) {
                img_convert_ctx = sws_getContext(c.width, c.height,
                                                 PIX_FMT_YUV420P,
                                                 c.width, c.height,
                                                 c.pix_fmt,
                                                 sws_flags, null, null, null);
                if (img_convert_ctx == null) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c.width, c.height);
            sws_scale(img_convert_ctx, tmp_picture.data.ptr, tmp_picture.linesize.ptr,
                      0, c.height, picture.data.ptr, picture.linesize.ptr);
        } else {
            fill_yuv_image(picture, frame_count, c.width, c.height);
        }
    }


    if (oc.oformat.flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= PKT_FLAG_KEY;
        pkt.stream_index = st.index;
        pkt.data = cast(char *)picture;
        pkt.size = AVPicture.sizeof;

        ret = av_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            pkt.pts = av_rescale_q(c.coded_frame.pts, c.time_base, st.time_base);
            if(c.coded_frame.key_frame)
                pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index = st.index;
            pkt.data = video_outbuf;
            pkt.size = out_size;

            /* write the compressed frame in the media file */
            ret = av_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
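
/* Two things worth noting in write_video_frame(): once frame_count reaches
   STREAM_NB_FRAMES the same last picture is fed to the encoder again, which lets a
   B-frame-capable codec flush the frames it is still holding; and a zero return from
   avcodec_encode_video() is not an error, it simply means the encoder buffered the
   input and will emit the corresponding packet on a later call. */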

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st.codec);
    av_free(picture.data[0]);
    av_free(picture);
    if (tmp_picture) {
        av_free(tmp_picture.data[0]);
        av_free(tmp_picture);
    }
    av_free(video_outbuf);
}

/**************************************************************/
/* media file output */

int main(char[][] args)
{
    char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, video_st;
    double audio_pts, video_pts;
    int i;

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();

    version(none)
    {
        // List output formats and their default codecs
        char* codecName(int codec_id)
        {
            auto codec = avcodec_find_encoder(codec_id);
            return codec ? codec.name : toStringz("NONE");
        }

        AVOutputFormat* fmt2 = first_oformat;
        printf("format (extension, video codec, audio codec)\n");
        while(fmt2)
        {
            printf("%s (.%s, %s, %s)\n", fmt2.name, fmt2.extensions, codecName(fmt2.video_codec), codecName(fmt2.audio_codec));
            fmt2 = fmt2.next;
        }
    }

    version(none)
    {
        // List codecs
        AVCodec* codec = av_codec_next(null); // Get first
        while(codec)
        {
            if(codec.type == CODEC_TYPE_VIDEO)
            {
                printf("%d - %s (%s)\n", codec.id, codec.name, codec.long_name);
            }
            Lnext: codec = av_codec_next(codec);
        }
    }

    filename = toStringz("biff_happy.m4v");

    fmt = guess_format(toStringz("m4v"), null, null);
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        exit(1);
    }

    /* allocate the output media context */
    oc = av_alloc_format_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        exit(1);
    }
    oc.oformat = fmt;
    snprintf(oc.filename.ptr, oc.filename.sizeof, "%s", filename);

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video_st = null;
    audio_st = null;
    // WORKAREA use the right profile for mpeg4 encoding (MPEG-4 Part 2 Advanced Simple)
    if (fmt.video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt.video_codec);
    }
    // WORKAREA -- we want to be doing this with AAC - http://blogs.msdn.com/xboxteam/archive/2007/11/30/december-2007-video-playback-faq.aspx
    /* if (fmt.audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt.audio_codec);
    } */

    /* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(oc, null) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

    dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    /* open the output file, if needed */
    if (!(fmt.flags & AVFMT_NOFILE)) {
        if (url_fopen(&oc.pb, filename, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            exit(1);
        }
    }

    /* write the stream header, if any */
    av_write_header(oc);

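    /* The loop below drives both encoders off their stream clocks: each pass it
       converts audio_st.pts / video_st.pts to seconds, stops once every present
       stream has passed STREAM_DURATION, and otherwise writes a frame for whichever
       stream is currently behind so that audio and video packets stay interleaved
       in presentation order. (Only the video stream is active in this build, since
       the audio stream setup above is commented out.) */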
    for(;;) {
        /* compute current audio and video time */
        if (audio_st)
            audio_pts = cast(double)audio_st.pts.val * audio_st.time_base.num / audio_st.time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = cast(double)video_st.pts.val * video_st.time_base.num / video_st.time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* write the trailer, if any */
    av_write_trailer(oc);

    /* free the streams */
    for(i = 0; i < oc.nb_streams; i++) {
        av_freep(&oc.streams[i].codec);
        av_freep(&oc.streams[i]);
    }

    if (!(fmt.flags & AVFMT_NOFILE)) {
        /* close the output file */
    }

    /* free the stream */
    av_free(oc);

    return 0;
}