Mercurial > projects > hoofbaby
view ref/codec_adhoc.d @ 4:a1202aac1124
Started implementing separate encoder class
author | fraserofthenight |
---|---|
date | Wed, 08 Jul 2009 19:16:39 -0700 |
parents | e6cf9f26d0e7 |
children |
line wrap: on
line source
/**
 * Hoofbaby -- http://www.dsource.org/projects/hoofbaby
 * Copyright (C) 2009 Robert Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
module hoofbaby.app.codec_adhoc;

import hoofbaby.util.buffer;

import tango.stdc.stdio;
import tango.stdc.stringz;
import Math = tango.math.Math;
import tango.io.device.File;
import tango.time.StopWatch;

import hoofbaby.codec.libav.avutil;
import hoofbaby.codec.libav.avcodec;
import hoofbaby.codec.libav.avformat;
import hoofbaby.codec.libav.avbuffer;

// Ad-hoc encoding experiment: generates STREAM_DURATION seconds of synthetic
// video (a moving gradient) and audio (a rising sine tone), encodes them as
// WMV2/WMAV2, muxes into an in-memory ASF stream, then dumps it to a file.
private const int STREAM_FRAME_RATE = 25;
private const double STREAM_DURATION = 5.0;
private const int STREAM_NB_FRAMES = (cast(int) (STREAM_DURATION * STREAM_FRAME_RATE));
private const int OUTBUF_SIZE = 100000; // scratch space for one encoded packet, in bytes

/**
 * Entry point of the ad-hoc codec test. Encodes a generated A/V clip into
 * "biff_happy.wmv" in the working directory, printing setup / encode / IO
 * timings along the way.
 *
 * Returns: 0 on success (failures trip asserts instead of returning).
 */
int codec_main()
{
	StopWatch sw;
	int frameCount = 0;                       // video frames generated so far
	double audioCount, audioIncr, audioIncr2; // sine phase, phase step, step acceleration
	char* voutbuf, aoutbuf;                   // encoded video / audio packet buffers
	int res;

	/**
	 * Allocates an AVFrame together with a pixel buffer big enough for the
	 * given format/dimensions, and wires the buffer into the frame's planes.
	 * Returns null (after cleaning up) if either allocation fails.
	 * Ownership of the frame and its buffer passes to the caller.
	 */
	AVFrame* allocFrame(int pix_fmt, int width, int height)
	{
		AVFrame* picture;
		char* buf;
		int size;
		picture = avcodec_alloc_frame();
		if(!picture)
			return null;
		size = avpicture_get_size(pix_fmt, width, height);
		buf = cast(char*) av_malloc(size);
		if(!buf)
		{
			av_free(picture);
			return null;
		}
		avpicture_fill(cast(AVPicture*) picture, buf, pix_fmt, width, height);
		return picture;
	}

	/**
	 * Fills a YUV420P frame with a synthetic test pattern that shifts with
	 * frameCount, so successive frames visibly animate.
	 */
	void generatePicture(AVFrame* pict, int width, int height)
	{
		int x, y, i;
		i = frameCount;
		/* Y */
		for(y = 0; y < height; y++)
		{
			for(x = 0; x < width; x++)
			{
				pict.data[0][y * pict.linesize[0] + x] = x + y + i * 3;
			}
		}
		/* Cb and Cr (chroma planes are half resolution in YUV420P) */
		for(y = 0; y < height / 2; y++)
		{
			for(x = 0; x < width / 2; x++)
			{
				pict.data[1][y * pict.linesize[1] + x] = 128 + y + i * 2;
				pict.data[2][y * pict.linesize[2] + x] = 64 + x + i * 5;
			}
		}
	}

	/**
	 * Generates (or, past STREAM_NB_FRAMES, re-submits) a picture, encodes it,
	 * and writes the resulting packet to the container. A zero-size encoder
	 * result means the frame was buffered internally (B-frame latency) and
	 * nothing is written. Returns av_write_frame's result (0 on success);
	 * asserts on write failure.
	 */
	int writeVideoFrame(AVFormatContext* ctx, AVStream* stream, AVFrame* picture)
	{
		AVCodecContext* vcodec = stream.codec;
		int ret = 0;
		if(frameCount >= STREAM_NB_FRAMES)
		{
			// no more frame to compress. The codec has a latency of a few
			// frames if using B frames, so we get the last frames by
			// passing the same picture again
		}
		else
		{
			generatePicture(picture, vcodec.width, vcodec.height);
		}

		// Encode it!
		int outSize = avcodec_encode_video(vcodec, voutbuf, OUTBUF_SIZE, picture);

		// if zero size, it means the image was buffered.. if not, write that ****!
		if(outSize > 0)
		{
			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.pts = av_rescale_q(vcodec.coded_frame.pts, vcodec.time_base, stream.time_base);
			if(vcodec.coded_frame.key_frame)
				pkt.flags |= PKT_FLAG_KEY;
			pkt.stream_index = stream.index;
			pkt.data = voutbuf;
			pkt.size = outSize;

			// oh yeah!
			ret = av_write_frame(ctx, &pkt);
		}
		else
		{
			ret = 0;
		}
		frameCount++;
		assert(!ret, "Error writing video frame");
		return ret;
	}

	/**
	 * Synthesizes one codec-frame's worth of stereo sine samples (frequency
	 * sweeps upward via audioIncr2), encodes them, and writes the packet.
	 * Returns 0 on success; asserts on write failure.
	 */
	int writeAudioFrame(AVFormatContext* ctx, AVStream* stream, short* samples)
	{
		AVCodecContext* acodec = stream.codec;

		{
			int j, i, v;
			short *q;
			q = samples;
			for(j = 0; j < acodec.frame_size; j++)
			{
				v = cast(int)(Math.sin(audioCount) * 10000);
				for(i = 0; i < 2; i++) // 2 is number of channels
					*q++ = v;
				audioCount += audioIncr;
				audioIncr += audioIncr2;
			}
		}

		AVPacket pkt;
		av_init_packet(&pkt);
		pkt.size = avcodec_encode_audio(acodec, aoutbuf, OUTBUF_SIZE, samples);
		//pkt.pts = av_rescale_q(acodec.coded_frame.pts, acodec.time_base, acodec.time_base);
		pkt.flags |= PKT_FLAG_KEY;
		pkt.stream_index = stream.index;
		pkt.data = aoutbuf;
		int res = av_write_frame(ctx, &pkt) != 0;
		assert(res == 0, "Error writing audio frame");
		return res;
	}

	//--------------------------------------------------------------------------
	// Container format
	sw.start();

	AVOutputFormat* fmt = guess_format("asf", null, null);
	assert(fmt !is null, "Could not find format");

	AVFormatContext* ctx = av_alloc_format_context();
	assert(ctx !is null, "Could not allocate format context");
	scope(exit) if(ctx) av_free(ctx);
	ctx.oformat = fmt;
	//ctx.preload = cast(int) (0.5 * AV_TIME_BASE);
	ctx.max_delay = cast(int) (0.7 * AV_TIME_BASE);
	ctx.loop_output = AVFMT_NOOUTPUTLOOP;
	ctx.flags |= AVFMT_FLAG_NONBLOCK;

	AVFormatParameters params;
	params.prealloced_context = 1;
	params.video_codec_id = CODEC_ID_WMV2;
	params.audio_codec_id = CODEC_ID_WMAV2;
	params.width = 352;
	params.height = 288;
	params.time_base.num = 1;
	params.time_base.den = STREAM_FRAME_RATE;
	params.pix_fmt = PIX_FMT_YUV420P;
	params.channels = 2;
	params.sample_rate = 44100;
	// NOTE(review): `params` is fully populated above but never passed here —
	// presumably this should be av_set_parameters(ctx, &params); confirm intent.
	// (null is legal for muxing with the old libav API, so behavior is kept.)
	res = av_set_parameters(ctx, null);
	assert(res >= 0, "Could not set parameters");

	//--------------------------------------------------------------------------
	// Video stream

	AVStream* vstream = av_new_stream(ctx, 0);
	assert(vstream !is null, "Could not allocate video stream");
	ctx.streams[0] = vstream;

	AVCodec* vcodecName = avcodec_find_encoder(CODEC_ID_WMV2);
	assert(vcodecName, "Could not find video codec");

	AVCodecContext* vcodec = vstream.codec;
	vcodec.codec_id = CODEC_ID_WMV2;
	vcodec.codec_type = CODEC_TYPE_VIDEO;
	vcodec.bit_rate = 400000;
	vcodec.width = 352;
	vcodec.height = 288;
	vcodec.gop_size = 12;
	vcodec.qmin = 3;
	vcodec.time_base.den = STREAM_FRAME_RATE;
	vcodec.time_base.num = 1;
	vcodec.pix_fmt = PIX_FMT_YUV420P;
	vcodec.flags |= CODEC_FLAG_GLOBAL_HEADER;

	res = avcodec_open(vcodec, vcodecName);
	assert(res >= 0, "Could not open video codec");

	//--------------------------------------------------------------------------
	// Audio stream

	AVStream* astream = av_new_stream(ctx, 0);
	assert(astream !is null, "Could not allocate audio stream");
	ctx.streams[1] = astream;

	AVCodec* acodecName = avcodec_find_encoder(CODEC_ID_WMAV2);
	assert(acodecName, "Could not find audio codec");

	AVCodecContext* acodec = astream.codec;
	acodec.codec_id = CODEC_ID_WMAV2;
	acodec.codec_type = CODEC_TYPE_AUDIO;
	acodec.bit_rate = 64000;
	acodec.sample_rate = 44100;
	acodec.channels = 2;
	acodec.flags |= CODEC_FLAG_GLOBAL_HEADER;

	// Sine generator state: 110 Hz starting tone, sweeping up slowly.
	audioCount = 0.0;
	audioIncr = 2 * Math.PI * 110.0 / acodec.sample_rate;
	audioIncr2 = 2 * Math.PI * 110.0 / acodec.sample_rate / acodec.sample_rate;

	res = avcodec_open(acodec, acodecName);
	assert(res >= 0, "Could not open audio codec");

	//--------------------------------------------------------------------------
	// Actually doing stuff

	// Open output "file" (an in-memory ring buffer behind an AVIO context)
	RingBuffer ringBuf = RingBuffer(1 << 24); // 16 MB
	scope(exit) ringBuf.free();
	ctx.pb = getBioContext(&ringBuf, 1 << 24);
	assert(ctx.pb !is null);
	assert(ctx.pb.opaque !is null);

	// Allocate a video frame and audio buffer to store stuff
	AVFrame* frame = allocFrame(PIX_FMT_YUV420P, vcodec.width, vcodec.height);
	assert(frame !is null, "Could not allocate frame");
	scope(exit) if(frame) av_free(frame);
	short* samples = cast(short*) av_malloc(acodec.frame_size * 2 * acodec.channels);
	// BUGFIX: was asserting `frame !is null` — the samples allocation was unchecked.
	assert(samples !is null, "Could not allocate samples");
	scope(exit) if(samples) av_free(samples);

	// Allocate some output buffers
	voutbuf = cast(char*) av_malloc(OUTBUF_SIZE);
	assert(voutbuf !is null, "Could not allocate video output buffer");
	scope(exit) if(voutbuf) av_free(voutbuf);
	aoutbuf = cast(char*) av_malloc(OUTBUF_SIZE);
	assert(aoutbuf !is null, "Could not allocate audio output buffer");
	// BUGFIX: was `av_free(voutbuf)` — double-freed voutbuf and leaked aoutbuf.
	scope(exit) if(aoutbuf) av_free(aoutbuf);

	printf("Setup time %f\n", sw.stop());
	sw.start();

	// Write the header
	res = av_write_header(ctx);
	assert(res >= 0, "Could not write header for output file (incorrect codec parameters?)");

	// Interleave audio and video by always writing whichever stream's
	// presentation clock is furthest behind, until both reach the target.
	while(true)
	{
		double audio_pts = cast(double) astream.pts.val * astream.time_base.num / astream.time_base.den;
		double video_pts = cast(double) vstream.pts.val * vstream.time_base.num / vstream.time_base.den;

		if(audio_pts >= STREAM_DURATION && video_pts >= STREAM_DURATION)
			break;

		// Write interleaved audio & video
		if(audio_pts < video_pts)
			writeAudioFrame(ctx, astream, samples);
		else
			writeVideoFrame(ctx, vstream, frame);
	}

	res = av_write_trailer(ctx);
	assert(res >= 0, "Could not write trailer for output file");

	printf("Encoding time %f\n", sw.stop());
	sw.start();

	// Drain the ring buffer to disk in one shot.
	scope File file = new File("biff_happy.wmv", File.WriteCreate);
	uint available = ringBuf.available;
	auto addr = ringBuf.beginRead(available);
	file.write(addr[0 .. available]);
	ringBuf.endRead(available);
	file.close();

	printf("IO time %f\n", sw.stop());
	sw.start();

	return 0;
}