Mercurial > projects > hoofbaby
view src/impl/hoofbaby/codec/encoder.d @ 6:270343d824ae
The test program now uses the Encoder class.
author | fraserofthenight |
---|---|
date | Thu, 09 Jul 2009 20:28:13 -0700 |
parents | a1202aac1124 |
children | 71ebad05f542 |
line wrap: on
line source
/**
 * Hoofbaby -- http://www.dsource.org/projects/hoofbaby
 * Copyright (C) 2009 Robert Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

module hoofbaby.codec.encoder;

import tango.stdc.stringz;

import hoofbaby.codec.libav.avutil;
import hoofbaby.codec.libav.avcodec;
import hoofbaby.codec.libav.avformat;

/**
 * Encodes raw video frames and audio samples into an ASF container
 * (WMV2 video, WMAV2 audio) written to a file.
 *
 * Usage: construct with the output filename, call writeHeader(), feed
 * frames via writeVideoFrame()/writeAudioFrame(), then writeTrailer().
 */
public final class Encoder
{
	// TODO convert asserts to exceptions
	
	/// Size in bytes of the intermediate per-frame encode buffers.
	public const int OUTBUF_SIZE = 100000;
	
	public AVOutputFormat* format;
	public AVFormatContext* formatContext;
	
	public AVCodec* audioCodec;
	public AVCodecContext* audioContext;
	public AVStream* audioStream;
	public char* audioOutbuf;
	
	public AVCodec* videoCodec;
	public AVCodecContext* videoContext;
	public AVStream* videoStream;
	public char* videoOutbuf;
	
	/**
	 * Opens the output file and sets up the container, the video stream
	 * (WMV2) and the audio stream (WMAV2). Asserts on any setup failure.
	 *
	 * Params:
	 *     filename = path of the output file to create
	 */
	public this(char[] filename)
	{
		int ret; // Stores return value of functions called within
		
		// BUGFIX: register cleanup up-front. This was previously the last
		// statement of the constructor, where it could never observe a
		// failure from the setup code above it.
		scope(failure) freeResources();
		
		//----------------------------------------------------------------------
		// Container format
		
		format = guess_format("asf", null, null);
		assert(format !is null, "Could not find format");
		
		formatContext = av_alloc_format_context();
		assert(formatContext !is null, "Could not allocate format context");
		formatContext.oformat = format;
		//ctx.preload = cast(int) (0.5 * AV_TIME_BASE);
		formatContext.max_delay = cast(int) (0.7 * AV_TIME_BASE);
		formatContext.loop_output = AVFMT_NOOUTPUTLOOP;
		formatContext.flags |= AVFMT_FLAG_NONBLOCK; // TODO remove
		
		// BUGFIX: the return value was previously stored as the result of a
		// "< 0" comparison (0 or 1), so the assert below could never fire.
		ret = url_fopen(&formatContext.pb, toStringz(filename), URL_WRONLY);
		assert(ret >= 0, "Could not open output file");
		
		AVFormatParameters params;
		params.prealloced_context = 1;
		params.video_codec_id = CODEC_ID_WMV2;
		params.audio_codec_id = CODEC_ID_WMAV2;
		params.width = 352;         // TEMP
		params.height = 288;        // TEMP
		params.time_base.num = 1;   // TEMP
		params.time_base.den = 25;  // TEMP
		params.pix_fmt = PIX_FMT_YUV420P;
		params.channels = 2;        // NEXTVERSION support >2 channels for devices that can handle it
		params.sample_rate = 44100; // TEMP
		
		// BUGFIX: the filled-in params struct was previously ignored --
		// null was passed here instead of &params.
		ret = av_set_parameters(formatContext, &params);
		assert(ret >= 0, "Could not set parameters");
		
		//----------------------------------------------------------------------
		// Video stream
		
		videoStream = av_new_stream(formatContext, 0);
		assert(videoStream !is null, "Could not allocate video stream");
		formatContext.streams[0] = videoStream;
		
		videoCodec = avcodec_find_encoder(CODEC_ID_WMV2);
		assert(videoCodec !is null, "Could not find video codec");
		
		videoContext = videoStream.codec;
		videoContext.codec_id = CODEC_ID_WMV2;
		videoContext.codec_type = CODEC_TYPE_VIDEO;
		videoContext.bit_rate = 400000;    // TEMP
		videoContext.width = 352;          // TEMP
		videoContext.height = 288;         // TEMP
		videoContext.gop_size = 12;        // TEMP
		videoContext.qmin = 3;             // TEMP
		videoContext.time_base.den = 25;   // TEMP
		videoContext.time_base.num = 1;    // TEMP
		videoContext.pix_fmt = PIX_FMT_YUV420P;
		// ASF stores codec extradata in the container header rather than
		// in the stream, so the encoder must emit a global header.
		videoContext.flags |= CODEC_FLAG_GLOBAL_HEADER;
		
		ret = avcodec_open(videoContext, videoCodec);
		assert(ret >= 0, "Could not open video codec");
		
		videoOutbuf = cast(char*) av_malloc(OUTBUF_SIZE);
		assert(videoOutbuf !is null, "Could not allocate video output buffer");
		
		//----------------------------------------------------------------------
		// Audio stream
		
		// BUGFIX: the audio stream was previously created with the same
		// stream id (0) as the video stream; it gets its own id here.
		audioStream = av_new_stream(formatContext, 1);
		assert(audioStream !is null, "Could not allocate audio stream");
		formatContext.streams[1] = audioStream;
		
		audioCodec = avcodec_find_encoder(CODEC_ID_WMAV2);
		assert(audioCodec !is null, "Could not find audio codec");
		
		audioContext = audioStream.codec;
		audioContext.codec_id = CODEC_ID_WMAV2;
		audioContext.codec_type = CODEC_TYPE_AUDIO;
		audioContext.bit_rate = 64000;
		audioContext.sample_rate = 44100;
		audioContext.channels = 2;
		audioContext.flags |= CODEC_FLAG_GLOBAL_HEADER;
		
		ret = avcodec_open(audioContext, audioCodec);
		assert(ret >= 0, "Could not open audio codec");
		
		audioOutbuf = cast(char*) av_malloc(OUTBUF_SIZE);
		assert(audioOutbuf !is null, "Could not allocate audio output buffer");
	}
	
	public ~this()
	{
		freeResources();
	}
	
	/// Writes the container header; call once before any frames.
	public void writeHeader()
	{
		int ret = av_write_header(formatContext);
		assert(ret >= 0, "Could not write header for output file (incorrect codec paramters?)");
	}
	
	/// Writes the container trailer; call once after the last frame.
	public void writeTrailer()
	{
		int ret = av_write_trailer(formatContext);
		assert(ret >= 0, "Could not write trailer for output file");
	}
	
	/**
	 * Encodes one video frame and writes the resulting packet to the
	 * container. The encoder may buffer the frame internally, in which
	 * case nothing is written for this call.
	 *
	 * Params:
	 *     frame = the raw picture to encode (PIX_FMT_YUV420P, 352x288)
	 */
	public void writeVideoFrame(AVFrame* frame)
	{
		int outSize = avcodec_encode_video(videoContext, videoOutbuf, OUTBUF_SIZE, frame);
		
		// if zero size, it means the image was buffered, so don't write the packet
		if(outSize > 0)
		{
			AVPacket pkt;
			av_init_packet(&pkt);
			// Rescale the encoder's timestamp into the stream's time base.
			pkt.pts = av_rescale_q(videoContext.coded_frame.pts,
					videoContext.time_base, videoStream.time_base);
			if(videoContext.coded_frame.key_frame)
				pkt.flags |= PKT_FLAG_KEY;
			pkt.stream_index = videoStream.index;
			pkt.data = videoOutbuf;
			pkt.size = outSize;
			
			int ret = av_write_frame(formatContext, &pkt);
			assert(!ret, "Error writing video frame");
		}
	}
	
	/**
	 * Encodes one block of audio samples and writes the resulting packet
	 * to the container.
	 *
	 * Params:
	 *     samples = interleaved 16-bit PCM samples (2 channels, 44100 Hz);
	 *               must contain at least audioContext.frame_size samples
	 *               per channel -- TODO confirm against callers
	 */
	public void writeAudioFrame(short* samples)
	{
		AVPacket pkt;
		av_init_packet(&pkt);
		
		pkt.size = avcodec_encode_audio(audioContext, audioOutbuf, OUTBUF_SIZE, samples);
		//pkt.pts = av_rescale_q(acodec.coded_frame.pts, acodec.time_base, acodec.time_base);
		pkt.flags |= PKT_FLAG_KEY; // every compressed audio packet is a sync point
		pkt.stream_index = audioStream.index;
		pkt.data = audioOutbuf;
		
		int ret = av_write_frame(formatContext, &pkt);
		assert(ret == 0, "Error writing audio frame");
	}
	
	/**
	 * Releases libav-allocated resources. Safe to call more than once:
	 * pointers are nulled after freeing, since this runs from both the
	 * constructor's scope(failure) and the destructor.
	 * (BUGFIX: previously a failed construction followed by destruction
	 * would double-free these buffers.)
	 */
	private void freeResources()
	{
		if(formatContext)
		{
			av_free(formatContext);
			formatContext = null;
		}
		if(audioOutbuf)
		{
			av_free(audioOutbuf);
			audioOutbuf = null;
		}
		if(videoOutbuf)
		{
			av_free(videoOutbuf);
			videoOutbuf = null;
		}
	}
}