changeset 6:270343d824ae

The test program now uses the Encoder class.
author fraserofthenight
date Thu, 09 Jul 2009 20:28:13 -0700
parents 59aa8015d539
children 9fdfe4a64a13
files .hgignore src/build/frankenbuild.d src/impl/hoofbaby/app/main.d src/impl/hoofbaby/codec/encoder.d src/test/hoofbaby/test/adhoc/transcode.d
diffstat 5 files changed, 130 insertions(+), 266 deletions(-)
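
The change in a nutshell: the ad hoc transcode test no longer sets up the container, the WMV2/WMAV2 codec contexts, and the packet writing by hand; it constructs an Encoder and drives it through writeHeader/writeVideoFrame/writeAudioFrame/writeTrailer. A minimal usage sketch in D, based only on the API visible in this changeset (the module name and output filename are illustrative, and frame/sample generation is elided):

	module hoofbaby.test.adhoc.sketch;           // hypothetical module name

	import hoofbaby.app.libs;                    // initLibs()
	import hoofbaby.codec.encoder;               // the new Encoder class
	import hoofbaby.codec.libav.avcodec;         // AVFrame

	public int main(char[][] args)
	{
		initLibs();
		// The constructor opens the output file via url_fopen and sets up the
		// WMV2 video and WMAV2 audio streams on its format context.
		scope Encoder enc = new Encoder("out.wmv");  // filename is illustrative
		enc.writeHeader();
		// The real test fills an AVFrame* and a short* sample buffer here and
		// interleaves enc.writeVideoFrame(frame) / enc.writeAudioFrame(samples)
		// by comparing the streams' pts until STREAM_DURATION is reached.
		enc.writeTrailer();
		return 0;
	}
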
--- a/.hgignore	Wed Jul 08 19:19:56 2009 -0700
+++ b/.hgignore	Thu Jul 09 20:28:13 2009 -0700
@@ -10,5 +10,5 @@
 *.map
 deps/Platinum/Build/Targets/x86-microsoft-win32-vs2008/Platinum/Debug/*
 deps/Platinum/Build/Targets/x86-microsoft-win32-vs2008/Platinum/Release/*
-deps/Platinum/ThirdParty/Neptune/Build\Targets/x86-microsoft-win32-vs2008/Neptune/Debug/*
-deps/Platinum/ThirdParty/Neptune/Build\Targets/x86-microsoft-win32-vs2008/Neptune/Release/*
+deps/Platinum/ThirdParty/Neptune/Build/Targets/x86-microsoft-win32-vs2008/Neptune/Debug/*
+deps/Platinum/ThirdParty/Neptune/Build/Targets/x86-microsoft-win32-vs2008/Neptune/Release/*
--- a/src/build/frankenbuild.d	Wed Jul 08 19:19:56 2009 -0700
+++ b/src/build/frankenbuild.d	Thu Jul 09 20:28:13 2009 -0700
@@ -64,6 +64,7 @@
     if(cats.contains("transcode"))
     {
     	targetFound = true;
+    	 cats ~= "platif-debug";
 	    char[] cargs = cbase.dup;
 	    cargs ~= " -g";
 	    cargs ~= " -debug -debug=AVBuffer";
@@ -71,6 +72,7 @@
 	    cargs ~= " -I../src/test";
 	    cargs ~= " -oqobjs/debug";
 	    cargs ~= " ../src/test/hoofbaby/test/adhoc/transcode.d";
+	    commands ~= { return cpp("../src/platif/platif.h", "../src/impl/hoofbaby/platinum/platif.d"); };
 	    commands ~= { return rebuild(cargs); };
     }
 	
--- a/src/impl/hoofbaby/app/main.d	Wed Jul 08 19:19:56 2009 -0700
+++ b/src/impl/hoofbaby/app/main.d	Thu Jul 09 20:28:13 2009 -0700
@@ -16,14 +16,14 @@
 module hoofbaby.app.main;
 
 // Imports for whole program (just link to them)
-import tango.stdc.stdarg; // Must be linked in to prevent strange linker errors
-debug import tango.core.stacktrace.TraceExceptions;
-import NONE = hoofbaby.codec.libav.mingw;
+import NONE_1       = tango.stdc.stdarg; // Must be linked in to prevent strange linker errors
+debug import NONE_2 = tango.core.stacktrace.TraceExceptions;
+import NONE_3       = hoofbaby.codec.libav.mingw;
 
 import hoofbaby.app.libs;
 
 public int main(char[][] args)
 {
 	initLibs();
-	return Platinum.mimeOnFire(`D:\Media\Videos`);
+	return 0;
 }
\ No newline at end of file
--- a/src/impl/hoofbaby/codec/encoder.d	Wed Jul 08 19:19:56 2009 -0700
+++ b/src/impl/hoofbaby/codec/encoder.d	Thu Jul 09 20:28:13 2009 -0700
@@ -15,6 +15,8 @@
 
 module hoofbaby.codec.encoder;
 
+import tango.stdc.stringz;
+
 import hoofbaby.codec.libav.avutil;
 import hoofbaby.codec.libav.avcodec;
 import hoofbaby.codec.libav.avformat;
@@ -23,22 +25,22 @@
 {
 	// TODO convert asserts to exceptions
 	
-	private const int OUTBUF_SIZE = 100000;
+	public const int OUTBUF_SIZE = 100000;
 	
-	private AVOutputFormat* format;
-	private AVFormatContext* formatContext;
+	public AVOutputFormat* format;
+	public AVFormatContext* formatContext;
 	
-	private AVCodec* audioCodec;
-	private AVCodecContext* audioContext;
-	private AVStream* audioStream;
-	private char* audioOutbuf;
+	public AVCodec* audioCodec;
+	public AVCodecContext* audioContext;
+	public AVStream* audioStream;
+	public char* audioOutbuf;
 	
-	private AVCodec* videoCodec;
-	private AVCodecContext* videoContext;
-	private AVStream* videoStream;
-	private char* videoOutbuf;
+	public AVCodec* videoCodec;
+	public AVCodecContext* videoContext;
+	public AVStream* videoStream;
+	public char* videoOutbuf;
 	
-	public this()
+	public this(char[] filename)
 	{
 		int ret; // Stores return value of functions called within
 		
@@ -50,13 +52,16 @@
 		
 		formatContext = av_alloc_format_context();
 		assert(formatContext !is null, "Could not allocate format context");
-		res.add(formatContext, &av_free);
-		formatContext.oformat = fmt;
+		formatContext.oformat = format;
 		//ctx.preload = cast(int) (0.5 * AV_TIME_BASE);
 		formatContext.max_delay = cast(int) (0.7 * AV_TIME_BASE);
 		formatContext.loop_output = AVFMT_NOOUTPUTLOOP;
 		formatContext.flags |= AVFMT_FLAG_NONBLOCK;
 		
+		// TODO remove
+		ret = url_fopen(&formatContext.pb, toStringz(filename), URL_WRONLY) < 0;
+		assert(ret >= 0);
+		
 		AVFormatParameters params;
 		params.prealloced_context = 1;
 		params.video_codec_id = CODEC_ID_WMV2;
@@ -76,7 +81,7 @@
 		
 		videoStream = av_new_stream(formatContext, 0);
 		assert(videoStream !is null, "Could not allocate video stream");
-		formatContext.streams[0] = vstream;
+		formatContext.streams[0] = videoStream;
 		
 		videoCodec = avcodec_find_encoder(CODEC_ID_WMV2);
 		assert(videoCodec !is null, "Could not find video codec");
@@ -117,7 +122,7 @@
 		audioContext.channels = 2;
 		audioContext.flags |= CODEC_FLAG_GLOBAL_HEADER;
 		ret = avcodec_open(audioContext, audioCodec);
-		assert(res >= 0, "Could not open audio codec");
+		assert(ret >= 0, "Could not open audio codec");
 		
 		audioOutbuf = cast(char*) av_malloc(OUTBUF_SIZE);
 		assert(audioOutbuf !is null, "Could not allocate audio output buffer");
@@ -141,26 +146,26 @@
 	
 	public void writeTrailer()
 	{
-		int ret = av_write_trailer(ctx);
+		int ret = av_write_trailer(formatContext);
 		assert(ret >= 0, "Could not write trailer for output file");
 	}
 	
 	public void writeVideoFrame(AVFrame* frame)
 	{
-		int outSize = avcodec_encode_video(videoCodec, videoOutbuf, OUTBUF_SIZE, frame);
+		int outSize = avcodec_encode_video(videoContext, videoOutbuf, OUTBUF_SIZE, frame);
 		
 		// if zero size, it means the image was buffered, so don't write the packet
 		if(outSize > 0)
 		{
 			AVPacket pkt;
 			av_init_packet(&pkt);
-			pkt.pts = av_rescale_q(videoCodec.coded_frame.pts, videoCodec.time_base, videoStream.time_base);
-			if(videoCodec.coded_frame.key_frame)
+			pkt.pts = av_rescale_q(videoContext.coded_frame.pts, videoContext.time_base, videoStream.time_base);
+			if(videoContext.coded_frame.key_frame)
 				pkt.flags |= PKT_FLAG_KEY;
 			pkt.stream_index = videoStream.index;
 			pkt.data = videoOutbuf;
 			pkt.size = outSize;
-			int ret = av_write_frame(ctx, &pkt);
+			int ret = av_write_frame(formatContext, &pkt);
 			assert(!ret, "Error writing video frame");
 		}
 	}
@@ -169,12 +174,12 @@
 	{
 		AVPacket pkt;
 		av_init_packet(&pkt);
-		pkt.size = avcodec_encode_audio(audioCodec, audioOutbuf, OUTBUF_SIZE, samples);
+		pkt.size = avcodec_encode_audio(audioContext, audioOutbuf, OUTBUF_SIZE, samples);
 		//pkt.pts = av_rescale_q(acodec.coded_frame.pts, acodec.time_base, acodec.time_base);
 		pkt.flags |= PKT_FLAG_KEY;
 		pkt.stream_index = audioStream.index;
 		pkt.data = audioOutbuf;
-		int ret = av_write_frame(ctx, &pkt) != 0;
+		int ret = av_write_frame(formatContext, &pkt) != 0;
 		assert(ret == 0, "Error writing audio frame");
 	}
 	
--- a/src/test/hoofbaby/test/adhoc/transcode.d	Wed Jul 08 19:19:56 2009 -0700
+++ b/src/test/hoofbaby/test/adhoc/transcode.d	Thu Jul 09 20:28:13 2009 -0700
@@ -15,288 +15,145 @@
 
 module hoofbaby.test.adhoc.transcode;
 
-//Imports for whole program (just link to them)
 import NONE_1 = tango.stdc.stdarg; // Must be linked in to prevent strange linker errors
 debug import NONE_2 = tango.core.stacktrace.TraceExceptions;
 import NONE_3 = hoofbaby.codec.libav.mingw;
+import hoofbaby.app.libs;
 
-import tango.stdc.stdio;
-import tango.stdc.stringz;
 import Math = tango.math.Math;
-import tango.io.device.File;
-
-import hoofbaby.app.libs;
-import hoofbaby.util.buffer;
-
+import hoofbaby.codec.encoder;
 import hoofbaby.codec.libav.avutil;
 import hoofbaby.codec.libav.avcodec;
 import hoofbaby.codec.libav.avformat;
-import hoofbaby.codec.libav.avbuffer;
 
 private const int STREAM_FRAME_RATE = 25;
 private const double STREAM_DURATION = 5.0;
 private const int STREAM_NB_FRAMES = (cast(int) (STREAM_DURATION * STREAM_FRAME_RATE));
-private const int OUTBUF_SIZE = 100000;
+private const int WIDTH = 352;
+private const int HEIGHT = 288;
+private const int SAMPLE_RATE = 44100;
+private int AUDIO_FRAME_SIZE;
 
-int main(char[][] args)
+private int frameCount = 0;
+private double audioCount = 0.0;
+private double audioIncr = 2 * Math.PI * 110.0 / SAMPLE_RATE;
+private double audioIncr2 = 2 * Math.PI * 110.0 / SAMPLE_RATE / SAMPLE_RATE;
+
+private AVFrame* allocFrame(int pix_fmt, int width, int height)
 {
-	initLibs();
-	
-	int frameCount = 0;
-	double audioCount, audioIncr, audioIncr2;
-	char* voutbuf, aoutbuf;
-	int res;
+	AVFrame* picture;
+	char* buf;
+	int size;
 
-	AVFrame* allocFrame(int pix_fmt, int width, int height)
+	picture = avcodec_alloc_frame();
+	if(!picture)
+		return null;
+	size = avpicture_get_size(pix_fmt, width, height);
+	buf = cast(char*) av_malloc(size);
+	if(!buf)
 	{
-		AVFrame* picture;
-		char* buf;
-		int size;
+		av_free(picture);
+		return null;
+	}
+	avpicture_fill(cast(AVPicture*) picture, buf, pix_fmt, width, height);
+	return picture;
+}
 
-		picture = avcodec_alloc_frame();
-		if(!picture)
-			return null;
-		size = avpicture_get_size(pix_fmt, width, height);
-		buf = cast(char*) av_malloc(size);
-		if(!buf)
+private AVFrame* generatePicture(AVFrame* pict, int width, int height)
+{
+	int x, y, i;
+	i = frameCount;
+
+	/* Y */
+	for(y = 0; y < height; y++)
+	{
+		for(x = 0; x < width; x++)
 		{
-			av_free(picture);
-			return null;
-		}
-		avpicture_fill(cast(AVPicture*) picture, buf, pix_fmt, width, height);
-		return picture;
-	}
-
-	void generatePicture(AVFrame* pict, int width, int height)
-	{
-		int x, y, i;
-		i = frameCount;
-
-		/* Y */
-		for(y = 0; y < height; y++)
-		{
-			for(x = 0; x < width; x++)
-			{
-				pict.data[0][y * pict.linesize[0] + x] = x + y + i * 3;
-			}
-		}
-
-		/* Cb and Cr */
-		for(y = 0; y < height / 2; y++)
-		{
-			for(x = 0; x < width / 2; x++)
-			{
-				pict.data[1][y * pict.linesize[1] + x] = 128 + y + i * 2;
-				pict.data[2][y * pict.linesize[2] + x] = 64 + x + i * 5;
-			}
+			pict.data[0][y * pict.linesize[0] + x] = x + y + i * 3;
 		}
 	}
 
-	int writeVideoFrame(AVFormatContext* ctx, AVStream* stream, AVFrame* picture)
+	/* Cb and Cr */
+	for(y = 0; y < height / 2; y++)
 	{
-		AVCodecContext* vcodec = stream.codec;
-		int ret = 0;
-
-		if(frameCount >= STREAM_NB_FRAMES)
-		{
-			// no more frame to compress. The codec has a latency of a few
-			// frames if using B frames, so we get the last frames by
-			// passing the same picture again
-		}
-		else
-		{
-			generatePicture(picture, vcodec.width, vcodec.height);
-		}
-
-		// Encode it!
-		int outSize = avcodec_encode_video(vcodec, voutbuf, OUTBUF_SIZE, picture);
-		// if zero size, it means the image was buffered.. if not, write that ****!
-		if(outSize > 0)
-		{
-			AVPacket pkt;
-			av_init_packet(&pkt);
-
-			pkt.pts = av_rescale_q(vcodec.coded_frame.pts, vcodec.time_base, stream.time_base);
-			if(vcodec.coded_frame.key_frame)
-				pkt.flags |= PKT_FLAG_KEY;
-			pkt.stream_index = stream.index;
-			pkt.data = voutbuf;
-			pkt.size = outSize;
-
-			// oh yeah!
-			ret = av_write_frame(ctx, &pkt);
-		}
-		else
+		for(x = 0; x < width / 2; x++)
 		{
-			ret = 0;
+			pict.data[1][y * pict.linesize[1] + x] = 128 + y + i * 2;
+			pict.data[2][y * pict.linesize[2] + x] = 64 + x + i * 5;
 		}
-		
-		frameCount++;
-		assert(!ret, "Error writing video frame");
-		return ret;
-	}
-	
-	int writeAudioFrame(AVFormatContext* ctx, AVStream* stream, short* samples)
-	{
-		AVCodecContext* acodec = stream.codec;
-
-		{
-			int j, i, v;
-		    short *q;
-
-		    q = samples;
-		    for(j = 0; j < acodec.frame_size; j++)
-		    {
-		        v = cast(int)(Math.sin(audioCount) * 10000);
-		        for(i = 0; i < 2; i++) // 2 is number of channels
-		            *q++ = v;
-		        audioCount += audioIncr;
-		        audioIncr += audioIncr2;
-		    }
-		}
-		
-		AVPacket pkt;
-		av_init_packet(&pkt);
-		pkt.size = avcodec_encode_audio(acodec, aoutbuf, OUTBUF_SIZE, samples);
-		//pkt.pts = av_rescale_q(acodec.coded_frame.pts, acodec.time_base, acodec.time_base);
-		pkt.flags |= PKT_FLAG_KEY;
-		pkt.stream_index = stream.index;
-		pkt.data = aoutbuf;
-		
-		int res = av_write_frame(ctx, &pkt) != 0;
-		assert(res == 0, "Error writing audio frame");
-		return res;
 	}
 	
-	//--------------------------------------------------------------------------
-	// Container format
-	
-	AVOutputFormat* fmt = guess_format("asf", null, null);
-	assert(fmt !is null, "Could not find format");
-	
-	AVFormatContext* ctx = av_alloc_format_context();
-	assert(ctx !is null, "Could not allocate format context");
-	scope(exit) if(ctx) av_free(ctx);
-	ctx.oformat = fmt;
-	//ctx.preload = cast(int) (0.5 * AV_TIME_BASE);
-	ctx.max_delay = cast(int) (0.7 * AV_TIME_BASE);
-	ctx.loop_output = AVFMT_NOOUTPUTLOOP;
-	ctx.flags |= AVFMT_FLAG_NONBLOCK;
-	
-	AVFormatParameters params;
-	params.prealloced_context = 1;
-	params.video_codec_id = CODEC_ID_WMV2;
-	params.audio_codec_id = CODEC_ID_WMAV2;
-	params.width = 352;
-	params.height = 288;
-	params.time_base.num = 1;
-	params.time_base.den = STREAM_FRAME_RATE;
-	params.pix_fmt = PIX_FMT_YUV420P;
-	params.channels = 2;
-	params.sample_rate = 44100;
-	res = av_set_parameters(ctx, null);
-	assert(res >= 0, "Could not set parameters");
-	
-	//--------------------------------------------------------------------------
-	// Video stream
-	
-	AVStream* vstream = av_new_stream(ctx, 0);
-	assert(vstream !is null, "Could not allocate video stream");
-	ctx.streams[0] = vstream;
-	
-	AVCodec* vcodecName = avcodec_find_encoder(CODEC_ID_WMV2);
-	assert(vcodecName, "Could not find video codec");
-	AVCodecContext* vcodec = vstream.codec;
-	vcodec.codec_id = CODEC_ID_WMV2;
-	vcodec.codec_type = CODEC_TYPE_VIDEO;
-	vcodec.bit_rate = 400000;
-	vcodec.width = 352;
-	vcodec.height = 288;
-	vcodec.gop_size = 12;
-	vcodec.qmin = 3;
-	vcodec.time_base.den = STREAM_FRAME_RATE;
-	vcodec.time_base.num = 1;
-	vcodec.pix_fmt = PIX_FMT_YUV420P;
-	vcodec.flags |= CODEC_FLAG_GLOBAL_HEADER;
-	res = avcodec_open(vcodec, vcodecName);
-	assert(res >= 0, "Could not open video codec");
-	
-	//--------------------------------------------------------------------------
-	// Audio stream
-	
-	AVStream* astream = av_new_stream(ctx, 0);
-	assert(astream !is null, "Could not allocate audio stream");
-	ctx.streams[1] = astream;
-	
-	AVCodec* acodecName = avcodec_find_encoder(CODEC_ID_WMAV2);
-	assert(acodecName, "Could not find audio codec");
-	AVCodecContext* acodec = astream.codec;
-	acodec.codec_id = CODEC_ID_WMAV2;
-	acodec.codec_type = CODEC_TYPE_AUDIO;
-	acodec.bit_rate = 64000;
-	acodec.sample_rate = 44100;
-	acodec.channels = 2;
-	acodec.flags |= CODEC_FLAG_GLOBAL_HEADER;
-	audioCount = 0.0;
-	audioIncr = 2 * Math.PI * 110.0 / acodec.sample_rate;
-	audioIncr2 = 2 * Math.PI * 110.0 / acodec.sample_rate / acodec.sample_rate;
-	res = avcodec_open(acodec, acodecName);
-	assert(res >= 0, "Could not open audio codec");
-	
-	//--------------------------------------------------------------------------
-	// Actually doing stuff
-	
-	// Open output file
-	RingBuffer ringBuf = RingBuffer(1 << 24); // 16 MB
-	scope(exit) ringBuf.free();
-	ctx.pb = getBioContext(&ringBuf, 1 << 24);
-	assert(ctx.pb !is null);
-	assert(ctx.pb.opaque !is null);
+	return pict;
+}
+
+private short* generateAudio(short* samples)
+{
+	int j, i, v;
+    short *q;
+
+    q = samples;
+    for(j = 0; j < AUDIO_FRAME_SIZE; j++)
+    {
+        v = cast(int)(Math.sin(audioCount) * 10000);
+        for(i = 0; i < 2; i++) // 2 is number of channels
+            *q++ = v;
+        audioCount += audioIncr;
+        audioIncr += audioIncr2;
+    }
+    
+    return samples;
+}
+
+public int main(char[][] args)
+{
+	initLibs();
+	scope Encoder enc = new Encoder("biff_happy.wmv");
+	AUDIO_FRAME_SIZE = enc.audioContext.frame_size;
 
 	// Allocate a video frame and audio buffer to store stuff
-	AVFrame* frame = allocFrame(PIX_FMT_YUV420P, vcodec.width, vcodec.height);
+	AVFrame* frame = allocFrame(PIX_FMT_YUV420P, WIDTH, HEIGHT);
 	assert(frame !is null, "Could not allocate frame");
 	scope(exit) if(frame) av_free(frame);
-	short* samples = cast(short*) av_malloc(acodec.frame_size * 2 * acodec.channels);
+	short* samples = cast(short*) av_malloc(AUDIO_FRAME_SIZE * 2 * 2); // AUDIO_FRAME_SIZE * 2 * number of channels
 	assert(frame !is null, "Could not allocate samples");
 	scope(exit) if(samples) av_free(samples);
 	
-	// Allocate some output buffers
-	voutbuf = cast(char*) av_malloc(OUTBUF_SIZE);
-	assert(voutbuf !is null, "Could not allocate video output buffer");
-	scope(exit) if(voutbuf) av_free(voutbuf);
-	aoutbuf = cast(char*) av_malloc(OUTBUF_SIZE);
-	assert(aoutbuf !is null, "Could not allocate audio output buffer");
-	scope(exit) if(aoutbuf) av_free(voutbuf);
-	
-	// Write the header
-	res = av_write_header(ctx);
-	assert(res >= 0, "Could not write header for output file (incorrect codec paramters?)");
+	// Write header
+	enc.writeHeader();
 	
     while(true)
 	{
-		double audio_pts = cast(double) astream.pts.val * astream.time_base.num / astream.time_base.den;
-		double video_pts = cast(double) vstream.pts.val * vstream.time_base.num / vstream.time_base.den;
+		double audio_pts = cast(double) enc.audioStream.pts.val * enc.audioStream.time_base.num / enc.audioStream.time_base.den;
+		double video_pts = cast(double) enc.videoStream.pts.val * enc.videoStream.time_base.num / enc.videoStream.time_base.den;
 
 		if(audio_pts >= STREAM_DURATION && video_pts >= STREAM_DURATION)
 			break;
 
 		// Write interleaved audio & video
 		if(audio_pts < video_pts)
-			writeAudioFrame(ctx, astream, samples);
+		{
+			enc.writeAudioFrame(generateAudio(samples));
+		}
 		else
-			writeVideoFrame(ctx, vstream, frame);
+		{
+			if(frameCount >= STREAM_NB_FRAMES)
+			{
+				// no more frame to compress. The codec has a latency of a few
+				// frames if using B frames, so we get the last frames by
+				// passing the same picture again
+			}
+			else
+			{
+				generatePicture(frame, WIDTH, HEIGHT);
+			}
+			
+			enc.writeVideoFrame(frame);
+			frameCount++;
+		}
 	}
     
-	res = av_write_trailer(ctx);
-	assert(res >= 0, "Could not write trailer for output file");
-	
-	scope File file = new File("biff_happy.wmv", File.WriteCreate);
-	uint available = ringBuf.available;
-	auto addr = ringBuf.beginRead(available);
-	file.write(addr[0 .. available]);
-	ringBuf.endRead(available);
-	file.close();
+    // Write trailer
+	enc.writeTrailer();
 	
 	return 0;
 }
\ No newline at end of file