view src/test/hoofbaby/test/adhoc/transcode.d @ 6:270343d824ae

The test program now uses the Encoder class.
author fraserofthenight
date Thu, 09 Jul 2009 20:28:13 -0700
parents e6cf9f26d0e7
children 9fdfe4a64a13

/**
 * Hoofbaby -- http://www.dsource.org/projects/hoofbaby
 * Copyright (C) 2009 Robert Fraser
 * 
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

module hoofbaby.test.adhoc.transcode;

import NONE_1 = tango.stdc.stdarg; // Must be linked in to prevent strange linker errors
debug import NONE_2 = tango.core.stacktrace.TraceExceptions;
import NONE_3 = hoofbaby.codec.libav.mingw;
import hoofbaby.app.libs;

import Math = tango.math.Math;
import hoofbaby.codec.encoder;
import hoofbaby.codec.libav.avutil;
import hoofbaby.codec.libav.avcodec;
import hoofbaby.codec.libav.avformat;

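// Parameters for the generated test clip: 5 seconds of 352x288 video at 25 fps
// with 16-bit stereo audio at 44100 Hz.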
private const int STREAM_FRAME_RATE = 25;
private const double STREAM_DURATION = 5.0;
private const int STREAM_NB_FRAMES = (cast(int) (STREAM_DURATION * STREAM_FRAME_RATE));
private const int WIDTH = 352;
private const int HEIGHT = 288;
private const int SAMPLE_RATE = 44100;
private int AUDIO_FRAME_SIZE; // Set at runtime from the encoder's audio codec context

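// State for the test-pattern and tone generators. audioIncr is the per-sample
// phase step of a 110 Hz sine wave; audioIncr2 is added to it every sample so
// the tone sweeps upward in pitch.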
private int frameCount = 0;
private double audioCount = 0.0;
private double audioIncr = 2 * Math.PI * 110.0 / SAMPLE_RATE;
private double audioIncr2 = 2 * Math.PI * 110.0 / SAMPLE_RATE / SAMPLE_RATE;

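/** Allocates an AVFrame along with a pixel buffer sized for the given pixel format and dimensions. */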
private AVFrame* allocFrame(int pix_fmt, int width, int height)
{
	AVFrame* picture;
	char* buf;
	int size;

	picture = avcodec_alloc_frame();
	if(!picture)
		return null;
	size = avpicture_get_size(pix_fmt, width, height);
	buf = cast(char*) av_malloc(size);
	if(!buf)
	{
		av_free(picture);
		return null;
	}
	avpicture_fill(cast(AVPicture*) picture, buf, pix_fmt, width, height);
	return picture;
}

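/** Fills the frame with a YUV test pattern that shifts with the global frame counter. */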
private AVFrame* generatePicture(AVFrame* pict, int width, int height)
{
	int x, y, i;
	i = frameCount;

	/* Y */
	for(y = 0; y < height; y++)
	{
		for(x = 0; x < width; x++)
		{
			pict.data[0][y * pict.linesize[0] + x] = x + y + i * 3;
		}
	}

	/* Cb and Cr */
	for(y = 0; y < height / 2; y++)
	{
		for(x = 0; x < width / 2; x++)
		{
			pict.data[1][y * pict.linesize[1] + x] = 128 + y + i * 2;
			pict.data[2][y * pict.linesize[2] + x] = 64 + x + i * 5;
		}
	}
	
	return pict;
}

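/** Fills the sample buffer with one frame of interleaved stereo audio from the sweeping sine-wave generator. */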
private short* generateAudio(short* samples)
{
	int j, i, v;
	short* q;

	q = samples;
	for(j = 0; j < AUDIO_FRAME_SIZE; j++)
	{
		v = cast(int)(Math.sin(audioCount) * 10000);
		for(i = 0; i < 2; i++) // 2 is number of channels
			*q++ = v;
		audioCount += audioIncr;
		audioIncr += audioIncr2;
	}

	return samples;
}

public int main(char[][] args)
{
	initLibs();
	scope Encoder enc = new Encoder("biff_happy.wmv");
	AUDIO_FRAME_SIZE = enc.audioContext.frame_size;

	// Allocate a video frame and audio buffer to store stuff
	AVFrame* frame = allocFrame(PIX_FMT_YUV420P, WIDTH, HEIGHT);
	assert(frame !is null, "Could not allocate frame");
	scope(exit) if(frame) av_free(frame);
	short* samples = cast(short*) av_malloc(AUDIO_FRAME_SIZE * 2 * 2); // AUDIO_FRAME_SIZE samples * 2 bytes per 16-bit sample * 2 channels
	assert(frame !is null, "Could not allocate samples");
	scope(exit) if(samples) av_free(samples);
	
	// Write header
	enc.writeHeader();
	
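	// Encode until both streams reach STREAM_DURATION, always feeding whichever
	// stream has the smaller presentation timestamp so audio and video stay interleaved.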
	while(true)
	{
		double audio_pts = cast(double) enc.audioStream.pts.val * enc.audioStream.time_base.num / enc.audioStream.time_base.den;
		double video_pts = cast(double) enc.videoStream.pts.val * enc.videoStream.time_base.num / enc.videoStream.time_base.den;

		if(audio_pts >= STREAM_DURATION && video_pts >= STREAM_DURATION)
			break;

		// Write interleaved audio & video
		if(audio_pts < video_pts)
		{
			enc.writeAudioFrame(generateAudio(samples));
		}
		else
		{
			if(frameCount >= STREAM_NB_FRAMES)
			{
				// no more frames to compress. The codec has a latency of a few
				// frames when using B-frames, so we flush the last ones by
				// passing the same picture in again
			}
			else
			{
				generatePicture(frame, WIDTH, HEIGHT);
			}
			
			enc.writeVideoFrame(frame);
			frameCount++;
		}
	}
    
	// Write trailer
	enc.writeTrailer();
	
	return 0;
}