changeset 2315:58be29479e75
add files
| author | Ritor1 |
| --- | --- |
| date | Wed, 19 Mar 2014 10:09:42 +0600 |
| parents | 8e9be4fa33a8 |
| children | 25f771e528f0 |
| files | MMT.cpp MediaPlayer.cpp MediaPlayer.h OpenALSoundProvider.h |
| diffstat | 4 files changed, 1194 insertions(+), 4 deletions(-) |
--- a/MMT.cpp	Tue Mar 18 23:50:35 2014 +0600
+++ b/MMT.cpp	Wed Mar 19 10:09:42 2014 +0600
@@ -12,7 +12,7 @@
 #include "CShow.h"
 #include "GUIFont.h"
 #include "lib/libpng/png.h"
-//#include "MediaPlayer.h"
+#include "MediaPlayer.h"
 
 void ShowLogoVideo()
 {
@@ -185,9 +185,9 @@
 
   if (!bNoSound )
   {
-    //Media::Player *p = new Media::Player; // the player is created
-    //Media::ITrack *track = p->LoadTrack(L"Sounds\\New_Sounds/Stronghold_Theme.mp3");
-    //track->Play();
+    Media::Player *p = new Media::Player; // the player is created
+    Media::ITrack *track = p->LoadTrack(L"Sounds\\New_Sounds/Stronghold_Theme.mp3");
+    track->Play();
   }
   pMouse->RemoveHoldingItem(); // remove the held item from the cursor
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/MediaPlayer.cpp Wed Mar 19 10:09:42 2014 +0600 @@ -0,0 +1,820 @@ +extern "C" +{ + #include "lib/libavcodec/avcodec.h" + #include "lib/libavformat/avformat.h" + #include "lib/libavutil/avutil.h" + #include "lib/libavutil/imgutils.h" + #include "lib/libswscale/swscale.h" + #include "lib/libswresample/swresample.h" + #include "lib/libavutil/opt.h" + //#include "libavutil/samplefmt.h" +} +#pragma comment(lib, "avcodec.lib") +#pragma comment(lib, "avformat.lib") +#pragma comment(lib, "avutil.lib") +#pragma comment(lib, "swscale.lib") +#pragma comment(lib, "swresample.lib") + +#include <vector> +#include <deque> + +#include "stuff.h" +#include "OpenALSoundProvider.h" + +#include "MediaPlayer.h" +using namespace Media; + + + + +class MemoryStream +{ + public: + inline MemoryStream(void *data, size_t data_size) + { + this->data_size = data_size; + this->data = data; + this->current_pos = 0; + } + inline MemoryStream() + { + this->data_size = 0; + this->data = nullptr; + this->current_pos = 0; + } + + inline ~MemoryStream() + { + if (data) + delete [] data; + } + + inline size_t Write(void *buffer, size_t num_bytes) + { + if (!data) + { + data_size = 32 + num_bytes; + data = new char[data_size]; + current_pos = 0; + } + else if (current_pos + num_bytes >= data_size) + { + int new_data_size = data_size + num_bytes + data_size / 8 + 4; + auto new_data = new char[new_data_size]; + + memcpy(new_data, data, data_size); + delete [] data; + + data_size = new_data_size; + data = new_data; + } + memcpy((char *)data + current_pos, buffer, num_bytes); + current_pos += num_bytes; + return num_bytes; + } + + inline size_t Read(void *buffer, size_t num_bytes) + { + size_t read_size = min(num_bytes, data_size - current_pos); + if (read_size) + { + memcpy(buffer, (char *)data + current_pos, read_size); + current_pos += read_size; + } + return read_size; + } + + inline void Reset() + { + current_pos = 0; + } + inline void SeekToEnd() + { + current_pos = data_size; + } + + inline size_t Unwind(size_t bytes) + { + if (bytes > current_pos) + current_pos = 0; + else + current_pos -= bytes; + return current_pos; + } + + inline size_t Rewind(size_t bytes) + { + if (current_pos + bytes >= data_size) + current_pos = data_size; + else + current_pos += bytes; + return current_pos; + } + + inline size_t Size() const {return data_size;} + inline size_t Current() const {return current_pos;} + inline void *Ptr() const {return data;} + + private: + void *data; + size_t data_size; + size_t current_pos; +}; + + + + +OpenALSoundProvider *provider = nullptr; + + + +static int av_num_bytes_per_sample(AVSampleFormat sample_fmt) +{ + switch (sample_fmt) + { + case AV_SAMPLE_FMT_U8: + case AV_SAMPLE_FMT_U8P: + return 1; + + case AV_SAMPLE_FMT_S16: + case AV_SAMPLE_FMT_S16P: + return 2; + + case AV_SAMPLE_FMT_S32: + case AV_SAMPLE_FMT_S32P: + case AV_SAMPLE_FMT_FLT: + case AV_SAMPLE_FMT_FLTP: + return 4; + + case AV_SAMPLE_FMT_DBL: + case AV_SAMPLE_FMT_DBLP: + return 8; + + default: + case AV_SAMPLE_FMT_NONE: + Error("Invalid av sample format: %u", sample_fmt); + } + return 0; +} + + + +struct AVStreamWrapper +{ + inline AVStreamWrapper() + { + this->type = AVMEDIA_TYPE_UNKNOWN; + this->stream_idx = -1; + this->stream = nullptr; + this->dec = nullptr; + this->dec_ctx = nullptr; + } + + inline void Release() + { + type = AVMEDIA_TYPE_UNKNOWN; + stream_idx = -1; + stream = nullptr; + dec = nullptr; + if (dec_ctx) + { + avcodec_close(dec_ctx); + dec_ctx = nullptr; + } + } + + AVMediaType 
type; + int stream_idx; + AVStream *stream; + AVCodec *dec; + AVCodecContext *dec_ctx; +}; + +struct AVAudioStream: public AVStreamWrapper +{ + inline AVAudioStream(): + AVStreamWrapper() + { + this->bytes_per_sample = 0; + this->bytes_per_second = 0; + } + + int bytes_per_sample; + int bytes_per_second; +}; + +struct AVVideoStream: public AVStreamWrapper +{ + inline AVVideoStream(): + AVStreamWrapper() + { + this->frames_per_second = 0.0; + } + + double frames_per_second; +}; + +static bool av_open_stream(AVFormatContext *format_ctx, AVMediaType type, AVStreamWrapper *out_stream) +{ + int stream_idx = av_find_best_stream(format_ctx, type, -1, -1, nullptr, 0); + if (stream_idx >= 0) + { + auto stream = format_ctx->streams[stream_idx]; + auto dec_ctx = stream->codec; + auto dec = avcodec_find_decoder(dec_ctx->codec_id); + if (dec) + { + if (avcodec_open2(dec_ctx, dec, nullptr) >= 0) + { + out_stream->type = type; + out_stream->stream_idx = stream_idx; + out_stream->stream = stream; + out_stream->dec = dec; + out_stream->dec_ctx = dec_ctx; + return true; + } + } + } + return false; +} + +static bool av_open_audio_stream(AVFormatContext *format_ctx, AVAudioStream *out_stream) +{ + if (!av_open_stream(format_ctx, AVMEDIA_TYPE_AUDIO, out_stream)) + return Error("Audio stream not found"), false; + + // we support only 2-channel audio for now + if (out_stream->dec_ctx->channels != 2) + { + out_stream->Release(); + return Error("Unsupported number of channels: %u", out_stream->dec_ctx->channels), false; + } + + out_stream->bytes_per_sample = av_num_bytes_per_sample(out_stream->dec_ctx->sample_fmt); + out_stream->bytes_per_second = out_stream->dec_ctx->channels * out_stream->dec_ctx->sample_rate * out_stream->bytes_per_sample; + + return true; +} + +static bool av_open_video_stream(AVFormatContext *format_ctx, AVVideoStream *out_stream) +{ + if (!av_open_stream(format_ctx, AVMEDIA_TYPE_VIDEO, out_stream)) + return Error("Video stream not found"), false; + + out_stream->frames_per_second = (double)out_stream->dec_ctx->time_base.den / (double)out_stream->dec_ctx->time_base.num; + return true; +} + + + +void InterleaveAudioData(MemoryStream *stream, AVSampleFormat src_format, int num_channels, int num_samples, uint8_t **channels) +{ + unsigned int bytes_per_sample; + switch (src_format) + { + case AV_SAMPLE_FMT_U8: + case AV_SAMPLE_FMT_U8P: + __debugbreak(); + + case AV_SAMPLE_FMT_S16: + bytes_per_sample = sizeof(__int16); + stream->Write(channels[0], num_channels * num_samples * bytes_per_sample); + break; + + case AV_SAMPLE_FMT_S16P: + { + bytes_per_sample = sizeof(__int16); + for (int i = 0; i < num_samples; ++i) + for (int j = 0; j < num_channels; ++j) + stream->Write(channels[j] + i * bytes_per_sample, bytes_per_sample); + } + break; + + case AV_SAMPLE_FMT_FLT: + { + SwrContext *converter = swr_alloc(); + av_opt_set_int(converter, "in_channel_layout", av_get_default_channel_layout(2), 0); + //av_opt_set_int(converter, "in_sample_rate", sample_ra, 0); + av_opt_set_sample_fmt(converter, "in_sample_fmt", AV_SAMPLE_FMT_FLT, 0); + + av_opt_set_int(converter, "out_channel_layout", av_get_default_channel_layout(2), 0); + //av_opt_set_int(converter, "out_sample_rate", dst_sample_rate, 0); + av_opt_set_sample_fmt(converter, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + + if (swr_init(converter) < 0) + { + __debugbreak(); + swr_free(&converter); + return; + } + + uint8_t **dst_channels; + int dst_linesize[8]; + //int dst_nb_channels = av_get_channel_layout_nb_channels(dst_channel_layout); + if 
(av_samples_alloc_array_and_samples(&dst_channels, dst_linesize, 2, num_channels * num_samples, AV_SAMPLE_FMT_S16, 0) < 0) + { + __debugbreak(); + swr_free(&converter); + return; + } + + if (swr_convert(converter, dst_channels, num_channels * num_samples, (const uint8_t **)channels, num_channels * num_samples) >= 0) + stream->Write(dst_channels[0], num_channels * num_samples * sizeof(__int16)); + else + __debugbreak(); + + av_free(dst_channels[0]); + swr_free(&converter); + } + break; + + default: + __debugbreak(); + //if (Resample(next_frame->avframe, next_frame->avframe->channel_layout, next_frame->avframe->sample_rate, + // av_get_default_channel_layout(2), next_frame->avframe->sample_rate, AV_SAMPLE_FMT_S16P, resampled_data)) + } +} + + + +bool DecodeAudioFrame(AVCodecContext *dec_ctx, AVPacket *avpacket, AVFrame *avframe, MemoryStream *out_audio_data, int *out_num_audio_samples) +{ + volatile int decoded = false; + do + { + if (avcodec_decode_audio4(dec_ctx, avframe, (int *)&decoded, avpacket) < 0) + { + log("Cannot decode audio frame\n"); + return false; + } + + if (!decoded) + log("Cannot decode audio frame in one piece\n"); + } while (!decoded); + + switch (dec_ctx->codec_id) + { + case AV_CODEC_ID_BINKAUDIO_RDFT: + {//pts samples dpts + // 0 960 + //17280 960 17280 18x960 + //18240 960 960 1x960 + //20160 960 1920 2x960 + //21120 960 960 1x960 + //23040 960 1920 2x960 + static int bink_next_pts = 0; + + // there's a gap in the sound - fill empty samples in + if (bink_next_pts < avpacket->pts) + { + short silence[1024]; + memset(silence, 0, sizeof(silence)); + + int samples_to_fill = /*dec_ctx->channels * */(avpacket->pts - bink_next_pts); + while (samples_to_fill > 0) + { + int samples_to_fill_this_step = samples_to_fill >= 1024 ? 1024 : samples_to_fill; + out_audio_data->Write(silence, samples_to_fill_this_step * sizeof(short)); + + samples_to_fill -= samples_to_fill_this_step; + } + } + + bink_next_pts = avpacket->pts + /*dec_ctx->channels * */avframe->nb_samples; + } + break; + /* + case AV_CODEC_ID_SMACKAUDIO: + { + static int smack_debug_next_audio_time = 0; + if (smack_debug_next_audio_time != packet->pts) + { + Error("There's a gap in the sound before frame %u\n", num_audio_frames); + __debugbreak(); // there's a gap in the sound + } + + int num_actual_data_channels = 0; + switch (dec_ctx->sample_fmt) + { + case AV_SAMPLE_FMT_U8: + case AV_SAMPLE_FMT_S16: + case AV_SAMPLE_FMT_S32: + case AV_SAMPLE_FMT_FLT: + case AV_SAMPLE_FMT_DBL: + num_actual_data_channels = 1; + break; + + case AV_SAMPLE_FMT_U8P: + case AV_SAMPLE_FMT_S16P: + case AV_SAMPLE_FMT_S32P: + case AV_SAMPLE_FMT_FLTP: + case AV_SAMPLE_FMT_DBLP: + num_actual_data_channels = dec_ctx->channels; + break; + + default: + case AV_SAMPLE_FMT_NONE: + case AV_SAMPLE_FMT_NB: + __debugbreak(); + } + + smack_debug_next_audio_time += dec_ctx->channels * frame->nb_samples * bytes_per_sample; + Assert(frame->avframe->linesize[0] == audio.dec_ctx->channels * frame->avframe->nb_samples * audio.bytes_per_sample / num_actual_data_channels, + "Smack audio size mismatch in frame %u in %s\n", audio_num_read_frames, movie_filename); + + frame->play_time = (double)frame->avpacket->pts / (double)audio.bytes_per_second; + } + break; + + case AV_CODEC_ID_MP3: + { + static int mp3_samples_decoded_so_far = 0; + static int mp3_prev_samples_count = frame->avframe->nb_samples; // mp3 seems to always feed same amount of samples + frame->play_time = (double)mp3_samples_decoded_so_far / (double)audio.dec_ctx->sample_rate; + + 
mp3_samples_decoded_so_far += frame->avframe->nb_samples; + Assert(mp3_prev_samples_count == frame->avframe->nb_samples, + "MP3 audio have variable sample count in frame %u in %s\n", audio_num_read_frames, movie_filename); + } + break; + + default: + { + __debugbreak(); + double samples_per_second = (double)audio.dec_ctx->time_base.den / (double)audio.dec_ctx->time_base.num; + double play_length = frame->avframe->nb_samples / samples_per_second; + frame->play_time = (double)frame->avpacket->pts / samples_per_second; + } + break;*/ + } + + if (!avframe->channel_layout) + { + log("Audio channel layout not specified, rolling back to default\n"); + avframe->channel_layout = av_get_default_channel_layout(dec_ctx->channels); + } + + *out_num_audio_samples = dec_ctx->channels * avframe->nb_samples; + InterleaveAudioData(out_audio_data, dec_ctx->sample_fmt, + dec_ctx->channels, avframe->nb_samples, avframe->data); + return true; +} + + +bool LoadAudioTrack(AVFormatContext *format_ctx, AVCodecContext *dec_ctx, int audio_stream_idx, MemoryStream *out_audio_stream, int *out_num_audio_frames, int *out_num_audio_samples) +{ + out_audio_stream->Reset(); + + AVFrame *frame = avcodec_alloc_frame(); + AVPacket *packet = new AVPacket; + av_init_packet(packet); + + int num_audio_frames = 0; + int num_audio_samples = 0; + while (av_read_frame(format_ctx, packet) >= 0) + { + if (packet->stream_index != audio_stream_idx) + { + //log("Suspicious stream id %u in %s", packet->stream_index, filenamea); + continue; + } + + int num_samples_decoded; + DecodeAudioFrame(dec_ctx, packet, frame, out_audio_stream, &num_samples_decoded); + + num_audio_samples += num_samples_decoded; + num_audio_frames++; + } + *out_num_audio_frames = num_audio_frames; + *out_num_audio_samples = num_audio_samples; + + avcodec_free_frame(&frame); + delete packet; + + return true; +} + + +class Track: public Media::ITrack +{ + public: + inline Track() + { + this->format_ctx = nullptr; + this->audio_num_samples = 0; + } + + void Release() + { + ReleaseAvcodec(); + } + + void ReleaseAvcodec() + { + audio.Release(); + if (format_ctx) + { + av_close_input_file(format_ctx); + format_ctx = nullptr; + } + } + + bool Load(const wchar_t *filename) + { + char filenamea[1024]; + sprintf(filenamea, "%S", filename); + + if (avformat_open_input(&format_ctx, filenamea, nullptr, nullptr) >= 0) + { + if (avformat_find_stream_info(format_ctx, nullptr) >= 0) + { + av_dump_format(format_ctx, 0, filenamea, 0); + + if (!av_open_audio_stream(format_ctx, &audio)) + { + Error("Cannot open strack: %s", filenamea); + return Release(), false; + } + + MemoryStream audio_plain_data; + int num_audio_frames; + int num_audio_samples; + if (LoadAudioTrack(format_ctx, audio.dec_ctx, audio.stream_idx, &audio_plain_data, &num_audio_frames, &num_audio_samples)) + { + /*#ifdef _DEBUG + char debug_filename[1024]; + sprintf(debug_filename, "%s.wav", filenamea); + FILE *wav = fopen(debug_filename, "w+b"); + + extern void write_wav_header(FILE *wav, int channel_count = 2, int sample_rate = 22050, int bytes_per_sample = 2); + write_wav_header(wav, audio.dec_ctx->channels, audio.dec_ctx->sample_rate, audio.bytes_per_sample); + + fwrite(audio_plain_data.Ptr(), audio_plain_data.Current(), 1, wav); + + extern void fix_wav_header(FILE *wav, int wav_bytes_in_stream); + fix_wav_header(wav, audio_plain_data.Current()); + #endif*/ + + device_buffer = provider->CreateTrack16(audio.dec_ctx->channels, audio.dec_ctx->sample_rate, 2, num_audio_samples, audio_plain_data.Ptr()); + + ReleaseAvcodec(); + 
return true; + } + } + Release(); + } + return false; + } + + virtual void Play(bool loop) + { + provider->PlayTrack16(device_buffer, loop); + } + + + protected: + AVFormatContext *format_ctx; + AVAudioStream audio; + int audio_num_samples; + + OpenALSoundProvider::TrackBuffer *device_buffer; +}; + + + +class Movie: public Media::IMovie +{ + public: + inline Movie() + { + this->movie_filename[0] = 0; + this->width = 0; + this->height = 0; + this->format_ctx = nullptr; + this->end_of_file = false; + this->playback_time = 0.0; + + this->num_audio_frames = 0; + this->num_audio_samples = 0; + + this->last_resampled_frame_num = -1; + memset(last_resampled_frame_data, 0, sizeof(last_resampled_frame_data)); + memset(last_resampled_frame_linesize, 0, sizeof(last_resampled_frame_linesize)); + } + + + inline void Release() + { + ReleaseAVCodec(); + } + + inline void ReleaseAVCodec() + { + audio.Release(); + video.Release(); + if (format_ctx) + { + av_close_input_file(format_ctx); + format_ctx = nullptr; + } + } + + bool Load(const wchar_t *filename, int dst_width, int dst_height, int cache_ms) + { + char filenamea[1024]; + sprintf(filenamea, "%S", filename); + sprintf(movie_filename, "%S", filename); + + width = dst_width; + height = dst_height; + if (avformat_open_input(&format_ctx, filenamea, nullptr, nullptr) >= 0) + { + if (avformat_find_stream_info(format_ctx, nullptr) >= 0) + { + av_dump_format(format_ctx, 0, filenamea, 0); + + if (!av_open_audio_stream(format_ctx, &audio)) + { + Error("Cannot open audio stream: %s", filenamea); + return Release(), false; + } + + if (!av_open_video_stream(format_ctx, &video)) + { + Error("Cannot open video stream: %s", filenamea); + return Release(), false; + } + + decoding_packet = new AVPacket; + av_init_packet(decoding_packet); + + decoding_frame = avcodec_alloc_frame(); + + audio_data_in_device = provider->CreateStreamingTrack16(audio.dec_ctx->channels, audio.dec_ctx->sample_rate, 2); + + return true; + } + } + return false; + } + + virtual void Play() + { + } + + virtual void GetNextFrame(double dt, void *dst_surface) + { + playback_time += dt; + + AVPacket *avpacket = decoding_packet; + AVFrame *avframe = decoding_frame; + avcodec_get_frame_defaults(avframe); + + int desired_frame_number = floor(playback_time * video.dec_ctx->time_base.den / video.dec_ctx->time_base.num + 0.5); + if (last_resampled_frame_num == desired_frame_number) + { + memcpy(dst_surface, last_resampled_frame_data[0], height * last_resampled_frame_linesize[0]); + return; + } + + volatile int decoded = false; + do + { + if (av_read_frame(format_ctx, avpacket) < 0) + { + // probably movie is finished + __debugbreak(); + } + + // audio packet - queue into playing + if (avpacket->stream_index == audio.stream_idx) + { + MemoryStream audio_data; + if (DecodeAudioFrame(audio.dec_ctx, avpacket, avframe, &audio_data, &num_audio_samples)) + provider->Stream16(audio_data_in_device, num_audio_samples, audio_data.Ptr()); + } + // video packet - decode & maybe show + else if (avpacket->stream_index == video.stream_idx) + { + do + { + if (avcodec_decode_video2(video.dec_ctx, avframe, (int *)&decoded, avpacket) < 0) + __debugbreak(); + } while (!decoded); + } + + } while (avpacket->stream_index != video.stream_idx || + avpacket->pts != desired_frame_number); + + if (decoded) + { + if (last_resampled_frame_data[0]) + av_freep(&last_resampled_frame_data[0]); + + AVPixelFormat rescaled_format = AV_PIX_FMT_RGB32; + uint8_t *rescaled_data[4] = {nullptr, nullptr, nullptr, nullptr}; + int 
rescaled_linesize[4] = {0, 0, 0, 0}; + if (av_image_alloc(rescaled_data, rescaled_linesize, width, height, rescaled_format, 1) >= 0) + { + SwsContext *converter = sws_getContext(avframe->width, avframe->height, (AVPixelFormat)avframe->format, + width, height, rescaled_format, + SWS_BICUBIC, nullptr, nullptr, nullptr); + sws_scale(converter, avframe->data, avframe->linesize, 0, avframe->height, rescaled_data, rescaled_linesize); + sws_freeContext(converter); + + memcpy(dst_surface, rescaled_data[0], height * rescaled_linesize[0]); + + last_resampled_frame_num = desired_frame_number; + memcpy(last_resampled_frame_data, rescaled_data, sizeof(rescaled_data)); + memcpy(last_resampled_frame_linesize, rescaled_linesize, sizeof(rescaled_linesize)); + } + } + else + memset(dst_surface, 0, width * height * 4); + } + + protected: + char movie_filename[256]; + int width; + int height; + AVFormatContext *format_ctx; + double playback_time; + bool end_of_file; + + AVPacket *decoding_packet; + AVFrame *decoding_frame; + + AVAudioStream audio; + int num_audio_frames; + int num_audio_samples; + OpenALSoundProvider::StreamingTrackBuffer *audio_data_in_device; + + AVVideoStream video; + int last_resampled_frame_num; + uint8_t *last_resampled_frame_data[4]; + int last_resampled_frame_linesize[4]; +}; + + +ITrack *Player::LoadTrack(const wchar_t *filename) +{ + auto track = new Track; + if (!track->Load(filename)) + { + delete track; + track = nullptr; + } + return track; +} + + +IMovie *Player::LoadMovie(const wchar_t *filename, int width, int height, int cache_ms) +{ + auto movie = new Movie; + if (!movie->Load(filename, width, height, cache_ms)) + { + delete movie; + movie = nullptr; + } + return movie; +} + + + + + + +void av_logger(void *, int, const char *format, va_list args) +{ + va_list va; + va_start(va, format); + char msg[256]; + vsprintf(msg, format, va); + va_end(va); + + log("av: %s", msg); +} + +Player::Player() +{ + static int libavcodec_initialized = false; + + if (!libavcodec_initialized) + { + av_log_set_callback(av_logger); + avcodec_register_all(); + av_register_all(); + + libavcodec_initialized = true; + } + + if (!provider) + { + provider = new OpenALSoundProvider; + provider->Initialize(); + } +} + +Player::~Player() +{ +} \ No newline at end of file
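The MMT.cpp hunk above only exercises the audio path of the new player. Below is a minimal, hedged sketch of how the movie side added in MediaPlayer.cpp might be driven: the file name, 640x480 target size, cache_ms value, and frame timing are illustrative assumptions rather than anything from the repository, and the buffer size assumes the AV_PIX_FMT_RGB32 output that Movie::GetNextFrame produces.

```cpp
// Hedged usage sketch (not part of this changeset): pump Media::IMovie frame by frame.
// Assumes a 32-bit RGB destination surface of width * height * 4 bytes, matching the
// AV_PIX_FMT_RGB32 output in Movie::GetNextFrame; names and sizes are illustrative.
#include <vector>
#include "MediaPlayer.h"

void PlayIntroMovie()  // hypothetical caller
{
    Media::Player player;
    // LoadMovie returns nullptr when the container or its streams cannot be opened.
    Media::IMovie *movie = player.LoadMovie(L"Anims\\Intro.smk", 640, 480, 100);
    if (!movie)
        return;

    std::vector<unsigned char> surface(640 * 480 * 4);  // RGB32 target surface
    const double dt = 1.0 / 15.0;                       // illustrative frame step, seconds
    for (int frame = 0; frame < 15 * 10; ++frame)       // ~10 seconds of frames
    {
        // Decodes/queues audio packets internally and rescales the video frame into
        // the surface. End-of-stream handling is still a __debugbreak() stub in this
        // revision, so the loop length is bounded here.
        movie->GetNextFrame(dt, surface.data());
        // blit surface.data() to the screen here
    }
    // No teardown is defined for IMovie in this revision (mirroring the leaked Track
    // in MMT.cpp), so the object is intentionally not deleted in this sketch.
}
```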
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MediaPlayer.h	Wed Mar 19 10:09:42 2014 +0600
@@ -0,0 +1,26 @@
+#pragma once
+
+namespace Media
+{
+  class ITrack
+  {
+    public: virtual void Play(bool loop = false) = 0;
+  };
+
+  class IMovie
+  {
+    public: virtual void Play() = 0;
+            virtual void GetNextFrame(double dt, void *target_surface) = 0;
+  };
+
+  class Player
+  {
+    public:
+      Player();
+      virtual ~Player();
+
+
+      ITrack *LoadTrack(const wchar_t *name);
+      IMovie *LoadMovie(const wchar_t *name, int width, int height, int cache_ms);
+  };
+};
\ No newline at end of file
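For the audio interface declared above, a hedged sketch that mirrors the MMT.cpp hunk but adds the nullptr check that Player::LoadTrack's failure path calls for. Only the track path is taken from the changeset; the surrounding function is illustrative.

```cpp
// Hedged sketch (not part of this changeset): audio playback through the Media
// interface above, with the nullptr check for LoadTrack's failure path.
#include "MediaPlayer.h"

void PlayMenuTheme()  // hypothetical caller
{
    Media::Player player;
    Media::ITrack *track = player.LoadTrack(L"Sounds\\New_Sounds/Stronghold_Theme.mp3");
    if (!track)        // LoadTrack returns nullptr if the file or codec cannot be opened
        return;

    track->Play(true); // loop defaults to false; pass true to keep the theme looping
    // As in MMT.cpp, this revision defines no Stop/Release on ITrack, so playback
    // continues until the OpenAL source is torn down elsewhere.
}
```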
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/OpenALSoundProvider.h Wed Mar 19 10:09:42 2014 +0600 @@ -0,0 +1,344 @@ +#pragma once +#include "lib/OpenAL/al.h" +#include "lib/OpenAL/alc.h" +#pragma comment(lib, "OpenAL32.lib") + +#include "stuff.h" + + +class OpenALSoundProvider +{ + public: + struct TrackBuffer + { + unsigned int source_id; + unsigned int buffer_id; + }; + + struct StreamingTrackBuffer + { + unsigned int source_id; + ALenum sample_format; + int sample_rate; + }; + + inline OpenALSoundProvider() + { + this->device = nullptr; + this->context = nullptr; + } + + inline bool Initialize() + { + auto device_names = alcGetString(nullptr, ALC_ALL_DEVICES_SPECIFIER); + if (!device_names) + device_names = alcGetString(nullptr, ALC_DEVICE_SPECIFIER); + if (device_names) + { + for (auto device_name = device_names; device_name[0]; device_name += strlen(device_name)) + { + continue; + } + } + + device = alcOpenDevice(nullptr); + if (!device || CheckError()) + return false; + + context = alcCreateContext(device, nullptr); + if (!context || CheckError()) + return Release(), false; + + alcMakeContextCurrent(context); + + bool eax2 = alIsExtensionPresent("EAX2.0"); + bool eax3 = alIsExtensionPresent("EAX3.0"); + bool eax4 = alIsExtensionPresent("EAX4.0"); + bool eax5 = alIsExtensionPresent("EAX5.0"); + + auto vendor = alGetString(AL_VENDOR); + auto version = alGetString(AL_VERSION); + auto extensions = alcGetString(device, ALC_EXTENSIONS); + + return true; + } + + void Release() + { + alcMakeContextCurrent(nullptr); + if (context) + { + alcDestroyContext(context); + context = nullptr; + } + if (device) + { + alcCloseDevice(device); + device = nullptr; + } + } + + + void DeleteBuffer16(TrackBuffer **buffer) + { + alDeleteBuffers(1, &(*buffer)->buffer_id); + CheckError(); + + delete *buffer; + *buffer = nullptr; + } + + float alBufferLength(unsigned int buffer) + { + int size, bits, channels, freq; + + alGetBufferi(buffer, AL_SIZE, &size); + alGetBufferi(buffer, AL_BITS, &bits); + alGetBufferi(buffer, AL_CHANNELS, &channels); + alGetBufferi(buffer, AL_FREQUENCY, &freq); + if (CheckError()) + return 0.0f; + + return (ALfloat)((ALuint)size / channels / (bits / 8)) / (ALfloat)freq; + } + + StreamingTrackBuffer *CreateStreamingTrack16(int num_channels, int sample_rate, int bytes_per_sample) + { + Assert(bytes_per_sample == 2, "OpenALSoundProvider: unsupported sample size: %u", bytes_per_sample); + + ALenum sound_format; + switch (num_channels) + { + case 1: sound_format = AL_FORMAT_MONO16; break; + case 2: sound_format = AL_FORMAT_STEREO16; break; + default: + if (bool multichannel = alIsExtensionPresent("AL_EXT_MCFORMATS")) + { + switch (num_channels) + { + case 4: sound_format = alGetEnumValue("AL_FORMAT_QUAD16"); break; + case 6: sound_format = alGetEnumValue("AL_FORMAT_51CHN16"); break; + case 7: sound_format = alGetEnumValue("AL_FORMAT_61CHN16"); break; + case 8: sound_format = alGetEnumValue("AL_FORMAT_71CHN16"); break; + } + } + Error("Unsupported number of audio channels: %u", num_channels); + } + + unsigned int al_source = -1; + alGenSources(1, &al_source); + if (CheckError()) + return nullptr; + + float sound_pos[] = {0.0f, 0.0f, 0.0f}, + sound_vel[] = {0.0f, 0.0f, 0.0f}; + + alSourcei(al_source, AL_LOOPING, AL_FALSE); + alSourcef(al_source, AL_PITCH, 1.0f); + alSourcef(al_source, AL_GAIN, 1.0f); + alSourcefv(al_source, AL_POSITION, sound_pos); + alSourcefv(al_source, AL_VELOCITY, sound_vel); + + auto ret = new StreamingTrackBuffer; + ret->source_id = al_source; + ret->sample_format 
= sound_format; + ret->sample_rate = sample_rate; + return ret; + } + + void Stream16(StreamingTrackBuffer *buffer, int num_samples, const void *samples, bool wait = false) + { + int bytes_per_sample = 2; + + unsigned int al_buffer; + alGenBuffers(1, &al_buffer); + alBufferData(al_buffer, buffer->sample_format, samples, num_samples * bytes_per_sample, buffer->sample_rate); + if (CheckError()) + { + alDeleteBuffers(1, &al_buffer); + return; + } + + int num_processed_buffers = 0; + alGetSourcei(buffer->source_id, AL_BUFFERS_PROCESSED, &num_processed_buffers); + for (int i = 0; i < num_processed_buffers; ++i) + { + unsigned int processed_buffer_id; + alSourceUnqueueBuffers(buffer->source_id, 1, &processed_buffer_id); + if (!CheckError()) + alDeleteBuffers(1, &processed_buffer_id); + } + + alSourceQueueBuffers(buffer->source_id, 1, &al_buffer); + if (CheckError()) + { + alDeleteBuffers(1, &al_buffer); + return; + } + + volatile int status; + alGetSourcei(buffer->source_id, AL_SOURCE_STATE, (int *)&status); + if (status != AL_PLAYING) + { + float listener_pos[] = {0.0f, 0.0f, 0.0f}; + float listener_vel[] = {0.0f, 0.0f, 0.0f}; + float listener_orientation[] = {0.0f, 0.0f, -1.0f, // direction + 0.0f, 1.0f, 0.0f}; // up vector + alListenerfv(AL_POSITION, listener_pos); + alListenerfv(AL_VELOCITY, listener_vel); + alListenerfv(AL_ORIENTATION, listener_orientation); + + alSourcePlay(buffer->source_id); + if (CheckError()) + __debugbreak(); + + if (wait) + { + do + { + alGetSourcei(buffer->source_id, AL_SOURCE_STATE, (int *)&status); + } + while (status == AL_PLAYING); + } + } + } + + + + + TrackBuffer *CreateTrack16(int num_channels, int sample_rate, int bytes_per_sample, int num_samples, const void *samples) + { + Assert(bytes_per_sample == 2, "OpenALSoundProvider: unsupported sample size: %u", bytes_per_sample); + + ALenum sound_format; + switch (num_channels) + { + case 1: sound_format = AL_FORMAT_MONO16; break; + case 2: sound_format = AL_FORMAT_STEREO16; break; + default: + if (bool multichannel = alIsExtensionPresent("AL_EXT_MCFORMATS")) + { + switch (num_channels) + { + case 4: sound_format = alGetEnumValue("AL_FORMAT_QUAD16"); break; + case 6: sound_format = alGetEnumValue("AL_FORMAT_51CHN16"); break; + case 7: sound_format = alGetEnumValue("AL_FORMAT_61CHN16"); break; + case 8: sound_format = alGetEnumValue("AL_FORMAT_71CHN16"); break; + } + } + Error("Unsupported number of audio channels: %u", num_channels); + } + + unsigned int al_source = -1; + alGenSources(1, &al_source); + if (CheckError()) + return nullptr; + + float sound_pos[] = {0.0f, 0.0f, 0.0f}, + sound_vel[] = {0.0f, 0.0f, 0.0f}; + + alSourcei(al_source, AL_LOOPING, AL_FALSE); + alSourcef(al_source, AL_PITCH, 1.0f); + alSourcef(al_source, AL_GAIN, 1.0f); + alSourcefv(al_source, AL_POSITION, sound_pos); + alSourcefv(al_source, AL_VELOCITY, sound_vel); + + unsigned int al_buffer = -1; + alGenBuffers(1, &al_buffer); + if (CheckError()) + { + alDeleteSources(1, &al_source); + return nullptr; + } + + alBufferData(al_buffer, sound_format, samples, num_samples * bytes_per_sample, sample_rate); + if (CheckError()) + { + alDeleteSources(1, &al_source); + alDeleteBuffers(1, &al_buffer); + return nullptr; + } + + alSourcei(al_source, AL_BUFFER, al_buffer); + if (CheckError()) + { + alDeleteSources(1, &al_source); + alDeleteBuffers(1, &al_buffer); + return nullptr; + } + + auto ret = new TrackBuffer; + ret->source_id = al_source; + ret->buffer_id = al_buffer; + return ret; + } + + + void PlayTrack16(TrackBuffer *buffer, bool loop = false, 
bool wait = false) + { + volatile int status; + alGetSourcei(buffer->source_id, AL_SOURCE_STATE, (int *)&status); + if (status == AL_PLAYING) + Error("Already playing"); + else + { + float listener_pos[] = {0.0f, 0.0f, 0.0f}; + float listener_vel[] = {0.0f, 0.0f, 0.0f}; + float listener_orientation[] = {0.0f, 0.0f, -1.0f, // direction + 0.0f, 1.0f, 0.0f}; // up vector + alListenerfv(AL_POSITION, listener_pos); + alListenerfv(AL_VELOCITY, listener_vel); + alListenerfv(AL_ORIENTATION, listener_orientation); + + alSourcei(buffer->source_id, AL_LOOPING, loop ? AL_TRUE : AL_FALSE); + alSourcePlay(buffer->source_id); + if (CheckError()) + __debugbreak(); + + if (wait && !loop) + { + float track_length = alBufferLength(buffer->buffer_id); + do + { + float track_offset = 0; + alGetSourcef(buffer->source_id, AL_SEC_OFFSET, &track_offset); + log("playing: %.4f/%.4f\n", track_offset, track_length); + + alGetSourcei(buffer->source_id, AL_SOURCE_STATE, (int *)&status); + } + while (status == AL_PLAYING); + } + } + } + + + + protected: + ALCdevice *device; + ALCcontext *context; + + + bool CheckError() + { + ALenum code1 = alGetError(); + if (code1 != AL_NO_ERROR) + { + DWORD w; + const char *message = alGetString(code1); + WriteConsoleA(GetStdHandle(STD_OUTPUT_HANDLE), message, lstrlenA(message), &w, nullptr); + WriteConsoleA(GetStdHandle(STD_OUTPUT_HANDLE), "\n", 1, &w, nullptr); + return true; + } + + ALenum code2 = alcGetError(device); + if (code2 != ALC_NO_ERROR) + { + DWORD w; + const char *message = alcGetString(device, code2); + WriteConsoleA(GetStdHandle(STD_OUTPUT_HANDLE), message, lstrlenA(message), &w, nullptr); + WriteConsoleA(GetStdHandle(STD_OUTPUT_HANDLE), "\n", 1, &w, nullptr); + return true; + } + return false; + } +}; \ No newline at end of file
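Finally, a hedged sketch of driving OpenALSoundProvider directly, the way Track::Load feeds it decoded audio in MediaPlayer.cpp. Only the provider calls come from this header; the generated test tone and every name in the function are illustrative. Note that CreateTrack16 treats num_samples as the total interleaved sample count (channels included), which is how Track::Load passes it.

```cpp
// Hedged sketch (not part of this changeset): hand OpenALSoundProvider a 16-bit
// stereo buffer directly. The 440 Hz test tone is illustrative; only the provider
// API (Initialize, CreateTrack16, PlayTrack16, DeleteBuffer16, Release) is real.
#include <cmath>
#include <vector>
#include "OpenALSoundProvider.h"

void PlayTestTone()  // hypothetical caller
{
    OpenALSoundProvider provider;
    if (!provider.Initialize())            // opens the default device and a context
        return;

    const int sample_rate = 22050;
    const int num_frames  = sample_rate;   // one second of stereo audio
    std::vector<short> samples(2 * num_frames);         // interleaved L/R samples
    for (int i = 0; i < num_frames; ++i)
    {
        short s = (short)(3000 * sin(2.0 * 3.14159265 * 440.0 * i / sample_rate));
        samples[2 * i] = samples[2 * i + 1] = s;         // same tone on both channels
    }

    // CreateTrack16 takes the *total* interleaved sample count (channels included),
    // matching how Track::Load passes num_audio_samples in MediaPlayer.cpp.
    auto *buffer = provider.CreateTrack16(2, sample_rate, 2, 2 * num_frames, samples.data());
    if (buffer)
    {
        provider.PlayTrack16(buffer, /*loop*/ false, /*wait*/ true);  // blocks until done
        provider.DeleteBuffer16(&buffer);
    }
    provider.Release();
}
```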