diff --git a/.travis.yml b/.travis.yml
index 91b72272..87e286a7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,7 +5,7 @@ branches:
- master
before_install:
- - export VK_VERSION=1.1.126.0
+ - export VK_VERSION=1.2.131.2
matrix:
include:
diff --git a/appveyor.yml b/appveyor.yml
index 505d8f55..9854fc23 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -5,7 +5,7 @@ branches:
- master
environment:
- VK_VERSION: 1.1.126.0
+ VK_VERSION: 1.2.131.2
VULKAN_SDK: c:\VulkanSDK\%VK_VERSION%
skip_tags: true
diff --git a/client/menu.c b/client/menu.c
index ecc1d0d9..edad245b 100644
--- a/client/menu.c
+++ b/client/menu.c
@@ -1055,8 +1055,8 @@ static menulist_s s_options_lookspring_box;
static menulist_s s_options_lookstrafe_box;
static menulist_s s_options_crosshair_box;
static menuslider_s s_options_sfxvolume_slider;
+static menuslider_s s_options_cdvolume_slider;
static menulist_s s_options_joystick_box;
-static menulist_s s_options_cdvolume_box;
static menulist_s s_options_quality_list;
static menulist_s s_options_compatibility_list;
static menulist_s s_options_console_action;
@@ -1106,7 +1106,7 @@ static float ClampCvar( float min, float max, float value )
static void ControlsSetMenuItemValues( void )
{
s_options_sfxvolume_slider.curvalue = Cvar_VariableValue( "s_volume" ) * 10;
- s_options_cdvolume_box.curvalue = !Cvar_VariableValue("cd_nocd");
+ s_options_cdvolume_slider.curvalue = Cvar_VariableValue("cd_volume") * 10;
s_options_quality_list.curvalue = !Cvar_VariableValue( "s_loadas8bit" );
s_options_sensitivity_slider.curvalue = ( sensitivity->value ) * 2;
@@ -1163,7 +1163,7 @@ static void UpdateVolumeFunc( void *unused )
static void UpdateCDVolumeFunc( void *unused )
{
- Cvar_SetValue( "cd_nocd", !s_options_cdvolume_box.curvalue );
+ Cvar_SetValue("cd_volume", s_options_cdvolume_slider.curvalue / 10);
}
static void ConsoleFunc( void *unused )
@@ -1206,12 +1206,6 @@ static void UpdateSoundQualityFunc( void *unused )
void Options_MenuInit( void )
{
- static const char *cd_music_items[] =
- {
- "disabled",
- "enabled",
- 0
- };
static const char *quality_items[] =
{
"low", "high", 0
@@ -1256,13 +1250,14 @@ void Options_MenuInit( void )
s_options_sfxvolume_slider.maxvalue = 10;
s_options_sfxvolume_slider.curvalue = Cvar_VariableValue( "s_volume" ) * 10;
- s_options_cdvolume_box.generic.type = MTYPE_SPINCONTROL;
- s_options_cdvolume_box.generic.x = 0;
- s_options_cdvolume_box.generic.y = 10 * vid_hudscale->value;
- s_options_cdvolume_box.generic.name = "CD music";
- s_options_cdvolume_box.generic.callback = UpdateCDVolumeFunc;
- s_options_cdvolume_box.itemnames = cd_music_items;
- s_options_cdvolume_box.curvalue = !Cvar_VariableValue("cd_nocd");
+ s_options_cdvolume_slider.generic.type = MTYPE_SLIDER;
+ s_options_cdvolume_slider.generic.x = 0;
+ s_options_cdvolume_slider.generic.y = 10 * vid_hudscale->value;
+ s_options_cdvolume_slider.generic.name = "music volume";
+ s_options_cdvolume_slider.generic.callback = UpdateCDVolumeFunc;
+ s_options_cdvolume_slider.minvalue = 0;
+ s_options_cdvolume_slider.maxvalue = 10;
+ s_options_cdvolume_slider.curvalue = Cvar_VariableValue("cd_volume") * 10;
s_options_quality_list.generic.type = MTYPE_SPINCONTROL;
s_options_quality_list.generic.x = 0;
@@ -1358,7 +1353,7 @@ void Options_MenuInit( void )
ControlsSetMenuItemValues();
Menu_AddItem( &s_options_menu, ( void * ) &s_options_sfxvolume_slider );
- Menu_AddItem( &s_options_menu, ( void * ) &s_options_cdvolume_box );
+ Menu_AddItem( &s_options_menu, ( void * ) &s_options_cdvolume_slider);
Menu_AddItem( &s_options_menu, ( void * ) &s_options_quality_list );
Menu_AddItem( &s_options_menu, ( void * ) &s_options_compatibility_list );
Menu_AddItem( &s_options_menu, ( void * ) &s_options_sensitivity_slider );
diff --git a/client/miniaudio.h b/client/miniaudio.h
index feae647f..2e9be4a0 100644
--- a/client/miniaudio.h
+++ b/client/miniaudio.h
@@ -1,320 +1,418 @@
/*
Audio playback and capture library. Choice of public domain or MIT-0. See license statements at the end of this file.
-miniaudio (formerly mini_al) - v0.9.5 - 2019-05-21
+miniaudio - v0.10.0 - 2020-03-07
David Reid - davidreidsoftware@gmail.com
+
+Website: https://miniaud.io
+GitHub: https://github.com/dr-soft/miniaudio
*/
/*
-MAJOR CHANGES IN VERSION 0.9
+RELEASE NOTES - VERSION 0.10
============================
-Version 0.9 includes major API changes, centered mostly around full-duplex and the rebrand to "miniaudio". Before I go into
-detail about the major changes I would like to apologize. I know it's annoying dealing with breaking API changes, but I think
-it's best to get these changes out of the way now while the library is still relatively young and unknown.
+Version 0.10 includes major API changes and refactoring, mostly concerned with the data conversion system. Data conversion is performed internally to convert
+audio data between the format requested when initializing the `ma_device` object and the format of the internal device used by the backend. The same applies
+to the `ma_decoder` object. The previous design has several design flaws and missing features which necessitated a complete redesign.
-There's been a lot of refactoring with this release so there's a good chance a few bugs have been introduced. I apologize in
-advance for this. You may want to hold off on upgrading for the short term if you're worried. If mini_al v0.8.14 works for
-you, and you don't need full-duplex support, you can avoid upgrading (though you won't be getting future bug fixes).
+Changes to Data Conversion
+--------------------------
+The previous data conversion system used callbacks to deliver input data for conversion. This design works well in some specific situations, but in other
+situations it has some major readability and maintenance issues. The decision was made to replace this with a more iterative approach where you just pass in a
+pointer to the input data directly rather than dealing with a callback.
-Rebranding to "miniaudio"
--------------------------
-The decision was made to rename mini_al to miniaudio. Don't worry, it's the same project. The reason for this is simple:
+The following are the data conversion APIs that have been removed and their replacements:
-1) Having the word "audio" in the title makes it immediately clear that the library is related to audio; and
-2) I don't like the look of the underscore.
+ - ma_format_converter -> ma_convert_pcm_frames_format()
+ - ma_channel_router -> ma_channel_converter
+ - ma_src -> ma_resampler
+ - ma_pcm_converter -> ma_data_converter
-This rebrand has necessitated a change in namespace from "mal" to "ma". I know this is annoying, and I apologize, but it's
-better to get this out of the road now rather than later. Also, since there are necessary API changes for full-duplex support
-I think it's better to just get the namespace change over and done with at the same time as the full-duplex changes. I'm hoping
-this will be the last of the major API changes. Fingers crossed!
+The previous conversion APIs accepted a callback in their configs. There are no longer any callbacks to deal with. Instead you just pass the data into the
+`*_process_pcm_frames()` function as a pointer to a buffer.
-The implementation define is now "#define MINIAUDIO_IMPLEMENTATION". You can also use "#define MA_IMPLEMENTATION" if that's
-your preference.
+The simplest aspect of data conversion is sample format conversion. To convert between two formats, just call `ma_convert_pcm_frames_format()`. Channel
+conversion is also simple which you can do with `ma_channel_converter` via `ma_channel_converter_process_pcm_frames()`.
+Resampling is more complicated because the number of output frames that are processed is different to the number of input frames that are consumed. When you
+call `ma_resampler_process_pcm_frames()` you need to pass in the number of input frames available for processing and the number of output frames you want to
+output. Upon returning they will receive the number of input frames that were consumed and the number of output frames that were generated.
-Full-Duplex Support
--------------------
-The major feature added to version 0.9 is full-duplex. This has necessitated a few API changes.
+The `ma_data_converter` API is a wrapper around format, channel and sample rate conversion and handles all of the data conversion you'll need which probably
+makes it the best option if you need to do data conversion.
-1) The data callback has now changed. Previously there was one type of callback for playback and another for capture. I wanted
- to avoid a third callback just for full-duplex so the decision was made to break this API and unify the callbacks. Now,
- there is just one callback which is the same for all three modes (playback, capture, duplex). The new callback looks like
- the following:
+In addition to changes to the API design, a few other changes have been made to the data conversion pipeline:
- void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount);
+ - The sinc resampler has been removed. This was completely broken and never actually worked properly.
+ - The linear resampler now uses low-pass filtering to remove aliasing. The quality of the low-pass filter can be controlled via the resampler config with the
+ `lpfOrder` option, which has a maximum value of MA_MAX_FILTER_ORDER.
+ - Data conversion now supports s16 natively which runs through a fixed point pipeline. Previously everything needed to be converted to floating point before
+ processing, whereas now both s16 and f32 are natively supported. Other formats still require conversion to either s16 or f32 prior to processing, however
+ `ma_data_converter` will handle this for you.
- This callback allows you to move data straight out of the input buffer and into the output buffer in full-duplex mode. In
- playback-only mode, pInput will be null. Likewise, pOutput will be null in capture-only mode. The sample count is no longer
- returned from the callback since it's not necessary for miniaudio anymore.
-2) The device config needed to change in order to support full-duplex. Full-duplex requires the ability to allow the client
- to choose a different PCM format for the playback and capture sides. The old ma_device_config object simply did not allow
- this and needed to change. With these changes you now specify the device ID, format, channels, channel map and share mode
- on a per-playback and per-capture basis (see example below). The sample rate must be the same for playback and capture.
+Custom Memory Allocators
+------------------------
+miniaudio has always supported macro level customization for memory allocation via MA_MALLOC, MA_REALLOC and MA_FREE, however some scenarios require more
+flexibility by allowing a user data pointer to be passed to the custom allocation routines. Support for this has been added to version 0.10 via the
+`ma_allocation_callbacks` structure. Anything making use of heap allocations has been updated to accept this new structure.
- Since the device config API has changed I have also decided to take the opportunity to simplify device initialization. Now,
- the device ID, device type and callback user data are set in the config. ma_device_init() is now simplified down to taking
- just the context, device config and a pointer to the device object being initialized. The rationale for this change is that
- it just makes more sense to me that these are set as part of the config like everything else.
+The `ma_context_config` structure has been updated with a new member called `allocationCallbacks`. Leaving this set to its defaults returned by
+`ma_context_config_init()` will cause it to use MA_MALLOC, MA_REALLOC and MA_FREE. Likewise, The `ma_decoder_config` structure has been updated in the same
+way, and leaving everything as-is after `ma_decoder_config_init()` will cause it to use the same defaults.
- Example device initialization:
+The following APIs have been updated to take a pointer to a `ma_allocation_callbacks` object. Setting this parameter to NULL will cause it to use defaults.
+Otherwise they will use the relevant callback in the structure.
- ma_device_config config = ma_device_config_init(ma_device_type_duplex); // Or ma_device_type_playback or ma_device_type_capture.
- config.playback.pDeviceID = &myPlaybackDeviceID; // Or NULL for the default playback device.
- config.playback.format = ma_format_f32;
- config.playback.channels = 2;
- config.capture.pDeviceID = &myCaptureDeviceID; // Or NULL for the default capture device.
- config.capture.format = ma_format_s16;
- config.capture.channels = 1;
- config.sampleRate = 44100;
- config.dataCallback = data_callback;
- config.pUserData = &myUserData;
+ - ma_malloc()
+ - ma_realloc()
+ - ma_free()
+ - ma_aligned_malloc()
+ - ma_aligned_free()
+ - ma_rb_init() / ma_rb_init_ex()
+ - ma_pcm_rb_init() / ma_pcm_rb_init_ex()
- result = ma_device_init(&myContext, &config, &device);
- if (result != MA_SUCCESS) {
- ... handle error ...
- }
+Note that you can continue to use MA_MALLOC, MA_REALLOC and MA_FREE as per normal. These will continue to be used by default if you do not specify custom
+allocation callbacks.
- Note that the "onDataCallback" member of ma_device_config has been renamed to "dataCallback". Also, "onStopCallback" has
- been renamed to "stopCallback".
-This is the first pass for full-duplex and there is a known bug. You will hear crackling on the following backends when sample
-rate conversion is required for the playback device:
- - Core Audio
- - JACK
- - AAudio
- - OpenSL
- - WebAudio
+Buffer and Period Configuration Changes
+---------------------------------------
+The way in which the size of the internal buffer and periods are specified in the device configuration have changed. In previous versions, the config variables
+`bufferSizeInFrames` and `bufferSizeInMilliseconds` defined the size of the entire buffer, with the size of a period being the size of this variable divided by
+the period count. This became confusing because people would expect the value of `bufferSizeInFrames` or `bufferSizeInMilliseconds` to independently determine
+latency, when in fact it was that value divided by the period count that determined it. These variables have been removed and replaced with new ones called
+`periodSizeInFrames` and `periodSizeInMilliseconds`.
-In addition to the above, not all platforms have been absolutely thoroughly tested simply because I lack the hardware for such
-thorough testing. If you experience a bug, an issue report on GitHub or an email would be greatly appreciated (and a sample
-program that reproduces the issue if possible).
+These new configuration variables work in the same way as their predecessors in that if one is set to 0, the other will be used, but the main difference is
+that you now set these to your desired latency rather than the size of the entire buffer. The benefit of this is that it's much easier and less confusing to
+configure latency.
+
+The following unused APIs have been removed:
+
+ ma_get_default_buffer_size_in_milliseconds()
+ ma_get_default_buffer_size_in_frames()
+
+The following macros have been removed:
+
+ MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_LOW_LATENCY
+ MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_CONSERVATIVE
Other API Changes
-----------------
-In addition to the above, the following API changes have been made:
+Other less major API changes have also been made in version 0.10.
-- The log callback is no longer passed to ma_context_config_init(). Instead you need to set it manually after initialization.
-- The onLogCallback member of ma_context_config has been renamed to "logCallback".
-- The log callback now takes a logLevel parameter. The new callback looks like: void log_callback(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message)
- - You can use ma_log_level_to_string() to convert the logLevel to human readable text if you want to log it.
-- Some APIs have been renamed:
- - mal_decoder_read() -> ma_decoder_read_pcm_frames()
- - mal_decoder_seek_to_frame() -> ma_decoder_seek_to_pcm_frame()
- - mal_sine_wave_read() -> ma_sine_wave_read_f32()
- - mal_sine_wave_read_ex() -> ma_sine_wave_read_f32_ex()
-- Some APIs have been removed:
- - mal_device_get_buffer_size_in_bytes()
- - mal_device_set_recv_callback()
- - mal_device_set_send_callback()
- - mal_src_set_input_sample_rate()
- - mal_src_set_output_sample_rate()
-- Error codes have been rearranged. If you're a binding maintainer you will need to update.
-- The ma_backend enums have been rearranged to priority order. The rationale for this is to simplify automatic backend selection
- and to make it easier to see the priority. If you're a binding maintainer you will need to update.
-- ma_dsp has been renamed to ma_pcm_converter. The rationale for this change is that I'm expecting "ma_dsp" to conflict with
- some future planned high-level APIs.
-- For functions that take a pointer/count combo, such as ma_decoder_read_pcm_frames(), the parameter order has changed so that
- the pointer comes before the count. The rationale for this is to keep it consistent with things like memcpy().
+`ma_device_set_stop_callback()` has been removed. If you require a stop callback, you must now set it via the device config just like the data callback.
+
+The `ma_sine_wave` API has been replaced with a more general API called `ma_waveform`. This supports generation of different types of waveforms, including
+sine, square, triangle and sawtooth. Use `ma_waveform_init()` in place of `ma_sine_wave_init()` to initialize the waveform object. This takes a configuration
+object called `ma_waveform_config` which defines the properties of the waveform. Use `ma_waveform_config_init()` to initialize a `ma_waveform_config` object.
+Use `ma_waveform_read_pcm_frames()` in place of `ma_sine_wave_read_f32()` and `ma_sine_wave_read_f32_ex()`.
+
+`ma_convert_frames()` and `ma_convert_frames_ex()` have been changed. Both of these functions now take a new parameter called `frameCountOut` which specifies
+the size of the output buffer in PCM frames. This has been added for safety. In addition to this, the parameters for `ma_convert_frames_ex()` have changed to
+take a pointer to a `ma_data_converter_config` object to specify the input and output formats to convert between. This was done to make it more flexible, to
+prevent the parameter list getting too long, and to prevent API breakage whenever a new conversion property is added.
+
+`ma_calculate_frame_count_after_src()` has been renamed to `ma_calculate_frame_count_after_resampling()` for consistency with the new `ma_resampler` API.
+
+
+Filters
+-------
+The following filters have been added:
+
+ |-------------|-------------------------------------------------------------------|
+ | API | Description |
+ |-------------|-------------------------------------------------------------------|
+ | ma_biquad | Biquad filter (transposed direct form 2) |
+ | ma_lpf1 | First order low-pass filter |
+ | ma_lpf2 | Second order low-pass filter |
+ | ma_lpf | High order low-pass filter (Butterworth) |
+ | ma_hpf1 | First order high-pass filter |
+ | ma_hpf2 | Second order high-pass filter |
+ | ma_hpf | High order high-pass filter (Butterworth) |
+ | ma_bpf2 | Second order band-pass filter |
+ | ma_bpf | High order band-pass filter |
+ | ma_peak2 | Second order peaking filter |
+ | ma_notch2 | Second order notching filter |
+ | ma_loshelf2 | Second order low shelf filter |
+ | ma_hishelf2 | Second order high shelf filter |
+ |-------------|-------------------------------------------------------------------|
+
+These filters all support 32-bit floating point and 16-bit signed integer formats natively. Other formats need to be converted beforehand.
+
+
+Sine, Square, Triangle and Sawtooth Waveforms
+---------------------------------------------
+Previously miniaudio supported only sine wave generation. This has now been generalized to support sine, square, triangle and sawtooth waveforms. The old
+`ma_sine_wave` API has been removed and replaced with the `ma_waveform` API. Use `ma_waveform_config_init()` to initialize a config object, and then pass it
+into `ma_waveform_init()`. Then use `ma_waveform_read_pcm_frames()` to read PCM data.
+
+
+Noise Generation
+----------------
+A noise generation API has been added. This is used via the `ma_noise` API. Currently white, pink and Brownian noise is supported. The `ma_noise` API is
+similar to the waveform API. Use `ma_noise_config_init()` to initialize a config object, and then pass it into `ma_noise_init()` to initialize a `ma_noise`
+object. Then use `ma_noise_read_pcm_frames()` to read PCM data.
Miscellaneous Changes
---------------------
-The following miscellaneous changes have also been made.
+Internal functions have all been made static where possible. If you get warnings about unused functions, please submit a bug report.
-- The AAudio backend has been added for Android 8 and above. This is Android's new "High-Performance Audio" API. (For the
- record, this is one of the nicest audio APIs out there, just behind the BSD audio APIs).
-- The WebAudio backend has been added. This is based on ScriptProcessorNode. This removes the need for SDL.
-- The SDL and OpenAL backends have been removed. These were originally implemented to add support for platforms for which miniaudio
- was not explicitly supported. These are no longer needed and have therefore been removed.
-- Device initialization now fails if the requested share mode is not supported. If you ask for exclusive mode, you either get an
- exclusive mode device, or an error. The rationale for this change is to give the client more control over how to handle cases
- when the desired shared mode is unavailable.
-- A lock-free ring buffer API has been added. There are two varients of this. "ma_rb" operates on bytes, whereas "ma_pcm_rb"
- operates on PCM frames.
-- The library is now licensed as a choice of Public Domain (Unlicense) _or_ MIT-0 (No Attribution) which is the same as MIT, but
- removes the attribution requirement. The rationale for this is to support countries that don't recognize public domain.
+The `ma_device` structure is no longer defined as being aligned to MA_SIMD_ALIGNMENT. This resulted in a possible crash when allocating a `ma_device` object on
+the heap, but not aligning it to MA_SIMD_ALIGNMENT. This crash would happen due to the compiler seeing the alignment specified on the structure and assuming it
+was always aligned as such and thinking it was safe to emit alignment-dependent SIMD instructions. Since miniaudio's philosophy is for things to just work,
+this has been removed from all structures.
+
+Result codes have been overhauled. Unnecessary result codes have been removed, and some have been renumbered for organisation purposes. If you are a binding
+maintainer you will need to update your result codes. Support has also been added for retrieving a human readable description of a given result code via the
+`ma_result_description()` API.
*/
+
/*
-ABOUT
-=====
-miniaudio is a single file library for audio playback and capture. It's written in C (compilable as
-C++) and released into the public domain.
-
-Supported Backends:
- - WASAPI
- - DirectSound
- - WinMM
- - Core Audio (Apple)
- - ALSA
- - PulseAudio
- - JACK
- - sndio (OpenBSD)
- - audio(4) (NetBSD and OpenBSD)
- - OSS (FreeBSD)
- - AAudio (Android 8.0+)
- - OpenSL|ES (Android only)
- - Web Audio (Emscripten)
- - Null (Silence)
-
-Supported Formats:
- - Unsigned 8-bit PCM
- - Signed 16-bit PCM
- - Signed 24-bit PCM (tightly packed)
- - Signed 32-bit PCM
- - IEEE 32-bit floating point PCM
-
-
-USAGE
-=====
-miniaudio is a single-file library. To use it, do something like the following in one .c file.
- #define MINIAUDIO_IMPLEMENTATION
- #include "miniaudio.h"
-
-You can then #include this file in other parts of the program as you would with any other header file.
-
-miniaudio uses an asynchronous, callback based API. You initialize a device with a configuration (sample rate,
-channel count, etc.) which includes the callback you want to use to handle data transmission to/from the
-device. In the callback you either read from a data pointer in the case of playback or write to it in the case
-of capture.
-
-Playback Example
-----------------
- void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
- {
- ma_decoder* pDecoder = (ma_decoder*)pDevice->pUserData;
- if (pDecoder == NULL) {
- return;
- }
-
- ma_decoder_read_pcm_frames(pDecoder, frameCount, pOutput);
- }
+Introduction
+============
+miniaudio is a single file library for audio playback and capture. To use it, do the following in one .c file:
- ...
+ ```c
+ #define MINIAUDIO_IMPLEMENTATION
+    #include "miniaudio.h"
+ ```
- ma_device_config config = ma_device_config_init(ma_device_type_playback);
- config.playback.format = decoder.outputFormat;
- config.playback.channels = decoder.outputChannels;
- config.sampleRate = decoder.outputSampleRate;
- config.dataCallback = data_callback;
- config.pUserData = &decoder;
+You can #include miniaudio.h in other parts of the program just like any other header.
- ma_device device;
- if (ma_device_init(NULL, &config, &device) != MA_SUCCESS) {
- ... An error occurred ...
- }
+miniaudio uses the concept of a "device" as the abstraction for physical devices. The idea is that you choose a physical device to emit or capture audio from,
+and then move data to/from the device when miniaudio tells you to. Data is delivered to and from devices asynchronously via a callback which you specify when
+initializing the device.
- ma_device_start(&device); // The device is sleeping by default so you'll need to start it manually.
+When initializing the device you first need to configure it. The device configuration allows you to specify things like the format of the data delivered via
+the callback, the size of the internal buffer and the ID of the device you want to emit or capture audio from.
- ...
+Once you have the device configuration set up you can initialize the device. When initializing a device you need to allocate memory for the device object
+beforehand. This gives the application complete control over how the memory is allocated. In the example below we initialize a playback device on the stack,
+but you could allocate it on the heap if that suits your situation better.
- ma_device_uninit(&device); // This will stop the device so no need to do that manually.
+ ```c
+ void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+ {
+ // In playback mode copy data to pOutput. In capture mode read data from pInput. In full-duplex mode, both pOutput and pInput will be valid and you can
+ // move data from pInput into pOutput. Never process more than frameCount frames.
+ }
+ ...
-BUILDING
-========
-miniaudio should Just Work by adding it to your project's source tree. You do not need to download or install
-any dependencies. See below for platform-specific details.
+ ma_device_config config = ma_device_config_init(ma_device_type_playback);
+ config.playback.format = MY_FORMAT;
+ config.playback.channels = MY_CHANNEL_COUNT;
+ config.sampleRate = MY_SAMPLE_RATE;
+ config.dataCallback = data_callback;
+ config.pUserData = pMyCustomData; // Can be accessed from the device object (device.pUserData).
+
+ ma_device device;
+ if (ma_device_init(NULL, &config, &device) != MA_SUCCESS) {
+ ... An error occurred ...
+ }
+
+ ma_device_start(&device); // The device is sleeping by default so you'll need to start it manually.
+
+ ...
+
+ ma_device_uninit(&device); // This will stop the device so no need to do that manually.
+ ```
+
+In the example above, `data_callback()` is where audio data is written and read from the device. The idea is in playback mode you cause sound to be emitted
+from the speakers by writing audio data to the output buffer (`pOutput` in the example). In capture mode you read data from the input buffer (`pInput`) to
+extract sound captured by the microphone. The `frameCount` parameter tells you how many frames can be written to the output buffer and read from the input
+buffer. A "frame" is one sample for each channel. For example, in a stereo stream (2 channels), one frame is 2 samples: one for the left, one for the right.
+The channel count is defined by the device config. The size in bytes of an individual sample is defined by the sample format which is also specified in the
+device config. Multi-channel audio data is always interleaved, which means the samples for each frame are stored next to each other in memory. For example, in
+a stereo stream the first pair of samples will be the left and right samples for the first frame, the second pair of samples will be the left and right samples
+for the second frame, etc.
+
+The configuration of the device is defined by the `ma_device_config` structure. The config object is always initialized with `ma_device_config_init()`. It's
+important to always initialize the config with this function as it initializes it with logical defaults and ensures your program doesn't break when new members
+are added to the `ma_device_config` structure. The example above uses a fairly simple and standard device configuration. The call to `ma_device_config_init()`
+takes a single parameter, which is whether or not the device is a playback, capture, duplex or loopback device (loopback devices are not supported on all
+backends). The `config.playback.format` member sets the sample format which can be one of the following (all formats are native-endian):
+
+ |---------------|----------------------------------------|---------------------------|
+ | Symbol | Description | Range |
+ |---------------|----------------------------------------|---------------------------|
+ | ma_format_f32 | 32-bit floating point | [-1, 1] |
+ | ma_format_s16 | 16-bit signed integer | [-32768, 32767] |
+ | ma_format_s24 | 24-bit signed integer (tightly packed) | [-8388608, 8388607] |
+ | ma_format_s32 | 32-bit signed integer | [-2147483648, 2147483647] |
+ | ma_format_u8 | 8-bit unsigned integer | [0, 255] |
+ |---------------|----------------------------------------|---------------------------|
+
+The `config.playback.channels` member sets the number of channels to use with the device. The channel count cannot exceed MA_MAX_CHANNELS. The
+`config.sampleRate` member sets the sample rate (which must be the same for both playback and capture in full-duplex configurations). This is usually set to
+44100 or 48000, but can be set to anything. It's recommended to keep this between 8000 and 384000, however.
+
+Note that leaving the format, channel count and/or sample rate at their default values will result in the internal device's native configuration being used
+which is useful if you want to avoid the overhead of miniaudio's automatic data conversion.
+
+In addition to the sample format, channel count and sample rate, the data callback and user data pointer are also set via the config. The user data pointer is
+not passed into the callback as a parameter, but is instead set to the `pUserData` member of `ma_device` which you can access directly since all miniaudio
+structures are transparent.
+
+Initializing the device is done with `ma_device_init()`. This will return a result code telling you what went wrong, if anything. On success it will return
+`MA_SUCCESS`. After initialization is complete the device will be in a stopped state. To start it, use `ma_device_start()`. Uninitializing the device will stop
+it, which is what the example above does, but you can also stop the device with `ma_device_stop()`. To resume the device simply call `ma_device_start()` again.
+Note that it's important to never stop or start the device from inside the callback. This will result in a deadlock. Instead you set a variable or signal an
+event indicating that the device needs to stop and handle it in a different thread. The following APIs must never be called inside the callback:
+
+ ma_device_init()
+ ma_device_init_ex()
+ ma_device_uninit()
+ ma_device_start()
+ ma_device_stop()
+
+You must never try uninitializing and reinitializing a device inside the callback. You must also never try to stop and start it from inside the callback. There
+are a few other things you shouldn't do in the callback depending on your requirements, however this isn't so much a thread-safety thing, but rather a real-
+time processing thing which is beyond the scope of this introduction.
+
+The example above demonstrates the initialization of a playback device, but it works exactly the same for capture. All you need to do is change the device type
+from `ma_device_type_playback` to `ma_device_type_capture` when setting up the config, like so:
+
+ ```c
+ ma_device_config config = ma_device_config_init(ma_device_type_capture);
+ config.capture.format = MY_FORMAT;
+ config.capture.channels = MY_CHANNEL_COUNT;
+ ```
+
+In the data callback you just read from the input buffer (`pInput` in the example above) and leave the output buffer alone (it will be set to NULL when the
+device type is set to `ma_device_type_capture`).
+
+These are the available device types and how you should handle the buffers in the callback:
+
+ |-------------------------|--------------------------------------------------------|
+ | Device Type | Callback Behavior |
+ |-------------------------|--------------------------------------------------------|
+ | ma_device_type_playback | Write to output buffer, leave input buffer untouched. |
+ | ma_device_type_capture | Read from input buffer, leave output buffer untouched. |
+ | ma_device_type_duplex | Read from input buffer, write to output buffer. |
+ | ma_device_type_loopback | Read from input buffer, leave output buffer untouched. |
+ |-------------------------|--------------------------------------------------------|
+
+You will notice in the example above that the sample format and channel count is specified separately for playback and capture. This is to support different
+data formats between the playback and capture devices in a full-duplex system. An example may be that you want to capture audio data as a monaural stream (one
+channel), but output sound to a stereo speaker system. Note that if you use different formats between playback and capture in a full-duplex configuration you
+will need to convert the data yourself. There are functions available to help you do this which will be explained later.
+
+The example above did not specify a physical device to connect to which means it will use the operating system's default device. If you have multiple physical
+devices connected and you want to use a specific one you will need to specify the device ID in the configuration, like so:
+
+ ```
+ config.playback.pDeviceID = pMyPlaybackDeviceID; // Only if requesting a playback or duplex device.
+ config.capture.pDeviceID = pMyCaptureDeviceID; // Only if requesting a capture, duplex or loopback device.
+ ```
+
+To retrieve the device ID you will need to perform device enumeration, however this requires the use of a new concept called the "context". Conceptually speaking
+the context sits above the device. There is one context to many devices. The purpose of the context is to represent the backend at a more global level and to
+perform operations outside the scope of an individual device. Mainly it is used for performing run-time linking against backend libraries, initializing
+backends and enumerating devices. The example below shows how to enumerate devices.
+
+ ```c
+ ma_context context;
+ if (ma_context_init(NULL, 0, NULL, &context) != MA_SUCCESS) {
+ // Error.
+ }
+
+ ma_device_info* pPlaybackDeviceInfos;
+ ma_uint32 playbackDeviceCount;
+ ma_device_info* pCaptureDeviceInfos;
+ ma_uint32 captureDeviceCount;
+ if (ma_context_get_devices(&context, &pPlaybackDeviceInfos, &playbackDeviceCount, &pCaptureDeviceInfos, &captureDeviceCount) != MA_SUCCESS) {
+ // Error.
+ }
+
+    // Loop over each device info and do something with it. Here we just print the name with their index. You may want to give the user the
+ // opportunity to choose which device they'd prefer.
+ for (ma_uint32 iDevice = 0; iDevice < playbackDeviceCount; iDevice += 1) {
+ printf("%d - %s\n", iDevice, pPlaybackDeviceInfos[iDevice].name);
+ }
-If you want to disable a specific backend, #define the appropriate MA_NO_* option before the implementation.
+ ma_device_config config = ma_device_config_init(ma_device_type_playback);
+ config.playback.pDeviceID = &pPlaybackDeviceInfos[chosenPlaybackDeviceIndex].id;
+ config.playback.format = MY_FORMAT;
+ config.playback.channels = MY_CHANNEL_COUNT;
+ config.sampleRate = MY_SAMPLE_RATE;
+ config.dataCallback = data_callback;
+ config.pUserData = pMyCustomData;
-Note that GCC and Clang requires "-msse2", "-mavx2", etc. for SIMD optimizations.
+ ma_device device;
+ if (ma_device_init(&context, &config, &device) != MA_SUCCESS) {
+ // Error
+ }
+ ...
-Building for Windows
---------------------
-The Windows build should compile clean on all popular compilers without the need to configure any include paths
-nor link to any libraries.
+ ma_device_uninit(&device);
+ ma_context_uninit(&context);
+ ```
-Building for macOS and iOS
---------------------------
-The macOS build should compile clean without the need to download any dependencies or link to any libraries or
-frameworks. The iOS build needs to be compiled as Objective-C (sorry) and will need to link the relevant frameworks
-but should Just Work with Xcode.
+The first thing we do in this example is initialize a `ma_context` object with `ma_context_init()`. The first parameter is a pointer to a list of `ma_backend`
+values which are used to override the default backend priorities. When this is NULL, as in this example, miniaudio's default priorities are used. The second
+parameter is the number of backends listed in the array pointed to by the first parameter. The third parameter is a pointer to a `ma_context_config` object
+which can be NULL, in which case defaults are used. The context configuration is used for setting the logging callback, custom memory allocation callbacks,
+user-defined data and some backend-specific configurations.
-Building for Linux
-------------------
-The Linux build only requires linking to -ldl, -lpthread and -lm. You do not need any development packages.
+Once the context has been initialized you can enumerate devices. In the example above we use the simpler `ma_context_get_devices()`, however you can also use a
+callback for handling devices by using `ma_context_enumerate_devices()`. When using `ma_context_get_devices()` you provide a pointer to a pointer that will,
+upon output, be set to a pointer to a buffer containing a list of `ma_device_info` structures. You also provide a pointer to an unsigned integer that will
+receive the number of items in the returned buffer. Do not free the returned buffers as their memory is managed internally by miniaudio.
-Building for BSD
-----------------
-The BSD build only requires linking to -ldl, -lpthread and -lm. NetBSD uses audio(4), OpenBSD uses sndio and
-FreeBSD uses OSS.
+The `ma_device_info` structure contains an `id` member which is the ID you pass to the device config. It also contains the name of the device which is useful
+for presenting a list of devices to the user via the UI.
-Building for Android
---------------------
-AAudio is the highest priority backend on Android. This should work out out of the box without needing any kind of
-compiler configuration. Support for AAudio starts with Android 8 which means older versions will fall back to
-OpenSL|ES which requires API level 16+.
-
-Building for Emscripten
------------------------
-The Emscripten build emits Web Audio JavaScript directly and should Just Work without any configuration.
-
-
-NOTES
-=====
-- This library uses an asynchronous API for delivering and requesting audio data. Each device will have
- it's own worker thread which is managed by the library.
-- If ma_device_init() is called with a device that's not aligned to the platform's natural alignment
- boundary (4 bytes on 32-bit, 8 bytes on 64-bit), it will _not_ be thread-safe. The reason for this
- is that it depends on members of ma_device being correctly aligned for atomic assignments.
-- Sample data is always native-endian and interleaved. For example, ma_format_s16 means signed 16-bit
- integer samples, interleaved. Let me know if you need non-interleaved and I'll look into it.
-- The sndio backend is currently only enabled on OpenBSD builds.
-- The audio(4) backend is supported on OpenBSD, but you may need to disable sndiod before you can use it.
-- Automatic stream routing is enabled on a per-backend basis. Support is explicitly enabled for WASAPI
- and Core Audio, however other backends such as PulseAudio may naturally support it, though not all have
- been tested.
+When creating your own context you will want to pass it to `ma_device_init()` when initializing the device. Passing in NULL, like we do in the first example,
+will result in miniaudio creating the context for you, which you don't want to do since you've already created a context. Note that internally the context is
+only tracked by its pointer which means you must not change the location of the `ma_context` object. If this is an issue, consider using `malloc()` to
+allocate memory for the context.
-BACKEND NUANCES
-===============
-PulseAudio
-----------
-- If you experience bad glitching/noise on Arch Linux, consider this fix from the Arch wiki:
- https://wiki.archlinux.org/index.php/PulseAudio/Troubleshooting#Glitches,_skips_or_crackling
- Alternatively, consider using a different backend such as ALSA.
+Building
+========
+miniaudio should work cleanly out of the box without the need to download or install any dependencies. See below for platform-specific details.
-Android
+
+Windows
-------
-- To capture audio on Android, remember to add the RECORD_AUDIO permission to your manifest:
-
-- With OpenSL|ES, only a single ma_context can be active at any given time. This is due to a limitation with OpenSL|ES.
-- With AAudio, only default devices are enumerated. This is due to AAudio not having an enumeration API (devices are
- enumerated through Java). You can however perform your own device enumeration through Java and then set the ID in the
- ma_device_id structure (ma_device_id.aaudio) and pass it to ma_device_init().
-- The backend API will perform resampling where possible. The reason for this as opposed to using miniaudio's built-in
- resampler is to take advantage of any potential device-specific optimizations the driver may implement.
+The Windows build should compile clean on all popular compilers without the need to configure any include paths nor link to any libraries.
-UWP
+macOS and iOS
+-------------
+The macOS build should compile clean without the need to download any dependencies or link to any libraries or frameworks. The iOS build needs to be compiled
+as Objective-C (sorry) and will need to link the relevant frameworks but should Just Work with Xcode. Compiling through the command line requires linking to
+-lpthread and -lm.
+
+Linux
+-----
+The Linux build only requires linking to -ldl, -lpthread and -lm. You do not need any development packages.
+
+BSD
---
-- UWP only supports default playback and capture devices.
-- UWP requires the Microphone capability to be enabled in the application's manifest (Package.appxmanifest):
-
- ...
-
-
-
-
+The BSD build only requires linking to -lpthread and -lm. NetBSD uses audio(4), OpenBSD uses sndio and FreeBSD uses OSS.
-Web Audio / Emscripten
-----------------------
-- The first time a context is initialized it will create a global object called "mal" whose primary purpose is to act
- as a factory for device objects.
-- Currently the Web Audio backend uses ScriptProcessorNode's, but this may need to change later as they've been deprecated.
-- Google is implementing a policy in their browsers that prevent automatic media output without first receiving some kind
- of user input. See here for details: https://developers.google.com/web/updates/2017/09/autoplay-policy-changes. Starting
- the device may fail if you try to start playback without first handling some kind of user input.
+Android
+-------
+AAudio is the highest priority backend on Android. This should work out of the box without needing any kind of compiler configuration. Support for AAudio
+starts with Android 8 which means older versions will fall back to OpenSL|ES which requires API level 16+.
+
+Emscripten
+----------
+The Emscripten build emits Web Audio JavaScript directly and should Just Work without any configuration. You cannot use -std=c* compiler flags, nor -ansi.
-OPTIONS
-=======
-#define these options before including this file.
+Build Options
+-------------
+#define these options before including miniaudio.h.
#define MA_NO_WASAPI
Disables the WASAPI backend.
@@ -358,20 +456,12 @@ OPTIONS
#define MA_NO_NULL
Disables the null backend.
-#define MA_DEFAULT_PERIODS
- When a period count of 0 is specified when a device is initialized, it will default to this.
-
-#define MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_LOW_LATENCY
-#define MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_CONSERVATIVE
- When a buffer size of 0 is specified when a device is initialized it will default to a buffer of this size, depending
- on the chosen performance profile. These can be increased or decreased depending on your specific requirements.
-
#define MA_NO_DECODING
Disables the decoding APIs.
#define MA_NO_DEVICE_IO
- Disables playback and recording. This will disable ma_context and ma_device APIs. This is useful if you only want to
- use miniaudio's data conversion and/or decoding APIs.
+ Disables playback and recording. This will disable ma_context and ma_device APIs. This is useful if you only want to use miniaudio's data conversion and/or
+ decoding APIs.
#define MA_NO_STDIO
Disables file IO APIs.
@@ -402,10 +492,11 @@ OPTIONS
Windows only. The value to pass to internal calls to CoInitializeEx(). Defaults to COINIT_MULTITHREADED.
-DEFINITIONS
+
+Definitions
===========
-This section defines common terms used throughout miniaudio. Unfortunately there is often ambiguity in the use of terms
-throughout the audio space, so this section is intended to clarify how miniaudio uses each term.
+This section defines common terms used throughout miniaudio. Unfortunately there is often ambiguity in the use of terms throughout the audio space, so this
+section is intended to clarify how miniaudio uses each term.
Sample
------
@@ -413,14906 +504,24372 @@ A sample is a single unit of audio data. If the sample format is f32, then one s
Frame / PCM Frame
-----------------
-A frame is a groups of samples equal to the number of channels. For a stereo stream a frame is 2 samples, a mono frame
-is 1 sample, a 5.1 surround sound frame is 6 samples, etc. The terms "frame" and "PCM frame" are the same thing in
-miniaudio. Note that this is different to a compressed frame. If ever miniaudio needs to refer to a compressed frame, such
-as a FLAC frame, it will always clarify what it's referring to with something like "FLAC frame" or whatnot.
+A frame is a group of samples equal to the number of channels. For a stereo stream a frame is 2 samples, a mono frame is 1 sample, a 5.1 surround sound frame
+is 6 samples, etc. The terms "frame" and "PCM frame" are the same thing in miniaudio. Note that this is different to a compressed frame. If ever miniaudio
+needs to refer to a compressed frame, such as a FLAC frame, it will always clarify what it's referring to with something like "FLAC frame" or whatnot.
Channel
-------
-A stream of monaural audio that is emitted from an individual speaker in a speaker system, or received from an individual
-microphone in a microphone system. A stereo stream has two channels (a left channel, and a right channel), a 5.1 surround
-sound system has 6 channels, etc. Some audio systems refer to a channel as a complex audio stream that's mixed with other
-channels to produce the final mix - this is completely different to miniaudio's use of the term "channel" and should not be
-confused.
+A stream of monaural audio that is emitted from an individual speaker in a speaker system, or received from an individual microphone in a microphone system. A
+stereo stream has two channels (a left channel, and a right channel), a 5.1 surround sound system has 6 channels, etc. Some audio systems refer to a channel as
+a complex audio stream that's mixed with other channels to produce the final mix - this is completely different to miniaudio's use of the term "channel" and
+should not be confused.
Sample Rate
-----------
-The sample rate in miniaudio is always expressed in Hz, such as 44100, 48000, etc. It's the number of PCM frames that are
-processed per second.
+The sample rate in miniaudio is always expressed in Hz, such as 44100, 48000, etc. It's the number of PCM frames that are processed per second.
Formats
-------
Throughout miniaudio you will see references to different sample formats:
- u8 - Unsigned 8-bit integer
- s16 - Signed 16-bit integer
- s24 - Signed 24-bit integer (tightly packed).
- s32 - Signed 32-bit integer
- f32 - 32-bit floating point
-*/
-#ifndef miniaudio_h
-#define miniaudio_h
+ |---------------|----------------------------------------|---------------------------|
+ | Symbol | Description | Range |
+ |---------------|----------------------------------------|---------------------------|
+ | ma_format_f32 | 32-bit floating point | [-1, 1] |
+ | ma_format_s16 | 16-bit signed integer | [-32768, 32767] |
+ | ma_format_s24 | 24-bit signed integer (tightly packed) | [-8388608, 8388607] |
+ | ma_format_s32 | 32-bit signed integer | [-2147483648, 2147483647] |
+ | ma_format_u8 | 8-bit unsigned integer | [0, 255] |
+ |---------------|----------------------------------------|---------------------------|
-#ifdef __cplusplus
-extern "C" {
-#endif
+All formats are native-endian.
-#if defined(_MSC_VER)
- #pragma warning(push)
- #pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union */
- #pragma warning(disable:4324) /* structure was padded due to alignment specifier */
-#else
- #pragma GCC diagnostic push
- #pragma GCC diagnostic ignored "-Wpedantic" /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */
-#endif
-/* Platform/backend detection. */
-#ifdef _WIN32
- #define MA_WIN32
- #if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_PC_APP || WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP)
- #define MA_WIN32_UWP
- #else
- #define MA_WIN32_DESKTOP
- #endif
-#else
- #define MA_POSIX
- #include /* Unfortunate #include, but needed for pthread_t, pthread_mutex_t and pthread_cond_t types. */
- #ifdef __unix__
- #define MA_UNIX
- #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
- #define MA_BSD
- #endif
- #endif
- #ifdef __linux__
- #define MA_LINUX
- #endif
- #ifdef __APPLE__
- #define MA_APPLE
- #endif
- #ifdef __ANDROID__
- #define MA_ANDROID
- #endif
- #ifdef __EMSCRIPTEN__
- #define MA_EMSCRIPTEN
- #endif
-#endif
+Decoding
+========
+The `ma_decoder` API is used for reading audio files. To enable a decoder you must #include the header of the relevant backend library before the
+implementation of miniaudio. You can find copies of these in the "extras" folder in the miniaudio repository (https://github.com/dr-soft/miniaudio).
-#include /* For size_t. */
+The table below are the supported decoding backends:
-/* Sized types. Prefer built-in types. Fall back to stdint. */
-#ifdef _MSC_VER
- #if defined(__clang__)
- #pragma GCC diagnostic push
- #pragma GCC diagnostic ignored "-Wlanguage-extension-token"
- #pragma GCC diagnostic ignored "-Wc++11-long-long"
- #endif
- typedef signed __int8 ma_int8;
- typedef unsigned __int8 ma_uint8;
- typedef signed __int16 ma_int16;
- typedef unsigned __int16 ma_uint16;
- typedef signed __int32 ma_int32;
- typedef unsigned __int32 ma_uint32;
- typedef signed __int64 ma_int64;
- typedef unsigned __int64 ma_uint64;
- #if defined(__clang__)
- #pragma GCC diagnostic pop
- #endif
-#else
- #define MA_HAS_STDINT
- #include
- typedef int8_t ma_int8;
- typedef uint8_t ma_uint8;
- typedef int16_t ma_int16;
- typedef uint16_t ma_uint16;
- typedef int32_t ma_int32;
- typedef uint32_t ma_uint32;
- typedef int64_t ma_int64;
- typedef uint64_t ma_uint64;
-#endif
+ |--------|-----------------|
+ | Type | Backend Library |
+ |--------|-----------------|
+ | WAV | dr_wav.h |
+ | FLAC | dr_flac.h |
+ | MP3 | dr_mp3.h |
+ | Vorbis | stb_vorbis.c |
+ |--------|-----------------|
-#ifdef MA_HAS_STDINT
- typedef uintptr_t ma_uintptr;
-#else
- #if defined(_WIN32)
- #if defined(_WIN64)
- typedef ma_uint64 ma_uintptr;
- #else
- typedef ma_uint32 ma_uintptr;
- #endif
- #elif defined(__GNUC__)
- #if defined(__LP64__)
- typedef ma_uint64 ma_uintptr;
- #else
- typedef ma_uint32 ma_uintptr;
- #endif
- #else
- typedef ma_uint64 ma_uintptr; /* Fallback. */
- #endif
-#endif
+The code below is an example of how to enable decoding backends:
-typedef ma_uint8 ma_bool8;
-typedef ma_uint32 ma_bool32;
-#define MA_TRUE 1
-#define MA_FALSE 0
+ ```c
+ #include "dr_flac.h" // Enables FLAC decoding.
+ #include "dr_mp3.h" // Enables MP3 decoding.
+ #include "dr_wav.h" // Enables WAV decoding.
-typedef void* ma_handle;
-typedef void* ma_ptr;
-typedef void (* ma_proc)(void);
+ #define MINIAUDIO_IMPLEMENTATION
+ #include "miniaudio.h"
+ ```
-#if defined(_MSC_VER) && !defined(_WCHAR_T_DEFINED)
-typedef ma_uint16 wchar_t;
-#endif
+A decoder can be initialized from a file with `ma_decoder_init_file()`, a block of memory with `ma_decoder_init_memory()`, or from data delivered via callbacks
+with `ma_decoder_init()`. Here is an example for loading a decoder from a file:
-/* Define NULL for some compilers. */
-#ifndef NULL
-#define NULL 0
-#endif
+ ```c
+ ma_decoder decoder;
+ ma_result result = ma_decoder_init_file("MySong.mp3", NULL, &decoder);
+ if (result != MA_SUCCESS) {
+ return false; // An error occurred.
+ }
-#if defined(SIZE_MAX)
- #define MA_SIZE_MAX SIZE_MAX
-#else
- #define MA_SIZE_MAX 0xFFFFFFFF /* When SIZE_MAX is not defined by the standard library just default to the maximum 32-bit unsigned integer. */
-#endif
+ ...
+ ma_decoder_uninit(&decoder);
+ ```
-#ifdef _MSC_VER
-#define MA_INLINE __forceinline
-#else
-#ifdef __GNUC__
-#define MA_INLINE __inline__ __attribute__((always_inline))
-#else
-#define MA_INLINE
-#endif
-#endif
+When initializing a decoder, you can optionally pass in a pointer to a ma_decoder_config object (the NULL argument in the example above) which allows you to
+configure the output format, channel count, sample rate and channel map:
-#if defined(_MSC_VER)
- #if _MSC_VER >= 1400
- #define MA_ALIGN(alignment) __declspec(align(alignment))
- #endif
-#elif !defined(__DMC__)
- #define MA_ALIGN(alignment) __attribute__((aligned(alignment)))
-#endif
-#ifndef MA_ALIGN
- #define MA_ALIGN(alignment)
-#endif
+ ```c
+ ma_decoder_config config = ma_decoder_config_init(ma_format_f32, 2, 48000);
+ ```
-#ifdef _MSC_VER
-#define MA_ALIGNED_STRUCT(alignment) MA_ALIGN(alignment) struct
-#else
-#define MA_ALIGNED_STRUCT(alignment) struct MA_ALIGN(alignment)
-#endif
+When passing in NULL for decoder config in `ma_decoder_init*()`, the output format will be the same as that defined by the decoding backend.
-/* SIMD alignment in bytes. Currently set to 64 bytes in preparation for future AVX-512 optimizations. */
-#define MA_SIMD_ALIGNMENT 64
+Data is read from the decoder as PCM frames:
+ ```c
+ ma_uint64 framesRead = ma_decoder_read_pcm_frames(pDecoder, pFrames, framesToRead);
+ ```
-/* Logging levels */
-#define MA_LOG_LEVEL_VERBOSE 4
-#define MA_LOG_LEVEL_INFO 3
-#define MA_LOG_LEVEL_WARNING 2
-#define MA_LOG_LEVEL_ERROR 1
+You can also seek to a specific frame like so:
-#ifndef MA_LOG_LEVEL
-#define MA_LOG_LEVEL MA_LOG_LEVEL_ERROR
-#endif
+ ```c
+ ma_result result = ma_decoder_seek_to_pcm_frame(pDecoder, targetFrame);
+ if (result != MA_SUCCESS) {
+ return false; // An error occurred.
+ }
+ ```
-typedef struct ma_context ma_context;
-typedef struct ma_device ma_device;
+When loading a decoder, miniaudio uses a trial and error technique to find the appropriate decoding backend. This can be unnecessarily inefficient if the type
+is already known. In this case you can use the `_wav`, `_mp3`, etc. variants of the aforementioned initialization APIs:
-typedef ma_uint8 ma_channel;
-#define MA_CHANNEL_NONE 0
-#define MA_CHANNEL_MONO 1
-#define MA_CHANNEL_FRONT_LEFT 2
-#define MA_CHANNEL_FRONT_RIGHT 3
-#define MA_CHANNEL_FRONT_CENTER 4
-#define MA_CHANNEL_LFE 5
-#define MA_CHANNEL_BACK_LEFT 6
-#define MA_CHANNEL_BACK_RIGHT 7
-#define MA_CHANNEL_FRONT_LEFT_CENTER 8
-#define MA_CHANNEL_FRONT_RIGHT_CENTER 9
-#define MA_CHANNEL_BACK_CENTER 10
-#define MA_CHANNEL_SIDE_LEFT 11
-#define MA_CHANNEL_SIDE_RIGHT 12
-#define MA_CHANNEL_TOP_CENTER 13
-#define MA_CHANNEL_TOP_FRONT_LEFT 14
-#define MA_CHANNEL_TOP_FRONT_CENTER 15
-#define MA_CHANNEL_TOP_FRONT_RIGHT 16
-#define MA_CHANNEL_TOP_BACK_LEFT 17
-#define MA_CHANNEL_TOP_BACK_CENTER 18
-#define MA_CHANNEL_TOP_BACK_RIGHT 19
-#define MA_CHANNEL_AUX_0 20
-#define MA_CHANNEL_AUX_1 21
-#define MA_CHANNEL_AUX_2 22
-#define MA_CHANNEL_AUX_3 23
-#define MA_CHANNEL_AUX_4 24
-#define MA_CHANNEL_AUX_5 25
-#define MA_CHANNEL_AUX_6 26
-#define MA_CHANNEL_AUX_7 27
-#define MA_CHANNEL_AUX_8 28
-#define MA_CHANNEL_AUX_9 29
-#define MA_CHANNEL_AUX_10 30
-#define MA_CHANNEL_AUX_11 31
-#define MA_CHANNEL_AUX_12 32
-#define MA_CHANNEL_AUX_13 33
-#define MA_CHANNEL_AUX_14 34
-#define MA_CHANNEL_AUX_15 35
-#define MA_CHANNEL_AUX_16 36
-#define MA_CHANNEL_AUX_17 37
-#define MA_CHANNEL_AUX_18 38
-#define MA_CHANNEL_AUX_19 39
-#define MA_CHANNEL_AUX_20 40
-#define MA_CHANNEL_AUX_21 41
-#define MA_CHANNEL_AUX_22 42
-#define MA_CHANNEL_AUX_23 43
-#define MA_CHANNEL_AUX_24 44
-#define MA_CHANNEL_AUX_25 45
-#define MA_CHANNEL_AUX_26 46
-#define MA_CHANNEL_AUX_27 47
-#define MA_CHANNEL_AUX_28 48
-#define MA_CHANNEL_AUX_29 49
-#define MA_CHANNEL_AUX_30 50
-#define MA_CHANNEL_AUX_31 51
-#define MA_CHANNEL_LEFT MA_CHANNEL_FRONT_LEFT
-#define MA_CHANNEL_RIGHT MA_CHANNEL_FRONT_RIGHT
-#define MA_CHANNEL_POSITION_COUNT MA_CHANNEL_AUX_31 + 1
+ ```c
+ ma_decoder_init_wav()
+ ma_decoder_init_mp3()
+ ma_decoder_init_memory_wav()
+ ma_decoder_init_memory_mp3()
+ ma_decoder_init_file_wav()
+ ma_decoder_init_file_mp3()
+ etc.
+ ```
+The `ma_decoder_init_file()` API will try using the file extension to determine which decoding backend to prefer.
-typedef int ma_result;
-#define MA_SUCCESS 0
-/* General errors. */
-#define MA_ERROR -1 /* A generic error. */
-#define MA_INVALID_ARGS -2
-#define MA_INVALID_OPERATION -3
-#define MA_OUT_OF_MEMORY -4
-#define MA_ACCESS_DENIED -5
-#define MA_TOO_LARGE -6
-#define MA_TIMEOUT -7
-/* General miniaudio-specific errors. */
-#define MA_FORMAT_NOT_SUPPORTED -100
-#define MA_DEVICE_TYPE_NOT_SUPPORTED -101
-#define MA_SHARE_MODE_NOT_SUPPORTED -102
-#define MA_NO_BACKEND -103
-#define MA_NO_DEVICE -104
-#define MA_API_NOT_FOUND -105
-#define MA_INVALID_DEVICE_CONFIG -106
+Encoding
+========
+The `ma_encoding` API is used for writing audio files. To enable an encoder you must #include the header of the relevant backend library before the
+implementation of miniaudio. You can find copies of these in the "extras" folder in the miniaudio repository (https://github.com/dr-soft/miniaudio).
-/* State errors. */
-#define MA_DEVICE_BUSY -200
-#define MA_DEVICE_NOT_INITIALIZED -201
-#define MA_DEVICE_NOT_STARTED -202
-#define MA_DEVICE_UNAVAILABLE -203
+The table below are the supported encoding backends:
-/* Operation errors. */
-#define MA_FAILED_TO_MAP_DEVICE_BUFFER -300
-#define MA_FAILED_TO_UNMAP_DEVICE_BUFFER -301
-#define MA_FAILED_TO_INIT_BACKEND -302
-#define MA_FAILED_TO_READ_DATA_FROM_CLIENT -303
-#define MA_FAILED_TO_READ_DATA_FROM_DEVICE -304
-#define MA_FAILED_TO_SEND_DATA_TO_CLIENT -305
-#define MA_FAILED_TO_SEND_DATA_TO_DEVICE -306
-#define MA_FAILED_TO_OPEN_BACKEND_DEVICE -307
-#define MA_FAILED_TO_START_BACKEND_DEVICE -308
-#define MA_FAILED_TO_STOP_BACKEND_DEVICE -309
-#define MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE -310
-#define MA_FAILED_TO_CREATE_MUTEX -311
-#define MA_FAILED_TO_CREATE_EVENT -312
-#define MA_FAILED_TO_CREATE_THREAD -313
+ |--------|-----------------|
+ | Type | Backend Library |
+ |--------|-----------------|
+ | WAV | dr_wav.h |
+ |--------|-----------------|
+The code below is an example of how to enable encoding backends:
-/* Standard sample rates. */
-#define MA_SAMPLE_RATE_8000 8000
-#define MA_SAMPLE_RATE_11025 11025
-#define MA_SAMPLE_RATE_16000 16000
-#define MA_SAMPLE_RATE_22050 22050
-#define MA_SAMPLE_RATE_24000 24000
-#define MA_SAMPLE_RATE_32000 32000
-#define MA_SAMPLE_RATE_44100 44100
-#define MA_SAMPLE_RATE_48000 48000
-#define MA_SAMPLE_RATE_88200 88200
-#define MA_SAMPLE_RATE_96000 96000
-#define MA_SAMPLE_RATE_176400 176400
-#define MA_SAMPLE_RATE_192000 192000
-#define MA_SAMPLE_RATE_352800 352800
-#define MA_SAMPLE_RATE_384000 384000
+ ```c
+    #include "dr_wav.h" // Enables WAV encoding.
-#define MA_MIN_PCM_SAMPLE_SIZE_IN_BYTES 1 /* For simplicity, miniaudio does not support PCM samples that are not byte aligned. */
-#define MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES 8
-#define MA_MIN_CHANNELS 1
-#define MA_MAX_CHANNELS 32
-#define MA_MIN_SAMPLE_RATE MA_SAMPLE_RATE_8000
-#define MA_MAX_SAMPLE_RATE MA_SAMPLE_RATE_384000
-#define MA_SRC_SINC_MIN_WINDOW_WIDTH 2
-#define MA_SRC_SINC_MAX_WINDOW_WIDTH 32
-#define MA_SRC_SINC_DEFAULT_WINDOW_WIDTH 32
-#define MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION 8
-#define MA_SRC_INPUT_BUFFER_SIZE_IN_SAMPLES 256
+ #define MINIAUDIO_IMPLEMENTATION
+ #include "miniaudio.h"
+ ```
-typedef enum
-{
- ma_stream_format_pcm = 0,
-} ma_stream_format;
+An encoder can be initialized to write to a file with `ma_encoder_init_file()` or from data delivered via callbacks with `ma_encoder_init()`. Below is an
+example for initializing an encoder to output to a file.
-typedef enum
-{
- ma_stream_layout_interleaved = 0,
- ma_stream_layout_deinterleaved
-} ma_stream_layout;
+ ```c
+ ma_encoder_config config = ma_encoder_config_init(ma_resource_format_wav, FORMAT, CHANNELS, SAMPLE_RATE);
+ ma_encoder encoder;
+ ma_result result = ma_encoder_init_file("my_file.wav", &config, &encoder);
+ if (result != MA_SUCCESS) {
+ // Error
+ }
-typedef enum
-{
- ma_dither_mode_none = 0,
- ma_dither_mode_rectangle,
- ma_dither_mode_triangle
-} ma_dither_mode;
+ ...
-typedef enum
-{
- /*
- I like to keep these explicitly defined because they're used as a key into a lookup table. When items are
- added to this, make sure there are no gaps and that they're added to the lookup table in ma_get_bytes_per_sample().
- */
- ma_format_unknown = 0, /* Mainly used for indicating an error, but also used as the default for the output format for decoders. */
- ma_format_u8 = 1,
- ma_format_s16 = 2, /* Seems to be the most widely supported format. */
- ma_format_s24 = 3, /* Tightly packed. 3 bytes per sample. */
- ma_format_s32 = 4,
- ma_format_f32 = 5,
- ma_format_count
-} ma_format;
+ ma_encoder_uninit(&encoder);
+ ```
-typedef enum
-{
- ma_channel_mix_mode_rectangular = 0, /* Simple averaging based on the plane(s) the channel is sitting on. */
- ma_channel_mix_mode_simple, /* Drop excess channels; zeroed out extra channels. */
- ma_channel_mix_mode_custom_weights, /* Use custom weights specified in ma_channel_router_config. */
- ma_channel_mix_mode_planar_blend = ma_channel_mix_mode_rectangular,
- ma_channel_mix_mode_default = ma_channel_mix_mode_planar_blend
-} ma_channel_mix_mode;
+When initializing an encoder you must specify a config which is initialized with `ma_encoder_config_init()`. Here you must specify the file type, the output
+sample format, output channel count and output sample rate. The following file types are supported:
-typedef enum
-{
- ma_standard_channel_map_microsoft,
- ma_standard_channel_map_alsa,
- ma_standard_channel_map_rfc3551, /* Based off AIFF. */
- ma_standard_channel_map_flac,
- ma_standard_channel_map_vorbis,
- ma_standard_channel_map_sound4, /* FreeBSD's sound(4). */
- ma_standard_channel_map_sndio, /* www.sndio.org/tips.html */
- ma_standard_channel_map_webaudio = ma_standard_channel_map_flac, /* https://webaudio.github.io/web-audio-api/#ChannelOrdering. Only 1, 2, 4 and 6 channels are defined, but can fill in the gaps with logical assumptions. */
- ma_standard_channel_map_default = ma_standard_channel_map_microsoft
-} ma_standard_channel_map;
+ |------------------------|-------------|
+ | Enum | Description |
+ |------------------------|-------------|
+ | ma_resource_format_wav | WAV |
+ |------------------------|-------------|
-typedef enum
-{
- ma_performance_profile_low_latency = 0,
- ma_performance_profile_conservative
-} ma_performance_profile;
+If the format, channel count or sample rate is not supported by the output file type an error will be returned. The encoder will not perform data conversion so
+you will need to convert it before outputting any audio data. To output audio data, use `ma_encoder_write_pcm_frames()`, like in the example below:
+ ```c
+ framesWritten = ma_encoder_write_pcm_frames(&encoder, pPCMFramesToWrite, framesToWrite);
+ ```
-typedef struct ma_format_converter ma_format_converter;
-typedef ma_uint32 (* ma_format_converter_read_proc) (ma_format_converter* pConverter, ma_uint32 frameCount, void* pFramesOut, void* pUserData);
-typedef ma_uint32 (* ma_format_converter_read_deinterleaved_proc)(ma_format_converter* pConverter, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData);
+Encoders must be uninitialized with `ma_encoder_uninit()`.
-typedef struct
-{
- ma_format formatIn;
- ma_format formatOut;
- ma_uint32 channels;
- ma_stream_format streamFormatIn;
- ma_stream_format streamFormatOut;
- ma_dither_mode ditherMode;
- ma_bool32 noSSE2 : 1;
- ma_bool32 noAVX2 : 1;
- ma_bool32 noAVX512 : 1;
- ma_bool32 noNEON : 1;
- ma_format_converter_read_proc onRead;
- ma_format_converter_read_deinterleaved_proc onReadDeinterleaved;
- void* pUserData;
-} ma_format_converter_config;
-
-struct ma_format_converter
-{
- ma_format_converter_config config;
- ma_bool32 useSSE2 : 1;
- ma_bool32 useAVX2 : 1;
- ma_bool32 useAVX512 : 1;
- ma_bool32 useNEON : 1;
- void (* onConvertPCM)(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode);
- void (* onInterleavePCM)(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels);
- void (* onDeinterleavePCM)(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels);
-};
+Sample Format Conversion
+========================
+Conversion between sample formats is achieved with the `ma_pcm_*_to_*()`, `ma_pcm_convert()` and `ma_convert_pcm_frames_format()` APIs. Use `ma_pcm_*_to_*()`
+to convert between two specific formats. Use `ma_pcm_convert()` to convert based on a `ma_format` variable. Use `ma_convert_pcm_frames_format()` to convert
+PCM frames where you want to specify the frame count and channel count as a variable instead of the total sample count.
-typedef struct ma_channel_router ma_channel_router;
-typedef ma_uint32 (* ma_channel_router_read_deinterleaved_proc)(ma_channel_router* pRouter, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData);
+Dithering
+---------
+Dithering can be set using the ditherMode parameter.
-typedef struct
-{
- ma_uint32 channelsIn;
- ma_uint32 channelsOut;
- ma_channel channelMapIn[MA_MAX_CHANNELS];
- ma_channel channelMapOut[MA_MAX_CHANNELS];
- ma_channel_mix_mode mixingMode;
- float weights[MA_MAX_CHANNELS][MA_MAX_CHANNELS]; /* [in][out]. Only used when mixingMode is set to ma_channel_mix_mode_custom_weights. */
- ma_bool32 noSSE2 : 1;
- ma_bool32 noAVX2 : 1;
- ma_bool32 noAVX512 : 1;
- ma_bool32 noNEON : 1;
- ma_channel_router_read_deinterleaved_proc onReadDeinterleaved;
- void* pUserData;
-} ma_channel_router_config;
-
-struct ma_channel_router
-{
- ma_channel_router_config config;
- ma_bool32 isPassthrough : 1;
- ma_bool32 isSimpleShuffle : 1;
- ma_bool32 useSSE2 : 1;
- ma_bool32 useAVX2 : 1;
- ma_bool32 useAVX512 : 1;
- ma_bool32 useNEON : 1;
- ma_uint8 shuffleTable[MA_MAX_CHANNELS];
-};
+The different dithering modes include the following, in order of efficiency:
+ |-----------|--------------------------|
+ | Type | Enum Token |
+ |-----------|--------------------------|
+ | None | ma_dither_mode_none |
+ | Rectangle | ma_dither_mode_rectangle |
+ | Triangle | ma_dither_mode_triangle |
+ |-----------|--------------------------|
+Note that even if the dither mode is set to something other than `ma_dither_mode_none`, it will be ignored for conversions where dithering is not needed.
+Dithering is available for the following conversions:
-typedef struct ma_src ma_src;
-typedef ma_uint32 (* ma_src_read_deinterleaved_proc)(ma_src* pSRC, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData); /* Returns the number of frames that were read. */
+ s16 -> u8
+ s24 -> u8
+ s32 -> u8
+ f32 -> u8
+ s24 -> s16
+ s32 -> s16
+ f32 -> s16
-typedef enum
-{
- ma_src_algorithm_linear = 0,
- ma_src_algorithm_sinc,
- ma_src_algorithm_none,
- ma_src_algorithm_default = ma_src_algorithm_linear
-} ma_src_algorithm;
+Note that it is not an error to pass something other than ma_dither_mode_none for conversions where dither is not used. It will just be ignored.
-typedef enum
-{
- ma_src_sinc_window_function_hann = 0,
- ma_src_sinc_window_function_rectangular,
- ma_src_sinc_window_function_default = ma_src_sinc_window_function_hann
-} ma_src_sinc_window_function;
-typedef struct
-{
- ma_src_sinc_window_function windowFunction;
- ma_uint32 windowWidth;
-} ma_src_config_sinc;
-typedef struct
-{
- ma_uint32 sampleRateIn;
- ma_uint32 sampleRateOut;
- ma_uint32 channels;
- ma_src_algorithm algorithm;
- ma_bool32 neverConsumeEndOfInput : 1;
- ma_bool32 noSSE2 : 1;
- ma_bool32 noAVX2 : 1;
- ma_bool32 noAVX512 : 1;
- ma_bool32 noNEON : 1;
- ma_src_read_deinterleaved_proc onReadDeinterleaved;
- void* pUserData;
- union
- {
- ma_src_config_sinc sinc;
- };
-} ma_src_config;
+Channel Conversion
+==================
+Channel conversion is used for channel rearrangement and conversion from one channel count to another. The `ma_channel_converter` API is used for channel
+conversion. Below is an example of initializing a simple channel converter which converts from mono to stereo.
-MA_ALIGNED_STRUCT(MA_SIMD_ALIGNMENT) ma_src
-{
- union
- {
- struct
- {
- MA_ALIGN(MA_SIMD_ALIGNMENT) float input[MA_MAX_CHANNELS][MA_SRC_INPUT_BUFFER_SIZE_IN_SAMPLES];
- float timeIn;
- ma_uint32 leftoverFrames;
- } linear;
+ ```c
+    ma_channel_converter_config config = ma_channel_converter_config_init(ma_format_s16, 1, NULL, 2, NULL, ma_channel_mix_mode_default, NULL);
+ result = ma_channel_converter_init(&config, &converter);
+ if (result != MA_SUCCESS) {
+ // Error.
+ }
+ ```
- struct
- {
- MA_ALIGN(MA_SIMD_ALIGNMENT) float input[MA_MAX_CHANNELS][MA_SRC_SINC_MAX_WINDOW_WIDTH*2 + MA_SRC_INPUT_BUFFER_SIZE_IN_SAMPLES];
- float timeIn;
- ma_uint32 inputFrameCount; /* The number of frames sitting in the input buffer, not including the first half of the window. */
- ma_uint32 windowPosInSamples; /* An offset of . */
- float table[MA_SRC_SINC_MAX_WINDOW_WIDTH*1 * MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION]; /* Precomputed lookup table. The +1 is used to avoid the need for an overflow check. */
- } sinc;
- };
+To perform the conversion simply call `ma_channel_converter_process_pcm_frames()` like so:
- ma_src_config config;
- ma_bool32 isEndOfInputLoaded : 1;
- ma_bool32 useSSE2 : 1;
- ma_bool32 useAVX2 : 1;
- ma_bool32 useAVX512 : 1;
- ma_bool32 useNEON : 1;
-};
+ ```c
+ ma_result result = ma_channel_converter_process_pcm_frames(&converter, pFramesOut, pFramesIn, frameCount);
+ if (result != MA_SUCCESS) {
+ // Error.
+ }
+ ```
+
+It is up to the caller to ensure the output buffer is large enough to accommodate the new PCM frames.
+
+The only formats supported are `ma_format_s16` and `ma_format_f32`. If you need another format you need to convert your data manually which you can do with
+`ma_pcm_convert()`, etc.
+
+Input and output PCM frames are always interleaved. Deinterleaved layouts are not supported.
+
+
+Channel Mapping
+---------------
+In addition to converting from one channel count to another, like the example above, the channel converter can also be used to rearrange channels. When
+initializing the channel converter, you can optionally pass in channel maps for both the input and output frames. If the channel counts are the same, and each
+channel map contains the same channel positions with the exception that they're in a different order, a simple shuffling of the channels will be performed. If,
+however, there is not a 1:1 mapping of channel positions, or the channel counts differ, the input channels will be mixed based on a mixing mode which is
+specified when initializing the `ma_channel_converter_config` object.
+
+When converting from mono to multi-channel, the mono channel is simply copied to each output channel. When going the other way around, the audio of each output
+channel is simply averaged and copied to the mono channel.
+
+In more complicated cases blending is used. The `ma_channel_mix_mode_simple` mode will drop excess channels and silence extra channels. For example, converting
+from 4 to 2 channels, the 3rd and 4th channels will be dropped, whereas converting from 2 to 4 channels will put silence into the 3rd and 4th channels.
+
+The `ma_channel_mix_mode_rectangular` mode uses spatial locality based on a rectangle to compute a simple distribution between input and output. Imagine sitting
+in the middle of a room, with speakers on the walls representing channel positions. The MA_CHANNEL_FRONT_LEFT position can be thought of as being in the corner
+of the front and left walls.
+
+Finally, the `ma_channel_mix_mode_custom_weights` mode can be used to use custom user-defined weights. Custom weights can be passed in as the last parameter of
+`ma_channel_converter_config_init()`.
+
+Predefined channel maps can be retrieved with `ma_get_standard_channel_map()`. This takes a `ma_standard_channel_map` enum as its first parameter, which can
+be one of the following:
+
+ |-----------------------------------|-----------------------------------------------------------|
+ | Name | Description |
+ |-----------------------------------|-----------------------------------------------------------|
+ | ma_standard_channel_map_default | Default channel map used by miniaudio. See below. |
+ | ma_standard_channel_map_microsoft | Channel map used by Microsoft's bitfield channel maps. |
+ | ma_standard_channel_map_alsa | Default ALSA channel map. |
+ | ma_standard_channel_map_rfc3551 | RFC 3551. Based on AIFF. |
+ | ma_standard_channel_map_flac | FLAC channel map. |
+ | ma_standard_channel_map_vorbis | Vorbis channel map. |
+ | ma_standard_channel_map_sound4 | FreeBSD's sound(4). |
+ | ma_standard_channel_map_sndio | sndio channel map. www.sndio.org/tips.html |
+ | ma_standard_channel_map_webaudio | https://webaudio.github.io/web-audio-api/#ChannelOrdering |
+ |-----------------------------------|-----------------------------------------------------------|
+
+Below are the channel maps used by default in miniaudio (ma_standard_channel_map_default):
+
+ |---------------|------------------------------|
+ | Channel Count | Mapping |
+ |---------------|------------------------------|
+ | 1 (Mono) | 0: MA_CHANNEL_MONO |
+ |---------------|------------------------------|
+ | 2 (Stereo) | 0: MA_CHANNEL_FRONT_LEFT |
+ | | 1: MA_CHANNEL_FRONT_RIGHT |
+ |---------------|------------------------------|
+ | 3 | 0: MA_CHANNEL_FRONT_LEFT |
+ | | 1: MA_CHANNEL_FRONT_RIGHT |
+ | | 2: MA_CHANNEL_FRONT_CENTER |
+ |---------------|------------------------------|
+ | 4 (Surround) | 0: MA_CHANNEL_FRONT_LEFT |
+ | | 1: MA_CHANNEL_FRONT_RIGHT |
+ | | 2: MA_CHANNEL_FRONT_CENTER |
+ | | 3: MA_CHANNEL_BACK_CENTER |
+ |---------------|------------------------------|
+ | 5 | 0: MA_CHANNEL_FRONT_LEFT |
+ | | 1: MA_CHANNEL_FRONT_RIGHT |
+ | | 2: MA_CHANNEL_FRONT_CENTER |
+ | | 3: MA_CHANNEL_BACK_LEFT |
+ | | 4: MA_CHANNEL_BACK_RIGHT |
+ |---------------|------------------------------|
+ | 6 (5.1) | 0: MA_CHANNEL_FRONT_LEFT |
+ | | 1: MA_CHANNEL_FRONT_RIGHT |
+ | | 2: MA_CHANNEL_FRONT_CENTER |
+ | | 3: MA_CHANNEL_LFE |
+ | | 4: MA_CHANNEL_SIDE_LEFT |
+ | | 5: MA_CHANNEL_SIDE_RIGHT |
+ |---------------|------------------------------|
+ | 7 | 0: MA_CHANNEL_FRONT_LEFT |
+ | | 1: MA_CHANNEL_FRONT_RIGHT |
+ | | 2: MA_CHANNEL_FRONT_CENTER |
+ | | 3: MA_CHANNEL_LFE |
+ | | 4: MA_CHANNEL_BACK_CENTER |
+    |               | 5: MA_CHANNEL_SIDE_LEFT      |
+    |               | 6: MA_CHANNEL_SIDE_RIGHT     |
+ |---------------|------------------------------|
+ | 8 (7.1) | 0: MA_CHANNEL_FRONT_LEFT |
+ | | 1: MA_CHANNEL_FRONT_RIGHT |
+ | | 2: MA_CHANNEL_FRONT_CENTER |
+ | | 3: MA_CHANNEL_LFE |
+ | | 4: MA_CHANNEL_BACK_LEFT |
+ | | 5: MA_CHANNEL_BACK_RIGHT |
+ | | 6: MA_CHANNEL_SIDE_LEFT |
+ | | 7: MA_CHANNEL_SIDE_RIGHT |
+ |---------------|------------------------------|
+ | Other | All channels set to 0. This |
+ | | is equivalent to the same |
+ | | mapping as the device. |
+ |---------------|------------------------------|
+
+
+
+Resampling
+==========
+Resampling is achieved with the `ma_resampler` object. To create a resampler object, do something like the following:
-typedef struct ma_pcm_converter ma_pcm_converter;
-typedef ma_uint32 (* ma_pcm_converter_read_proc)(ma_pcm_converter* pDSP, void* pSamplesOut, ma_uint32 frameCount, void* pUserData);
+ ```c
+ ma_resampler_config config = ma_resampler_config_init(ma_format_s16, channels, sampleRateIn, sampleRateOut, ma_resample_algorithm_linear);
+ ma_resampler resampler;
+ ma_result result = ma_resampler_init(&config, &resampler);
+ if (result != MA_SUCCESS) {
+ // An error occurred...
+ }
+ ```
-typedef struct
-{
- ma_format formatIn;
- ma_uint32 channelsIn;
- ma_uint32 sampleRateIn;
- ma_channel channelMapIn[MA_MAX_CHANNELS];
- ma_format formatOut;
- ma_uint32 channelsOut;
- ma_uint32 sampleRateOut;
- ma_channel channelMapOut[MA_MAX_CHANNELS];
- ma_channel_mix_mode channelMixMode;
- ma_dither_mode ditherMode;
- ma_src_algorithm srcAlgorithm;
- ma_bool32 allowDynamicSampleRate;
- ma_bool32 neverConsumeEndOfInput : 1; /* <-- For SRC. */
- ma_bool32 noSSE2 : 1;
- ma_bool32 noAVX2 : 1;
- ma_bool32 noAVX512 : 1;
- ma_bool32 noNEON : 1;
- ma_pcm_converter_read_proc onRead;
- void* pUserData;
- union
- {
- ma_src_config_sinc sinc;
- };
-} ma_pcm_converter_config;
+Do the following to uninitialize the resampler:
-MA_ALIGNED_STRUCT(MA_SIMD_ALIGNMENT) ma_pcm_converter
-{
- ma_pcm_converter_read_proc onRead;
- void* pUserData;
- ma_format_converter formatConverterIn; /* For converting data to f32 in preparation for further processing. */
- ma_format_converter formatConverterOut; /* For converting data to the requested output format. Used as the final step in the processing pipeline. */
- ma_channel_router channelRouter; /* For channel conversion. */
- ma_src src; /* For sample rate conversion. */
- ma_bool32 isDynamicSampleRateAllowed : 1; /* ma_pcm_converter_set_input_sample_rate() and ma_pcm_converter_set_output_sample_rate() will fail if this is set to false. */
- ma_bool32 isPreFormatConversionRequired : 1;
- ma_bool32 isPostFormatConversionRequired : 1;
- ma_bool32 isChannelRoutingRequired : 1;
- ma_bool32 isSRCRequired : 1;
- ma_bool32 isChannelRoutingAtStart : 1;
- ma_bool32 isPassthrough : 1; /* <-- Will be set to true when the DSP pipeline is an optimized passthrough. */
-};
+ ```c
+ ma_resampler_uninit(&resampler);
+ ```
+The following example shows how data can be processed:
-/************************************************************************************************************************************************************
-*************************************************************************************************************************************************************
+ ```c
+ ma_uint64 frameCountIn = 1000;
+ ma_uint64 frameCountOut = 2000;
+ ma_result result = ma_resampler_process_pcm_frames(&resampler, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut);
+ if (result != MA_SUCCESS) {
+ // An error occurred...
+ }
-DATA CONVERSION
-===============
+ // At this point, frameCountIn contains the number of input frames that were consumed and frameCountOut contains the number of output frames written.
+ ```
-This section contains the APIs for data conversion. You will find everything here for channel mapping, sample format conversion, resampling, etc.
+To initialize the resampler you first need to set up a config (`ma_resampler_config`) with `ma_resampler_config_init()`. You need to specify the sample format
+you want to use, the number of channels, the input and output sample rate, and the algorithm.
-*************************************************************************************************************************************************************
-************************************************************************************************************************************************************/
+The sample format can be either `ma_format_s16` or `ma_format_f32`. If you need a different format you will need to perform pre- and post-conversions yourself
+where necessary. Note that the format is the same for both input and output. The format cannot be changed after initialization.
-/************************************************************************************************************************************************************
+The resampler supports multiple channels and is always interleaved (both input and output). The channel count cannot be changed after initialization.
-Channel Maps
-============
+The sample rates can be anything other than zero, and are always specified in hertz. They should be set to something like 44100, etc. The sample rate is the
+only configuration property that can be changed after initialization.
-Below is the channel map used by ma_standard_channel_map_default:
-
-|---------------|------------------------------|
-| Channel Count | Mapping |
-|---------------|------------------------------|
-| 1 (Mono) | 0: MA_CHANNEL_MONO |
-|---------------|------------------------------|
-| 2 (Stereo) | 0: MA_CHANNEL_FRONT_LEFT |
-| | 1: MA_CHANNEL_FRONT_RIGHT |
-|---------------|------------------------------|
-| 3 | 0: MA_CHANNEL_FRONT_LEFT |
-| | 1: MA_CHANNEL_FRONT_RIGHT |
-| | 2: MA_CHANNEL_FRONT_CENTER |
-|---------------|------------------------------|
-| 4 (Surround) | 0: MA_CHANNEL_FRONT_LEFT |
-| | 1: MA_CHANNEL_FRONT_RIGHT |
-| | 2: MA_CHANNEL_FRONT_CENTER |
-| | 3: MA_CHANNEL_BACK_CENTER |
-|---------------|------------------------------|
-| 5 | 0: MA_CHANNEL_FRONT_LEFT |
-| | 1: MA_CHANNEL_FRONT_RIGHT |
-| | 2: MA_CHANNEL_FRONT_CENTER |
-| | 3: MA_CHANNEL_BACK_LEFT |
-| | 4: MA_CHANNEL_BACK_RIGHT |
-|---------------|------------------------------|
-| 6 (5.1) | 0: MA_CHANNEL_FRONT_LEFT |
-| | 1: MA_CHANNEL_FRONT_RIGHT |
-| | 2: MA_CHANNEL_FRONT_CENTER |
-| | 3: MA_CHANNEL_LFE |
-| | 4: MA_CHANNEL_SIDE_LEFT |
-| | 5: MA_CHANNEL_SIDE_RIGHT |
-|---------------|------------------------------|
-| 7 | 0: MA_CHANNEL_FRONT_LEFT |
-| | 1: MA_CHANNEL_FRONT_RIGHT |
-| | 2: MA_CHANNEL_FRONT_CENTER |
-| | 3: MA_CHANNEL_LFE |
-| | 4: MA_CHANNEL_BACK_CENTER |
-| | 4: MA_CHANNEL_SIDE_LEFT |
-| | 5: MA_CHANNEL_SIDE_RIGHT |
-|---------------|------------------------------|
-| 8 (7.1) | 0: MA_CHANNEL_FRONT_LEFT |
-| | 1: MA_CHANNEL_FRONT_RIGHT |
-| | 2: MA_CHANNEL_FRONT_CENTER |
-| | 3: MA_CHANNEL_LFE |
-| | 4: MA_CHANNEL_BACK_LEFT |
-| | 5: MA_CHANNEL_BACK_RIGHT |
-| | 6: MA_CHANNEL_SIDE_LEFT |
-| | 7: MA_CHANNEL_SIDE_RIGHT |
-|---------------|------------------------------|
-| Other | All channels set to 0. This |
-| | is equivalent to the same |
-| | mapping as the device. |
-|---------------|------------------------------|
+The miniaudio resampler supports multiple algorithms:
-************************************************************************************************************************************************************/
+ |-----------|------------------------------|
+ | Algorithm | Enum Token |
+ |-----------|------------------------------|
+ | Linear | ma_resample_algorithm_linear |
+ | Speex | ma_resample_algorithm_speex |
+ |-----------|------------------------------|
-/*
-Helper for retrieving a standard channel map.
-*/
-void ma_get_standard_channel_map(ma_standard_channel_map standardChannelMap, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]);
+Because Speex is not public domain it is strictly opt-in and the code is stored in separate files. If you opt in to the Speex backend you will need to consider
+its license, the text of which can be found in its source files in "extras/speex_resampler". Details on how to opt in to the Speex resampler are explained in
+the Speex Resampler section below.
-/*
-Copies a channel map.
-*/
-void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels);
+The algorithm cannot be changed after initialization.
+Processing always happens on a per PCM frame basis and always assumes interleaved input and output. De-interleaved processing is not supported. To process
+frames, use `ma_resampler_process_pcm_frames()`. On input, this function takes the number of output frames you can fit in the output buffer and the number of
+input frames contained in the input buffer. On output these variables contain the number of output frames that were written to the output buffer and the
+number of input frames that were consumed in the process. You can pass in NULL for the input buffer in which case it will be treated as an infinitely large
+buffer of zeros. The output buffer can also be NULL, in which case the processing will be treated as a seek.
-/*
-Determines whether or not a channel map is valid.
+The sample rate can be changed dynamically on the fly. You can change this with explicit sample rates with `ma_resampler_set_rate()` and also with a decimal
+ratio with `ma_resampler_set_rate_ratio()`. The ratio is in/out.
-A blank channel map is valid (all channels set to MA_CHANNEL_NONE). The way a blank channel map is handled is context specific, but
-is usually treated as a passthrough.
+Sometimes it's useful to know exactly how many input frames will be required to output a specific number of frames. You can calculate this with
+`ma_resampler_get_required_input_frame_count()`. Likewise, it's sometimes useful to know exactly how many frames would be output given a certain number of
+input frames. You can do this with `ma_resampler_get_expected_output_frame_count()`.
-Invalid channel maps:
- - A channel map with no channels
- - A channel map with more than one channel and a mono channel
-*/
-ma_bool32 ma_channel_map_valid(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]);
+Due to the nature of how resampling works, the resampler introduces some latency. This can be retrieved in terms of both the input rate and the output rate
+with `ma_resampler_get_input_latency()` and `ma_resampler_get_output_latency()`.
-/*
-Helper for comparing two channel maps for equality.
-This assumes the channel count is the same between the two.
-*/
-ma_bool32 ma_channel_map_equal(ma_uint32 channels, const ma_channel channelMapA[MA_MAX_CHANNELS], const ma_channel channelMapB[MA_MAX_CHANNELS]);
+Resampling Algorithms
+---------------------
+The choice of resampling algorithm depends on your situation and requirements. The linear resampler is the most efficient and has the least amount of latency,
+but at the expense of poorer quality. The Speex resampler is higher quality, but slower with more latency. It also performs several heap allocations internally
+for memory management.
-/*
-Helper for determining if a channel map is blank (all channels set to MA_CHANNEL_NONE).
-*/
-ma_bool32 ma_channel_map_blank(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]);
-/*
-Helper for determining whether or not a channel is present in the given channel map.
-*/
-ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS], ma_channel channelPosition);
+Linear Resampling
+-----------------
+The linear resampler is the fastest, but comes at the expense of poorer quality. There is, however, some control over the quality of the linear resampler which
+may make it a suitable option depending on your requirements.
+The linear resampler performs low-pass filtering before or after downsampling or upsampling, depending on the sample rates you're converting between. When
+decreasing the sample rate, the low-pass filter will be applied before downsampling. When increasing the rate it will be performed after upsampling. By default
+a fourth order low-pass filter will be applied. This can be configured via the `lpfOrder` configuration variable. Setting this to 0 will disable filtering.
-/************************************************************************************************************************************************************
+The low-pass filter has a cutoff frequency which defaults to half the sample rate of the lowest of the input and output sample rates (Nyquist Frequency). This
+can be controlled with the `lpfNyquistFactor` config variable. This defaults to 1, and should be in the range of 0..1, although a value of 0 does not make
+sense and should be avoided. A value of 1 will use the Nyquist Frequency as the cutoff. A value of 0.5 will use half the Nyquist Frequency as the cutoff, etc.
+Values less than 1 will result in more washed out sound due to more of the higher frequencies being removed. This config variable has no impact on performance
+and is a purely perceptual configuration.
-Format Conversion
-=================
-The format converter serves two purposes:
- 1) Conversion between data formats (u8 to f32, etc.)
- 2) Interleaving and deinterleaving
+The API for the linear resampler is the same as the main resampler API, only it's called `ma_linear_resampler`.
-When initializing a converter, you specify the input and output formats (u8, s16, etc.) and read callbacks. There are two read callbacks - one for
-interleaved input data (onRead) and another for deinterleaved input data (onReadDeinterleaved). You implement whichever is most convenient for you. You
-can implement both, but it's not recommended as it just introduces unnecessary complexity.
-To read data as interleaved samples, use ma_format_converter_read(). Otherwise use ma_format_converter_read_deinterleaved().
+Speex Resampling
+----------------
+The Speex resampler is made up of third party code which is released under the BSD license. Because it is licensed differently to miniaudio, which is public
+domain, it is strictly opt-in and all of its code is stored in separate files. If you opt in to the Speex resampler you must consider the license text in its
+source files. To opt in, you must first #include the following file before the implementation of miniaudio.h:
-Dithering
----------
-The format converter also supports dithering. Dithering can be set using ditherMode variable in the config, like so.
+ #include "extras/speex_resampler/ma_speex_resampler.h"
- pConfig->ditherMode = ma_dither_mode_rectangle;
+Both the header and implementation is contained within the same file. The implementation can be included in your program like so:
-The different dithering modes include the following, in order of efficiency:
- - None: ma_dither_mode_none
- - Rectangle: ma_dither_mode_rectangle
- - Triangle: ma_dither_mode_triangle
+ #define MINIAUDIO_SPEEX_RESAMPLER_IMPLEMENTATION
+ #include "extras/speex_resampler/ma_speex_resampler.h"
-Note that even if the dither mode is set to something other than ma_dither_mode_none, it will be ignored for conversions where dithering is not needed.
-Dithering is available for the following conversions:
- - s16 -> u8
- - s24 -> u8
- - s32 -> u8
- - f32 -> u8
- - s24 -> s16
- - s32 -> s16
- - f32 -> s16
+Note that even if you opt-in to the Speex backend, miniaudio won't use it unless you explicitly ask for it in the respective config of the object you are
+initializing. If you try to use the Speex resampler without opting in, initialization of the `ma_resampler` object will fail with `MA_NO_BACKEND`.
-Note that it is not an error to pass something other than ma_dither_mode_none for conversions where dither is not used. It will just be ignored.
+The only configuration option to consider with the Speex resampler is the `speex.quality` config variable. This is a value between 0 and 10, with 0 being
+the fastest with the poorest quality and 10 being the slowest with the highest quality. The default value is 3.
-************************************************************************************************************************************************************/
-/*
-Initializes a format converter.
-*/
-ma_result ma_format_converter_init(const ma_format_converter_config* pConfig, ma_format_converter* pConverter);
-/*
-Reads data from the format converter as interleaved channels.
-*/
-ma_uint64 ma_format_converter_read(ma_format_converter* pConverter, ma_uint64 frameCount, void* pFramesOut, void* pUserData);
+General Data Conversion
+=======================
+The `ma_data_converter` API can be used to wrap sample format conversion, channel conversion and resampling into one operation. This is what miniaudio uses
+internally to convert between the format requested when the device was initialized and the format of the backend's native device. The API for general data
+conversion is very similar to the resampling API. Create a `ma_data_converter` object like this:
-/*
-Reads data from the format converter as deinterleaved channels.
-*/
-ma_uint64 ma_format_converter_read_deinterleaved(ma_format_converter* pConverter, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData);
+ ```c
+ ma_data_converter_config config = ma_data_converter_config_init(inputFormat, outputFormat, inputChannels, outputChannels, inputSampleRate, outputSampleRate);
+ ma_data_converter converter;
+ ma_result result = ma_data_converter_init(&config, &converter);
+ if (result != MA_SUCCESS) {
+ // An error occurred...
+ }
+ ```
-/*
-Helper for initializing a format converter config.
-*/
-ma_format_converter_config ma_format_converter_config_init_new(void);
-ma_format_converter_config ma_format_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channels, ma_format_converter_read_proc onRead, void* pUserData);
-ma_format_converter_config ma_format_converter_config_init_deinterleaved(ma_format formatIn, ma_format formatOut, ma_uint32 channels, ma_format_converter_read_deinterleaved_proc onReadDeinterleaved, void* pUserData);
+In the example above we use `ma_data_converter_config_init()` to initialize the config, however there's many more properties that can be configured, such as
+channel maps and resampling quality. Something like the following may be more suitable depending on your requirements:
+ ```c
+ ma_data_converter_config config = ma_data_converter_config_init_default();
+ config.formatIn = inputFormat;
+ config.formatOut = outputFormat;
+ config.channelsIn = inputChannels;
+ config.channelsOut = outputChannels;
+ config.sampleRateIn = inputSampleRate;
+ config.sampleRateOut = outputSampleRate;
+    ma_get_standard_channel_map(ma_standard_channel_map_flac, config.channelsIn, config.channelMapIn);
+ config.resampling.linear.lpfOrder = MA_MAX_FILTER_ORDER;
+ ```
+Do the following to uninitialize the data converter:
-/************************************************************************************************************************************************************
+ ```c
+ ma_data_converter_uninit(&converter);
+ ```
-Channel Routing
-===============
-There are two main things you can do with the channel router:
- 1) Rearrange channels
- 2) Convert from one channel count to another
+The following example shows how data can be processed:
-Channel Rearrangement
----------------------
-A simple example of channel rearrangement may be swapping the left and right channels in a stereo stream. To do this you just pass in the same channel
-count for both the input and output with channel maps that contain the same channels (in a different order).
+ ```c
+ ma_uint64 frameCountIn = 1000;
+ ma_uint64 frameCountOut = 2000;
+ ma_result result = ma_data_converter_process_pcm_frames(&converter, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut);
+ if (result != MA_SUCCESS) {
+ // An error occurred...
+ }
-Channel Conversion
-------------------
-The channel router can also convert from one channel count to another, such as converting a 5.1 stream to stero. When changing the channel count, the
-router will first perform a 1:1 mapping of channel positions that are present in both the input and output channel maps. The second thing it will do
-is distribute the input mono channel (if any) across all output channels, excluding any None and LFE channels. If there is an output mono channel, all
-input channels will be averaged, excluding any None and LFE channels.
-
-The last case to consider is when a channel position in the input channel map is not present in the output channel map, and vice versa. In this case the
-channel router will perform a blend of other related channels to produce an audible channel. There are several blending modes.
- 1) Simple
- Unmatched channels are silenced.
- 2) Planar Blending
- Channels are blended based on a set of planes that each speaker emits audio from.
-
-Rectangular / Planar Blending
------------------------------
-In this mode, channel positions are associated with a set of planes where the channel conceptually emits audio from. An example is the front/left speaker.
-This speaker is positioned to the front of the listener, so you can think of it as emitting audio from the front plane. It is also positioned to the left
-of the listener so you can think of it as also emitting audio from the left plane. Now consider the (unrealistic) situation where the input channel map
-contains only the front/left channel position, but the output channel map contains both the front/left and front/center channel. When deciding on the audio
-data to send to the front/center speaker (which has no 1:1 mapping with an input channel) we need to use some logic based on our available input channel
-positions.
-
-As mentioned earlier, our front/left speaker is, conceptually speaking, emitting audio from the front _and_ the left planes. Similarly, the front/center
-speaker is emitting audio from _only_ the front plane. What these two channels have in common is that they are both emitting audio from the front plane.
-Thus, it makes sense that the front/center speaker should receive some contribution from the front/left channel. How much contribution depends on their
-planar relationship (thus the name of this blending technique).
-
-Because the front/left channel is emitting audio from two planes (front and left), you can think of it as though it's willing to dedicate 50% of it's total
-volume to each of it's planes (a channel position emitting from 1 plane would be willing to given 100% of it's total volume to that plane, and a channel
-position emitting from 3 planes would be willing to given 33% of it's total volume to each plane). Similarly, the front/center speaker is emitting audio
-from only one plane so you can think of it as though it's willing to _take_ 100% of it's volume from front plane emissions. Now, since the front/left
-channel is willing to _give_ 50% of it's total volume to the front plane, and the front/center speaker is willing to _take_ 100% of it's total volume
-from the front, you can imagine that 50% of the front/left speaker will be given to the front/center speaker.
-
-Usage
------
-To use the channel router you need to specify three things:
- 1) The input channel count and channel map
- 2) The output channel count and channel map
- 3) The mixing mode to use in the case where a 1:1 mapping is unavailable
+ // At this point, frameCountIn contains the number of input frames that were consumed and frameCountOut contains the number of output frames written.
+ ```
-Note that input and output data is always deinterleaved 32-bit floating point.
+The data converter supports multiple channels and is always interleaved (both input and output). The channel count cannot be changed after initialization.
-Initialize the channel router with ma_channel_router_init(). You will need to pass in a config object which specifies the input and output configuration,
-mixing mode and a callback for sending data to the router. This callback will be called when input data needs to be sent to the router for processing. Note
-that the mixing mode is only used when a 1:1 mapping is unavailable. This includes the custom weights mode.
+Sample rates can be anything other than zero, and are always specified in hertz. They should be set to something like 44100, etc. The sample rate is the only
+configuration property that can be changed after initialization, but only if the `resampling.allowDynamicSampleRate` member of `ma_data_converter_config` is
+set to MA_TRUE. To change the sample rate, use `ma_data_converter_set_rate()` or `ma_data_converter_set_rate_ratio()`. The ratio must be in/out. The resampling
+algorithm cannot be changed after initialization.
-Read data from the channel router with ma_channel_router_read_deinterleaved(). Output data is always 32-bit floating point.
+Processing always happens on a per PCM frame basis and always assumes interleaved input and output. De-interleaved processing is not supported. To process
+frames, use `ma_data_converter_process_pcm_frames()`. On input, this function takes the number of output frames you can fit in the output buffer and the number
+of input frames contained in the input buffer. On output these variables contain the number of output frames that were written to the output buffer and the
+number of input frames that were consumed in the process. You can pass in NULL for the input buffer in which case it will be treated as an infinitely large
+buffer of zeros. The output buffer can also be NULL, in which case the processing will be treated as a seek.
-************************************************************************************************************************************************************/
+Sometimes it's useful to know exactly how many input frames will be required to output a specific number of frames. You can calculate this with
+`ma_data_converter_get_required_input_frame_count()`. Likewise, it's sometimes useful to know exactly how many frames would be output given a certain number of
+input frames. You can do this with `ma_data_converter_get_expected_output_frame_count()`.
-/*
-Initializes a channel router where it is assumed that the input data is non-interleaved.
-*/
-ma_result ma_channel_router_init(const ma_channel_router_config* pConfig, ma_channel_router* pRouter);
+Due to the nature of how resampling works, the data converter introduces some latency if resampling is required. This can be retrieved in terms of both the
+input rate and the output rate with `ma_data_converter_get_input_latency()` and `ma_data_converter_get_output_latency()`.
-/*
-Reads data from the channel router as deinterleaved channels.
-*/
-ma_uint64 ma_channel_router_read_deinterleaved(ma_channel_router* pRouter, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData);
-/*
-Helper for initializing a channel router config.
-*/
-ma_channel_router_config ma_channel_router_config_init(ma_uint32 channelsIn, const ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint32 channelsOut, const ma_channel channelMapOut[MA_MAX_CHANNELS], ma_channel_mix_mode mixingMode, ma_channel_router_read_deinterleaved_proc onRead, void* pUserData);
+Filtering
+=========
-/************************************************************************************************************************************************************
+Biquad Filtering
+----------------
+Biquad filtering is achieved with the `ma_biquad` API. Example:
-Sample Rate Conversion
-======================
+ ```c
+ ma_biquad_config config = ma_biquad_config_init(ma_format_f32, channels, b0, b1, b2, a0, a1, a2);
+ ma_result result = ma_biquad_init(&config, &biquad);
+ if (result != MA_SUCCESS) {
+ // Error.
+ }
-************************************************************************************************************************************************************/
+ ...
-/*
-Initializes a sample rate conversion object.
-*/
-ma_result ma_src_init(const ma_src_config* pConfig, ma_src* pSRC);
+ ma_biquad_process_pcm_frames(&biquad, pFramesOut, pFramesIn, frameCount);
+ ```
-/*
-Dynamically adjusts the sample rate.
+Biquad filtering is implemented using transposed direct form 2. The numerator coefficients are b0, b1 and b2, and the denominator coefficients are a0, a1 and
+a2. The a0 coefficient is required and coefficients must not be pre-normalized.
-This is useful for dynamically adjust pitch. Keep in mind, however, that this will speed up or slow down the sound. If this
-is not acceptable you will need to use your own algorithm.
-*/
-ma_result ma_src_set_sample_rate(ma_src* pSRC, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut);
+Supported formats are `ma_format_s16` and `ma_format_f32`. If you need to use a different format you need to convert it yourself beforehand. When using
+`ma_format_s16` the biquad filter will use fixed point arithmetic. When using `ma_format_f32`, floating point arithmetic will be used.
-/*
-Reads a number of frames.
+Input and output frames are always interleaved.
-Returns the number of frames actually read.
-*/
-ma_uint64 ma_src_read_deinterleaved(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData);
+Filtering can be applied in-place by passing in the same pointer for both the input and output buffers, like so:
-/*
-Helper for creating a sample rate conversion config.
-*/
-ma_src_config ma_src_config_init_new(void);
-ma_src_config ma_src_config_init(ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_uint32 channels, ma_src_read_deinterleaved_proc onReadDeinterleaved, void* pUserData);
+ ```c
+ ma_biquad_process_pcm_frames(&biquad, pMyData, pMyData, frameCount);
+ ```
+If you need to change the values of the coefficients, but maintain the values in the registers you can do so with `ma_biquad_reinit()`. This is useful if you
+need to change the properties of the filter while keeping the values of registers valid to avoid glitching. Do not use `ma_biquad_init()` for this as it will
+do a full initialization which involves clearing the registers to 0. Note that changing the format or channel count after initialization is invalid and will
+result in an error.
-/************************************************************************************************************************************************************
-Conversion
+Low-Pass Filtering
+------------------
+Low-pass filtering is achieved with the following APIs:
-************************************************************************************************************************************************************/
+ |---------|------------------------------------------|
+ | API | Description |
+ |---------|------------------------------------------|
+ | ma_lpf1 | First order low-pass filter |
+ | ma_lpf2 | Second order low-pass filter |
+ | ma_lpf | High order low-pass filter (Butterworth) |
+ |---------|------------------------------------------|
-/*
-Initializes a DSP object.
-*/
-ma_result ma_pcm_converter_init(const ma_pcm_converter_config* pConfig, ma_pcm_converter* pDSP);
+Low-pass filter example:
-/*
-Dynamically adjusts the input sample rate.
+ ```c
+ ma_lpf_config config = ma_lpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order);
+ ma_result result = ma_lpf_init(&config, &lpf);
+ if (result != MA_SUCCESS) {
+ // Error.
+ }
-This will fail is the DSP was not initialized with allowDynamicSampleRate.
+ ...
-DEPRECATED. Use ma_pcm_converter_set_sample_rate() instead.
-*/
-ma_result ma_pcm_converter_set_input_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateOut);
+ ma_lpf_process_pcm_frames(&lpf, pFramesOut, pFramesIn, frameCount);
+ ```
-/*
-Dynamically adjusts the output sample rate.
+Supported formats are `ma_format_s16` and `ma_format_f32`. If you need to use a different format you need to convert it yourself beforehand. Input and output
+frames are always interleaved.
-This is useful for dynamically adjust pitch. Keep in mind, however, that this will speed up or slow down the sound. If this
-is not acceptable you will need to use your own algorithm.
+Filtering can be applied in-place by passing in the same pointer for both the input and output buffers, like so:
-This will fail is the DSP was not initialized with allowDynamicSampleRate.
+ ```c
+ ma_lpf_process_pcm_frames(&lpf, pMyData, pMyData, frameCount);
+ ```
-DEPRECATED. Use ma_pcm_converter_set_sample_rate() instead.
-*/
-ma_result ma_pcm_converter_set_output_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateOut);
+The maximum filter order is limited to MA_MAX_FILTER_ORDER which is set to 8. If you need more, you can chain first and second order filters together.
-/*
-Dynamically adjusts the output sample rate.
+ ```c
+ for (iFilter = 0; iFilter < filterCount; iFilter += 1) {
+ ma_lpf2_process_pcm_frames(&lpf2[iFilter], pMyData, pMyData, frameCount);
+ }
+ ```
-This is useful for dynamically adjust pitch. Keep in mind, however, that this will speed up or slow down the sound. If this
-is not acceptable you will need to use your own algorithm.
+If you need to change the configuration of the filter, but need to maintain the state of internal registers you can do so with `ma_lpf_reinit()`. This may be
+useful if you need to change the sample rate and/or cutoff frequency dynamically while maintaining smooth transitions. Note that changing the format or channel
+count after initialization is invalid and will result in an error.
-This will fail if the DSP was not initialized with allowDynamicSampleRate.
-*/
-ma_result ma_pcm_converter_set_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut);
+The `ma_lpf` object supports a configurable order, but if you only need a first order filter you may want to consider using `ma_lpf1`. Likewise, if you only
+need a second order filter you can use `ma_lpf2`. The advantage of this is that they're lighter weight and a bit more efficient.
-/*
-Reads a number of frames and runs them through the DSP processor.
-*/
-ma_uint64 ma_pcm_converter_read(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint64 frameCount);
+If an even filter order is specified, a series of second order filters will be processed in a chain. If an odd filter order is specified, a first order filter
+will be applied, followed by a series of second order filters in a chain.
-/*
-Helper for initializing a ma_pcm_converter_config object.
-*/
-ma_pcm_converter_config ma_pcm_converter_config_init_new(void);
-ma_pcm_converter_config ma_pcm_converter_config_init(ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_pcm_converter_read_proc onRead, void* pUserData);
-ma_pcm_converter_config ma_pcm_converter_config_init_ex(ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_channel channelMapIn[MA_MAX_CHANNELS], ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_channel channelMapOut[MA_MAX_CHANNELS], ma_pcm_converter_read_proc onRead, void* pUserData);
-/*
-High-level helper for doing a full format conversion in one go. Returns the number of output frames. Call this with pOut set to NULL to
-determine the required size of the output buffer.
+High-Pass Filtering
+-------------------
+High-pass filtering is achieved with the following APIs:
-A return value of 0 indicates an error.
+ |---------|-------------------------------------------|
+ | API | Description |
+ |---------|-------------------------------------------|
+ | ma_hpf1 | First order high-pass filter |
+ | ma_hpf2 | Second order high-pass filter |
+ | ma_hpf | High order high-pass filter (Butterworth) |
+ |---------|-------------------------------------------|
-This function is useful for one-off bulk conversions, but if you're streaming data you should use the DSP APIs instead.
-*/
-ma_uint64 ma_convert_frames(void* pOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_uint64 frameCount);
-ma_uint64 ma_convert_frames_ex(void* pOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_channel channelMapOut[MA_MAX_CHANNELS], const void* pIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint64 frameCount);
+High-pass filters work exactly the same as low-pass filters, only the APIs are called `ma_hpf1`, `ma_hpf2` and `ma_hpf`. See example code for low-pass filters
+for example usage.
-/************************************************************************************************************************************************************
+Band-Pass Filtering
+-------------------
+Band-pass filtering is achieved with the following APIs:
-Ring Buffer
-===========
+ |---------|-------------------------------|
+ | API | Description |
+ |---------|-------------------------------|
+ | ma_bpf2 | Second order band-pass filter |
+ | ma_bpf | High order band-pass filter |
+ |---------|-------------------------------|
-Features
---------
-- Lock free (assuming single producer, single consumer)
-- Support for interleaved and deinterleaved streams
-- Allows the caller to allocate their own block of memory
+Band-pass filters work exactly the same as low-pass filters, only the APIs are called `ma_bpf2` and `ma_bpf`. See example code for low-pass filters for example
+usage. Note that the order for band-pass filters must be an even number which means there is no first order band-pass filter, unlike low-pass and high-pass
+filters.
-Usage
------
-- Call ma_rb_init() to initialize a simple buffer, with an optional pre-allocated buffer. If you pass in NULL
- for the pre-allocated buffer, it will be allocated for you and free()'d in ma_rb_uninit(). If you pass in
- your own pre-allocated buffer, free()-ing is left to you.
-
-- Call ma_rb_init_ex() if you need a deinterleaved buffer. The data for each sub-buffer is offset from each
- other based on the stride. Use ma_rb_get_subbuffer_stride(), ma_rb_get_subbuffer_offset() and
- ma_rb_get_subbuffer_ptr() to manage your sub-buffers.
-
-- Use ma_rb_acquire_read() and ma_rb_acquire_write() to retrieve a pointer to a section of the ring buffer.
- You specify the number of bytes you need, and on output it will set to what was actually acquired. If the
- read or write pointer is positioned such that the number of bytes requested will require a loop, it will be
- clamped to the end of the buffer. Therefore, the number of bytes you're given may be less than the number
- you requested.
-
-- After calling ma_rb_acquire_read/write(), you do your work on the buffer and then "commit" it with
- ma_rb_commit_read/write(). This is where the read/write pointers are updated. When you commit you need to
- pass in the buffer that was returned by the earlier call to ma_rb_acquire_read/write() and is only used
- for validation. The number of bytes passed to ma_rb_commit_read/write() is what's used to increment the
- pointers.
-
-- If you want to correct for drift between the write pointer and the read pointer you can use a combination
- of ma_rb_pointer_distance(), ma_rb_seek_read() and ma_rb_seek_write(). Note that you can only move the
- pointers forward, and you should only move the read pointer forward via the consumer thread, and the write
- pointer forward by the producer thread. If there is too much space between the pointers, move the read
- pointer forward. If there is too little space between the pointers, move the write pointer forward.
-
-
-Notes
------
-- Thread safety depends on a single producer, single consumer model. Only one thread is allowed to write, and
- only one thread is allowed to read. The producer is the only one allowed to move the write pointer, and the
- consumer is the only one allowed to move the read pointer.
-- Operates on bytes. Use ma_pcm_rb to operate in terms of PCM frames.
-- Maximum buffer size in bytes is 0x7FFFFFFF-(MA_SIMD_ALIGNMENT-1) because of reasons.
+Notch Filtering
+---------------
+Notch filtering is achieved with the following APIs:
-PCM Ring Buffer
-===============
-This is the same as the regular ring buffer, except that it works on PCM frames instead of bytes.
+ |-----------|------------------------------------------|
+ | API | Description |
+ |-----------|------------------------------------------|
+ | ma_notch2 | Second order notching filter |
+ |-----------|------------------------------------------|
-************************************************************************************************************************************************************/
-typedef struct
-{
- void* pBuffer;
- ma_uint32 subbufferSizeInBytes;
- ma_uint32 subbufferCount;
- ma_uint32 subbufferStrideInBytes;
- volatile ma_uint32 encodedReadOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. */
- volatile ma_uint32 encodedWriteOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. */
- ma_bool32 ownsBuffer : 1; /* Used to know whether or not miniaudio is responsible for free()-ing the buffer. */
- ma_bool32 clearOnWriteAcquire : 1; /* When set, clears the acquired write buffer before returning from ma_rb_acquire_write(). */
-} ma_rb;
-ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, ma_rb* pRB);
-ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, ma_rb* pRB);
-void ma_rb_uninit(ma_rb* pRB);
-ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut);
-ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut);
-ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut);
-ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut);
-ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes);
-ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes);
-ma_int32 ma_rb_pointer_distance(ma_rb* pRB); /* Returns the distance between the write pointer and the read pointer. Should never be negative for a correct program. */
-size_t ma_rb_get_subbuffer_size(ma_rb* pRB);
-size_t ma_rb_get_subbuffer_stride(ma_rb* pRB);
-size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex);
-void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t subbufferIndex, void* pBuffer);
+Peaking EQ Filtering
+--------------------
+Peaking filtering is achieved with the following APIs:
+ |----------|------------------------------------------|
+ | API | Description |
+ |----------|------------------------------------------|
+ | ma_peak2 | Second order peaking filter |
+ |----------|------------------------------------------|
-typedef struct
-{
- ma_rb rb;
- ma_format format;
- ma_uint32 channels;
-} ma_pcm_rb;
-ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, ma_pcm_rb* pRB);
-ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, ma_pcm_rb* pRB);
-void ma_pcm_rb_uninit(ma_pcm_rb* pRB);
-ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut);
-ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut);
-ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut);
-ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut);
-ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames);
-ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames);
-ma_int32 ma_pcm_rb_pointer_disance(ma_pcm_rb* pRB); /* Return value is in frames. */
-ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB);
-ma_uint32 ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB);
-ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex);
-void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer);
+Low Shelf Filtering
+-------------------
+Low shelf filtering is achieved with the following APIs:
+ |-------------|------------------------------------------|
+ | API | Description |
+ |-------------|------------------------------------------|
+ | ma_loshelf2 | Second order low shelf filter |
+ |-------------|------------------------------------------|
-/************************************************************************************************************************************************************
+Where a high-pass filter is used to eliminate lower frequencies, a low shelf filter can be used to just turn them down rather than eliminate them entirely.
-Miscellaneous Helpers
-************************************************************************************************************************************************************/
+High Shelf Filtering
+--------------------
+High shelf filtering is achieved with the following APIs:
-/*
-malloc(). Calls MA_MALLOC().
-*/
-void* ma_malloc(size_t sz);
+ |-------------|------------------------------------------|
+ | API | Description |
+ |-------------|------------------------------------------|
+ | ma_hishelf2 | Second order high shelf filter |
+ |-------------|------------------------------------------|
-/*
-realloc(). Calls MA_REALLOC().
-*/
-void* ma_realloc(void* p, size_t sz);
+The high shelf filter has the same API as the low shelf filter, only you would use `ma_hishelf2` instead of `ma_loshelf2`. Where a low shelf filter is used to
+adjust the volume of low frequencies, the high shelf filter does the same thing for high frequencies.
-/*
-free(). Calls MA_FREE().
-*/
-void ma_free(void* p);
-/*
-Performs an aligned malloc, with the assumption that the alignment is a power of 2.
-*/
-void* ma_aligned_malloc(size_t sz, size_t alignment);
-/*
-Free's an aligned malloc'd buffer.
-*/
-void ma_aligned_free(void* p);
-/*
-Retrieves a friendly name for a format.
-*/
-const char* ma_get_format_name(ma_format format);
+Waveform and Noise Generation
+=============================
-/*
-Blends two frames in floating point format.
-*/
-void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels);
+Waveforms
+---------
+miniaudio supports generation of sine, square, triangle and sawtooth waveforms. This is achieved with the `ma_waveform` API. Example:
-/*
-Retrieves the size of a sample in bytes for the given format.
+ ```c
+ ma_waveform_config config = ma_waveform_config_init(FORMAT, CHANNELS, SAMPLE_RATE, ma_waveform_type_sine, amplitude, frequency);
-This API is efficient and is implemented using a lookup table.
+ ma_waveform waveform;
+ ma_result result = ma_waveform_init(&config, &waveform);
+ if (result != MA_SUCCESS) {
+ // Error.
+ }
-Thread Safety: SAFE
- This API is pure.
-*/
-ma_uint32 ma_get_bytes_per_sample(ma_format format);
-static MA_INLINE ma_uint32 ma_get_bytes_per_frame(ma_format format, ma_uint32 channels) { return ma_get_bytes_per_sample(format) * channels; }
+ ...
-/*
-Converts a log level to a string.
-*/
-const char* ma_log_level_to_string(ma_uint32 logLevel);
+ ma_waveform_read_pcm_frames(&waveform, pOutput, frameCount);
+ ```
+The amplitude, frequency and sample rate can be changed dynamically with `ma_waveform_set_amplitude()`, `ma_waveform_set_frequency()` and
+`ma_waveform_set_sample_rate()` respectively.
-/************************************************************************************************************************************************************
+You can reverse the waveform by setting the amplitude to a negative value. You can use this to control whether or not a sawtooth has a positive or negative
+ramp, for example.
-Format Conversion
+Below are the supported waveform types:
-************************************************************************************************************************************************************/
-void ma_pcm_u8_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_u8_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_u8_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_u8_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s16_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s16_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s16_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s16_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s24_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s24_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s24_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s24_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s32_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s32_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s32_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_s32_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_f32_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_f32_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_f32_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_f32_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
-void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode);
+ |---------------------------|
+ | Enum Name |
+ |---------------------------|
+ | ma_waveform_type_sine |
+ | ma_waveform_type_square |
+ | ma_waveform_type_triangle |
+ | ma_waveform_type_sawtooth |
+ |---------------------------|
-/*
-Deinterleaves an interleaved buffer.
-*/
-void ma_deinterleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames);
-
-/*
-Interleaves a group of deinterleaved buffers.
-*/
-void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames);
-/************************************************************************************************************************************************************
-*************************************************************************************************************************************************************
+Noise
+-----
+miniaudio supports generation of white, pink and brownian noise via the `ma_noise` API. Example:
-DEVICE I/O
-==========
+ ```c
+ ma_noise_config config = ma_noise_config_init(FORMAT, CHANNELS, ma_noise_type_white, SEED, amplitude);
-This section contains the APIs for device playback and capture. Here is where you'll find ma_device_init(), etc.
+ ma_noise noise;
+ ma_result result = ma_noise_init(&config, &noise);
+ if (result != MA_SUCCESS) {
+ // Error.
+ }
-*************************************************************************************************************************************************************
-************************************************************************************************************************************************************/
-#ifndef MA_NO_DEVICE_IO
-/* Some backends are only supported on certain platforms. */
-#if defined(MA_WIN32)
- #define MA_SUPPORT_WASAPI
- #if defined(MA_WIN32_DESKTOP) /* DirectSound and WinMM backends are only supported on desktops. */
- #define MA_SUPPORT_DSOUND
- #define MA_SUPPORT_WINMM
- #define MA_SUPPORT_JACK /* JACK is technically supported on Windows, but I don't know how many people use it in practice... */
- #endif
-#endif
-#if defined(MA_UNIX)
- #if defined(MA_LINUX)
- #if !defined(MA_ANDROID) /* ALSA is not supported on Android. */
- #define MA_SUPPORT_ALSA
- #endif
- #endif
- #if !defined(MA_BSD) && !defined(MA_ANDROID) && !defined(MA_EMSCRIPTEN)
- #define MA_SUPPORT_PULSEAUDIO
- #define MA_SUPPORT_JACK
- #endif
- #if defined(MA_ANDROID)
- #define MA_SUPPORT_AAUDIO
- #define MA_SUPPORT_OPENSL
- #endif
- #if defined(__OpenBSD__) /* <-- Change this to "#if defined(MA_BSD)" to enable sndio on all BSD flavors. */
- #define MA_SUPPORT_SNDIO /* sndio is only supported on OpenBSD for now. May be expanded later if there's demand. */
- #endif
- #if defined(__NetBSD__) || defined(__OpenBSD__)
- #define MA_SUPPORT_AUDIO4 /* Only support audio(4) on platforms with known support. */
- #endif
- #if defined(__FreeBSD__) || defined(__DragonFly__)
- #define MA_SUPPORT_OSS /* Only support OSS on specific platforms with known support. */
- #endif
-#endif
-#if defined(MA_APPLE)
- #define MA_SUPPORT_COREAUDIO
-#endif
-#if defined(MA_EMSCRIPTEN)
- #define MA_SUPPORT_WEBAUDIO
-#endif
+ ...
-/* Explicitly disable the Null backend for Emscripten because it uses a background thread which is not properly supported right now. */
-#if !defined(MA_EMSCRIPTEN)
-#define MA_SUPPORT_NULL
-#endif
+ ma_noise_read_pcm_frames(&noise, pOutput, frameCount);
+ ```
+The noise API uses simple LCG random number generation. It supports a custom seed which is useful for things like automated testing requiring reproducibility.
+Setting the seed to zero will default to MA_DEFAULT_LCG_SEED.
-#if !defined(MA_NO_WASAPI) && defined(MA_SUPPORT_WASAPI)
- #define MA_ENABLE_WASAPI
-#endif
-#if !defined(MA_NO_DSOUND) && defined(MA_SUPPORT_DSOUND)
- #define MA_ENABLE_DSOUND
-#endif
-#if !defined(MA_NO_WINMM) && defined(MA_SUPPORT_WINMM)
- #define MA_ENABLE_WINMM
-#endif
-#if !defined(MA_NO_ALSA) && defined(MA_SUPPORT_ALSA)
- #define MA_ENABLE_ALSA
-#endif
-#if !defined(MA_NO_PULSEAUDIO) && defined(MA_SUPPORT_PULSEAUDIO)
- #define MA_ENABLE_PULSEAUDIO
-#endif
-#if !defined(MA_NO_JACK) && defined(MA_SUPPORT_JACK)
- #define MA_ENABLE_JACK
-#endif
-#if !defined(MA_NO_COREAUDIO) && defined(MA_SUPPORT_COREAUDIO)
- #define MA_ENABLE_COREAUDIO
-#endif
-#if !defined(MA_NO_SNDIO) && defined(MA_SUPPORT_SNDIO)
- #define MA_ENABLE_SNDIO
-#endif
-#if !defined(MA_NO_AUDIO4) && defined(MA_SUPPORT_AUDIO4)
- #define MA_ENABLE_AUDIO4
-#endif
-#if !defined(MA_NO_OSS) && defined(MA_SUPPORT_OSS)
- #define MA_ENABLE_OSS
-#endif
-#if !defined(MA_NO_AAUDIO) && defined(MA_SUPPORT_AAUDIO)
- #define MA_ENABLE_AAUDIO
-#endif
-#if !defined(MA_NO_OPENSL) && defined(MA_SUPPORT_OPENSL)
- #define MA_ENABLE_OPENSL
-#endif
-#if !defined(MA_NO_WEBAUDIO) && defined(MA_SUPPORT_WEBAUDIO)
- #define MA_ENABLE_WEBAUDIO
-#endif
-#if !defined(MA_NO_NULL) && defined(MA_SUPPORT_NULL)
- #define MA_ENABLE_NULL
-#endif
+By default, the noise API will use different values for different channels. So, for example, the left side in a stereo stream will be different to the right
+side. To instead have each channel use the same random value, set the `duplicateChannels` member of the noise config to true, like so:
-#ifdef MA_SUPPORT_WASAPI
-/* We need a IMMNotificationClient object for WASAPI. */
-typedef struct
-{
- void* lpVtbl;
- ma_uint32 counter;
- ma_device* pDevice;
-} ma_IMMNotificationClient;
-#endif
+ ```c
+ config.duplicateChannels = MA_TRUE;
+ ```
-/* Backend enums must be in priority order. */
-typedef enum
-{
- ma_backend_wasapi,
- ma_backend_dsound,
- ma_backend_winmm,
- ma_backend_coreaudio,
- ma_backend_sndio,
- ma_backend_audio4,
- ma_backend_oss,
- ma_backend_pulseaudio,
- ma_backend_alsa,
- ma_backend_jack,
- ma_backend_aaudio,
- ma_backend_opensl,
- ma_backend_webaudio,
- ma_backend_null /* <-- Must always be the last item. Lowest priority, and used as the terminator for backend enumeration. */
-} ma_backend;
+Below are the supported noise types.
-/* Thread priorties should be ordered such that the default priority of the worker thread is 0. */
-typedef enum
-{
- ma_thread_priority_idle = -5,
- ma_thread_priority_lowest = -4,
- ma_thread_priority_low = -3,
- ma_thread_priority_normal = -2,
- ma_thread_priority_high = -1,
- ma_thread_priority_highest = 0,
- ma_thread_priority_realtime = 1,
- ma_thread_priority_default = 0
-} ma_thread_priority;
+ |------------------------|
+ | Enum Name |
+ |------------------------|
+ | ma_noise_type_white |
+ | ma_noise_type_pink |
+ | ma_noise_type_brownian |
+ |------------------------|
-typedef struct
-{
- ma_context* pContext;
- union
- {
-#ifdef MA_WIN32
- struct
- {
- /*HANDLE*/ ma_handle hThread;
- } win32;
-#endif
-#ifdef MA_POSIX
- struct
- {
- pthread_t thread;
- } posix;
-#endif
- int _unused;
- };
-} ma_thread;
-typedef struct
-{
- ma_context* pContext;
+Ring Buffers
+============
+miniaudio supports lock free (single producer, single consumer) ring buffers which are exposed via the `ma_rb` and `ma_pcm_rb` APIs. The `ma_rb` API operates
+on bytes, whereas the `ma_pcm_rb` operates on PCM frames. They are otherwise identical as `ma_pcm_rb` is just a wrapper around `ma_rb`.
- union
- {
-#ifdef MA_WIN32
- struct
- {
- /*HANDLE*/ ma_handle hMutex;
- } win32;
-#endif
-#ifdef MA_POSIX
- struct
- {
- pthread_mutex_t mutex;
- } posix;
-#endif
- int _unused;
- };
-} ma_mutex;
+Unlike most other APIs in miniaudio, ring buffers support both interleaved and deinterleaved streams. The caller can also allocate their own backing memory for
+the ring buffer to use internally for added flexibility. Otherwise the ring buffer will manage its internal memory for you.
-typedef struct
-{
- ma_context* pContext;
+The examples below use the PCM frame variant of the ring buffer since that's most likely the one you will want to use. To initialize a ring buffer, do
+something like the following:
- union
- {
-#ifdef MA_WIN32
- struct
- {
- /*HANDLE*/ ma_handle hEvent;
- } win32;
-#endif
-#ifdef MA_POSIX
- struct
- {
- pthread_mutex_t mutex;
- pthread_cond_t condition;
- ma_uint32 value;
- } posix;
-#endif
- int _unused;
- };
-} ma_event;
+ ```c
+ ma_pcm_rb rb;
+ ma_result result = ma_pcm_rb_init(FORMAT, CHANNELS, BUFFER_SIZE_IN_FRAMES, NULL, NULL, &rb);
+ if (result != MA_SUCCESS) {
+ // Error
+ }
+ ```
+The `ma_pcm_rb_init()` function takes the sample format and channel count as parameters because it's the PCM variant of the ring buffer API. For the regular
+ring buffer that operates on bytes you would call `ma_rb_init()` which leaves these out and just takes the size of the buffer in bytes instead of frames. The
+fourth parameter is an optional pre-allocated buffer and the fifth parameter is a pointer to a `ma_allocation_callbacks` structure for custom memory allocation
+routines. Passing in NULL for this results in MA_MALLOC() and MA_FREE() being used.
-/*
-The callback for processing audio data from the device.
+Use `ma_pcm_rb_init_ex()` if you need a deinterleaved buffer. The data for each sub-buffer is offset from each other based on the stride. To manage your sub-
+buffers you can use `ma_pcm_rb_get_subbuffer_stride()`, `ma_pcm_rb_get_subbuffer_offset()` and `ma_pcm_rb_get_subbuffer_ptr()`.
-pOutput is a pointer to a buffer that will receive audio data that will later be played back through the speakers. This will be non-null
-for a playback or full-duplex device and null for a capture device.
+Use `ma_pcm_rb_acquire_read()` and `ma_pcm_rb_acquire_write()` to retrieve a pointer to a section of the ring buffer. You specify the number of frames you
+need, and on output it will be set to what was actually acquired. If the read or write pointer is positioned such that the number of frames requested will require
+a loop, it will be clamped to the end of the buffer. Therefore, the number of frames you're given may be less than the number you requested.
-pInput is a pointer to a buffer containing input data from the device. This will be non-null for a capture or full-duplex device, and
-null for a playback device.
+After calling `ma_pcm_rb_acquire_read()` or `ma_pcm_rb_acquire_write()`, you do your work on the buffer and then "commit" it with `ma_pcm_rb_commit_read()` or
+`ma_pcm_rb_commit_write()`. This is where the read/write pointers are updated. When you commit you need to pass in the buffer that was returned by the earlier
+call to `ma_pcm_rb_acquire_read()` or `ma_pcm_rb_acquire_write()` and is only used for validation. The number of frames passed to `ma_pcm_rb_commit_read()` and
+`ma_pcm_rb_commit_write()` is what's used to increment the pointers.
-frameCount is the number of PCM frames to process. If an output buffer is provided (pOutput is not null), applications should write out
-to the entire output buffer.
+If you want to correct for drift between the write pointer and the read pointer you can use a combination of `ma_pcm_rb_pointer_distance()`,
+`ma_pcm_rb_seek_read()` and `ma_pcm_rb_seek_write()`. Note that you can only move the pointers forward, and you should only move the read pointer forward via
+the consumer thread, and the write pointer forward by the producer thread. If there is too much space between the pointers, move the read pointer forward. If
+there is too little space between the pointers, move the write pointer forward.
-Do _not_ call any miniaudio APIs from the callback. Attempting the stop the device can result in a deadlock. The proper way to stop the
-device is to call ma_device_stop() from a different thread, normally the main application thread.
-*/
-typedef void (* ma_device_callback_proc)(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount);
+You can use a ring buffer at the byte level instead of the PCM frame level by using the `ma_rb` API. This is exactly the same, only you will use the `ma_rb`
+functions instead of `ma_pcm_rb` and instead of frame counts you'll pass around byte counts.
-/*
-The callback for when the device has been stopped.
+The maximum size of the buffer in bytes is 0x7FFFFFFF-(MA_SIMD_ALIGNMENT-1) due to the most significant bit being used to encode a flag and the internally
+managed buffers always being aligned to MA_SIMD_ALIGNMENT.
-This will be called when the device is stopped explicitly with ma_device_stop() and also called implicitly when the device is stopped
-through external forces such as being unplugged or an internal error occuring.
+Note that the ring buffer is only thread safe when used by a single consumer thread and single producer thread.
-Do not restart the device from the callback.
-*/
-typedef void (* ma_stop_proc)(ma_device* pDevice);
-/*
-The callback for handling log messages.
-It is possible for pDevice to be null in which case the log originated from the context. If it is non-null you can assume the message
-came from the device.
+Backends
+========
+The following backends are supported by miniaudio.
+
+ |-------------|-----------------------|--------------------------------------------------------|
+ | Name | Enum Name | Supported Operating Systems |
+ |-------------|-----------------------|--------------------------------------------------------|
+ | WASAPI | ma_backend_wasapi | Windows Vista+ |
+ | DirectSound | ma_backend_dsound | Windows XP+ |
+ | WinMM | ma_backend_winmm | Windows XP+ (may work on older versions, but untested) |
+ | Core Audio | ma_backend_coreaudio | macOS, iOS |
+ | ALSA | ma_backend_alsa | Linux |
+ | PulseAudio | ma_backend_pulseaudio | Cross Platform (disabled on Windows, BSD and Android) |
+ | JACK | ma_backend_jack | Cross Platform (disabled on BSD and Android) |
+ | sndio | ma_backend_sndio | OpenBSD |
+ | audio(4) | ma_backend_audio4 | NetBSD, OpenBSD |
+ | OSS | ma_backend_oss | FreeBSD |
+ | AAudio | ma_backend_aaudio | Android 8+ |
+ | OpenSL|ES | ma_backend_opensl | Android (API level 16+) |
+ | Web Audio | ma_backend_webaudio | Web (via Emscripten) |
+ | Null | ma_backend_null | Cross Platform (not used on Web) |
+ |-------------|-----------------------|--------------------------------------------------------|
+
+Some backends have some nuance details you may want to be aware of.
+
+WASAPI
+------
+- Low-latency shared mode will be disabled when using an application-defined sample rate which is different to the device's native sample rate. To work around
+ this, set wasapi.noAutoConvertSRC to true in the device config. This is due to IAudioClient3_InitializeSharedAudioStream() failing when the
+ AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM flag is specified. Setting wasapi.noAutoConvertSRC will result in miniaudio's lower quality internal resampler being used
+ instead which will in turn enable the use of low-latency shared mode.
-logLevel is one of the following:
- MA_LOG_LEVEL_VERBOSE
- MA_LOG_LEVEL_INFO
- MA_LOG_LEVEL_WARNING
- MA_LOG_LEVEL_ERROR
-*/
-typedef void (* ma_log_proc)(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message);
+PulseAudio
+----------
+- If you experience bad glitching/noise on Arch Linux, consider this fix from the Arch wiki:
+ https://wiki.archlinux.org/index.php/PulseAudio/Troubleshooting#Glitches,_skips_or_crackling
+ Alternatively, consider using a different backend such as ALSA.
-typedef enum
-{
- ma_device_type_playback = 1,
- ma_device_type_capture = 2,
- ma_device_type_duplex = ma_device_type_playback | ma_device_type_capture,
-} ma_device_type;
+Android
+-------
+- To capture audio on Android, remember to add the RECORD_AUDIO permission to your manifest:
+
+- With OpenSL|ES, only a single ma_context can be active at any given time. This is due to a limitation with OpenSL|ES.
+- With AAudio, only default devices are enumerated. This is due to AAudio not having an enumeration API (devices are enumerated through Java). You can however
+ perform your own device enumeration through Java and then set the ID in the ma_device_id structure (ma_device_id.aaudio) and pass it to ma_device_init().
+- The backend API will perform resampling where possible. The reason for this as opposed to using miniaudio's built-in resampler is to take advantage of any
+ potential device-specific optimizations the driver may implement.
-typedef enum
-{
- ma_share_mode_shared = 0,
- ma_share_mode_exclusive,
-} ma_share_mode;
+UWP
+---
+- UWP only supports default playback and capture devices.
+- UWP requires the Microphone capability to be enabled in the application's manifest (Package.appxmanifest):
+
+ ...
+
+
+
+
-typedef union
-{
-#ifdef MA_SUPPORT_WASAPI
- wchar_t wasapi[64]; /* WASAPI uses a wchar_t string for identification. */
-#endif
-#ifdef MA_SUPPORT_DSOUND
- ma_uint8 dsound[16]; /* DirectSound uses a GUID for identification. */
-#endif
-#ifdef MA_SUPPORT_WINMM
- /*UINT_PTR*/ ma_uint32 winmm; /* When creating a device, WinMM expects a Win32 UINT_PTR for device identification. In practice it's actually just a UINT. */
-#endif
-#ifdef MA_SUPPORT_ALSA
- char alsa[256]; /* ALSA uses a name string for identification. */
-#endif
-#ifdef MA_SUPPORT_PULSEAUDIO
- char pulse[256]; /* PulseAudio uses a name string for identification. */
-#endif
-#ifdef MA_SUPPORT_JACK
- int jack; /* JACK always uses default devices. */
-#endif
-#ifdef MA_SUPPORT_COREAUDIO
- char coreaudio[256]; /* Core Audio uses a string for identification. */
+Web Audio / Emscripten
+----------------------
+- You cannot use -std=c* compiler flags, nor -ansi. This only applies to the Emscripten build.
+- The first time a context is initialized it will create a global object called "miniaudio" whose primary purpose is to act as a factory for device objects.
+- Currently the Web Audio backend uses ScriptProcessorNode's, but this may need to change later as they've been deprecated.
+- Google has implemented a policy in their browsers that prevent automatic media output without first receiving some kind of user input. The following web page
+ has additional details: https://developers.google.com/web/updates/2017/09/autoplay-policy-changes. Starting the device may fail if you try to start playback
+ without first handling some kind of user input.
+
+
+
+Miscellaneous Notes
+===================
+- Automatic stream routing is enabled on a per-backend basis. Support is explicitly enabled for WASAPI and Core Audio, however other backends such as
+ PulseAudio may naturally support it, though not all have been tested.
+- The contents of the output buffer passed into the data callback will always be pre-initialized to zero unless the noPreZeroedOutputBuffer config variable in
+ ma_device_config is set to true, in which case it'll be undefined which will require you to write something to the entire buffer.
+- By default miniaudio will automatically clip samples. This only applies when the playback sample format is configured as ma_format_f32. If you are doing
+ clipping yourself, you can disable this overhead by setting noClip to true in the device config.
+- The sndio backend is currently only enabled on OpenBSD builds.
+- The audio(4) backend is supported on OpenBSD, but you may need to disable sndiod before you can use it.
+- Note that GCC and Clang requires "-msse2", "-mavx2", etc. for SIMD optimizations.
+*/
+
+#ifndef miniaudio_h
+#define miniaudio_h
+
+#ifdef __cplusplus
+extern "C" {
#endif
-#ifdef MA_SUPPORT_SNDIO
- char sndio[256]; /* "snd/0", etc. */
+
+#if defined(_MSC_VER) && !defined(__clang__)
+ #pragma warning(push)
+ #pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union */
+ #pragma warning(disable:4324) /* structure was padded due to alignment specifier */
+#else
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wpedantic" /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */
+ #if defined(__clang__)
+ #pragma GCC diagnostic ignored "-Wc11-extensions" /* anonymous unions are a C11 extension */
+ #endif
#endif
-#ifdef MA_SUPPORT_AUDIO4
- char audio4[256]; /* "/dev/audio", etc. */
+
+/* Platform/backend detection. */
+#ifdef _WIN32
+ #define MA_WIN32
+ #if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_PC_APP || WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP)
+ #define MA_WIN32_UWP
+ #else
+ #define MA_WIN32_DESKTOP
+ #endif
+#else
+ #define MA_POSIX
+ #include /* Unfortunate #include, but needed for pthread_t, pthread_mutex_t and pthread_cond_t types. */
+ #include
+
+ #ifdef __unix__
+ #define MA_UNIX
+ #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+ #define MA_BSD
+ #endif
+ #endif
+ #ifdef __linux__
+ #define MA_LINUX
+ #endif
+ #ifdef __APPLE__
+ #define MA_APPLE
+ #endif
+ #ifdef __ANDROID__
+ #define MA_ANDROID
+ #endif
+ #ifdef __EMSCRIPTEN__
+ #define MA_EMSCRIPTEN
+ #endif
#endif
-#ifdef MA_SUPPORT_OSS
- char oss[64]; /* "dev/dsp0", etc. "dev/dsp" for the default device. */
+
+#include /* For size_t. */
+
+/* Sized types. Prefer built-in types. Fall back to stdint. */
+#ifdef _MSC_VER
+ #if defined(__clang__)
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wlanguage-extension-token"
+ #pragma GCC diagnostic ignored "-Wlong-long"
+ #pragma GCC diagnostic ignored "-Wc++11-long-long"
+ #endif
+ typedef signed __int8 ma_int8;
+ typedef unsigned __int8 ma_uint8;
+ typedef signed __int16 ma_int16;
+ typedef unsigned __int16 ma_uint16;
+ typedef signed __int32 ma_int32;
+ typedef unsigned __int32 ma_uint32;
+ typedef signed __int64 ma_int64;
+ typedef unsigned __int64 ma_uint64;
+ #if defined(__clang__)
+ #pragma GCC diagnostic pop
+ #endif
+#else
+ #define MA_HAS_STDINT
+ #include
+ typedef int8_t ma_int8;
+ typedef uint8_t ma_uint8;
+ typedef int16_t ma_int16;
+ typedef uint16_t ma_uint16;
+ typedef int32_t ma_int32;
+ typedef uint32_t ma_uint32;
+ typedef int64_t ma_int64;
+ typedef uint64_t ma_uint64;
#endif
-#ifdef MA_SUPPORT_AAUDIO
- ma_int32 aaudio; /* AAudio uses a 32-bit integer for identification. */
+
+#ifdef MA_HAS_STDINT
+ typedef uintptr_t ma_uintptr;
+#else
+ #if defined(_WIN32)
+ #if defined(_WIN64)
+ typedef ma_uint64 ma_uintptr;
+ #else
+ typedef ma_uint32 ma_uintptr;
+ #endif
+ #elif defined(__GNUC__)
+ #if defined(__LP64__)
+ typedef ma_uint64 ma_uintptr;
+ #else
+ typedef ma_uint32 ma_uintptr;
+ #endif
+ #else
+ typedef ma_uint64 ma_uintptr; /* Fallback. */
+ #endif
#endif
-#ifdef MA_SUPPORT_OPENSL
- ma_uint32 opensl; /* OpenSL|ES uses a 32-bit unsigned integer for identification. */
+
+typedef ma_uint8 ma_bool8;
+typedef ma_uint32 ma_bool32;
+#define MA_TRUE 1
+#define MA_FALSE 0
+
+typedef void* ma_handle;
+typedef void* ma_ptr;
+typedef void (* ma_proc)(void);
+
+#if defined(_MSC_VER) && !defined(_WCHAR_T_DEFINED)
+typedef ma_uint16 wchar_t;
#endif
-#ifdef MA_SUPPORT_WEBAUDIO
- char webaudio[32]; /* Web Audio always uses default devices for now, but if this changes it'll be a GUID. */
+
+/* Define NULL for some compilers. */
+#ifndef NULL
+#define NULL 0
#endif
-#ifdef MA_SUPPORT_NULL
- int nullbackend; /* The null backend uses an integer for device IDs. */
+
+#if defined(SIZE_MAX)
+ #define MA_SIZE_MAX SIZE_MAX
+#else
+ #define MA_SIZE_MAX 0xFFFFFFFF /* When SIZE_MAX is not defined by the standard library just default to the maximum 32-bit unsigned integer. */
#endif
-} ma_device_id;
-typedef struct
-{
- /* Basic info. This is the only information guaranteed to be filled in during device enumeration. */
- ma_device_id id;
- char name[256];
+#ifdef _MSC_VER
+ #define MA_INLINE __forceinline
+#elif defined(__GNUC__)
/*
- Detailed info. As much of this is filled as possible with ma_context_get_device_info(). Note that you are allowed to initialize
- a device with settings outside of this range, but it just means the data will be converted using miniaudio's data conversion
- pipeline before sending the data to/from the device. Most programs will need to not worry about these values, but it's provided
- here mainly for informational purposes or in the rare case that someone might find it useful.
-
- These will be set to 0 when returned by ma_context_enumerate_devices() or ma_context_get_devices().
+ I've had a bug report where GCC is emitting warnings about functions possibly not being inlineable. This warning happens when
+ the __attribute__((always_inline)) attribute is defined without an "inline" statement. I think therefore there must be some
+ case where "__inline__" is not always defined, thus the compiler emitting these warnings. When using -std=c89 or -ansi on the
+ command line, we cannot use the "inline" keyword and instead need to use "__inline__". In an attempt to work around this issue
+ I am using "__inline__" only when we're compiling in strict ANSI mode.
*/
- ma_uint32 formatCount;
- ma_format formats[ma_format_count];
- ma_uint32 minChannels;
- ma_uint32 maxChannels;
- ma_uint32 minSampleRate;
- ma_uint32 maxSampleRate;
-} ma_device_info;
+ #if defined(__STRICT_ANSI__)
+ #define MA_INLINE __inline__ __attribute__((always_inline))
+ #else
+ #define MA_INLINE inline __attribute__((always_inline))
+ #endif
+#else
+ #define MA_INLINE
+#endif
-typedef union
-{
- ma_int64 counter;
- double counterD;
-} ma_timer;
+#if defined(_MSC_VER)
+ #if _MSC_VER >= 1400
+ #define MA_ALIGN(alignment) __declspec(align(alignment))
+ #endif
+#elif !defined(__DMC__)
+ #define MA_ALIGN(alignment) __attribute__((aligned(alignment)))
+#endif
+#ifndef MA_ALIGN
+ #define MA_ALIGN(alignment)
+#endif
-typedef struct
-{
- ma_device_type deviceType;
- ma_uint32 sampleRate;
- ma_uint32 bufferSizeInFrames;
- ma_uint32 bufferSizeInMilliseconds;
- ma_uint32 periods;
- ma_performance_profile performanceProfile;
- ma_device_callback_proc dataCallback;
- ma_stop_proc stopCallback;
- void* pUserData;
- struct
- {
- ma_device_id* pDeviceID;
- ma_format format;
- ma_uint32 channels;
- ma_channel channelMap[MA_MAX_CHANNELS];
- ma_share_mode shareMode;
- } playback;
- struct
- {
- ma_device_id* pDeviceID;
- ma_format format;
- ma_uint32 channels;
- ma_channel channelMap[MA_MAX_CHANNELS];
- ma_share_mode shareMode;
- } capture;
+/* SIMD alignment in bytes. Currently set to 64 bytes in preparation for future AVX-512 optimizations. */
+#define MA_SIMD_ALIGNMENT 64
- struct
- {
- ma_bool32 noMMap; /* Disables MMap mode. */
- } alsa;
- struct
- {
- const char* pStreamNamePlayback;
- const char* pStreamNameCapture;
- } pulse;
-} ma_device_config;
-typedef struct
-{
- ma_log_proc logCallback;
- ma_thread_priority threadPriority;
- void* pUserData;
+/* Logging levels */
+#define MA_LOG_LEVEL_VERBOSE 4
+#define MA_LOG_LEVEL_INFO 3
+#define MA_LOG_LEVEL_WARNING 2
+#define MA_LOG_LEVEL_ERROR 1
- struct
- {
- ma_bool32 useVerboseDeviceEnumeration;
- } alsa;
- struct
- {
- const char* pApplicationName;
- const char* pServerName;
- ma_bool32 tryAutoSpawn; /* Enables autospawning of the PulseAudio daemon if necessary. */
- } pulse;
- struct
- {
- const char* pClientName;
- ma_bool32 tryStartServer;
- } jack;
-} ma_context_config;
+#ifndef MA_LOG_LEVEL
+#define MA_LOG_LEVEL MA_LOG_LEVEL_ERROR
+#endif
-typedef ma_bool32 (* ma_enum_devices_callback_proc)(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData);
+typedef struct ma_context ma_context;
+typedef struct ma_device ma_device;
-struct ma_context
-{
- ma_backend backend; /* DirectSound, ALSA, etc. */
- ma_log_proc logCallback;
- ma_thread_priority threadPriority;
- void* pUserData;
- ma_mutex deviceEnumLock; /* Used to make ma_context_get_devices() thread safe. */
- ma_mutex deviceInfoLock; /* Used to make ma_context_get_device_info() thread safe. */
- ma_uint32 deviceInfoCapacity; /* Total capacity of pDeviceInfos. */
- ma_uint32 playbackDeviceInfoCount;
- ma_uint32 captureDeviceInfoCount;
- ma_device_info* pDeviceInfos; /* Playback devices first, then capture. */
- ma_bool32 isBackendAsynchronous : 1; /* Set when the context is initialized. Set to 1 for asynchronous backends such as Core Audio and JACK. Do not modify. */
+typedef ma_uint8 ma_channel;
+#define MA_CHANNEL_NONE 0
+#define MA_CHANNEL_MONO 1
+#define MA_CHANNEL_FRONT_LEFT 2
+#define MA_CHANNEL_FRONT_RIGHT 3
+#define MA_CHANNEL_FRONT_CENTER 4
+#define MA_CHANNEL_LFE 5
+#define MA_CHANNEL_BACK_LEFT 6
+#define MA_CHANNEL_BACK_RIGHT 7
+#define MA_CHANNEL_FRONT_LEFT_CENTER 8
+#define MA_CHANNEL_FRONT_RIGHT_CENTER 9
+#define MA_CHANNEL_BACK_CENTER 10
+#define MA_CHANNEL_SIDE_LEFT 11
+#define MA_CHANNEL_SIDE_RIGHT 12
+#define MA_CHANNEL_TOP_CENTER 13
+#define MA_CHANNEL_TOP_FRONT_LEFT 14
+#define MA_CHANNEL_TOP_FRONT_CENTER 15
+#define MA_CHANNEL_TOP_FRONT_RIGHT 16
+#define MA_CHANNEL_TOP_BACK_LEFT 17
+#define MA_CHANNEL_TOP_BACK_CENTER 18
+#define MA_CHANNEL_TOP_BACK_RIGHT 19
+#define MA_CHANNEL_AUX_0 20
+#define MA_CHANNEL_AUX_1 21
+#define MA_CHANNEL_AUX_2 22
+#define MA_CHANNEL_AUX_3 23
+#define MA_CHANNEL_AUX_4 24
+#define MA_CHANNEL_AUX_5 25
+#define MA_CHANNEL_AUX_6 26
+#define MA_CHANNEL_AUX_7 27
+#define MA_CHANNEL_AUX_8 28
+#define MA_CHANNEL_AUX_9 29
+#define MA_CHANNEL_AUX_10 30
+#define MA_CHANNEL_AUX_11 31
+#define MA_CHANNEL_AUX_12 32
+#define MA_CHANNEL_AUX_13 33
+#define MA_CHANNEL_AUX_14 34
+#define MA_CHANNEL_AUX_15 35
+#define MA_CHANNEL_AUX_16 36
+#define MA_CHANNEL_AUX_17 37
+#define MA_CHANNEL_AUX_18 38
+#define MA_CHANNEL_AUX_19 39
+#define MA_CHANNEL_AUX_20 40
+#define MA_CHANNEL_AUX_21 41
+#define MA_CHANNEL_AUX_22 42
+#define MA_CHANNEL_AUX_23 43
+#define MA_CHANNEL_AUX_24 44
+#define MA_CHANNEL_AUX_25 45
+#define MA_CHANNEL_AUX_26 46
+#define MA_CHANNEL_AUX_27 47
+#define MA_CHANNEL_AUX_28 48
+#define MA_CHANNEL_AUX_29 49
+#define MA_CHANNEL_AUX_30 50
+#define MA_CHANNEL_AUX_31 51
+#define MA_CHANNEL_LEFT MA_CHANNEL_FRONT_LEFT
+#define MA_CHANNEL_RIGHT MA_CHANNEL_FRONT_RIGHT
+#define MA_CHANNEL_POSITION_COUNT (MA_CHANNEL_AUX_31 + 1)
- ma_result (* onUninit )(ma_context* pContext);
- ma_bool32 (* onDeviceIDEqual )(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1);
- ma_result (* onEnumDevices )(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData); /* Return false from the callback to stop enumeration. */
- ma_result (* onGetDeviceInfo )(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo);
- ma_result (* onDeviceInit )(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice);
- void (* onDeviceUninit )(ma_device* pDevice);
- ma_result (* onDeviceStart )(ma_device* pDevice);
- ma_result (* onDeviceStop )(ma_device* pDevice);
- ma_result (* onDeviceWrite )(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount); /* Data is in internal device format. */
- ma_result (* onDeviceRead )(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount); /* Data is in internal device format. */
- ma_result (* onDeviceMainLoop)(ma_device* pDevice);
- union
- {
-#ifdef MA_SUPPORT_WASAPI
- struct
- {
- int _unused;
- } wasapi;
-#endif
-#ifdef MA_SUPPORT_DSOUND
- struct
- {
- ma_handle hDSoundDLL;
- ma_proc DirectSoundCreate;
- ma_proc DirectSoundEnumerateA;
- ma_proc DirectSoundCaptureCreate;
- ma_proc DirectSoundCaptureEnumerateA;
- } dsound;
-#endif
-#ifdef MA_SUPPORT_WINMM
- struct
- {
- ma_handle hWinMM;
- ma_proc waveOutGetNumDevs;
- ma_proc waveOutGetDevCapsA;
- ma_proc waveOutOpen;
- ma_proc waveOutClose;
- ma_proc waveOutPrepareHeader;
- ma_proc waveOutUnprepareHeader;
- ma_proc waveOutWrite;
- ma_proc waveOutReset;
- ma_proc waveInGetNumDevs;
- ma_proc waveInGetDevCapsA;
- ma_proc waveInOpen;
- ma_proc waveInClose;
- ma_proc waveInPrepareHeader;
- ma_proc waveInUnprepareHeader;
- ma_proc waveInAddBuffer;
- ma_proc waveInStart;
- ma_proc waveInReset;
- } winmm;
-#endif
-#ifdef MA_SUPPORT_ALSA
- struct
- {
- ma_handle asoundSO;
- ma_proc snd_pcm_open;
- ma_proc snd_pcm_close;
- ma_proc snd_pcm_hw_params_sizeof;
- ma_proc snd_pcm_hw_params_any;
- ma_proc snd_pcm_hw_params_set_format;
- ma_proc snd_pcm_hw_params_set_format_first;
- ma_proc snd_pcm_hw_params_get_format_mask;
- ma_proc snd_pcm_hw_params_set_channels_near;
- ma_proc snd_pcm_hw_params_set_rate_resample;
- ma_proc snd_pcm_hw_params_set_rate_near;
- ma_proc snd_pcm_hw_params_set_buffer_size_near;
- ma_proc snd_pcm_hw_params_set_periods_near;
- ma_proc snd_pcm_hw_params_set_access;
- ma_proc snd_pcm_hw_params_get_format;
- ma_proc snd_pcm_hw_params_get_channels;
- ma_proc snd_pcm_hw_params_get_channels_min;
- ma_proc snd_pcm_hw_params_get_channels_max;
- ma_proc snd_pcm_hw_params_get_rate;
- ma_proc snd_pcm_hw_params_get_rate_min;
- ma_proc snd_pcm_hw_params_get_rate_max;
- ma_proc snd_pcm_hw_params_get_buffer_size;
- ma_proc snd_pcm_hw_params_get_periods;
- ma_proc snd_pcm_hw_params_get_access;
- ma_proc snd_pcm_hw_params;
- ma_proc snd_pcm_sw_params_sizeof;
- ma_proc snd_pcm_sw_params_current;
- ma_proc snd_pcm_sw_params_get_boundary;
- ma_proc snd_pcm_sw_params_set_avail_min;
- ma_proc snd_pcm_sw_params_set_start_threshold;
- ma_proc snd_pcm_sw_params_set_stop_threshold;
- ma_proc snd_pcm_sw_params;
- ma_proc snd_pcm_format_mask_sizeof;
- ma_proc snd_pcm_format_mask_test;
- ma_proc snd_pcm_get_chmap;
- ma_proc snd_pcm_state;
- ma_proc snd_pcm_prepare;
- ma_proc snd_pcm_start;
- ma_proc snd_pcm_drop;
- ma_proc snd_pcm_drain;
- ma_proc snd_device_name_hint;
- ma_proc snd_device_name_get_hint;
- ma_proc snd_card_get_index;
- ma_proc snd_device_name_free_hint;
- ma_proc snd_pcm_mmap_begin;
- ma_proc snd_pcm_mmap_commit;
- ma_proc snd_pcm_recover;
- ma_proc snd_pcm_readi;
- ma_proc snd_pcm_writei;
- ma_proc snd_pcm_avail;
- ma_proc snd_pcm_avail_update;
- ma_proc snd_pcm_wait;
- ma_proc snd_pcm_info;
- ma_proc snd_pcm_info_sizeof;
- ma_proc snd_pcm_info_get_name;
- ma_proc snd_config_update_free_global;
+typedef int ma_result;
+#define MA_SUCCESS 0
+#define MA_ERROR -1 /* A generic error. */
+#define MA_INVALID_ARGS -2
+#define MA_INVALID_OPERATION -3
+#define MA_OUT_OF_MEMORY -4
+#define MA_OUT_OF_RANGE -5
+#define MA_ACCESS_DENIED -6
+#define MA_DOES_NOT_EXIST -7
+#define MA_ALREADY_EXISTS -8
+#define MA_TOO_MANY_OPEN_FILES -9
+#define MA_INVALID_FILE -10
+#define MA_TOO_BIG -11
+#define MA_PATH_TOO_LONG -12
+#define MA_NAME_TOO_LONG -13
+#define MA_NOT_DIRECTORY -14
+#define MA_IS_DIRECTORY -15
+#define MA_DIRECTORY_NOT_EMPTY -16
+#define MA_END_OF_FILE -17
+#define MA_NO_SPACE -18
+#define MA_BUSY -19
+#define MA_IO_ERROR -20
+#define MA_INTERRUPT -21
+#define MA_UNAVAILABLE -22
+#define MA_ALREADY_IN_USE -23
+#define MA_BAD_ADDRESS -24
+#define MA_BAD_SEEK -25
+#define MA_BAD_PIPE -26
+#define MA_DEADLOCK -27
+#define MA_TOO_MANY_LINKS -28
+#define MA_NOT_IMPLEMENTED -29
+#define MA_NO_MESSAGE -30
+#define MA_BAD_MESSAGE -31
+#define MA_NO_DATA_AVAILABLE -32
+#define MA_INVALID_DATA -33
+#define MA_TIMEOUT -34
+#define MA_NO_NETWORK -35
+#define MA_NOT_UNIQUE -36
+#define MA_NOT_SOCKET -37
+#define MA_NO_ADDRESS -38
+#define MA_BAD_PROTOCOL -39
+#define MA_PROTOCOL_UNAVAILABLE -40
+#define MA_PROTOCOL_NOT_SUPPORTED -41
+#define MA_PROTOCOL_FAMILY_NOT_SUPPORTED -42
+#define MA_ADDRESS_FAMILY_NOT_SUPPORTED -43
+#define MA_SOCKET_NOT_SUPPORTED -44
+#define MA_CONNECTION_RESET -45
+#define MA_ALREADY_CONNECTED -46
+#define MA_NOT_CONNECTED -47
+#define MA_CONNECTION_REFUSED -48
+#define MA_NO_HOST -49
+#define MA_IN_PROGRESS -50
+#define MA_CANCELLED -51
+#define MA_MEMORY_ALREADY_MAPPED -52
+#define MA_AT_END -53
- ma_mutex internalDeviceEnumLock;
- ma_bool32 useVerboseDeviceEnumeration;
- } alsa;
-#endif
-#ifdef MA_SUPPORT_PULSEAUDIO
- struct
- {
- ma_handle pulseSO;
- ma_proc pa_mainloop_new;
- ma_proc pa_mainloop_free;
- ma_proc pa_mainloop_get_api;
- ma_proc pa_mainloop_iterate;
- ma_proc pa_mainloop_wakeup;
- ma_proc pa_context_new;
- ma_proc pa_context_unref;
- ma_proc pa_context_connect;
- ma_proc pa_context_disconnect;
- ma_proc pa_context_set_state_callback;
- ma_proc pa_context_get_state;
- ma_proc pa_context_get_sink_info_list;
- ma_proc pa_context_get_source_info_list;
- ma_proc pa_context_get_sink_info_by_name;
- ma_proc pa_context_get_source_info_by_name;
- ma_proc pa_operation_unref;
- ma_proc pa_operation_get_state;
- ma_proc pa_channel_map_init_extend;
- ma_proc pa_channel_map_valid;
- ma_proc pa_channel_map_compatible;
- ma_proc pa_stream_new;
- ma_proc pa_stream_unref;
- ma_proc pa_stream_connect_playback;
- ma_proc pa_stream_connect_record;
- ma_proc pa_stream_disconnect;
- ma_proc pa_stream_get_state;
- ma_proc pa_stream_get_sample_spec;
- ma_proc pa_stream_get_channel_map;
- ma_proc pa_stream_get_buffer_attr;
- ma_proc pa_stream_set_buffer_attr;
- ma_proc pa_stream_get_device_name;
- ma_proc pa_stream_set_write_callback;
- ma_proc pa_stream_set_read_callback;
- ma_proc pa_stream_flush;
- ma_proc pa_stream_drain;
- ma_proc pa_stream_is_corked;
- ma_proc pa_stream_cork;
- ma_proc pa_stream_trigger;
- ma_proc pa_stream_begin_write;
- ma_proc pa_stream_write;
- ma_proc pa_stream_peek;
- ma_proc pa_stream_drop;
- ma_proc pa_stream_writable_size;
- ma_proc pa_stream_readable_size;
+/* General miniaudio-specific errors. */
+#define MA_FORMAT_NOT_SUPPORTED -100
+#define MA_DEVICE_TYPE_NOT_SUPPORTED -101
+#define MA_SHARE_MODE_NOT_SUPPORTED -102
+#define MA_NO_BACKEND -103
+#define MA_NO_DEVICE -104
+#define MA_API_NOT_FOUND -105
+#define MA_INVALID_DEVICE_CONFIG -106
- char* pApplicationName;
- char* pServerName;
- ma_bool32 tryAutoSpawn;
- } pulse;
-#endif
-#ifdef MA_SUPPORT_JACK
- struct
- {
- ma_handle jackSO;
- ma_proc jack_client_open;
- ma_proc jack_client_close;
- ma_proc jack_client_name_size;
- ma_proc jack_set_process_callback;
- ma_proc jack_set_buffer_size_callback;
- ma_proc jack_on_shutdown;
- ma_proc jack_get_sample_rate;
- ma_proc jack_get_buffer_size;
- ma_proc jack_get_ports;
- ma_proc jack_activate;
- ma_proc jack_deactivate;
- ma_proc jack_connect;
- ma_proc jack_port_register;
- ma_proc jack_port_name;
- ma_proc jack_port_get_buffer;
- ma_proc jack_free;
+/* State errors. */
+#define MA_DEVICE_NOT_INITIALIZED -200
+#define MA_DEVICE_ALREADY_INITIALIZED -201
+#define MA_DEVICE_NOT_STARTED -202
+#define MA_DEVICE_NOT_STOPPED -203
- char* pClientName;
- ma_bool32 tryStartServer;
- } jack;
-#endif
-#ifdef MA_SUPPORT_COREAUDIO
- struct
- {
- ma_handle hCoreFoundation;
- ma_proc CFStringGetCString;
-
- ma_handle hCoreAudio;
- ma_proc AudioObjectGetPropertyData;
- ma_proc AudioObjectGetPropertyDataSize;
- ma_proc AudioObjectSetPropertyData;
- ma_proc AudioObjectAddPropertyListener;
-
- ma_handle hAudioUnit; /* Could possibly be set to AudioToolbox on later versions of macOS. */
- ma_proc AudioComponentFindNext;
- ma_proc AudioComponentInstanceDispose;
- ma_proc AudioComponentInstanceNew;
- ma_proc AudioOutputUnitStart;
- ma_proc AudioOutputUnitStop;
- ma_proc AudioUnitAddPropertyListener;
- ma_proc AudioUnitGetPropertyInfo;
- ma_proc AudioUnitGetProperty;
- ma_proc AudioUnitSetProperty;
- ma_proc AudioUnitInitialize;
- ma_proc AudioUnitRender;
-
- /*AudioComponent*/ ma_ptr component;
- } coreaudio;
-#endif
-#ifdef MA_SUPPORT_SNDIO
- struct
- {
- ma_handle sndioSO;
- ma_proc sio_open;
- ma_proc sio_close;
- ma_proc sio_setpar;
- ma_proc sio_getpar;
- ma_proc sio_getcap;
- ma_proc sio_start;
- ma_proc sio_stop;
- ma_proc sio_read;
- ma_proc sio_write;
- ma_proc sio_onmove;
- ma_proc sio_nfds;
- ma_proc sio_pollfd;
- ma_proc sio_revents;
- ma_proc sio_eof;
- ma_proc sio_setvol;
- ma_proc sio_onvol;
- ma_proc sio_initpar;
- } sndio;
-#endif
-#ifdef MA_SUPPORT_AUDIO4
- struct
- {
- int _unused;
- } audio4;
-#endif
-#ifdef MA_SUPPORT_OSS
- struct
- {
- int versionMajor;
- int versionMinor;
- } oss;
-#endif
-#ifdef MA_SUPPORT_AAUDIO
- struct
- {
- ma_handle hAAudio; /* libaaudio.so */
- ma_proc AAudio_createStreamBuilder;
- ma_proc AAudioStreamBuilder_delete;
- ma_proc AAudioStreamBuilder_setDeviceId;
- ma_proc AAudioStreamBuilder_setDirection;
- ma_proc AAudioStreamBuilder_setSharingMode;
- ma_proc AAudioStreamBuilder_setFormat;
- ma_proc AAudioStreamBuilder_setChannelCount;
- ma_proc AAudioStreamBuilder_setSampleRate;
- ma_proc AAudioStreamBuilder_setBufferCapacityInFrames;
- ma_proc AAudioStreamBuilder_setFramesPerDataCallback;
- ma_proc AAudioStreamBuilder_setDataCallback;
- ma_proc AAudioStreamBuilder_setPerformanceMode;
- ma_proc AAudioStreamBuilder_openStream;
- ma_proc AAudioStream_close;
- ma_proc AAudioStream_getState;
- ma_proc AAudioStream_waitForStateChange;
- ma_proc AAudioStream_getFormat;
- ma_proc AAudioStream_getChannelCount;
- ma_proc AAudioStream_getSampleRate;
- ma_proc AAudioStream_getBufferCapacityInFrames;
- ma_proc AAudioStream_getFramesPerDataCallback;
- ma_proc AAudioStream_getFramesPerBurst;
- ma_proc AAudioStream_requestStart;
- ma_proc AAudioStream_requestStop;
- } aaudio;
-#endif
-#ifdef MA_SUPPORT_OPENSL
- struct
- {
- int _unused;
- } opensl;
-#endif
-#ifdef MA_SUPPORT_WEBAUDIO
- struct
- {
- int _unused;
- } webaudio;
-#endif
-#ifdef MA_SUPPORT_NULL
- struct
- {
- int _unused;
- } null_backend;
-#endif
- };
+/* Operation errors. */
+#define MA_FAILED_TO_INIT_BACKEND -300
+#define MA_FAILED_TO_OPEN_BACKEND_DEVICE -301
+#define MA_FAILED_TO_START_BACKEND_DEVICE -302
+#define MA_FAILED_TO_STOP_BACKEND_DEVICE -303
- union
- {
-#ifdef MA_WIN32
- struct
- {
- /*HMODULE*/ ma_handle hOle32DLL;
- ma_proc CoInitializeEx;
- ma_proc CoUninitialize;
- ma_proc CoCreateInstance;
- ma_proc CoTaskMemFree;
- ma_proc PropVariantClear;
- ma_proc StringFromGUID2;
- /*HMODULE*/ ma_handle hUser32DLL;
- ma_proc GetForegroundWindow;
- ma_proc GetDesktopWindow;
+/* Standard sample rates. */
+#define MA_SAMPLE_RATE_8000 8000
+#define MA_SAMPLE_RATE_11025 11025
+#define MA_SAMPLE_RATE_16000 16000
+#define MA_SAMPLE_RATE_22050 22050
+#define MA_SAMPLE_RATE_24000 24000
+#define MA_SAMPLE_RATE_32000 32000
+#define MA_SAMPLE_RATE_44100 44100
+#define MA_SAMPLE_RATE_48000 48000
+#define MA_SAMPLE_RATE_88200 88200
+#define MA_SAMPLE_RATE_96000 96000
+#define MA_SAMPLE_RATE_176400 176400
+#define MA_SAMPLE_RATE_192000 192000
+#define MA_SAMPLE_RATE_352800 352800
+#define MA_SAMPLE_RATE_384000 384000
- /*HMODULE*/ ma_handle hAdvapi32DLL;
- ma_proc RegOpenKeyExA;
- ma_proc RegCloseKey;
- ma_proc RegQueryValueExA;
- } win32;
-#endif
-#ifdef MA_POSIX
- struct
- {
- ma_handle pthreadSO;
- ma_proc pthread_create;
- ma_proc pthread_join;
- ma_proc pthread_mutex_init;
- ma_proc pthread_mutex_destroy;
- ma_proc pthread_mutex_lock;
- ma_proc pthread_mutex_unlock;
- ma_proc pthread_cond_init;
- ma_proc pthread_cond_destroy;
- ma_proc pthread_cond_wait;
- ma_proc pthread_cond_signal;
- ma_proc pthread_attr_init;
- ma_proc pthread_attr_destroy;
- ma_proc pthread_attr_setschedpolicy;
- ma_proc pthread_attr_getschedparam;
- ma_proc pthread_attr_setschedparam;
- } posix;
+#define MA_MIN_CHANNELS 1
+#define MA_MAX_CHANNELS 32
+#define MA_MIN_SAMPLE_RATE MA_SAMPLE_RATE_8000
+#define MA_MAX_SAMPLE_RATE MA_SAMPLE_RATE_384000
+
+#ifndef MA_MAX_FILTER_ORDER
+#define MA_MAX_FILTER_ORDER 8
#endif
- int _unused;
- };
-};
-MA_ALIGNED_STRUCT(MA_SIMD_ALIGNMENT) ma_device
+typedef enum
{
- ma_context* pContext;
- ma_device_type type;
- ma_uint32 sampleRate;
- ma_uint32 state;
- ma_device_callback_proc onData;
- ma_stop_proc onStop;
- void* pUserData; /* Application defined data. */
- ma_mutex lock;
- ma_event wakeupEvent;
- ma_event startEvent;
- ma_event stopEvent;
- ma_thread thread;
- ma_result workResult; /* This is set by the worker thread after it's finished doing a job. */
- ma_bool32 usingDefaultSampleRate : 1;
- ma_bool32 usingDefaultBufferSize : 1;
- ma_bool32 usingDefaultPeriods : 1;
- ma_bool32 isOwnerOfContext : 1; /* When set to true, uninitializing the device will also uninitialize the context. Set to true when NULL is passed into ma_device_init(). */
- struct
- {
- char name[256]; /* Maybe temporary. Likely to be replaced with a query API. */
- ma_share_mode shareMode; /* Set to whatever was passed in when the device was initialized. */
- ma_bool32 usingDefaultFormat : 1;
- ma_bool32 usingDefaultChannels : 1;
- ma_bool32 usingDefaultChannelMap : 1;
- ma_format format;
- ma_uint32 channels;
- ma_channel channelMap[MA_MAX_CHANNELS];
- ma_format internalFormat;
- ma_uint32 internalChannels;
- ma_uint32 internalSampleRate;
- ma_channel internalChannelMap[MA_MAX_CHANNELS];
- ma_uint32 internalBufferSizeInFrames;
- ma_uint32 internalPeriods;
- ma_pcm_converter converter;
- ma_uint32 _dspFrameCount; /* Internal use only. Used as the data source when reading from the device. */
- const ma_uint8* _dspFrames; /* ^^^ AS ABOVE ^^^ */
- } playback;
- struct
- {
- char name[256]; /* Maybe temporary. Likely to be replaced with a query API. */
- ma_share_mode shareMode; /* Set to whatever was passed in when the device was initialized. */
- ma_bool32 usingDefaultFormat : 1;
- ma_bool32 usingDefaultChannels : 1;
- ma_bool32 usingDefaultChannelMap : 1;
- ma_format format;
- ma_uint32 channels;
- ma_channel channelMap[MA_MAX_CHANNELS];
- ma_format internalFormat;
- ma_uint32 internalChannels;
- ma_uint32 internalSampleRate;
- ma_channel internalChannelMap[MA_MAX_CHANNELS];
- ma_uint32 internalBufferSizeInFrames;
- ma_uint32 internalPeriods;
- ma_pcm_converter converter;
- ma_uint32 _dspFrameCount; /* Internal use only. Used as the data source when reading from the device. */
- const ma_uint8* _dspFrames; /* ^^^ AS ABOVE ^^^ */
- } capture;
+ ma_stream_format_pcm = 0
+} ma_stream_format;
- union
- {
-#ifdef MA_SUPPORT_WASAPI
- struct
- {
- /*IAudioClient**/ ma_ptr pAudioClientPlayback;
- /*IAudioClient**/ ma_ptr pAudioClientCapture;
- /*IAudioRenderClient**/ ma_ptr pRenderClient;
- /*IAudioCaptureClient**/ ma_ptr pCaptureClient;
- /*IMMDeviceEnumerator**/ ma_ptr pDeviceEnumerator; /* Used for IMMNotificationClient notifications. Required for detecting default device changes. */
- ma_IMMNotificationClient notificationClient;
- /*HANDLE*/ ma_handle hEventPlayback; /* Auto reset. Initialized to signaled. */
- /*HANDLE*/ ma_handle hEventCapture; /* Auto reset. Initialized to unsignaled. */
- ma_uint32 actualBufferSizeInFramesPlayback; /* Value from GetBufferSize(). internalBufferSizeInFrames is not set to the _actual_ buffer size when low-latency shared mode is being used due to the way the IAudioClient3 API works. */
- ma_uint32 actualBufferSizeInFramesCapture;
- ma_uint32 originalBufferSizeInFrames;
- ma_uint32 originalBufferSizeInMilliseconds;
- ma_uint32 originalPeriods;
- ma_bool32 hasDefaultPlaybackDeviceChanged; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */
- ma_bool32 hasDefaultCaptureDeviceChanged; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */
- ma_uint32 periodSizeInFramesPlayback;
- ma_uint32 periodSizeInFramesCapture;
- ma_bool32 isStartedCapture;
- ma_bool32 isStartedPlayback;
- } wasapi;
-#endif
-#ifdef MA_SUPPORT_DSOUND
- struct
- {
- /*LPDIRECTSOUND*/ ma_ptr pPlayback;
- /*LPDIRECTSOUNDBUFFER*/ ma_ptr pPlaybackPrimaryBuffer;
- /*LPDIRECTSOUNDBUFFER*/ ma_ptr pPlaybackBuffer;
- /*LPDIRECTSOUNDCAPTURE*/ ma_ptr pCapture;
- /*LPDIRECTSOUNDCAPTUREBUFFER*/ ma_ptr pCaptureBuffer;
- } dsound;
-#endif
-#ifdef MA_SUPPORT_WINMM
- struct
- {
- /*HWAVEOUT*/ ma_handle hDevicePlayback;
- /*HWAVEIN*/ ma_handle hDeviceCapture;
- /*HANDLE*/ ma_handle hEventPlayback;
- /*HANDLE*/ ma_handle hEventCapture;
- ma_uint32 fragmentSizeInFrames;
- ma_uint32 fragmentSizeInBytes;
- ma_uint32 iNextHeaderPlayback; /* [0,periods). Used as an index into pWAVEHDRPlayback. */
- ma_uint32 iNextHeaderCapture; /* [0,periods). Used as an index into pWAVEHDRCapture. */
- ma_uint32 headerFramesConsumedPlayback; /* The number of PCM frames consumed in the buffer in pWAVEHEADER[iNextHeader]. */
- ma_uint32 headerFramesConsumedCapture; /* ^^^ */
- /*WAVEHDR**/ ma_uint8* pWAVEHDRPlayback; /* One instantiation for each period. */
- /*WAVEHDR**/ ma_uint8* pWAVEHDRCapture; /* One instantiation for each period. */
- ma_uint8* pIntermediaryBufferPlayback;
- ma_uint8* pIntermediaryBufferCapture;
- ma_uint8* _pHeapData; /* Used internally and is used for the heap allocated data for the intermediary buffer and the WAVEHDR structures. */
- ma_bool32 isStarted;
- } winmm;
-#endif
-#ifdef MA_SUPPORT_ALSA
- struct
- {
- /*snd_pcm_t**/ ma_ptr pPCMPlayback;
- /*snd_pcm_t**/ ma_ptr pPCMCapture;
- ma_bool32 isUsingMMapPlayback : 1;
- ma_bool32 isUsingMMapCapture : 1;
- } alsa;
-#endif
-#ifdef MA_SUPPORT_PULSEAUDIO
- struct
- {
- /*pa_mainloop**/ ma_ptr pMainLoop;
- /*pa_mainloop_api**/ ma_ptr pAPI;
- /*pa_context**/ ma_ptr pPulseContext;
- /*pa_stream**/ ma_ptr pStreamPlayback;
- /*pa_stream**/ ma_ptr pStreamCapture;
- /*pa_context_state*/ ma_uint32 pulseContextState;
- void* pMappedBufferPlayback;
- const void* pMappedBufferCapture;
- ma_uint32 mappedBufferFramesRemainingPlayback;
- ma_uint32 mappedBufferFramesRemainingCapture;
- ma_uint32 mappedBufferFramesCapacityPlayback;
- ma_uint32 mappedBufferFramesCapacityCapture;
- ma_bool32 breakFromMainLoop : 1;
- } pulse;
-#endif
-#ifdef MA_SUPPORT_JACK
- struct
- {
- /*jack_client_t**/ ma_ptr pClient;
- /*jack_port_t**/ ma_ptr pPortsPlayback[MA_MAX_CHANNELS];
- /*jack_port_t**/ ma_ptr pPortsCapture[MA_MAX_CHANNELS];
- float* pIntermediaryBufferPlayback; /* Typed as a float because JACK is always floating point. */
- float* pIntermediaryBufferCapture;
- ma_pcm_rb duplexRB;
- } jack;
-#endif
-#ifdef MA_SUPPORT_COREAUDIO
- struct
- {
- ma_uint32 deviceObjectIDPlayback;
- ma_uint32 deviceObjectIDCapture;
- /*AudioUnit*/ ma_ptr audioUnitPlayback;
- /*AudioUnit*/ ma_ptr audioUnitCapture;
- /*AudioBufferList**/ ma_ptr pAudioBufferList; /* Only used for input devices. */
- ma_event stopEvent;
- ma_uint32 originalBufferSizeInFrames;
- ma_uint32 originalBufferSizeInMilliseconds;
- ma_uint32 originalPeriods;
- ma_bool32 isDefaultPlaybackDevice;
- ma_bool32 isDefaultCaptureDevice;
- ma_bool32 isSwitchingPlaybackDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */
- ma_bool32 isSwitchingCaptureDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */
- ma_pcm_rb duplexRB;
- } coreaudio;
-#endif
-#ifdef MA_SUPPORT_SNDIO
- struct
- {
- ma_ptr handlePlayback;
- ma_ptr handleCapture;
- ma_bool32 isStartedPlayback;
- ma_bool32 isStartedCapture;
- } sndio;
-#endif
-#ifdef MA_SUPPORT_AUDIO4
- struct
- {
- int fdPlayback;
- int fdCapture;
- } audio4;
-#endif
-#ifdef MA_SUPPORT_OSS
- struct
- {
- int fdPlayback;
- int fdCapture;
- } oss;
-#endif
-#ifdef MA_SUPPORT_AAUDIO
- struct
- {
- /*AAudioStream**/ ma_ptr pStreamPlayback;
- /*AAudioStream**/ ma_ptr pStreamCapture;
- ma_pcm_rb duplexRB;
- } aaudio;
-#endif
-#ifdef MA_SUPPORT_OPENSL
- struct
- {
- /*SLObjectItf*/ ma_ptr pOutputMixObj;
- /*SLOutputMixItf*/ ma_ptr pOutputMix;
- /*SLObjectItf*/ ma_ptr pAudioPlayerObj;
- /*SLPlayItf*/ ma_ptr pAudioPlayer;
- /*SLObjectItf*/ ma_ptr pAudioRecorderObj;
- /*SLRecordItf*/ ma_ptr pAudioRecorder;
- /*SLAndroidSimpleBufferQueueItf*/ ma_ptr pBufferQueuePlayback;
- /*SLAndroidSimpleBufferQueueItf*/ ma_ptr pBufferQueueCapture;
- ma_uint32 currentBufferIndexPlayback;
- ma_uint32 currentBufferIndexCapture;
- ma_uint8* pBufferPlayback; /* This is malloc()'d and is used for storing audio data. Typed as ma_uint8 for easy offsetting. */
- ma_uint8* pBufferCapture;
- ma_pcm_rb duplexRB;
- } opensl;
-#endif
-#ifdef MA_SUPPORT_WEBAUDIO
- struct
- {
- int indexPlayback; /* We use a factory on the JavaScript side to manage devices and use an index for JS/C interop. */
- int indexCapture;
- ma_pcm_rb duplexRB; /* In external capture format. */
- } webaudio;
-#endif
-#ifdef MA_SUPPORT_NULL
- struct
- {
- ma_thread deviceThread;
- ma_event operationEvent;
- ma_event operationCompletionEvent;
- ma_uint32 operation;
- ma_result operationResult;
- ma_timer timer;
- double priorRunTime;
- ma_uint32 currentPeriodFramesRemainingPlayback;
- ma_uint32 currentPeriodFramesRemainingCapture;
- ma_uint64 lastProcessedFramePlayback;
- ma_uint32 lastProcessedFrameCapture;
- ma_bool32 isStarted;
- } null_device;
-#endif
- };
-};
-#if defined(_MSC_VER)
- #pragma warning(pop)
-#else
- #pragma GCC diagnostic pop /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */
-#endif
-
-/*
-Initializes a context.
-
-The context is used for selecting and initializing the relevant backends.
-
-Note that the location of the context cannot change throughout it's lifetime. Consider allocating
-the ma_context object with malloc() if this is an issue. The reason for this is that a pointer
-to the context is stored in the ma_device structure.
-
- is used to allow the application to prioritize backends depending on it's specific
-requirements. This can be null in which case it uses the default priority, which is as follows:
- - WASAPI
- - DirectSound
- - WinMM
- - Core Audio (Apple)
- - sndio
- - audio(4)
- - OSS
- - PulseAudio
- - ALSA
- - JACK
- - AAudio
- - OpenSL|ES
- - Web Audio / Emscripten
- - Null
+typedef enum
+{
+ ma_stream_layout_interleaved = 0,
+ ma_stream_layout_deinterleaved
+} ma_stream_layout;
- is used to configure the context. Use the logCallback config to set a callback for whenever a
-log message is posted. The priority of the worker thread can be set with the threadPriority config.
+typedef enum
+{
+ ma_dither_mode_none = 0,
+ ma_dither_mode_rectangle,
+ ma_dither_mode_triangle
+} ma_dither_mode;
-It is recommended that only a single context is active at any given time because it's a bulky data
-structure which performs run-time linking for the relevant backends every time it's initialized.
+typedef enum
+{
+ /*
+ I like to keep these explicitly defined because they're used as a key into a lookup table. When items are
+ added to this, make sure there are no gaps and that they're added to the lookup table in ma_get_bytes_per_sample().
+ */
+ ma_format_unknown = 0, /* Mainly used for indicating an error, but also used as the default for the output format for decoders. */
+ ma_format_u8 = 1,
+ ma_format_s16 = 2, /* Seems to be the most widely supported format. */
+ ma_format_s24 = 3, /* Tightly packed. 3 bytes per sample. */
+ ma_format_s32 = 4,
+ ma_format_f32 = 5,
+ ma_format_count
+} ma_format;
-Return Value:
- MA_SUCCESS if successful; any other error code otherwise.
+typedef enum
+{
+ ma_channel_mix_mode_rectangular = 0, /* Simple averaging based on the plane(s) the channel is sitting on. */
+ ma_channel_mix_mode_simple, /* Drop excess channels; zeroed out extra channels. */
+ ma_channel_mix_mode_custom_weights, /* Use custom weights specified in ma_channel_router_config. */
+ ma_channel_mix_mode_planar_blend = ma_channel_mix_mode_rectangular,
+ ma_channel_mix_mode_default = ma_channel_mix_mode_planar_blend
+} ma_channel_mix_mode;
-Thread Safety: UNSAFE
-*/
-ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext);
+typedef enum
+{
+ ma_standard_channel_map_microsoft,
+ ma_standard_channel_map_alsa,
+ ma_standard_channel_map_rfc3551, /* Based off AIFF. */
+ ma_standard_channel_map_flac,
+ ma_standard_channel_map_vorbis,
+ ma_standard_channel_map_sound4, /* FreeBSD's sound(4). */
+ ma_standard_channel_map_sndio, /* www.sndio.org/tips.html */
+ ma_standard_channel_map_webaudio = ma_standard_channel_map_flac, /* https://webaudio.github.io/web-audio-api/#ChannelOrdering. Only 1, 2, 4 and 6 channels are defined, but can fill in the gaps with logical assumptions. */
+ ma_standard_channel_map_default = ma_standard_channel_map_microsoft
+} ma_standard_channel_map;
-/*
-Uninitializes a context.
+typedef enum
+{
+ ma_performance_profile_low_latency = 0,
+ ma_performance_profile_conservative
+} ma_performance_profile;
-Results are undefined if you call this while any device created by this context is still active.
-Return Value:
- MA_SUCCESS if successful; any other error code otherwise.
+typedef struct
+{
+ void* pUserData;
+ void* (* onMalloc)(size_t sz, void* pUserData);
+ void* (* onRealloc)(void* p, size_t sz, void* pUserData);
+ void (* onFree)(void* p, void* pUserData);
+} ma_allocation_callbacks;
-Thread Safety: UNSAFE
-*/
-ma_result ma_context_uninit(ma_context* pContext);
-/*
-Enumerates over every device (both playback and capture).
+/**************************************************************************************************************************************************************
-This is a lower-level enumeration function to the easier to use ma_context_get_devices(). Use
-ma_context_enumerate_devices() if you would rather not incur an internal heap allocation, or
-it simply suits your code better.
+Biquad Filtering
-Do _not_ assume the first enumerated device of a given type is the default device.
+**************************************************************************************************************************************************************/
+typedef union
+{
+ float f32;
+ ma_int32 s32;
+} ma_biquad_coefficient;
-Some backends and platforms may only support default playback and capture devices.
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ double b0;
+ double b1;
+ double b2;
+ double a0;
+ double a1;
+ double a2;
+} ma_biquad_config;
-Note that this only retrieves the ID and name/description of the device. The reason for only
-retrieving basic information is that it would otherwise require opening the backend device in
-order to probe it for more detailed information which can be inefficient. Consider using
-ma_context_get_device_info() for this, but don't call it from within the enumeration callback.
+ma_biquad_config ma_biquad_config_init(ma_format format, ma_uint32 channels, double b0, double b1, double b2, double a0, double a1, double a2);
-In general, you should not do anything complicated from within the callback. In particular, do
-not try initializing a device from within the callback.
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_biquad_coefficient b0;
+ ma_biquad_coefficient b1;
+ ma_biquad_coefficient b2;
+ ma_biquad_coefficient a1;
+ ma_biquad_coefficient a2;
+ ma_biquad_coefficient r1[MA_MAX_CHANNELS];
+ ma_biquad_coefficient r2[MA_MAX_CHANNELS];
+} ma_biquad;
-Consider using ma_context_get_devices() for a simpler and safer API, albeit at the expense of
-an internal heap allocation.
+ma_result ma_biquad_init(const ma_biquad_config* pConfig, ma_biquad* pBQ);
+ma_result ma_biquad_reinit(const ma_biquad_config* pConfig, ma_biquad* pBQ);
+ma_result ma_biquad_process_pcm_frames(ma_biquad* pBQ, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_biquad_get_latency(ma_biquad* pBQ);
-Returning false from the callback will stop enumeration. Returning true will continue enumeration.
-Return Value:
- MA_SUCCESS if successful; any other error code otherwise.
+/**************************************************************************************************************************************************************
-Thread Safety: SAFE
- This is guarded using a simple mutex lock.
-*/
-ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData);
+Low-Pass Filtering
-/*
-Retrieves basic information about every active playback and/or capture device.
+**************************************************************************************************************************************************************/
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ double cutoffFrequency;
+ double q;
+} ma_lpf1_config, ma_lpf2_config;
-You can pass in NULL for the playback or capture lists in which case they'll be ignored.
+ma_lpf1_config ma_lpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency);
+ma_lpf2_config ma_lpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q);
-It is _not_ safe to assume the first device in the list is the default device.
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_biquad_coefficient a;
+ ma_biquad_coefficient r1[MA_MAX_CHANNELS];
+} ma_lpf1;
-The returned pointers will become invalid upon the next call this this function, or when the
-context is uninitialized. Do not free the returned pointers.
+ma_result ma_lpf1_init(const ma_lpf1_config* pConfig, ma_lpf1* pLPF);
+ma_result ma_lpf1_reinit(const ma_lpf1_config* pConfig, ma_lpf1* pLPF);
+ma_result ma_lpf1_process_pcm_frames(ma_lpf1* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_lpf1_get_latency(ma_lpf1* pLPF);
-This function follows the same enumeration rules as ma_context_enumerate_devices(). See
-documentation for ma_context_enumerate_devices() for more information.
+typedef struct
+{
+ ma_biquad bq; /* The second order low-pass filter is implemented as a biquad filter. */
+} ma_lpf2;
-Return Value:
- MA_SUCCESS if successful; any other error code otherwise.
+ma_result ma_lpf2_init(const ma_lpf2_config* pConfig, ma_lpf2* pLPF);
+ma_result ma_lpf2_reinit(const ma_lpf2_config* pConfig, ma_lpf2* pLPF);
+ma_result ma_lpf2_process_pcm_frames(ma_lpf2* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_lpf2_get_latency(ma_lpf2* pLPF);
-Thread Safety: SAFE
- Since each call to this function invalidates the pointers from the previous call, you
- should not be calling this simultaneously across multiple threads. Instead, you need to
- make a copy of the returned data with your own higher level synchronization.
-*/
-ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount);
-/*
-Retrieves information about a device with the given ID.
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ double cutoffFrequency;
+ ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). */
+} ma_lpf_config;
-Do _not_ call this from within the ma_context_enumerate_devices() callback.
+ma_lpf_config ma_lpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order);
-It's possible for a device to have different information and capabilities depending on whether
-or not it's opened in shared or exclusive mode. For example, in shared mode, WASAPI always uses
-floating point samples for mixing, but in exclusive mode it can be anything. Therefore, this
-function allows you to specify which share mode you want information for. Note that not all
-backends and devices support shared or exclusive mode, in which case this function will fail
-if the requested share mode is unsupported.
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 lpf1Count;
+ ma_uint32 lpf2Count;
+ ma_lpf1 lpf1[1];
+ ma_lpf2 lpf2[MA_MAX_FILTER_ORDER/2];
+} ma_lpf;
-This leaves pDeviceInfo unmodified in the result of an error.
+ma_result ma_lpf_init(const ma_lpf_config* pConfig, ma_lpf* pLPF);
+ma_result ma_lpf_reinit(const ma_lpf_config* pConfig, ma_lpf* pLPF);
+ma_result ma_lpf_process_pcm_frames(ma_lpf* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_lpf_get_latency(ma_lpf* pLPF);
-Return Value:
- MA_SUCCESS if successful; any other error code otherwise.
-Thread Safety: SAFE
- This is guarded using a simple mutex lock.
-*/
-ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo);
+/**************************************************************************************************************************************************************
-/*
-Initializes a device.
+High-Pass Filtering
-The context can be null in which case it uses the default. This is equivalent to passing in a
-context that was initialized like so:
+**************************************************************************************************************************************************************/
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ double cutoffFrequency;
+ double q;
+} ma_hpf1_config, ma_hpf2_config;
- ma_context_init(NULL, 0, NULL, &context);
+ma_hpf1_config ma_hpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency);
+ma_hpf2_config ma_hpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q);
-Do not pass in null for the context if you are needing to open multiple devices. You can,
-however, use null when initializing the first device, and then use device.pContext for the
-initialization of other devices.
-
-The device's configuration is controlled with pConfig. This allows you to configure the sample
-format, channel count, sample rate, etc. Before calling ma_device_init(), you will need to
-initialize a ma_device_config object using ma_device_config_init(). You must set the callback in
-the device config. Once initialized, the device's config is immutable. If you need to change the
-config you will need to initialize a new device.
-
-Passing in 0 to any property in pConfig will force the use of a default value. In the case of
-sample format, channel count, sample rate and channel map it will default to the values used by
-the backend's internal device. For the size of the buffer you can set bufferSizeInFrames or
-bufferSizeInMilliseconds (if both are set it will prioritize bufferSizeInFrames). If both are
-set to zero, it will default to MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_LOW_LATENCY or
-MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_CONSERVATIVE, depending on whether or not performanceProfile
-is set to ma_performance_profile_low_latency or ma_performance_profile_conservative.
-
-If you request exclusive mode and the backend does not support it an error will be returned. For
-robustness, you may want to first try initializing the device in exclusive mode, and then fall back
-to shared mode if required. Alternatively you can just request shared mode (the default if you
-leave it unset in the config) which is the most reliable option. Some backends do not have a
-practical way of choosing whether or not the device should be exclusive or not (ALSA, for example)
-in which case it just acts as a hint. Unless you have special requirements you should try avoiding
-exclusive mode as it's intrusive to the user. Starting with Windows 10, miniaudio will use low-latency
-shared mode where possible which may make exclusive mode unnecessary.
-
-When sending or receiving data to/from a device, miniaudio will internally perform a format
-conversion to convert between the format specified by pConfig and the format used internally by
-the backend. If you pass in NULL for pConfig or 0 for the sample format, channel count,
-sample rate _and_ channel map, data transmission will run on an optimized pass-through fast path.
-
-The buffer size should be treated as a hint. miniaudio will try it's best to use exactly what you
-ask for, but it may differ. You should not assume the number of frames specified in each call to
-the data callback is exactly what you originally specified.
-
-The property controls how frequently the background thread is woken to check for more
-data. It's tied to the buffer size, so as an example, if your buffer size is equivalent to 10
-milliseconds and you have 2 periods, the CPU will wake up approximately every 5 milliseconds.
-
-When compiling for UWP you must ensure you call this function on the main UI thread because the
-operating system may need to present the user with a message asking for permissions. Please refer
-to the official documentation for ActivateAudioInterfaceAsync() for more information.
-
-ALSA Specific: When initializing the default device, requesting shared mode will try using the
-"dmix" device for playback and the "dsnoop" device for capture. If these fail it will try falling
-back to the "hw" device.
-
-Return Value:
- MA_SUCCESS if successful; any other error code otherwise.
-
-Thread Safety: UNSAFE
- It is not safe to call this function simultaneously for different devices because some backends
- depend on and mutate global state (such as OpenSL|ES). The same applies to calling this at the
- same time as ma_device_uninit().
-*/
-ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice);
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_biquad_coefficient a;
+ ma_biquad_coefficient r1[MA_MAX_CHANNELS];
+} ma_hpf1;
-/*
-Initializes a device without a context, with extra parameters for controlling the configuration
-of the internal self-managed context.
+ma_result ma_hpf1_init(const ma_hpf1_config* pConfig, ma_hpf1* pHPF);
+ma_result ma_hpf1_reinit(const ma_hpf1_config* pConfig, ma_hpf1* pHPF);
+ma_result ma_hpf1_process_pcm_frames(ma_hpf1* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_hpf1_get_latency(ma_hpf1* pHPF);
-See ma_device_init() and ma_context_init().
-*/
-ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice);
+typedef struct
+{
+ ma_biquad bq; /* The second order high-pass filter is implemented as a biquad filter. */
+} ma_hpf2;
-/*
-Uninitializes a device.
+ma_result ma_hpf2_init(const ma_hpf2_config* pConfig, ma_hpf2* pHPF);
+ma_result ma_hpf2_reinit(const ma_hpf2_config* pConfig, ma_hpf2* pHPF);
+ma_result ma_hpf2_process_pcm_frames(ma_hpf2* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_hpf2_get_latency(ma_hpf2* pHPF);
-This will explicitly stop the device. You do not need to call ma_device_stop() beforehand, but it's
-harmless if you do.
-Do not call this in any callback.
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ double cutoffFrequency;
+ ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). */
+} ma_hpf_config;
-Return Value:
- MA_SUCCESS if successful; any other error code otherwise.
+ma_hpf_config ma_hpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order);
-Thread Safety: UNSAFE
- As soon as this API is called the device should be considered undefined. All bets are off if you
- try using the device at the same time as uninitializing it.
-*/
-void ma_device_uninit(ma_device* pDevice);
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 hpf1Count;
+ ma_uint32 hpf2Count;
+ ma_hpf1 hpf1[1];
+ ma_hpf2 hpf2[MA_MAX_FILTER_ORDER/2];
+} ma_hpf;
-/*
-Sets the callback to use when the device has stopped, either explicitly or as a result of an error.
+ma_result ma_hpf_init(const ma_hpf_config* pConfig, ma_hpf* pHPF);
+ma_result ma_hpf_reinit(const ma_hpf_config* pConfig, ma_hpf* pHPF);
+ma_result ma_hpf_process_pcm_frames(ma_hpf* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_hpf_get_latency(ma_hpf* pHPF);
-Thread Safety: SAFE
- This API is implemented as a simple atomic assignment.
-*/
-void ma_device_set_stop_callback(ma_device* pDevice, ma_stop_proc proc);
-/*
-Activates the device. For playback devices this begins playback. For capture devices it begins
-recording.
+/**************************************************************************************************************************************************************
-For a playback device, this will retrieve an initial chunk of audio data from the client before
-returning. The reason for this is to ensure there is valid audio data in the buffer, which needs
-to be done _before_ the device begins playback.
+Band-Pass Filtering
-This API waits until the backend device has been started for real by the worker thread. It also
-waits on a mutex for thread-safety.
+**************************************************************************************************************************************************************/
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ double cutoffFrequency;
+ double q;
+} ma_bpf2_config;
-Do not call this in any callback.
+ma_bpf2_config ma_bpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q);
-Return Value:
- MA_SUCCESS if successful; any other error code otherwise.
+typedef struct
+{
+ ma_biquad bq; /* The second order band-pass filter is implemented as a biquad filter. */
+} ma_bpf2;
-Thread Safety: SAFE
-*/
-ma_result ma_device_start(ma_device* pDevice);
+ma_result ma_bpf2_init(const ma_bpf2_config* pConfig, ma_bpf2* pBPF);
+ma_result ma_bpf2_reinit(const ma_bpf2_config* pConfig, ma_bpf2* pBPF);
+ma_result ma_bpf2_process_pcm_frames(ma_bpf2* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_bpf2_get_latency(ma_bpf2* pBPF);
-/*
-Puts the device to sleep, but does not uninitialize it. Use ma_device_start() to start it up again.
-This API needs to wait on the worker thread to stop the backend device properly before returning. It
-also waits on a mutex for thread-safety. In addition, some backends need to wait for the device to
-finish playback/recording of the current fragment which can take some time (usually proportionate to
-the buffer size that was specified at initialization time).
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ double cutoffFrequency;
+ ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). */
+} ma_bpf_config;
-This should not drop unprocessed samples. Backends are required to either pause the stream in-place
-or drain the buffer if pausing is not possible. The reason for this is that stopping the device and
-the resuming it with ma_device_start() (which you might do when your program loses focus) may result
-in a situation where those samples are never output to the speakers or received from the microphone
-which can in turn result in de-syncs.
+ma_bpf_config ma_bpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order);
-Do not call this in any callback.
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 bpf2Count;
+ ma_bpf2 bpf2[MA_MAX_FILTER_ORDER/2];
+} ma_bpf;
-Return Value:
- MA_SUCCESS if successful; any other error code otherwise.
+ma_result ma_bpf_init(const ma_bpf_config* pConfig, ma_bpf* pBPF);
+ma_result ma_bpf_reinit(const ma_bpf_config* pConfig, ma_bpf* pBPF);
+ma_result ma_bpf_process_pcm_frames(ma_bpf* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_bpf_get_latency(ma_bpf* pBPF);
-Thread Safety: SAFE
-*/
-ma_result ma_device_stop(ma_device* pDevice);
-/*
-Determines whether or not the device is started.
+/**************************************************************************************************************************************************************
-This is implemented as a simple accessor.
+Notching Filter
-Return Value:
- True if the device is started, false otherwise.
+**************************************************************************************************************************************************************/
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ double q;
+ double frequency;
+} ma_notch2_config;
-Thread Safety: SAFE
- If another thread calls ma_device_start() or ma_device_stop() at this same time as this function
- is called, there's a very small chance the return value will be out of sync.
-*/
-ma_bool32 ma_device_is_started(ma_device* pDevice);
+ma_notch2_config ma_notch2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency);
+typedef struct
+{
+ ma_biquad bq;
+} ma_notch2;
-/*
-Helper function for initializing a ma_context_config object.
-*/
-ma_context_config ma_context_config_init(void);
+ma_result ma_notch2_init(const ma_notch2_config* pConfig, ma_notch2* pFilter);
+ma_result ma_notch2_reinit(const ma_notch2_config* pConfig, ma_notch2* pFilter);
+ma_result ma_notch2_process_pcm_frames(ma_notch2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_notch2_get_latency(ma_notch2* pFilter);
-/*
-Initializes a device config.
-By default, the device config will use native device settings (format, channels, sample rate, etc.). Using native
-settings means you will get an optimized pass-through data transmission pipeline to and from the device, but you will
-need to do all format conversions manually. Normally you would want to use a known format that your program can handle
-natively, which you can do by specifying it after this function returns, like so:
+/**************************************************************************************************************************************************************
- ma_device_config config = ma_device_config_init(ma_device_type_playback);
- config.callback = my_data_callback;
- config.pUserData = pMyUserData;
- config.format = ma_format_f32;
- config.channels = 2;
- config.sampleRate = 44100;
+Peaking EQ Filter
-In this case miniaudio will perform all of the necessary data conversion for you behind the scenes.
+**************************************************************************************************************************************************************/
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ double gainDB;
+ double q;
+ double frequency;
+} ma_peak2_config;
-Currently miniaudio only supports asynchronous, callback based data delivery which means you must specify callback. A
-pointer to user data can also be specified which is set in the pUserData member of the ma_device object.
+ma_peak2_config ma_peak2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency);
-To specify a channel map you can use ma_get_standard_channel_map():
+typedef struct
+{
+ ma_biquad bq;
+} ma_peak2;
- ma_get_standard_channel_map(ma_standard_channel_map_default, config.channels, config.channelMap);
+ma_result ma_peak2_init(const ma_peak2_config* pConfig, ma_peak2* pFilter);
+ma_result ma_peak2_reinit(const ma_peak2_config* pConfig, ma_peak2* pFilter);
+ma_result ma_peak2_process_pcm_frames(ma_peak2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_peak2_get_latency(ma_peak2* pFilter);
-Alternatively you can set the channel map manually if you need something specific or something that isn't one of miniaudio's
-stock channel maps.
-By default the system's default device will be used. Set the pDeviceID member to a pointer to a ma_device_id object to
-use a specific device. You can enumerate over the devices with ma_context_enumerate_devices() or ma_context_get_devices()
-which will give you access to the device ID. Set pDeviceID to NULL to use the default device.
+/**************************************************************************************************************************************************************
-The device type can be one of the ma_device_type's:
- ma_device_type_playback
- ma_device_type_capture
- ma_device_type_duplex
+Low Shelf Filter
+
+**************************************************************************************************************************************************************/
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ double gainDB;
+ double shelfSlope;
+ double frequency;
+} ma_loshelf2_config;
+
+ma_loshelf2_config ma_loshelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency);
+
+typedef struct
+{
+ ma_biquad bq;
+} ma_loshelf2;
+
+ma_result ma_loshelf2_init(const ma_loshelf2_config* pConfig, ma_loshelf2* pFilter);
+ma_result ma_loshelf2_reinit(const ma_loshelf2_config* pConfig, ma_loshelf2* pFilter);
+ma_result ma_loshelf2_process_pcm_frames(ma_loshelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_loshelf2_get_latency(ma_loshelf2* pFilter);
+
+
+/**************************************************************************************************************************************************************
+
+High Shelf Filter
+
+**************************************************************************************************************************************************************/
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ double gainDB;
+ double shelfSlope;
+ double frequency;
+} ma_hishelf2_config;
+
+ma_hishelf2_config ma_hishelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency);
+
+typedef struct
+{
+ ma_biquad bq;
+} ma_hishelf2;
+
+ma_result ma_hishelf2_init(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter);
+ma_result ma_hishelf2_reinit(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter);
+ma_result ma_hishelf2_process_pcm_frames(ma_hishelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+ma_uint32 ma_hishelf2_get_latency(ma_hishelf2* pFilter);
-Thread Safety: SAFE
-*/
-ma_device_config ma_device_config_init(ma_device_type deviceType);
/************************************************************************************************************************************************************
+*************************************************************************************************************************************************************
-Utiltities
+DATA CONVERSION
+===============
+
+This section contains the APIs for data conversion. You will find everything here for channel mapping, sample format conversion, resampling, etc.
+*************************************************************************************************************************************************************
************************************************************************************************************************************************************/
-/*
-Creates a mutex.
+/**************************************************************************************************************************************************************
-A mutex must be created from a valid context. A mutex is initially unlocked.
-*/
-ma_result ma_mutex_init(ma_context* pContext, ma_mutex* pMutex);
+Resampling
+
+**************************************************************************************************************************************************************/
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRateIn;
+ ma_uint32 sampleRateOut;
+ ma_uint32 lpfOrder; /* The low-pass filter order. Setting this to 0 will disable low-pass filtering. */
+    double lpfNyquistFactor;    /* 0..1. Defaults to 1. 1 = Half the sampling frequency (Nyquist Frequency), 0.5 = Quarter the sampling frequency (half Nyquist Frequency), etc. */
+} ma_linear_resampler_config;
+
+ma_linear_resampler_config ma_linear_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut);
+
+typedef struct
+{
+ ma_linear_resampler_config config;
+ ma_uint32 inAdvanceInt;
+ ma_uint32 inAdvanceFrac;
+ ma_uint32 inTimeInt;
+ ma_uint32 inTimeFrac;
+ union
+ {
+ float f32[MA_MAX_CHANNELS];
+ ma_int16 s16[MA_MAX_CHANNELS];
+ } x0; /* The previous input frame. */
+ union
+ {
+ float f32[MA_MAX_CHANNELS];
+ ma_int16 s16[MA_MAX_CHANNELS];
+ } x1; /* The next input frame. */
+ ma_lpf lpf;
+} ma_linear_resampler;
+
+ma_result ma_linear_resampler_init(const ma_linear_resampler_config* pConfig, ma_linear_resampler* pResampler);
+void ma_linear_resampler_uninit(ma_linear_resampler* pResampler);
+ma_result ma_linear_resampler_process_pcm_frames(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut);
+ma_result ma_linear_resampler_set_rate(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut);
+ma_result ma_linear_resampler_set_rate_ratio(ma_linear_resampler* pResampler, float ratioInOut);
+ma_uint64 ma_linear_resampler_get_required_input_frame_count(ma_linear_resampler* pResampler, ma_uint64 outputFrameCount);
+ma_uint64 ma_linear_resampler_get_expected_output_frame_count(ma_linear_resampler* pResampler, ma_uint64 inputFrameCount);
+ma_uint64 ma_linear_resampler_get_input_latency(ma_linear_resampler* pResampler);
+ma_uint64 ma_linear_resampler_get_output_latency(ma_linear_resampler* pResampler);
+
+typedef enum
+{
+ ma_resample_algorithm_linear = 0, /* Fastest, lowest quality. Optional low-pass filtering. Default. */
+ ma_resample_algorithm_speex
+} ma_resample_algorithm;
+
+typedef struct
+{
+ ma_format format; /* Must be either ma_format_f32 or ma_format_s16. */
+ ma_uint32 channels;
+ ma_uint32 sampleRateIn;
+ ma_uint32 sampleRateOut;
+ ma_resample_algorithm algorithm;
+ struct
+ {
+ ma_uint32 lpfOrder;
+ double lpfNyquistFactor;
+ } linear;
+ struct
+ {
+ int quality; /* 0 to 10. Defaults to 3. */
+ } speex;
+} ma_resampler_config;
+
+ma_resampler_config ma_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_resample_algorithm algorithm);
+
+typedef struct
+{
+ ma_resampler_config config;
+ union
+ {
+ ma_linear_resampler linear;
+ struct
+ {
+ void* pSpeexResamplerState; /* SpeexResamplerState* */
+ } speex;
+ } state;
+} ma_resampler;
/*
-Deletes a mutex.
+Initializes a new resampler object from a config.
*/
-void ma_mutex_uninit(ma_mutex* pMutex);
+ma_result ma_resampler_init(const ma_resampler_config* pConfig, ma_resampler* pResampler);
/*
-Locks a mutex with an infinite timeout.
+Uninitializes a resampler.
*/
-void ma_mutex_lock(ma_mutex* pMutex);
+void ma_resampler_uninit(ma_resampler* pResampler);
/*
-Unlocks a mutex.
+Converts the given input data.
+
+Both the input and output frames must be in the format specified in the config when the resampler was initialized.
+
+On input, [pFrameCountOut] contains the number of output frames to process. On output it contains the number of output frames that
+were actually processed, which may be less than the requested amount which will happen if there's not enough input data. You can use
+ma_resampler_get_expected_output_frame_count() to know how many output frames will be processed for a given number of input frames.
+
+On input, [pFrameCountIn] contains the number of input frames contained in [pFramesIn]. On output it contains the number of whole
+input frames that were actually processed. You can use ma_resampler_get_required_input_frame_count() to know how many input frames
+you should provide for a given number of output frames. [pFramesIn] can be NULL, in which case zeroes will be used instead.
+
+If [pFramesOut] is NULL, a seek is performed. In this case, if [pFrameCountOut] is not NULL it will seek by the specified number of
+output frames. Otherwise, if [pFrameCountOut] is NULL and [pFrameCountIn] is not NULL, it will seek by the specified number of input
+frames. When seeking, [pFramesIn] is allowed to be NULL, in which case the internal timing state will be updated, but no input will be
+processed. In this case, any internal filter state will be updated as if zeroes were passed in.
+
+It is an error for [pFramesOut] to be non-NULL and [pFrameCountOut] to be NULL.
+
+It is an error for both [pFrameCountOut] and [pFrameCountIn] to be NULL.
*/
-void ma_mutex_unlock(ma_mutex* pMutex);
+ma_result ma_resampler_process_pcm_frames(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut);
/*
-Retrieves a friendly name for a backend.
+Sets the input and output sample rate.
*/
-const char* ma_get_backend_name(ma_backend backend);
+ma_result ma_resampler_set_rate(ma_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut);
/*
-Adjust buffer size based on a scaling factor.
+Sets the input and output sample rate as a ratio.
-This just multiplies the base size by the scaling factor, making sure it's a size of at least 1.
+The ratio is in/out.
*/
-ma_uint32 ma_scale_buffer_size(ma_uint32 baseBufferSize, float scale);
+ma_result ma_resampler_set_rate_ratio(ma_resampler* pResampler, float ratio);
-/*
-Calculates a buffer size in milliseconds from the specified number of frames and sample rate.
-*/
-ma_uint32 ma_calculate_buffer_size_in_milliseconds_from_frames(ma_uint32 bufferSizeInFrames, ma_uint32 sampleRate);
/*
-Calculates a buffer size in frames from the specified number of milliseconds and sample rate.
+Calculates the number of whole input frames that would need to be read from the client in order to output the specified
+number of output frames.
+
+The returned value does not include cached input frames. It only returns the number of extra frames that would need to be
+read from the input buffer in order to output the specified number of output frames.
*/
-ma_uint32 ma_calculate_buffer_size_in_frames_from_milliseconds(ma_uint32 bufferSizeInMilliseconds, ma_uint32 sampleRate);
+ma_uint64 ma_resampler_get_required_input_frame_count(ma_resampler* pResampler, ma_uint64 outputFrameCount);
/*
-Retrieves the default buffer size in milliseconds based on the specified performance profile.
+Calculates the number of whole output frames that would be output after fully reading and consuming the specified number of
+input frames.
*/
-ma_uint32 ma_get_default_buffer_size_in_milliseconds(ma_performance_profile performanceProfile);
+ma_uint64 ma_resampler_get_expected_output_frame_count(ma_resampler* pResampler, ma_uint64 inputFrameCount);
+
/*
-Calculates a buffer size in frames for the specified performance profile and scale factor.
+Retrieves the latency introduced by the resampler in input frames.
*/
-ma_uint32 ma_get_default_buffer_size_in_frames(ma_performance_profile performanceProfile, ma_uint32 sampleRate);
+ma_uint64 ma_resampler_get_input_latency(ma_resampler* pResampler);
/*
-Copies silent frames into the given buffer.
+Retrieves the latency introduced by the resampler in output frames.
*/
-void ma_zero_pcm_frames(void* p, ma_uint32 frameCount, ma_format format, ma_uint32 channels);
-
-#endif /* MA_NO_DEVICE_IO */
+ma_uint64 ma_resampler_get_output_latency(ma_resampler* pResampler);
+/**************************************************************************************************************************************************************
-/************************************************************************************************************************************************************
+Channel Conversion
-Decoding
-
-************************************************************************************************************************************************************/
-#ifndef MA_NO_DECODING
-
-typedef struct ma_decoder ma_decoder;
-
-typedef enum
+**************************************************************************************************************************************************************/
+typedef struct
{
- ma_seek_origin_start,
- ma_seek_origin_current
-} ma_seek_origin;
+ ma_format format;
+ ma_uint32 channelsIn;
+ ma_uint32 channelsOut;
+ ma_channel channelMapIn[MA_MAX_CHANNELS];
+ ma_channel channelMapOut[MA_MAX_CHANNELS];
+ ma_channel_mix_mode mixingMode;
+ float weights[MA_MAX_CHANNELS][MA_MAX_CHANNELS]; /* [in][out]. Only used when mixingMode is set to ma_channel_mix_mode_custom_weights. */
+} ma_channel_converter_config;
-typedef size_t (* ma_decoder_read_proc) (ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead); /* Returns the number of bytes read. */
-typedef ma_bool32 (* ma_decoder_seek_proc) (ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin);
-typedef ma_result (* ma_decoder_seek_to_pcm_frame_proc) (ma_decoder* pDecoder, ma_uint64 frameIndex);
-typedef ma_result (* ma_decoder_uninit_proc) (ma_decoder* pDecoder);
-typedef ma_uint64 (* ma_decoder_get_length_in_pcm_frames_proc)(ma_decoder* pDecoder);
+ma_channel_converter_config ma_channel_converter_config_init(ma_format format, ma_uint32 channelsIn, const ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint32 channelsOut, const ma_channel channelMapOut[MA_MAX_CHANNELS], ma_channel_mix_mode mixingMode);
typedef struct
{
- ma_format format; /* Set to 0 or ma_format_unknown to use the stream's internal format. */
- ma_uint32 channels; /* Set to 0 to use the stream's internal channels. */
- ma_uint32 sampleRate; /* Set to 0 to use the stream's internal sample rate. */
- ma_channel channelMap[MA_MAX_CHANNELS];
- ma_channel_mix_mode channelMixMode;
- ma_dither_mode ditherMode;
- ma_src_algorithm srcAlgorithm;
+ ma_format format;
+ ma_uint32 channelsIn;
+ ma_uint32 channelsOut;
+ ma_channel channelMapIn[MA_MAX_CHANNELS];
+ ma_channel channelMapOut[MA_MAX_CHANNELS];
+ ma_channel_mix_mode mixingMode;
union
{
- ma_src_config_sinc sinc;
- } src;
-} ma_decoder_config;
+ float f32[MA_MAX_CHANNELS][MA_MAX_CHANNELS];
+ ma_int32 s16[MA_MAX_CHANNELS][MA_MAX_CHANNELS];
+ } weights;
+ ma_bool32 isPassthrough : 1;
+ ma_bool32 isSimpleShuffle : 1;
+ ma_bool32 isSimpleMonoExpansion : 1;
+ ma_bool32 isStereoToMono : 1;
+ ma_uint8 shuffleTable[MA_MAX_CHANNELS];
+} ma_channel_converter;
-struct ma_decoder
+ma_result ma_channel_converter_init(const ma_channel_converter_config* pConfig, ma_channel_converter* pConverter);
+void ma_channel_converter_uninit(ma_channel_converter* pConverter);
+ma_result ma_channel_converter_process_pcm_frames(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+
+
+/**************************************************************************************************************************************************************
+
+Data Conversion
+
+**************************************************************************************************************************************************************/
+typedef struct
{
- ma_decoder_read_proc onRead;
- ma_decoder_seek_proc onSeek;
- void* pUserData;
- ma_uint64 readPointer; /* Used for returning back to a previous position after analysing the stream or whatnot. */
- ma_format internalFormat;
- ma_uint32 internalChannels;
- ma_uint32 internalSampleRate;
- ma_channel internalChannelMap[MA_MAX_CHANNELS];
- ma_format outputFormat;
- ma_uint32 outputChannels;
- ma_uint32 outputSampleRate;
- ma_channel outputChannelMap[MA_MAX_CHANNELS];
- ma_pcm_converter dsp; /* <-- Format conversion is achieved by running frames through this. */
- ma_decoder_seek_to_pcm_frame_proc onSeekToPCMFrame;
- ma_decoder_uninit_proc onUninit;
- ma_decoder_get_length_in_pcm_frames_proc onGetLengthInPCMFrames;
- void* pInternalDecoder; /* <-- The drwav/drflac/stb_vorbis/etc. objects. */
+ ma_format formatIn;
+ ma_format formatOut;
+ ma_uint32 channelsIn;
+ ma_uint32 channelsOut;
+ ma_uint32 sampleRateIn;
+ ma_uint32 sampleRateOut;
+ ma_channel channelMapIn[MA_MAX_CHANNELS];
+ ma_channel channelMapOut[MA_MAX_CHANNELS];
+ ma_dither_mode ditherMode;
+ ma_channel_mix_mode channelMixMode;
+ float channelWeights[MA_MAX_CHANNELS][MA_MAX_CHANNELS]; /* [in][out]. Only used when channelMixMode is set to ma_channel_mix_mode_custom_weights. */
struct
{
- const ma_uint8* pData;
- size_t dataSize;
- size_t currentReadPos;
- } memory; /* Only used for decoders that were opened against a block of memory. */
-};
+ ma_resample_algorithm algorithm;
+ ma_bool32 allowDynamicSampleRate;
+ struct
+ {
+ ma_uint32 lpfOrder;
+ double lpfNyquistFactor;
+ } linear;
+ struct
+ {
+ int quality;
+ } speex;
+ } resampling;
+} ma_data_converter_config;
-ma_decoder_config ma_decoder_config_init(ma_format outputFormat, ma_uint32 outputChannels, ma_uint32 outputSampleRate);
+ma_data_converter_config ma_data_converter_config_init_default(void);
+ma_data_converter_config ma_data_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channelsIn, ma_uint32 channelsOut, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut);
-ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-ma_result ma_decoder_init_wav(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-ma_result ma_decoder_init_flac(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-ma_result ma_decoder_init_vorbis(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-ma_result ma_decoder_init_mp3(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-ma_result ma_decoder_init_raw(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder);
+typedef struct
+{
+ ma_data_converter_config config;
+ ma_channel_converter channelConverter;
+ ma_resampler resampler;
+ ma_bool32 hasPreFormatConversion : 1;
+ ma_bool32 hasPostFormatConversion : 1;
+ ma_bool32 hasChannelConverter : 1;
+ ma_bool32 hasResampler : 1;
+ ma_bool32 isPassthrough : 1;
+} ma_data_converter;
+
+ma_result ma_data_converter_init(const ma_data_converter_config* pConfig, ma_data_converter* pConverter);
+void ma_data_converter_uninit(ma_data_converter* pConverter);
+ma_result ma_data_converter_process_pcm_frames(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut);
+ma_result ma_data_converter_set_rate(ma_data_converter* pConverter, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut);
+ma_result ma_data_converter_set_rate_ratio(ma_data_converter* pConverter, float ratioInOut);
+ma_uint64 ma_data_converter_get_required_input_frame_count(ma_data_converter* pConverter, ma_uint64 outputFrameCount);
+ma_uint64 ma_data_converter_get_expected_output_frame_count(ma_data_converter* pConverter, ma_uint64 inputFrameCount);
+ma_uint64 ma_data_converter_get_input_latency(ma_data_converter* pConverter);
+ma_uint64 ma_data_converter_get_output_latency(ma_data_converter* pConverter);
-ma_result ma_decoder_init_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-ma_result ma_decoder_init_memory_wav(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-ma_result ma_decoder_init_memory_flac(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-ma_result ma_decoder_init_memory_vorbis(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-ma_result ma_decoder_init_memory_mp3(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-ma_result ma_decoder_init_memory_raw(const void* pData, size_t dataSize, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder);
-#ifndef MA_NO_STDIO
-ma_result ma_decoder_init_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-ma_result ma_decoder_init_file_wav(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
-#endif
+/************************************************************************************************************************************************************
-ma_result ma_decoder_uninit(ma_decoder* pDecoder);
+Format Conversion
+
+************************************************************************************************************************************************************/
+void ma_pcm_u8_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_u8_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_u8_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_u8_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s16_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s16_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s16_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s16_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s24_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s24_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s24_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s24_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s32_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s32_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s32_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_s32_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_f32_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_f32_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_f32_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_f32_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode);
+void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode);
+void ma_convert_pcm_frames_format(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 frameCount, ma_uint32 channels, ma_dither_mode ditherMode);
/*
-Retrieves the length of the decoder in PCM frames.
+Deinterleaves an interleaved buffer.
+*/
+void ma_deinterleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames);
-Do not call this on streams of an undefined length, such as internet radio.
+/*
+Interleaves a group of deinterleaved buffers.
+*/
+void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames);
-If the length is unknown or an error occurs, 0 will be returned.
+/************************************************************************************************************************************************************
-This will always return 0 for Vorbis decoders. This is due to a limitation with stb_vorbis in push mode which is what miniaudio
-uses internally.
+Channel Maps
-This will run in linear time for MP3 decoders. Do not call this in time critical scenarios.
-*/
-ma_uint64 ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder);
+************************************************************************************************************************************************************/
-ma_uint64 ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount);
-ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex);
+/*
+Helper for retrieving a standard channel map.
+*/
+void ma_get_standard_channel_map(ma_standard_channel_map standardChannelMap, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]);
/*
-Helper for opening and decoding a file into a heap allocated block of memory. Free the returned pointer with ma_free(). On input,
-pConfig should be set to what you want. On output it will be set to what you got.
+Copies a channel map.
*/
-#ifndef MA_NO_STDIO
-ma_result ma_decode_file(const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppDataOut);
-#endif
-ma_result ma_decode_memory(const void* pData, size_t dataSize, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppDataOut);
+void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels);
-#endif /* MA_NO_DECODING */
+/*
+Determines whether or not a channel map is valid.
-/************************************************************************************************************************************************************
+A blank channel map is valid (all channels set to MA_CHANNEL_NONE). The way a blank channel map is handled is context specific, but
+is usually treated as a passthrough.
-Generation
+Invalid channel maps:
+ - A channel map with no channels
+ - A channel map with more than one channel and a mono channel
+*/
+ma_bool32 ma_channel_map_valid(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]);
-************************************************************************************************************************************************************/
-typedef struct
-{
- double amplitude;
- double periodsPerSecond;
- double delta;
- double time;
-} ma_sine_wave;
+/*
+Helper for comparing two channel maps for equality.
-ma_result ma_sine_wave_init(double amplitude, double period, ma_uint32 sampleRate, ma_sine_wave* pSineWave);
-ma_uint64 ma_sine_wave_read_f32(ma_sine_wave* pSineWave, ma_uint64 count, float* pSamples);
-ma_uint64 ma_sine_wave_read_f32_ex(ma_sine_wave* pSineWave, ma_uint64 frameCount, ma_uint32 channels, ma_stream_layout layout, float** ppFrames);
+This assumes the channel count is the same between the two.
+*/
+ma_bool32 ma_channel_map_equal(ma_uint32 channels, const ma_channel channelMapA[MA_MAX_CHANNELS], const ma_channel channelMapB[MA_MAX_CHANNELS]);
-#ifdef __cplusplus
-}
-#endif
-#endif /* miniaudio_h */
+/*
+Helper for determining if a channel map is blank (all channels set to MA_CHANNEL_NONE).
+*/
+ma_bool32 ma_channel_map_blank(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]);
+/*
+Helper for determining whether or not a channel is present in the given channel map.
+*/
+ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS], ma_channel channelPosition);
/************************************************************************************************************************************************************
-*************************************************************************************************************************************************************
-IMPLEMENTATION
+Conversion Helpers
-*************************************************************************************************************************************************************
************************************************************************************************************************************************************/
-#if defined(MINIAUDIO_IMPLEMENTATION) || defined(MA_IMPLEMENTATION)
-#include
-#include /* For INT_MAX */
-#include /* sin(), etc. */
-#if defined(MA_DEBUG_OUTPUT)
-#include /* for printf() for debug output */
-#endif
+/*
+High-level helper for doing a full format conversion in one go. Returns the number of output frames. Call this with pOut set to NULL to
+determine the required size of the output buffer. frameCountOut should be set to the capacity of pOut. If pOut is NULL, frameCountOut is
+ignored.
-#ifdef MA_WIN32
-#include
-#include
-#include
-#include
-#else
-#include /* For malloc()/free() */
-#include /* For memset() */
-#endif
+A return value of 0 indicates an error.
-#if defined(MA_APPLE) && (__MAC_OS_X_VERSION_MIN_REQUIRED < 101200)
-#include /* For mach_absolute_time() */
-#endif
+This function is useful for one-off bulk conversions, but if you're streaming data you should use the ma_data_converter APIs instead.
+*/
+ma_uint64 ma_convert_frames(void* pOut, ma_uint64 frameCountOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_uint64 frameCountIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn);
+ma_uint64 ma_convert_frames_ex(void* pOut, ma_uint64 frameCountOut, const void* pIn, ma_uint64 frameCountIn, const ma_data_converter_config* pConfig);
-#ifdef MA_POSIX
-#include
-#include
-#include
-#include
-#endif
-#ifdef MA_EMSCRIPTEN
-#include
-#endif
+/************************************************************************************************************************************************************
-#if !defined(MA_64BIT) && !defined(MA_32BIT)
-#ifdef _WIN32
-#ifdef _WIN64
-#define MA_64BIT
-#else
-#define MA_32BIT
-#endif
-#endif
-#endif
+Ring Buffer
-#if !defined(MA_64BIT) && !defined(MA_32BIT)
-#ifdef __GNUC__
-#ifdef __LP64__
-#define MA_64BIT
-#else
-#define MA_32BIT
-#endif
-#endif
-#endif
+************************************************************************************************************************************************************/
+typedef struct
+{
+ void* pBuffer;
+ ma_uint32 subbufferSizeInBytes;
+ ma_uint32 subbufferCount;
+ ma_uint32 subbufferStrideInBytes;
+ volatile ma_uint32 encodedReadOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. */
+ volatile ma_uint32 encodedWriteOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. */
+ ma_bool32 ownsBuffer : 1; /* Used to know whether or not miniaudio is responsible for free()-ing the buffer. */
+ ma_bool32 clearOnWriteAcquire : 1; /* When set, clears the acquired write buffer before returning from ma_rb_acquire_write(). */
+ ma_allocation_callbacks allocationCallbacks;
+} ma_rb;
-#if !defined(MA_64BIT) && !defined(MA_32BIT)
-#include
-#if INTPTR_MAX == INT64_MAX
-#define MA_64BIT
-#else
-#define MA_32BIT
-#endif
-#endif
+ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB);
+ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB);
+void ma_rb_uninit(ma_rb* pRB);
+void ma_rb_reset(ma_rb* pRB);
+ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut);
+ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut);
+ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut);
+ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut);
+ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes);
+ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes);
+ma_int32 ma_rb_pointer_distance(ma_rb* pRB); /* Returns the distance between the write pointer and the read pointer. Should never be negative for a correct program. Will return the number of bytes that can be read before the read pointer hits the write pointer. */
+ma_uint32 ma_rb_available_read(ma_rb* pRB);
+ma_uint32 ma_rb_available_write(ma_rb* pRB);
+size_t ma_rb_get_subbuffer_size(ma_rb* pRB);
+size_t ma_rb_get_subbuffer_stride(ma_rb* pRB);
+size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex);
+void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t subbufferIndex, void* pBuffer);
-/* Architecture Detection */
-#if defined(__x86_64__) || defined(_M_X64)
-#define MA_X64
-#elif defined(__i386) || defined(_M_IX86)
-#define MA_X86
-#elif defined(__arm__) || defined(_M_ARM)
-#define MA_ARM
-#endif
-/* Cannot currently support AVX-512 if AVX is disabled. */
-#if !defined(MA_NO_AVX512) && defined(MA_NO_AVX2)
-#define MA_NO_AVX512
-#endif
+typedef struct
+{
+ ma_rb rb;
+ ma_format format;
+ ma_uint32 channels;
+} ma_pcm_rb;
-/* Intrinsics Support */
-#if defined(MA_X64) || defined(MA_X86)
- #if defined(_MSC_VER) && !defined(__clang__)
- /* MSVC. */
- #if _MSC_VER >= 1400 && !defined(MA_NO_SSE2) /* 2005 */
- #define MA_SUPPORT_SSE2
- #endif
- /*#if _MSC_VER >= 1600 && !defined(MA_NO_AVX)*/ /* 2010 */
- /* #define MA_SUPPORT_AVX*/
- /*#endif*/
- #if _MSC_VER >= 1700 && !defined(MA_NO_AVX2) /* 2012 */
- #define MA_SUPPORT_AVX2
- #endif
- #if _MSC_VER >= 1910 && !defined(MA_NO_AVX512) /* 2017 */
- #define MA_SUPPORT_AVX512
- #endif
- #else
- /* Assume GNUC-style. */
- #if defined(__SSE2__) && !defined(MA_NO_SSE2)
- #define MA_SUPPORT_SSE2
- #endif
- /*#if defined(__AVX__) && !defined(MA_NO_AVX)*/
- /* #define MA_SUPPORT_AVX*/
- /*#endif*/
- #if defined(__AVX2__) && !defined(MA_NO_AVX2)
- #define MA_SUPPORT_AVX2
- #endif
- #if defined(__AVX512F__) && !defined(MA_NO_AVX512)
- #define MA_SUPPORT_AVX512
- #endif
- #endif
+ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB);
+ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB);
+void ma_pcm_rb_uninit(ma_pcm_rb* pRB);
+void ma_pcm_rb_reset(ma_pcm_rb* pRB);
+ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut);
+ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut);
+ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut);
+ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut);
+ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames);
+ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames);
+ma_int32 ma_pcm_rb_pointer_distance(ma_pcm_rb* pRB); /* Return value is in frames. */
+ma_uint32 ma_pcm_rb_available_read(ma_pcm_rb* pRB);
+ma_uint32 ma_pcm_rb_available_write(ma_pcm_rb* pRB);
+ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB);
+ma_uint32 ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB);
+ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex);
+void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer);
- /* If at this point we still haven't determined compiler support for the intrinsics just fall back to __has_include. */
- #if !defined(__GNUC__) && !defined(__clang__) && defined(__has_include)
- #if !defined(MA_SUPPORT_SSE2) && !defined(MA_NO_SSE2) && __has_include()
- #define MA_SUPPORT_SSE2
- #endif
- /*#if !defined(MA_SUPPORT_AVX) && !defined(MA_NO_AVX) && __has_include()*/
- /* #define MA_SUPPORT_AVX*/
- /*#endif*/
- #if !defined(MA_SUPPORT_AVX2) && !defined(MA_NO_AVX2) && __has_include()
- #define MA_SUPPORT_AVX2
- #endif
- #if !defined(MA_SUPPORT_AVX512) && !defined(MA_NO_AVX512) && __has_include()
- #define MA_SUPPORT_AVX512
- #endif
- #endif
- #if defined(MA_SUPPORT_AVX512)
- #include /* Not a mistake. Intentionally including instead of because otherwise the compiler will complain. */
- #elif defined(MA_SUPPORT_AVX2) || defined(MA_SUPPORT_AVX)
- #include
- #elif defined(MA_SUPPORT_SSE2)
- #include
- #endif
-#endif
+/************************************************************************************************************************************************************
-#if defined(MA_ARM)
- #if !defined(MA_NO_NEON) && (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64))
- #define MA_SUPPORT_NEON
- #endif
+Miscellaneous Helpers
- /* Fall back to looking for the #include file. */
- #if !defined(__GNUC__) && !defined(__clang__) && defined(__has_include)
- #if !defined(MA_SUPPORT_NEON) && !defined(MA_NO_NEON) && __has_include()
- #define MA_SUPPORT_NEON
- #endif
- #endif
+************************************************************************************************************************************************************/
+/*
+Retrieves a human readable description of the given result code.
+*/
+const char* ma_result_description(ma_result result);
- #if defined(MA_SUPPORT_NEON)
- #include
- #endif
-#endif
+/*
+malloc(). Calls MA_MALLOC().
+*/
+void* ma_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks);
-#if defined(_MSC_VER)
- #pragma warning(push)
- #pragma warning(disable:4752) /* found Intel(R) Advanced Vector Extensions; consider using /arch:AVX */
-#endif
+/*
+realloc(). Calls MA_REALLOC().
+*/
+void* ma_realloc(void* p, size_t sz, const ma_allocation_callbacks* pAllocationCallbacks);
-#if defined(MA_X64) || defined(MA_X86)
- #if defined(_MSC_VER) && !defined(__clang__)
- #if _MSC_VER >= 1400
- #include
- static MA_INLINE void ma_cpuid(int info[4], int fid)
- {
- __cpuid(info, fid);
- }
- #else
- #define MA_NO_CPUID
- #endif
+/*
+free(). Calls MA_FREE().
+*/
+void ma_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks);
- #if _MSC_VER >= 1600
- static MA_INLINE unsigned __int64 ma_xgetbv(int reg)
- {
- return _xgetbv(reg);
- }
- #else
- #define MA_NO_XGETBV
- #endif
- #elif (defined(__GNUC__) || defined(__clang__)) && !defined(MA_ANDROID)
- static MA_INLINE void ma_cpuid(int info[4], int fid)
- {
- /*
- It looks like the -fPIC option uses the ebx register which GCC complains about. We can work around this by just using a different register, the
- specific register of which I'm letting the compiler decide on. The "k" prefix is used to specify a 32-bit register. The {...} syntax is for
- supporting different assembly dialects.
-
- What's basically happening is that we're saving and restoring the ebx register manually.
- */
- #if defined(DRFLAC_X86) && defined(__PIC__)
- __asm__ __volatile__ (
- "xchg{l} {%%}ebx, %k1;"
- "cpuid;"
- "xchg{l} {%%}ebx, %k1;"
- : "=a"(info[0]), "=&r"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0)
- );
- #else
- __asm__ __volatile__ (
- "cpuid" : "=a"(info[0]), "=b"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0)
- );
- #endif
- }
+/*
+Performs an aligned malloc, with the assumption that the alignment is a power of 2.
+*/
+void* ma_aligned_malloc(size_t sz, size_t alignment, const ma_allocation_callbacks* pAllocationCallbacks);
- static MA_INLINE ma_uint64 ma_xgetbv(int reg)
- {
- unsigned int hi;
- unsigned int lo;
+/*
+Free's an aligned malloc'd buffer.
+*/
+void ma_aligned_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks);
- __asm__ __volatile__ (
- "xgetbv" : "=a"(lo), "=d"(hi) : "c"(reg)
- );
+/*
+Retrieves a friendly name for a format.
+*/
+const char* ma_get_format_name(ma_format format);
- return ((ma_uint64)hi << 32) | (ma_uint64)lo;
- }
- #else
- #define MA_NO_CPUID
- #define MA_NO_XGETBV
- #endif
-#else
- #define MA_NO_CPUID
- #define MA_NO_XGETBV
-#endif
+/*
+Blends two frames in floating point format.
+*/
+void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels);
-static MA_INLINE ma_bool32 ma_has_sse2()
-{
-#if defined(MA_SUPPORT_SSE2)
- #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_SSE2)
- #if defined(MA_X64)
- return MA_TRUE; /* 64-bit targets always support SSE2. */
- #elif (defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(__SSE2__)
- return MA_TRUE; /* If the compiler is allowed to freely generate SSE2 code we can assume support. */
- #else
- #if defined(MA_NO_CPUID)
- return MA_FALSE;
- #else
- int info[4];
- ma_cpuid(info, 1);
- return (info[3] & (1 << 26)) != 0;
- #endif
- #endif
- #else
- return MA_FALSE; /* SSE2 is only supported on x86 and x64 architectures. */
- #endif
-#else
- return MA_FALSE; /* No compiler support. */
-#endif
-}
+/*
+Retrieves the size of a sample in bytes for the given format.
-#if 0
-static MA_INLINE ma_bool32 ma_has_avx()
-{
-#if defined(MA_SUPPORT_AVX)
- #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX)
- #if defined(_AVX_) || defined(__AVX__)
- return MA_TRUE; /* If the compiler is allowed to freely generate AVX code we can assume support. */
- #else
- /* AVX requires both CPU and OS support. */
- #if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV)
- return MA_FALSE;
- #else
- int info[4];
- ma_cpuid(info, 1);
- if (((info[2] & (1 << 27)) != 0) && ((info[2] & (1 << 28)) != 0)) {
- ma_uint64 xrc = ma_xgetbv(0);
- if ((xrc & 0x06) == 0x06) {
- return MA_TRUE;
- } else {
- return MA_FALSE;
- }
- } else {
- return MA_FALSE;
- }
- #endif
- #endif
- #else
- return MA_FALSE; /* AVX is only supported on x86 and x64 architectures. */
- #endif
-#else
- return MA_FALSE; /* No compiler support. */
-#endif
-}
-#endif
+This API is efficient and is implemented using a lookup table.
-static MA_INLINE ma_bool32 ma_has_avx2()
-{
-#if defined(MA_SUPPORT_AVX2)
- #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX2)
- #if defined(_AVX2_) || defined(__AVX2__)
- return MA_TRUE; /* If the compiler is allowed to freely generate AVX2 code we can assume support. */
- #else
- /* AVX2 requires both CPU and OS support. */
- #if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV)
- return MA_FALSE;
- #else
- int info1[4];
- int info7[4];
- ma_cpuid(info1, 1);
- ma_cpuid(info7, 7);
- if (((info1[2] & (1 << 27)) != 0) && ((info7[1] & (1 << 5)) != 0)) {
- ma_uint64 xrc = ma_xgetbv(0);
- if ((xrc & 0x06) == 0x06) {
- return MA_TRUE;
- } else {
- return MA_FALSE;
- }
- } else {
- return MA_FALSE;
- }
- #endif
- #endif
- #else
- return MA_FALSE; /* AVX2 is only supported on x86 and x64 architectures. */
- #endif
-#else
- return MA_FALSE; /* No compiler support. */
-#endif
-}
+Thread Safety: SAFE
+ This API is pure.
+*/
+ma_uint32 ma_get_bytes_per_sample(ma_format format);
+static MA_INLINE ma_uint32 ma_get_bytes_per_frame(ma_format format, ma_uint32 channels) { return ma_get_bytes_per_sample(format) * channels; }
-static MA_INLINE ma_bool32 ma_has_avx512f()
-{
-#if defined(MA_SUPPORT_AVX512)
- #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX512)
- #if defined(__AVX512F__)
- return MA_TRUE; /* If the compiler is allowed to freely generate AVX-512F code we can assume support. */
- #else
- /* AVX-512 requires both CPU and OS support. */
- #if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV)
- return MA_FALSE;
- #else
- int info1[4];
- int info7[4];
- ma_cpuid(info1, 1);
- ma_cpuid(info7, 7);
- if (((info1[2] & (1 << 27)) != 0) && ((info7[1] & (1 << 16)) != 0)) {
- ma_uint64 xrc = ma_xgetbv(0);
- if ((xrc & 0xE6) == 0xE6) {
- return MA_TRUE;
- } else {
- return MA_FALSE;
- }
- } else {
- return MA_FALSE;
- }
- #endif
- #endif
- #else
- return MA_FALSE; /* AVX-512F is only supported on x86 and x64 architectures. */
- #endif
-#else
- return MA_FALSE; /* No compiler support. */
-#endif
-}
+/*
+Converts a log level to a string.
+*/
+const char* ma_log_level_to_string(ma_uint32 logLevel);
-static MA_INLINE ma_bool32 ma_has_neon()
-{
-#if defined(MA_SUPPORT_NEON)
- #if defined(MA_ARM) && !defined(MA_NO_NEON)
- #if (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64))
- return MA_TRUE; /* If the compiler is allowed to freely generate NEON code we can assume support. */
- #else
- /* TODO: Runtime check. */
- return MA_FALSE;
- #endif
- #else
- return MA_FALSE; /* NEON is only supported on ARM architectures. */
- #endif
-#else
- return MA_FALSE; /* No compiler support. */
-#endif
-}
-static MA_INLINE ma_bool32 ma_is_little_endian()
-{
-#if defined(MA_X86) || defined(MA_X64)
- return MA_TRUE;
-#else
- int n = 1;
- return (*(char*)&n) == 1;
-#endif
-}
+/************************************************************************************************************************************************************
+*************************************************************************************************************************************************************
-static MA_INLINE ma_bool32 ma_is_big_endian()
-{
- return !ma_is_little_endian();
-}
+DEVICE I/O
+==========
+This section contains the APIs for device playback and capture. Here is where you'll find ma_device_init(), etc.
-#ifndef MA_COINIT_VALUE
-#define MA_COINIT_VALUE 0 /* 0 = COINIT_MULTITHREADED*/
+*************************************************************************************************************************************************************
+************************************************************************************************************************************************************/
+#ifndef MA_NO_DEVICE_IO
+/* Some backends are only supported on certain platforms. */
+#if defined(MA_WIN32)
+ #define MA_SUPPORT_WASAPI
+ #if defined(MA_WIN32_DESKTOP) /* DirectSound and WinMM backends are only supported on desktops. */
+ #define MA_SUPPORT_DSOUND
+ #define MA_SUPPORT_WINMM
+ #define MA_SUPPORT_JACK /* JACK is technically supported on Windows, but I don't know how many people use it in practice... */
+ #endif
+#endif
+#if defined(MA_UNIX)
+ #if defined(MA_LINUX)
+ #if !defined(MA_ANDROID) /* ALSA is not supported on Android. */
+ #define MA_SUPPORT_ALSA
+ #endif
+ #endif
+ #if !defined(MA_BSD) && !defined(MA_ANDROID) && !defined(MA_EMSCRIPTEN)
+ #define MA_SUPPORT_PULSEAUDIO
+ #define MA_SUPPORT_JACK
+ #endif
+ #if defined(MA_ANDROID)
+ #define MA_SUPPORT_AAUDIO
+ #define MA_SUPPORT_OPENSL
+ #endif
+ #if defined(__OpenBSD__) /* <-- Change this to "#if defined(MA_BSD)" to enable sndio on all BSD flavors. */
+ #define MA_SUPPORT_SNDIO /* sndio is only supported on OpenBSD for now. May be expanded later if there's demand. */
+ #endif
+ #if defined(__NetBSD__) || defined(__OpenBSD__)
+ #define MA_SUPPORT_AUDIO4 /* Only support audio(4) on platforms with known support. */
+ #endif
+ #if defined(__FreeBSD__) || defined(__DragonFly__)
+ #define MA_SUPPORT_OSS /* Only support OSS on specific platforms with known support. */
+ #endif
+#endif
+#if defined(MA_APPLE)
+ #define MA_SUPPORT_COREAUDIO
+#endif
+#if defined(MA_EMSCRIPTEN)
+ #define MA_SUPPORT_WEBAUDIO
#endif
+/* Explicitly disable the Null backend for Emscripten because it uses a background thread which is not properly supported right now. */
+#if !defined(MA_EMSCRIPTEN)
+#define MA_SUPPORT_NULL
+#endif
-#ifndef MA_PI
-#define MA_PI 3.14159265358979323846264f
+#if !defined(MA_NO_WASAPI) && defined(MA_SUPPORT_WASAPI)
+ #define MA_ENABLE_WASAPI
#endif
-#ifndef MA_PI_D
-#define MA_PI_D 3.14159265358979323846264
+#if !defined(MA_NO_DSOUND) && defined(MA_SUPPORT_DSOUND)
+ #define MA_ENABLE_DSOUND
#endif
-#ifndef MA_TAU
-#define MA_TAU 6.28318530717958647693f
+#if !defined(MA_NO_WINMM) && defined(MA_SUPPORT_WINMM)
+ #define MA_ENABLE_WINMM
#endif
-#ifndef MA_TAU_D
-#define MA_TAU_D 6.28318530717958647693
+#if !defined(MA_NO_ALSA) && defined(MA_SUPPORT_ALSA)
+ #define MA_ENABLE_ALSA
#endif
-
-
-/* The default format when ma_format_unknown (0) is requested when initializing a device. */
-#ifndef MA_DEFAULT_FORMAT
-#define MA_DEFAULT_FORMAT ma_format_f32
+#if !defined(MA_NO_PULSEAUDIO) && defined(MA_SUPPORT_PULSEAUDIO)
+ #define MA_ENABLE_PULSEAUDIO
#endif
-
-/* The default channel count to use when 0 is used when initializing a device. */
-#ifndef MA_DEFAULT_CHANNELS
-#define MA_DEFAULT_CHANNELS 2
+#if !defined(MA_NO_JACK) && defined(MA_SUPPORT_JACK)
+ #define MA_ENABLE_JACK
#endif
-
-/* The default sample rate to use when 0 is used when initializing a device. */
-#ifndef MA_DEFAULT_SAMPLE_RATE
-#define MA_DEFAULT_SAMPLE_RATE 48000
+#if !defined(MA_NO_COREAUDIO) && defined(MA_SUPPORT_COREAUDIO)
+ #define MA_ENABLE_COREAUDIO
#endif
-
-/* Default periods when none is specified in ma_device_init(). More periods means more work on the CPU. */
-#ifndef MA_DEFAULT_PERIODS
-#define MA_DEFAULT_PERIODS 3
+#if !defined(MA_NO_SNDIO) && defined(MA_SUPPORT_SNDIO)
+ #define MA_ENABLE_SNDIO
#endif
-
-/* The base buffer size in milliseconds for low latency mode. */
-#ifndef MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_LOW_LATENCY
-#define MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_LOW_LATENCY (10*MA_DEFAULT_PERIODS)
+#if !defined(MA_NO_AUDIO4) && defined(MA_SUPPORT_AUDIO4)
+ #define MA_ENABLE_AUDIO4
#endif
-
-/* The base buffer size in milliseconds for conservative mode. */
-#ifndef MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_CONSERVATIVE
-#define MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_CONSERVATIVE (100*MA_DEFAULT_PERIODS)
+#if !defined(MA_NO_OSS) && defined(MA_SUPPORT_OSS)
+ #define MA_ENABLE_OSS
+#endif
+#if !defined(MA_NO_AAUDIO) && defined(MA_SUPPORT_AAUDIO)
+ #define MA_ENABLE_AAUDIO
+#endif
+#if !defined(MA_NO_OPENSL) && defined(MA_SUPPORT_OPENSL)
+ #define MA_ENABLE_OPENSL
+#endif
+#if !defined(MA_NO_WEBAUDIO) && defined(MA_SUPPORT_WEBAUDIO)
+ #define MA_ENABLE_WEBAUDIO
+#endif
+#if !defined(MA_NO_NULL) && defined(MA_SUPPORT_NULL)
+ #define MA_ENABLE_NULL
#endif
+#ifdef MA_SUPPORT_WASAPI
+/* We need a IMMNotificationClient object for WASAPI. */
+typedef struct
+{
+ void* lpVtbl;
+ ma_uint32 counter;
+ ma_device* pDevice;
+} ma_IMMNotificationClient;
+#endif
-/* Standard sample rates, in order of priority. */
-ma_uint32 g_maStandardSampleRatePriorities[] = {
- MA_SAMPLE_RATE_48000, /* Most common */
- MA_SAMPLE_RATE_44100,
+/* Backend enums must be in priority order. */
+typedef enum
+{
+ ma_backend_wasapi,
+ ma_backend_dsound,
+ ma_backend_winmm,
+ ma_backend_coreaudio,
+ ma_backend_sndio,
+ ma_backend_audio4,
+ ma_backend_oss,
+ ma_backend_pulseaudio,
+ ma_backend_alsa,
+ ma_backend_jack,
+ ma_backend_aaudio,
+ ma_backend_opensl,
+ ma_backend_webaudio,
+ ma_backend_null /* <-- Must always be the last item. Lowest priority, and used as the terminator for backend enumeration. */
+} ma_backend;
- MA_SAMPLE_RATE_32000, /* Lows */
- MA_SAMPLE_RATE_24000,
- MA_SAMPLE_RATE_22050,
-
- MA_SAMPLE_RATE_88200, /* Highs */
- MA_SAMPLE_RATE_96000,
- MA_SAMPLE_RATE_176400,
- MA_SAMPLE_RATE_192000,
-
- MA_SAMPLE_RATE_16000, /* Extreme lows */
- MA_SAMPLE_RATE_11025,
- MA_SAMPLE_RATE_8000,
-
- MA_SAMPLE_RATE_352800, /* Extreme highs */
- MA_SAMPLE_RATE_384000
-};
-
-ma_format g_maFormatPriorities[] = {
- ma_format_s16, /* Most common */
- ma_format_f32,
-
- /*ma_format_s24_32,*/ /* Clean alignment */
- ma_format_s32,
-
- ma_format_s24, /* Unclean alignment */
-
- ma_format_u8 /* Low quality */
-};
-
-
-
-/******************************************************************************
+/* Thread priorties should be ordered such that the default priority of the worker thread is 0. */
+typedef enum
+{
+ ma_thread_priority_idle = -5,
+ ma_thread_priority_lowest = -4,
+ ma_thread_priority_low = -3,
+ ma_thread_priority_normal = -2,
+ ma_thread_priority_high = -1,
+ ma_thread_priority_highest = 0,
+ ma_thread_priority_realtime = 1,
+ ma_thread_priority_default = 0
+} ma_thread_priority;
-Standard Library Stuff
+typedef struct
+{
+ ma_context* pContext;
-******************************************************************************/
-#ifndef MA_MALLOC
+ union
+ {
#ifdef MA_WIN32
-#define MA_MALLOC(sz) HeapAlloc(GetProcessHeap(), 0, (sz))
-#else
-#define MA_MALLOC(sz) malloc((sz))
+ struct
+ {
+ /*HANDLE*/ ma_handle hThread;
+ } win32;
#endif
+#ifdef MA_POSIX
+ struct
+ {
+ pthread_t thread;
+ } posix;
#endif
+ int _unused;
+ };
+} ma_thread;
-#ifndef MA_REALLOC
-#ifdef MA_WIN32
-#define MA_REALLOC(p, sz) (((sz) > 0) ? ((p) ? HeapReAlloc(GetProcessHeap(), 0, (p), (sz)) : HeapAlloc(GetProcessHeap(), 0, (sz))) : ((VOID*)(size_t)(HeapFree(GetProcessHeap(), 0, (p)) & 0)))
-#else
-#define MA_REALLOC(p, sz) realloc((p), (sz))
-#endif
-#endif
+typedef struct
+{
+ ma_context* pContext;
-#ifndef MA_FREE
+ union
+ {
#ifdef MA_WIN32
-#define MA_FREE(p) HeapFree(GetProcessHeap(), 0, (p))
-#else
-#define MA_FREE(p) free((p))
+ struct
+ {
+ /*HANDLE*/ ma_handle hMutex;
+ } win32;
#endif
+#ifdef MA_POSIX
+ struct
+ {
+ pthread_mutex_t mutex;
+ } posix;
#endif
+ int _unused;
+ };
+} ma_mutex;
-#ifndef MA_ZERO_MEMORY
-#ifdef MA_WIN32
-#define MA_ZERO_MEMORY(p, sz) ZeroMemory((p), (sz))
-#else
-#define MA_ZERO_MEMORY(p, sz) memset((p), 0, (sz))
-#endif
-#endif
+typedef struct
+{
+ ma_context* pContext;
-#ifndef MA_COPY_MEMORY
+ union
+ {
#ifdef MA_WIN32
-#define MA_COPY_MEMORY(dst, src, sz) CopyMemory((dst), (src), (sz))
-#else
-#define MA_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz))
+ struct
+ {
+ /*HANDLE*/ ma_handle hEvent;
+ } win32;
#endif
+#ifdef MA_POSIX
+ struct
+ {
+ pthread_mutex_t mutex;
+ pthread_cond_t condition;
+ ma_uint32 value;
+ } posix;
#endif
+ int _unused;
+ };
+} ma_event;
-#ifndef MA_ASSERT
+typedef struct
+{
+ ma_context* pContext;
+
+ union
+ {
#ifdef MA_WIN32
-#define MA_ASSERT(condition) assert(condition)
-#else
-#define MA_ASSERT(condition) assert(condition)
+ struct
+ {
+ /*HANDLE*/ ma_handle hSemaphore;
+ } win32;
#endif
+#ifdef MA_POSIX
+ struct
+ {
+ sem_t semaphore;
+ } posix;
#endif
+ int _unused;
+ };
+} ma_semaphore;
-#define ma_zero_memory MA_ZERO_MEMORY
-#define ma_copy_memory MA_COPY_MEMORY
-#define ma_assert MA_ASSERT
-#define ma_zero_object(p) ma_zero_memory((p), sizeof(*(p)))
-#define ma_countof(x) (sizeof(x) / sizeof(x[0]))
-#define ma_max(x, y) (((x) > (y)) ? (x) : (y))
-#define ma_min(x, y) (((x) < (y)) ? (x) : (y))
-#define ma_clamp(x, lo, hi) (ma_max(lo, ma_min(x, hi)))
-#define ma_offset_ptr(p, offset) (((ma_uint8*)(p)) + (offset))
+/*
+The callback for processing audio data from the device.
-#define ma_buffer_frame_capacity(buffer, channels, format) (sizeof(buffer) / ma_get_bytes_per_sample(format) / (channels))
+The data callback is fired by miniaudio whenever the device needs to have more data delivered to a playback device, or when a capture device has some data
+available. This is called as soon as the backend asks for more data which means it may be called with inconsistent frame counts. You cannot assume the
+callback will be fired with a consistent frame count.
-/*
-Return Values:
- 0: Success
- 22: EINVAL
- 34: ERANGE
-Not using symbolic constants for errors because I want to avoid #including errno.h
-*/
-int ma_strcpy_s(char* dst, size_t dstSizeInBytes, const char* src)
-{
- size_t i;
+Parameters
+----------
+pDevice (in)
+ A pointer to the relevant device.
- if (dst == 0) {
- return 22;
- }
- if (dstSizeInBytes == 0) {
- return 34;
- }
- if (src == 0) {
- dst[0] = '\0';
- return 22;
- }
+pOutput (out)
+ A pointer to the output buffer that will receive audio data that will later be played back through the speakers. This will be non-null for a playback or
+ full-duplex device and null for a capture and loopback device.
- for (i = 0; i < dstSizeInBytes && src[i] != '\0'; ++i) {
- dst[i] = src[i];
- }
+pInput (in)
+ A pointer to the buffer containing input data from a recording device. This will be non-null for a capture, full-duplex or loopback device and null for a
+ playback device.
- if (i < dstSizeInBytes) {
- dst[i] = '\0';
- return 0;
- }
+frameCount (in)
+ The number of PCM frames to process. Note that this will not necessarily be equal to what you requested when you initialized the device. The
+ `periodSizeInFrames` and `periodSizeInMilliseconds` members of the device config are just hints, and are not necessarily exactly what you'll get. You must
+ not assume this will always be the same value each time the callback is fired.
- dst[0] = '\0';
- return 34;
-}
-int ma_strncpy_s(char* dst, size_t dstSizeInBytes, const char* src, size_t count)
-{
- size_t maxcount;
- size_t i;
+Remarks
+-------
+You cannot stop and start the device from inside the callback or else you'll get a deadlock. You must also not uninitialize the device from inside the
+callback. The following APIs cannot be called from inside the callback:
- if (dst == 0) {
- return 22;
- }
- if (dstSizeInBytes == 0) {
- return 34;
- }
- if (src == 0) {
- dst[0] = '\0';
- return 22;
- }
+ ma_device_init()
+ ma_device_init_ex()
+ ma_device_uninit()
+ ma_device_start()
+ ma_device_stop()
- maxcount = count;
- if (count == ((size_t)-1) || count >= dstSizeInBytes) { /* -1 = _TRUNCATE */
- maxcount = dstSizeInBytes - 1;
- }
+The proper way to stop the device is to call `ma_device_stop()` from a different thread, normally the main application thread.
+*/
+typedef void (* ma_device_callback_proc)(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount);
- for (i = 0; i < maxcount && src[i] != '\0'; ++i) {
- dst[i] = src[i];
- }
+/*
+The callback for when the device has been stopped.
- if (src[i] == '\0' || i == count || count == ((size_t)-1)) {
- dst[i] = '\0';
- return 0;
- }
+This will be called when the device is stopped explicitly with `ma_device_stop()` and also called implicitly when the device is stopped through external forces
+such as being unplugged or an internal error occuring.
- dst[0] = '\0';
- return 34;
-}
-int ma_strcat_s(char* dst, size_t dstSizeInBytes, const char* src)
-{
- char* dstorig;
+Parameters
+----------
+pDevice (in)
+ A pointer to the device that has just stopped.
- if (dst == 0) {
- return 22;
- }
- if (dstSizeInBytes == 0) {
- return 34;
- }
- if (src == 0) {
- dst[0] = '\0';
- return 22;
- }
- dstorig = dst;
+Remarks
+-------
+Do not restart or uninitialize the device from the callback.
+*/
+typedef void (* ma_stop_proc)(ma_device* pDevice);
- while (dstSizeInBytes > 0 && dst[0] != '\0') {
- dst += 1;
- dstSizeInBytes -= 1;
- }
+/*
+The callback for handling log messages.
- if (dstSizeInBytes == 0) {
- return 22; /* Unterminated. */
- }
+Parameters
+----------
+pContext (in)
+ A pointer to the context the log message originated from.
- while (dstSizeInBytes > 0 && src[0] != '\0') {
- *dst++ = *src++;
- dstSizeInBytes -= 1;
- }
+pDevice (in)
+ A pointer to the device the log message originate from, if any. This can be null, in which case the message came from the context.
- if (dstSizeInBytes > 0) {
- dst[0] = '\0';
- } else {
- dstorig[0] = '\0';
- return 34;
- }
+logLevel (in)
+ The log level. This can be one of the following:
- return 0;
-}
+ |----------------------|
+ | Log Level |
+ |----------------------|
+ | MA_LOG_LEVEL_VERBOSE |
+ | MA_LOG_LEVEL_INFO |
+ | MA_LOG_LEVEL_WARNING |
+ | MA_LOG_LEVEL_ERROR |
+ |----------------------|
-int ma_strncat_s(char* dst, size_t dstSizeInBytes, const char* src, size_t count)
-{
- char* dstorig;
+message (in)
+ The log message.
- if (dst == 0) {
- return 22;
- }
- if (dstSizeInBytes == 0) {
- return 34;
- }
- if (src == 0) {
- return 22;
- }
-
- dstorig = dst;
-
- while (dstSizeInBytes > 0 && dst[0] != '\0') {
- dst += 1;
- dstSizeInBytes -= 1;
- }
-
- if (dstSizeInBytes == 0) {
- return 22; /* Unterminated. */
- }
+Remarks
+-------
+Do not modify the state of the device from inside the callback.
+*/
+typedef void (* ma_log_proc)(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message);
- if (count == ((size_t)-1)) { /* _TRUNCATE */
- count = dstSizeInBytes - 1;
- }
-
- while (dstSizeInBytes > 0 && src[0] != '\0' && count > 0) {
- *dst++ = *src++;
- dstSizeInBytes -= 1;
- count -= 1;
- }
+typedef enum
+{
+ ma_device_type_playback = 1,
+ ma_device_type_capture = 2,
+ ma_device_type_duplex = ma_device_type_playback | ma_device_type_capture, /* 3 */
+ ma_device_type_loopback = 4
+} ma_device_type;
- if (dstSizeInBytes > 0) {
- dst[0] = '\0';
- } else {
- dstorig[0] = '\0';
- return 34;
- }
+typedef enum
+{
+ ma_share_mode_shared = 0,
+ ma_share_mode_exclusive
+} ma_share_mode;
- return 0;
-}
+/* iOS/tvOS/watchOS session categories. */
+typedef enum
+{
+ ma_ios_session_category_default = 0, /* AVAudioSessionCategoryPlayAndRecord with AVAudioSessionCategoryOptionDefaultToSpeaker. */
+ ma_ios_session_category_none, /* Leave the session category unchanged. */
+ ma_ios_session_category_ambient, /* AVAudioSessionCategoryAmbient */
+ ma_ios_session_category_solo_ambient, /* AVAudioSessionCategorySoloAmbient */
+ ma_ios_session_category_playback, /* AVAudioSessionCategoryPlayback */
+ ma_ios_session_category_record, /* AVAudioSessionCategoryRecord */
+ ma_ios_session_category_play_and_record, /* AVAudioSessionCategoryPlayAndRecord */
+ ma_ios_session_category_multi_route /* AVAudioSessionCategoryMultiRoute */
+} ma_ios_session_category;
+
+/* iOS/tvOS/watchOS session category options */
+typedef enum
+{
+ ma_ios_session_category_option_mix_with_others = 0x01, /* AVAudioSessionCategoryOptionMixWithOthers */
+ ma_ios_session_category_option_duck_others = 0x02, /* AVAudioSessionCategoryOptionDuckOthers */
+ ma_ios_session_category_option_allow_bluetooth = 0x04, /* AVAudioSessionCategoryOptionAllowBluetooth */
+ ma_ios_session_category_option_default_to_speaker = 0x08, /* AVAudioSessionCategoryOptionDefaultToSpeaker */
+ ma_ios_session_category_option_interrupt_spoken_audio_and_mix_with_others = 0x11, /* AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers */
+ ma_ios_session_category_option_allow_bluetooth_a2dp = 0x20, /* AVAudioSessionCategoryOptionAllowBluetoothA2DP */
+ ma_ios_session_category_option_allow_air_play = 0x40, /* AVAudioSessionCategoryOptionAllowAirPlay */
+} ma_ios_session_category_option;
-int ma_itoa_s(int value, char* dst, size_t dstSizeInBytes, int radix)
+typedef union
{
- int sign;
- unsigned int valueU;
- char* dstEnd;
+ ma_int64 counter;
+ double counterD;
+} ma_timer;
- if (dst == NULL || dstSizeInBytes == 0) {
- return 22;
- }
- if (radix < 2 || radix > 36) {
- dst[0] = '\0';
- return 22;
- }
+typedef union
+{
+ wchar_t wasapi[64]; /* WASAPI uses a wchar_t string for identification. */
+ ma_uint8 dsound[16]; /* DirectSound uses a GUID for identification. */
+ /*UINT_PTR*/ ma_uint32 winmm; /* When creating a device, WinMM expects a Win32 UINT_PTR for device identification. In practice it's actually just a UINT. */
+ char alsa[256]; /* ALSA uses a name string for identification. */
+ char pulse[256]; /* PulseAudio uses a name string for identification. */
+ int jack; /* JACK always uses default devices. */
+ char coreaudio[256]; /* Core Audio uses a string for identification. */
+ char sndio[256]; /* "snd/0", etc. */
+ char audio4[256]; /* "/dev/audio", etc. */
+ char oss[64]; /* "dev/dsp0", etc. "dev/dsp" for the default device. */
+ ma_int32 aaudio; /* AAudio uses a 32-bit integer for identification. */
+ ma_uint32 opensl; /* OpenSL|ES uses a 32-bit unsigned integer for identification. */
+ char webaudio[32]; /* Web Audio always uses default devices for now, but if this changes it'll be a GUID. */
+ int nullbackend; /* The null backend uses an integer for device IDs. */
+} ma_device_id;
- sign = (value < 0 && radix == 10) ? -1 : 1; /* The negative sign is only used when the base is 10. */
+typedef struct
+{
+ /* Basic info. This is the only information guaranteed to be filled in during device enumeration. */
+ ma_device_id id;
+ char name[256];
- if (value < 0) {
- valueU = -value;
- } else {
- valueU = value;
- }
+ /*
+ Detailed info. As much of this is filled as possible with ma_context_get_device_info(). Note that you are allowed to initialize
+ a device with settings outside of this range, but it just means the data will be converted using miniaudio's data conversion
+ pipeline before sending the data to/from the device. Most programs will need to not worry about these values, but it's provided
+ here mainly for informational purposes or in the rare case that someone might find it useful.
+
+ These will be set to 0 when returned by ma_context_enumerate_devices() or ma_context_get_devices().
+ */
+ ma_uint32 formatCount;
+ ma_format formats[ma_format_count];
+ ma_uint32 minChannels;
+ ma_uint32 maxChannels;
+ ma_uint32 minSampleRate;
+ ma_uint32 maxSampleRate;
- dstEnd = dst;
- do
+ struct
{
- int remainder = valueU % radix;
- if (remainder > 9) {
- *dstEnd = (char)((remainder - 10) + 'a');
- } else {
- *dstEnd = (char)(remainder + '0');
- }
+ ma_bool32 isDefault;
+ } _private;
+} ma_device_info;
- dstEnd += 1;
- dstSizeInBytes -= 1;
- valueU /= radix;
- } while (dstSizeInBytes > 0 && valueU > 0);
+typedef struct
+{
+ ma_device_type deviceType;
+ ma_uint32 sampleRate;
+ ma_uint32 periodSizeInFrames;
+ ma_uint32 periodSizeInMilliseconds;
+ ma_uint32 periods;
+ ma_performance_profile performanceProfile;
+ ma_bool32 noPreZeroedOutputBuffer; /* When set to true, the contents of the output buffer passed into the data callback will be left undefined rather than initialized to zero. */
+ ma_bool32 noClip; /* When set to true, the contents of the output buffer passed into the data callback will be clipped after returning. Only applies when the playback sample format is f32. */
+ ma_device_callback_proc dataCallback;
+ ma_stop_proc stopCallback;
+ void* pUserData;
+ struct
+ {
+ ma_resample_algorithm algorithm;
+ struct
+ {
+ ma_uint32 lpfOrder;
+ } linear;
+ struct
+ {
+ int quality;
+ } speex;
+ } resampling;
+ struct
+ {
+ ma_device_id* pDeviceID;
+ ma_format format;
+ ma_uint32 channels;
+ ma_channel channelMap[MA_MAX_CHANNELS];
+ ma_share_mode shareMode;
+ } playback;
+ struct
+ {
+ ma_device_id* pDeviceID;
+ ma_format format;
+ ma_uint32 channels;
+ ma_channel channelMap[MA_MAX_CHANNELS];
+ ma_share_mode shareMode;
+ } capture;
- if (dstSizeInBytes == 0) {
- dst[0] = '\0';
- return 22; /* Ran out of room in the output buffer. */
- }
+ struct
+ {
+ ma_bool32 noAutoConvertSRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. */
+ ma_bool32 noDefaultQualitySRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY. */
+ ma_bool32 noAutoStreamRouting; /* Disables automatic stream routing. */
+ ma_bool32 noHardwareOffloading; /* Disables WASAPI's hardware offloading feature. */
+ } wasapi;
+ struct
+ {
+ ma_bool32 noMMap; /* Disables MMap mode. */
+ } alsa;
+ struct
+ {
+ const char* pStreamNamePlayback;
+ const char* pStreamNameCapture;
+ } pulse;
+} ma_device_config;
- if (sign < 0) {
- *dstEnd++ = '-';
- dstSizeInBytes -= 1;
- }
+typedef struct
+{
+ ma_log_proc logCallback;
+ ma_thread_priority threadPriority;
+ void* pUserData;
+ ma_allocation_callbacks allocationCallbacks;
+ struct
+ {
+ ma_bool32 useVerboseDeviceEnumeration;
+ } alsa;
+ struct
+ {
+ const char* pApplicationName;
+ const char* pServerName;
+ ma_bool32 tryAutoSpawn; /* Enables autospawning of the PulseAudio daemon if necessary. */
+ } pulse;
+ struct
+ {
+ ma_ios_session_category sessionCategory;
+ ma_uint32 sessionCategoryOptions;
+ } coreaudio;
+ struct
+ {
+ const char* pClientName;
+ ma_bool32 tryStartServer;
+ } jack;
+} ma_context_config;
- if (dstSizeInBytes == 0) {
- dst[0] = '\0';
- return 22; /* Ran out of room in the output buffer. */
- }
+/*
+The callback for handling device enumeration. This is fired from `ma_context_enumerated_devices()`.
- *dstEnd = '\0';
+Parameters
+----------
+pContext (in)
+ A pointer to the context performing the enumeration.
- /* At this point the string will be reversed. */
- dstEnd -= 1;
- while (dst < dstEnd) {
- char temp = *dst;
- *dst = *dstEnd;
- *dstEnd = temp;
+deviceType (in)
+ The type of the device being enumerated. This will always be either `ma_device_type_playback` or `ma_device_type_capture`.
- dst += 1;
- dstEnd -= 1;
- }
+pInfo (in)
+ A pointer to a `ma_device_info` containing the ID and name of the enumerated device. Note that this will not include detailed information about the device,
+ only basic information (ID and name). The reason for this is that it would otherwise require opening the backend device to probe for the information which
+ is too inefficient.
- return 0;
-}
+pUserData (in)
+ The user data pointer passed into `ma_context_enumerate_devices()`.
+*/
+typedef ma_bool32 (* ma_enum_devices_callback_proc)(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData);
-int ma_strcmp(const char* str1, const char* str2)
+struct ma_context
{
- if (str1 == str2) return 0;
-
- /* These checks differ from the standard implementation. It's not important, but I prefer it just for sanity. */
- if (str1 == NULL) return -1;
- if (str2 == NULL) return 1;
-
- for (;;) {
- if (str1[0] == '\0') {
- break;
- }
- if (str1[0] != str2[0]) {
- break;
- }
-
- str1 += 1;
- str2 += 1;
- }
+ ma_backend backend; /* DirectSound, ALSA, etc. */
+ ma_log_proc logCallback;
+ ma_thread_priority threadPriority;
+ void* pUserData;
+ ma_allocation_callbacks allocationCallbacks;
+ ma_mutex deviceEnumLock; /* Used to make ma_context_get_devices() thread safe. */
+ ma_mutex deviceInfoLock; /* Used to make ma_context_get_device_info() thread safe. */
+ ma_uint32 deviceInfoCapacity; /* Total capacity of pDeviceInfos. */
+ ma_uint32 playbackDeviceInfoCount;
+ ma_uint32 captureDeviceInfoCount;
+ ma_device_info* pDeviceInfos; /* Playback devices first, then capture. */
+ ma_bool32 isBackendAsynchronous : 1; /* Set when the context is initialized. Set to 1 for asynchronous backends such as Core Audio and JACK. Do not modify. */
- return ((unsigned char*)str1)[0] - ((unsigned char*)str2)[0];
-}
+ ma_result (* onUninit )(ma_context* pContext);
+ ma_bool32 (* onDeviceIDEqual )(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1);
+ ma_result (* onEnumDevices )(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData); /* Return false from the callback to stop enumeration. */
+ ma_result (* onGetDeviceInfo )(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo);
+ ma_result (* onDeviceInit )(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice);
+ void (* onDeviceUninit )(ma_device* pDevice);
+ ma_result (* onDeviceStart )(ma_device* pDevice);
+ ma_result (* onDeviceStop )(ma_device* pDevice);
+ ma_result (* onDeviceMainLoop)(ma_device* pDevice);
+
+ union
+ {
+#ifdef MA_SUPPORT_WASAPI
+ struct
+ {
+ int _unused;
+ } wasapi;
+#endif
+#ifdef MA_SUPPORT_DSOUND
+ struct
+ {
+ ma_handle hDSoundDLL;
+ ma_proc DirectSoundCreate;
+ ma_proc DirectSoundEnumerateA;
+ ma_proc DirectSoundCaptureCreate;
+ ma_proc DirectSoundCaptureEnumerateA;
+ } dsound;
+#endif
+#ifdef MA_SUPPORT_WINMM
+ struct
+ {
+ ma_handle hWinMM;
+ ma_proc waveOutGetNumDevs;
+ ma_proc waveOutGetDevCapsA;
+ ma_proc waveOutOpen;
+ ma_proc waveOutClose;
+ ma_proc waveOutPrepareHeader;
+ ma_proc waveOutUnprepareHeader;
+ ma_proc waveOutWrite;
+ ma_proc waveOutReset;
+ ma_proc waveInGetNumDevs;
+ ma_proc waveInGetDevCapsA;
+ ma_proc waveInOpen;
+ ma_proc waveInClose;
+ ma_proc waveInPrepareHeader;
+ ma_proc waveInUnprepareHeader;
+ ma_proc waveInAddBuffer;
+ ma_proc waveInStart;
+ ma_proc waveInReset;
+ } winmm;
+#endif
+#ifdef MA_SUPPORT_ALSA
+ struct
+ {
+ ma_handle asoundSO;
+ ma_proc snd_pcm_open;
+ ma_proc snd_pcm_close;
+ ma_proc snd_pcm_hw_params_sizeof;
+ ma_proc snd_pcm_hw_params_any;
+ ma_proc snd_pcm_hw_params_set_format;
+ ma_proc snd_pcm_hw_params_set_format_first;
+ ma_proc snd_pcm_hw_params_get_format_mask;
+ ma_proc snd_pcm_hw_params_set_channels_near;
+ ma_proc snd_pcm_hw_params_set_rate_resample;
+ ma_proc snd_pcm_hw_params_set_rate_near;
+ ma_proc snd_pcm_hw_params_set_buffer_size_near;
+ ma_proc snd_pcm_hw_params_set_periods_near;
+ ma_proc snd_pcm_hw_params_set_access;
+ ma_proc snd_pcm_hw_params_get_format;
+ ma_proc snd_pcm_hw_params_get_channels;
+ ma_proc snd_pcm_hw_params_get_channels_min;
+ ma_proc snd_pcm_hw_params_get_channels_max;
+ ma_proc snd_pcm_hw_params_get_rate;
+ ma_proc snd_pcm_hw_params_get_rate_min;
+ ma_proc snd_pcm_hw_params_get_rate_max;
+ ma_proc snd_pcm_hw_params_get_buffer_size;
+ ma_proc snd_pcm_hw_params_get_periods;
+ ma_proc snd_pcm_hw_params_get_access;
+ ma_proc snd_pcm_hw_params;
+ ma_proc snd_pcm_sw_params_sizeof;
+ ma_proc snd_pcm_sw_params_current;
+ ma_proc snd_pcm_sw_params_get_boundary;
+ ma_proc snd_pcm_sw_params_set_avail_min;
+ ma_proc snd_pcm_sw_params_set_start_threshold;
+ ma_proc snd_pcm_sw_params_set_stop_threshold;
+ ma_proc snd_pcm_sw_params;
+ ma_proc snd_pcm_format_mask_sizeof;
+ ma_proc snd_pcm_format_mask_test;
+ ma_proc snd_pcm_get_chmap;
+ ma_proc snd_pcm_state;
+ ma_proc snd_pcm_prepare;
+ ma_proc snd_pcm_start;
+ ma_proc snd_pcm_drop;
+ ma_proc snd_pcm_drain;
+ ma_proc snd_device_name_hint;
+ ma_proc snd_device_name_get_hint;
+ ma_proc snd_card_get_index;
+ ma_proc snd_device_name_free_hint;
+ ma_proc snd_pcm_mmap_begin;
+ ma_proc snd_pcm_mmap_commit;
+ ma_proc snd_pcm_recover;
+ ma_proc snd_pcm_readi;
+ ma_proc snd_pcm_writei;
+ ma_proc snd_pcm_avail;
+ ma_proc snd_pcm_avail_update;
+ ma_proc snd_pcm_wait;
+ ma_proc snd_pcm_info;
+ ma_proc snd_pcm_info_sizeof;
+ ma_proc snd_pcm_info_get_name;
+ ma_proc snd_config_update_free_global;
+
+ ma_mutex internalDeviceEnumLock;
+ ma_bool32 useVerboseDeviceEnumeration;
+ } alsa;
+#endif
+#ifdef MA_SUPPORT_PULSEAUDIO
+ struct
+ {
+ ma_handle pulseSO;
+ ma_proc pa_mainloop_new;
+ ma_proc pa_mainloop_free;
+ ma_proc pa_mainloop_get_api;
+ ma_proc pa_mainloop_iterate;
+ ma_proc pa_mainloop_wakeup;
+ ma_proc pa_context_new;
+ ma_proc pa_context_unref;
+ ma_proc pa_context_connect;
+ ma_proc pa_context_disconnect;
+ ma_proc pa_context_set_state_callback;
+ ma_proc pa_context_get_state;
+ ma_proc pa_context_get_sink_info_list;
+ ma_proc pa_context_get_source_info_list;
+ ma_proc pa_context_get_sink_info_by_name;
+ ma_proc pa_context_get_source_info_by_name;
+ ma_proc pa_operation_unref;
+ ma_proc pa_operation_get_state;
+ ma_proc pa_channel_map_init_extend;
+ ma_proc pa_channel_map_valid;
+ ma_proc pa_channel_map_compatible;
+ ma_proc pa_stream_new;
+ ma_proc pa_stream_unref;
+ ma_proc pa_stream_connect_playback;
+ ma_proc pa_stream_connect_record;
+ ma_proc pa_stream_disconnect;
+ ma_proc pa_stream_get_state;
+ ma_proc pa_stream_get_sample_spec;
+ ma_proc pa_stream_get_channel_map;
+ ma_proc pa_stream_get_buffer_attr;
+ ma_proc pa_stream_set_buffer_attr;
+ ma_proc pa_stream_get_device_name;
+ ma_proc pa_stream_set_write_callback;
+ ma_proc pa_stream_set_read_callback;
+ ma_proc pa_stream_flush;
+ ma_proc pa_stream_drain;
+ ma_proc pa_stream_is_corked;
+ ma_proc pa_stream_cork;
+ ma_proc pa_stream_trigger;
+ ma_proc pa_stream_begin_write;
+ ma_proc pa_stream_write;
+ ma_proc pa_stream_peek;
+ ma_proc pa_stream_drop;
+ ma_proc pa_stream_writable_size;
+ ma_proc pa_stream_readable_size;
+
+ char* pApplicationName;
+ char* pServerName;
+ ma_bool32 tryAutoSpawn;
+ } pulse;
+#endif
+#ifdef MA_SUPPORT_JACK
+ struct
+ {
+ ma_handle jackSO;
+ ma_proc jack_client_open;
+ ma_proc jack_client_close;
+ ma_proc jack_client_name_size;
+ ma_proc jack_set_process_callback;
+ ma_proc jack_set_buffer_size_callback;
+ ma_proc jack_on_shutdown;
+ ma_proc jack_get_sample_rate;
+ ma_proc jack_get_buffer_size;
+ ma_proc jack_get_ports;
+ ma_proc jack_activate;
+ ma_proc jack_deactivate;
+ ma_proc jack_connect;
+ ma_proc jack_port_register;
+ ma_proc jack_port_name;
+ ma_proc jack_port_get_buffer;
+ ma_proc jack_free;
+
+ char* pClientName;
+ ma_bool32 tryStartServer;
+ } jack;
+#endif
+#ifdef MA_SUPPORT_COREAUDIO
+ struct
+ {
+ ma_handle hCoreFoundation;
+ ma_proc CFStringGetCString;
+ ma_proc CFRelease;
+
+ ma_handle hCoreAudio;
+ ma_proc AudioObjectGetPropertyData;
+ ma_proc AudioObjectGetPropertyDataSize;
+ ma_proc AudioObjectSetPropertyData;
+ ma_proc AudioObjectAddPropertyListener;
+ ma_proc AudioObjectRemovePropertyListener;
+
+ ma_handle hAudioUnit; /* Could possibly be set to AudioToolbox on later versions of macOS. */
+ ma_proc AudioComponentFindNext;
+ ma_proc AudioComponentInstanceDispose;
+ ma_proc AudioComponentInstanceNew;
+ ma_proc AudioOutputUnitStart;
+ ma_proc AudioOutputUnitStop;
+ ma_proc AudioUnitAddPropertyListener;
+ ma_proc AudioUnitGetPropertyInfo;
+ ma_proc AudioUnitGetProperty;
+ ma_proc AudioUnitSetProperty;
+ ma_proc AudioUnitInitialize;
+ ma_proc AudioUnitRender;
+
+ /*AudioComponent*/ ma_ptr component;
+ } coreaudio;
+#endif
+#ifdef MA_SUPPORT_SNDIO
+ struct
+ {
+ ma_handle sndioSO;
+ ma_proc sio_open;
+ ma_proc sio_close;
+ ma_proc sio_setpar;
+ ma_proc sio_getpar;
+ ma_proc sio_getcap;
+ ma_proc sio_start;
+ ma_proc sio_stop;
+ ma_proc sio_read;
+ ma_proc sio_write;
+ ma_proc sio_onmove;
+ ma_proc sio_nfds;
+ ma_proc sio_pollfd;
+ ma_proc sio_revents;
+ ma_proc sio_eof;
+ ma_proc sio_setvol;
+ ma_proc sio_onvol;
+ ma_proc sio_initpar;
+ } sndio;
+#endif
+#ifdef MA_SUPPORT_AUDIO4
+ struct
+ {
+ int _unused;
+ } audio4;
+#endif
+#ifdef MA_SUPPORT_OSS
+ struct
+ {
+ int versionMajor;
+ int versionMinor;
+ } oss;
+#endif
+#ifdef MA_SUPPORT_AAUDIO
+ struct
+ {
+ ma_handle hAAudio; /* libaaudio.so */
+ ma_proc AAudio_createStreamBuilder;
+ ma_proc AAudioStreamBuilder_delete;
+ ma_proc AAudioStreamBuilder_setDeviceId;
+ ma_proc AAudioStreamBuilder_setDirection;
+ ma_proc AAudioStreamBuilder_setSharingMode;
+ ma_proc AAudioStreamBuilder_setFormat;
+ ma_proc AAudioStreamBuilder_setChannelCount;
+ ma_proc AAudioStreamBuilder_setSampleRate;
+ ma_proc AAudioStreamBuilder_setBufferCapacityInFrames;
+ ma_proc AAudioStreamBuilder_setFramesPerDataCallback;
+ ma_proc AAudioStreamBuilder_setDataCallback;
+ ma_proc AAudioStreamBuilder_setErrorCallback;
+ ma_proc AAudioStreamBuilder_setPerformanceMode;
+ ma_proc AAudioStreamBuilder_openStream;
+ ma_proc AAudioStream_close;
+ ma_proc AAudioStream_getState;
+ ma_proc AAudioStream_waitForStateChange;
+ ma_proc AAudioStream_getFormat;
+ ma_proc AAudioStream_getChannelCount;
+ ma_proc AAudioStream_getSampleRate;
+ ma_proc AAudioStream_getBufferCapacityInFrames;
+ ma_proc AAudioStream_getFramesPerDataCallback;
+ ma_proc AAudioStream_getFramesPerBurst;
+ ma_proc AAudioStream_requestStart;
+ ma_proc AAudioStream_requestStop;
+ } aaudio;
+#endif
+#ifdef MA_SUPPORT_OPENSL
+ struct
+ {
+ int _unused;
+ } opensl;
+#endif
+#ifdef MA_SUPPORT_WEBAUDIO
+ struct
+ {
+ int _unused;
+ } webaudio;
+#endif
+#ifdef MA_SUPPORT_NULL
+ struct
+ {
+ int _unused;
+ } null_backend;
+#endif
+ };
+
+ union
+ {
+#ifdef MA_WIN32
+ struct
+ {
+ /*HMODULE*/ ma_handle hOle32DLL;
+ ma_proc CoInitializeEx;
+ ma_proc CoUninitialize;
+ ma_proc CoCreateInstance;
+ ma_proc CoTaskMemFree;
+ ma_proc PropVariantClear;
+ ma_proc StringFromGUID2;
+
+ /*HMODULE*/ ma_handle hUser32DLL;
+ ma_proc GetForegroundWindow;
+ ma_proc GetDesktopWindow;
+
+ /*HMODULE*/ ma_handle hAdvapi32DLL;
+ ma_proc RegOpenKeyExA;
+ ma_proc RegCloseKey;
+ ma_proc RegQueryValueExA;
+ } win32;
+#endif
+#ifdef MA_POSIX
+ struct
+ {
+ ma_handle pthreadSO;
+ ma_proc pthread_create;
+ ma_proc pthread_join;
+ ma_proc pthread_mutex_init;
+ ma_proc pthread_mutex_destroy;
+ ma_proc pthread_mutex_lock;
+ ma_proc pthread_mutex_unlock;
+ ma_proc pthread_cond_init;
+ ma_proc pthread_cond_destroy;
+ ma_proc pthread_cond_wait;
+ ma_proc pthread_cond_signal;
+ ma_proc pthread_attr_init;
+ ma_proc pthread_attr_destroy;
+ ma_proc pthread_attr_setschedpolicy;
+ ma_proc pthread_attr_getschedparam;
+ ma_proc pthread_attr_setschedparam;
+ } posix;
+#endif
+ int _unused;
+ };
+};
+
+struct ma_device
+{
+ ma_context* pContext;
+ ma_device_type type;
+ ma_uint32 sampleRate;
+ volatile ma_uint32 state; /* The state of the device is variable and can change at any time on any thread, so tell the compiler as such with `volatile`. */
+ ma_device_callback_proc onData; /* Set once at initialization time and should not be changed after. */
+ ma_stop_proc onStop; /* Set once at initialization time and should not be changed after. */
+ void* pUserData; /* Application defined data. */
+ ma_mutex lock;
+ ma_event wakeupEvent;
+ ma_event startEvent;
+ ma_event stopEvent;
+ ma_thread thread;
+ ma_result workResult; /* This is set by the worker thread after it's finished doing a job. */
+ ma_bool32 usingDefaultSampleRate : 1;
+ ma_bool32 usingDefaultBufferSize : 1;
+ ma_bool32 usingDefaultPeriods : 1;
+ ma_bool32 isOwnerOfContext : 1; /* When set to true, uninitializing the device will also uninitialize the context. Set to true when NULL is passed into ma_device_init(). */
+ ma_bool32 noPreZeroedOutputBuffer : 1;
+ ma_bool32 noClip : 1;
+ volatile float masterVolumeFactor; /* Volatile so we can use some thread safety when applying volume to periods. */
+ struct
+ {
+ ma_resample_algorithm algorithm;
+ struct
+ {
+ ma_uint32 lpfOrder;
+ } linear;
+ struct
+ {
+ int quality;
+ } speex;
+ } resampling;
+ struct
+ {
+ char name[256]; /* Maybe temporary. Likely to be replaced with a query API. */
+ ma_share_mode shareMode; /* Set to whatever was passed in when the device was initialized. */
+ ma_bool32 usingDefaultFormat : 1;
+ ma_bool32 usingDefaultChannels : 1;
+ ma_bool32 usingDefaultChannelMap : 1;
+ ma_format format;
+ ma_uint32 channels;
+ ma_channel channelMap[MA_MAX_CHANNELS];
+ ma_format internalFormat;
+ ma_uint32 internalChannels;
+ ma_uint32 internalSampleRate;
+ ma_channel internalChannelMap[MA_MAX_CHANNELS];
+ ma_uint32 internalPeriodSizeInFrames;
+ ma_uint32 internalPeriods;
+ ma_data_converter converter;
+ } playback;
+ struct
+ {
+ char name[256]; /* Maybe temporary. Likely to be replaced with a query API. */
+ ma_share_mode shareMode; /* Set to whatever was passed in when the device was initialized. */
+ ma_bool32 usingDefaultFormat : 1;
+ ma_bool32 usingDefaultChannels : 1;
+ ma_bool32 usingDefaultChannelMap : 1;
+ ma_format format;
+ ma_uint32 channels;
+ ma_channel channelMap[MA_MAX_CHANNELS];
+ ma_format internalFormat;
+ ma_uint32 internalChannels;
+ ma_uint32 internalSampleRate;
+ ma_channel internalChannelMap[MA_MAX_CHANNELS];
+ ma_uint32 internalPeriodSizeInFrames;
+ ma_uint32 internalPeriods;
+ ma_data_converter converter;
+ } capture;
+
+ union
+ {
+#ifdef MA_SUPPORT_WASAPI
+ struct
+ {
+ /*IAudioClient**/ ma_ptr pAudioClientPlayback;
+ /*IAudioClient**/ ma_ptr pAudioClientCapture;
+ /*IAudioRenderClient**/ ma_ptr pRenderClient;
+ /*IAudioCaptureClient**/ ma_ptr pCaptureClient;
+ /*IMMDeviceEnumerator**/ ma_ptr pDeviceEnumerator; /* Used for IMMNotificationClient notifications. Required for detecting default device changes. */
+ ma_IMMNotificationClient notificationClient;
+ /*HANDLE*/ ma_handle hEventPlayback; /* Auto reset. Initialized to signaled. */
+ /*HANDLE*/ ma_handle hEventCapture; /* Auto reset. Initialized to unsignaled. */
+ ma_uint32 actualPeriodSizeInFramesPlayback; /* Value from GetBufferSize(). internalPeriodSizeInFrames is not set to the _actual_ buffer size when low-latency shared mode is being used due to the way the IAudioClient3 API works. */
+ ma_uint32 actualPeriodSizeInFramesCapture;
+ ma_uint32 originalPeriodSizeInFrames;
+ ma_uint32 originalPeriodSizeInMilliseconds;
+ ma_uint32 originalPeriods;
+ ma_bool32 hasDefaultPlaybackDeviceChanged; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */
+ ma_bool32 hasDefaultCaptureDeviceChanged; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */
+ ma_uint32 periodSizeInFramesPlayback;
+ ma_uint32 periodSizeInFramesCapture;
+ ma_bool32 isStartedCapture; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */
+ ma_bool32 isStartedPlayback; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */
+ ma_bool32 noAutoConvertSRC : 1; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. */
+ ma_bool32 noDefaultQualitySRC : 1; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY. */
+ ma_bool32 noHardwareOffloading : 1;
+ ma_bool32 allowCaptureAutoStreamRouting : 1;
+ ma_bool32 allowPlaybackAutoStreamRouting : 1;
+ } wasapi;
+#endif
+#ifdef MA_SUPPORT_DSOUND
+ struct
+ {
+ /*LPDIRECTSOUND*/ ma_ptr pPlayback;
+ /*LPDIRECTSOUNDBUFFER*/ ma_ptr pPlaybackPrimaryBuffer;
+ /*LPDIRECTSOUNDBUFFER*/ ma_ptr pPlaybackBuffer;
+ /*LPDIRECTSOUNDCAPTURE*/ ma_ptr pCapture;
+ /*LPDIRECTSOUNDCAPTUREBUFFER*/ ma_ptr pCaptureBuffer;
+ } dsound;
+#endif
+#ifdef MA_SUPPORT_WINMM
+ struct
+ {
+ /*HWAVEOUT*/ ma_handle hDevicePlayback;
+ /*HWAVEIN*/ ma_handle hDeviceCapture;
+ /*HANDLE*/ ma_handle hEventPlayback;
+ /*HANDLE*/ ma_handle hEventCapture;
+ ma_uint32 fragmentSizeInFrames;
+ ma_uint32 fragmentSizeInBytes;
+ ma_uint32 iNextHeaderPlayback; /* [0,periods). Used as an index into pWAVEHDRPlayback. */
+ ma_uint32 iNextHeaderCapture; /* [0,periods). Used as an index into pWAVEHDRCapture. */
+ ma_uint32 headerFramesConsumedPlayback; /* The number of PCM frames consumed in the buffer in pWAVEHEADER[iNextHeader]. */
+ ma_uint32 headerFramesConsumedCapture; /* ^^^ */
+ /*WAVEHDR**/ ma_uint8* pWAVEHDRPlayback; /* One instantiation for each period. */
+ /*WAVEHDR**/ ma_uint8* pWAVEHDRCapture; /* One instantiation for each period. */
+ ma_uint8* pIntermediaryBufferPlayback;
+ ma_uint8* pIntermediaryBufferCapture;
+ ma_uint8* _pHeapData; /* Used internally and is used for the heap allocated data for the intermediary buffer and the WAVEHDR structures. */
+ } winmm;
+#endif
+#ifdef MA_SUPPORT_ALSA
+ struct
+ {
+ /*snd_pcm_t**/ ma_ptr pPCMPlayback;
+ /*snd_pcm_t**/ ma_ptr pPCMCapture;
+ ma_bool32 isUsingMMapPlayback : 1;
+ ma_bool32 isUsingMMapCapture : 1;
+ } alsa;
+#endif
+#ifdef MA_SUPPORT_PULSEAUDIO
+ struct
+ {
+ /*pa_mainloop**/ ma_ptr pMainLoop;
+ /*pa_mainloop_api**/ ma_ptr pAPI;
+ /*pa_context**/ ma_ptr pPulseContext;
+ /*pa_stream**/ ma_ptr pStreamPlayback;
+ /*pa_stream**/ ma_ptr pStreamCapture;
+ /*pa_context_state*/ ma_uint32 pulseContextState;
+ void* pMappedBufferPlayback;
+ const void* pMappedBufferCapture;
+ ma_uint32 mappedBufferFramesRemainingPlayback;
+ ma_uint32 mappedBufferFramesRemainingCapture;
+ ma_uint32 mappedBufferFramesCapacityPlayback;
+ ma_uint32 mappedBufferFramesCapacityCapture;
+ ma_bool32 breakFromMainLoop : 1;
+ } pulse;
+#endif
+#ifdef MA_SUPPORT_JACK
+ struct
+ {
+ /*jack_client_t**/ ma_ptr pClient;
+ /*jack_port_t**/ ma_ptr pPortsPlayback[MA_MAX_CHANNELS];
+ /*jack_port_t**/ ma_ptr pPortsCapture[MA_MAX_CHANNELS];
+ float* pIntermediaryBufferPlayback; /* Typed as a float because JACK is always floating point. */
+ float* pIntermediaryBufferCapture;
+ ma_pcm_rb duplexRB;
+ } jack;
+#endif
+#ifdef MA_SUPPORT_COREAUDIO
+ struct
+ {
+ ma_uint32 deviceObjectIDPlayback;
+ ma_uint32 deviceObjectIDCapture;
+ /*AudioUnit*/ ma_ptr audioUnitPlayback;
+ /*AudioUnit*/ ma_ptr audioUnitCapture;
+ /*AudioBufferList**/ ma_ptr pAudioBufferList; /* Only used for input devices. */
+ ma_event stopEvent;
+ ma_uint32 originalPeriodSizeInFrames;
+ ma_uint32 originalPeriodSizeInMilliseconds;
+ ma_uint32 originalPeriods;
+ ma_bool32 isDefaultPlaybackDevice;
+ ma_bool32 isDefaultCaptureDevice;
+ ma_bool32 isSwitchingPlaybackDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */
+ ma_bool32 isSwitchingCaptureDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */
+ ma_pcm_rb duplexRB;
+ void* pRouteChangeHandler; /* Only used on mobile platforms. Obj-C object for handling route changes. */
+ } coreaudio;
+#endif
+#ifdef MA_SUPPORT_SNDIO
+ struct
+ {
+ ma_ptr handlePlayback;
+ ma_ptr handleCapture;
+ ma_bool32 isStartedPlayback;
+ ma_bool32 isStartedCapture;
+ } sndio;
+#endif
+#ifdef MA_SUPPORT_AUDIO4
+ struct
+ {
+ int fdPlayback;
+ int fdCapture;
+ } audio4;
+#endif
+#ifdef MA_SUPPORT_OSS
+ struct
+ {
+ int fdPlayback;
+ int fdCapture;
+ } oss;
+#endif
+#ifdef MA_SUPPORT_AAUDIO
+ struct
+ {
+ /*AAudioStream**/ ma_ptr pStreamPlayback;
+ /*AAudioStream**/ ma_ptr pStreamCapture;
+ ma_pcm_rb duplexRB;
+ } aaudio;
+#endif
+#ifdef MA_SUPPORT_OPENSL
+ struct
+ {
+ /*SLObjectItf*/ ma_ptr pOutputMixObj;
+ /*SLOutputMixItf*/ ma_ptr pOutputMix;
+ /*SLObjectItf*/ ma_ptr pAudioPlayerObj;
+ /*SLPlayItf*/ ma_ptr pAudioPlayer;
+ /*SLObjectItf*/ ma_ptr pAudioRecorderObj;
+ /*SLRecordItf*/ ma_ptr pAudioRecorder;
+ /*SLAndroidSimpleBufferQueueItf*/ ma_ptr pBufferQueuePlayback;
+ /*SLAndroidSimpleBufferQueueItf*/ ma_ptr pBufferQueueCapture;
+ ma_bool32 isDrainingCapture;
+ ma_bool32 isDrainingPlayback;
+ ma_uint32 currentBufferIndexPlayback;
+ ma_uint32 currentBufferIndexCapture;
+ ma_uint8* pBufferPlayback; /* This is malloc()'d and is used for storing audio data. Typed as ma_uint8 for easy offsetting. */
+ ma_uint8* pBufferCapture;
+ ma_pcm_rb duplexRB;
+ } opensl;
+#endif
+#ifdef MA_SUPPORT_WEBAUDIO
+ struct
+ {
+ int indexPlayback; /* We use a factory on the JavaScript side to manage devices and use an index for JS/C interop. */
+ int indexCapture;
+ ma_pcm_rb duplexRB; /* In external capture format. */
+ } webaudio;
+#endif
+#ifdef MA_SUPPORT_NULL
+ struct
+ {
+ ma_thread deviceThread;
+ ma_event operationEvent;
+ ma_event operationCompletionEvent;
+ ma_uint32 operation;
+ ma_result operationResult;
+ ma_timer timer;
+ double priorRunTime;
+ ma_uint32 currentPeriodFramesRemainingPlayback;
+ ma_uint32 currentPeriodFramesRemainingCapture;
+ ma_uint64 lastProcessedFramePlayback;
+ ma_uint32 lastProcessedFrameCapture;
+ ma_bool32 isStarted;
+ } null_device;
+#endif
+ };
+};
+#if defined(_MSC_VER) && !defined(__clang__)
+ #pragma warning(pop)
+#else
+ #pragma GCC diagnostic pop /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */
+#endif
+
+/*
+Initializes a `ma_context_config` object.
+
+
+Return Value
+------------
+A `ma_context_config` initialized to defaults.
+
+
+Remarks
+-------
+You must always use this to initialize the default state of the `ma_context_config` object. Not using this will result in your program breaking when miniaudio
+is updated and new members are added to `ma_context_config`. It also sets logical defaults.
+
+You can override members of the returned object by changing its members directly.
+
+
+See Also
+--------
+ma_context_init()
+*/
+ma_context_config ma_context_config_init(void);
+
+/*
+Initializes a context.
+
+The context is used for selecting and initializing an appropriate backend and to represent the backend at a more global level than that of an individual
+device. There is one context to many devices, and a device is created from a context. A context is required to enumerate devices.
+
+
+Parameters
+----------
+backends (in, optional)
+ A list of backends to try initializing, in priority order. Can be NULL, in which case it uses default priority order.
+
+backendCount (in, optional)
+    The number of items in `backends`. Ignored if `backends` is NULL.
+
+pConfig (in, optional)
+ The context configuration.
+
+pContext (in)
+ A pointer to the context object being initialized.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. Do not call this function across multiple threads as some backends read and write to global state.
+
+
+Remarks
+-------
+When `backends` is NULL, the default priority order will be used. Below is a list of backends in priority order:
+
+ |-------------|-----------------------|--------------------------------------------------------|
+ | Name | Enum Name | Supported Operating Systems |
+ |-------------|-----------------------|--------------------------------------------------------|
+ | WASAPI | ma_backend_wasapi | Windows Vista+ |
+ | DirectSound | ma_backend_dsound | Windows XP+ |
+ | WinMM | ma_backend_winmm | Windows XP+ (may work on older versions, but untested) |
+ | Core Audio | ma_backend_coreaudio | macOS, iOS |
+ | ALSA | ma_backend_alsa | Linux |
+ | PulseAudio | ma_backend_pulseaudio | Cross Platform (disabled on Windows, BSD and Android) |
+ | JACK | ma_backend_jack | Cross Platform (disabled on BSD and Android) |
+ | sndio | ma_backend_sndio | OpenBSD |
+ | audio(4) | ma_backend_audio4 | NetBSD, OpenBSD |
+ | OSS | ma_backend_oss | FreeBSD |
+ | AAudio | ma_backend_aaudio | Android 8+ |
+ | OpenSL|ES | ma_backend_opensl | Android (API level 16+) |
+ | Web Audio | ma_backend_webaudio | Web (via Emscripten) |
+ | Null | ma_backend_null | Cross Platform (not used on Web) |
+ |-------------|-----------------------|--------------------------------------------------------|
+
+The context can be configured via the `pConfig` argument. The config object is initialized with `ma_context_config_init()`. Individual configuration settings
+can then be set directly on the structure. Below are the members of the `ma_context_config` object.
+
+ logCallback
+ Callback for handling log messages from miniaudio.
+
+ threadPriority
+ The desired priority to use for the audio thread. Allowable values include the following:
+
+ |--------------------------------------|
+ | Thread Priority |
+ |--------------------------------------|
+ | ma_thread_priority_idle |
+ | ma_thread_priority_lowest |
+ | ma_thread_priority_low |
+ | ma_thread_priority_normal |
+ | ma_thread_priority_high |
+ | ma_thread_priority_highest (default) |
+ | ma_thread_priority_realtime |
+ | ma_thread_priority_default |
+ |--------------------------------------|
+
+ pUserData
+ A pointer to application-defined data. This can be accessed from the context object directly such as `context.pUserData`.
+
+ allocationCallbacks
+ Structure containing custom allocation callbacks. Leaving this at defaults will cause it to use MA_MALLOC, MA_REALLOC and MA_FREE. These allocation
+ callbacks will be used for anything tied to the context, including devices.
+
+ alsa.useVerboseDeviceEnumeration
+        ALSA will typically enumerate many different devices which can be intrusive and user-unfriendly. To combat this, miniaudio will enumerate only unique
+ card/device pairs by default. The problem with this is that you lose a bit of flexibility and control. Setting alsa.useVerboseDeviceEnumeration makes
+ it so the ALSA backend includes all devices. Defaults to false.
+
+ pulse.pApplicationName
+ PulseAudio only. The application name to use when initializing the PulseAudio context with `pa_context_new()`.
+
+ pulse.pServerName
+ PulseAudio only. The name of the server to connect to with `pa_context_connect()`.
+
+ pulse.tryAutoSpawn
+ PulseAudio only. Whether or not to try automatically starting the PulseAudio daemon. Defaults to false. If you set this to true, keep in mind that
+ miniaudio uses a trial and error method to find the most appropriate backend, and this will result in the PulseAudio daemon starting which may be
+ intrusive for the end user.
+
+ coreaudio.sessionCategory
+ iOS only. The session category to use for the shared AudioSession instance. Below is a list of allowable values and their Core Audio equivalents.
+
+ |-----------------------------------------|-------------------------------------|
+ | miniaudio Token | Core Audio Token |
+ |-----------------------------------------|-------------------------------------|
+ | ma_ios_session_category_ambient | AVAudioSessionCategoryAmbient |
+ | ma_ios_session_category_solo_ambient | AVAudioSessionCategorySoloAmbient |
+ | ma_ios_session_category_playback | AVAudioSessionCategoryPlayback |
+ | ma_ios_session_category_record | AVAudioSessionCategoryRecord |
+ | ma_ios_session_category_play_and_record | AVAudioSessionCategoryPlayAndRecord |
+ | ma_ios_session_category_multi_route | AVAudioSessionCategoryMultiRoute |
+ | ma_ios_session_category_none | AVAudioSessionCategoryAmbient |
+ | ma_ios_session_category_default | AVAudioSessionCategoryAmbient |
+ |-----------------------------------------|-------------------------------------|
+
+ coreaudio.sessionCategoryOptions
+ iOS only. Session category options to use with the shared AudioSession instance. Below is a list of allowable values and their Core Audio equivalents.
+
+ |---------------------------------------------------------------------------|------------------------------------------------------------------|
+ | miniaudio Token | Core Audio Token |
+ |---------------------------------------------------------------------------|------------------------------------------------------------------|
+ | ma_ios_session_category_option_mix_with_others | AVAudioSessionCategoryOptionMixWithOthers |
+ | ma_ios_session_category_option_duck_others | AVAudioSessionCategoryOptionDuckOthers |
+ | ma_ios_session_category_option_allow_bluetooth | AVAudioSessionCategoryOptionAllowBluetooth |
+ | ma_ios_session_category_option_default_to_speaker | AVAudioSessionCategoryOptionDefaultToSpeaker |
+ | ma_ios_session_category_option_interrupt_spoken_audio_and_mix_with_others | AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers |
+ | ma_ios_session_category_option_allow_bluetooth_a2dp | AVAudioSessionCategoryOptionAllowBluetoothA2DP |
+ | ma_ios_session_category_option_allow_air_play | AVAudioSessionCategoryOptionAllowAirPlay |
+ |---------------------------------------------------------------------------|------------------------------------------------------------------|
+
+ jack.pClientName
+ The name of the client to pass to `jack_client_open()`.
+
+ jack.tryStartServer
+ Whether or not to try auto-starting the JACK server. Defaults to false.
+
+
+It is recommended that only a single context is active at any given time because it's a bulky data structure which performs run-time linking for the
+relevant backends every time it's initialized.
+
+The location of the context cannot change throughout its lifetime. Consider allocating the `ma_context` object with `malloc()` if this is an issue. The
+reason for this is that a pointer to the context is stored in the `ma_device` structure.
+
+
+Example 1 - Default Initialization
+----------------------------------
+The example below shows how to initialize the context using the default configuration.
+
+```c
+ma_context context;
+ma_result result = ma_context_init(NULL, 0, NULL, &context);
+if (result != MA_SUCCESS) {
+ // Error.
+}
+```
+
+
+Example 2 - Custom Configuration
+--------------------------------
+The example below shows how to initialize the context using custom backend priorities and a custom configuration. In this hypothetical example, the program
+wants to prioritize ALSA over PulseAudio on Linux. They also want to avoid using the WinMM backend on Windows because its latency is too high. They also
+want an error to be returned if no valid backend is available which they achieve by excluding the Null backend.
+
+For the configuration, the program wants to capture any log messages so they can, for example, route it to a log file and user interface.
+
+```c
+ma_backend backends[] = {
+ ma_backend_alsa,
+ ma_backend_pulseaudio,
+ ma_backend_wasapi,
+ ma_backend_dsound
+};
+
+ma_context_config config = ma_context_config_init();
+config.logCallback = my_log_callback;
+config.pUserData = pMyUserData;
+
+ma_context context;
+ma_result result = ma_context_init(backends, sizeof(backends)/sizeof(backends[0]), &config, &context);
+if (result != MA_SUCCESS) {
+ // Error.
+ if (result == MA_NO_BACKEND) {
+ // Couldn't find an appropriate backend.
+ }
+}
+```
+
+
+See Also
+--------
+ma_context_config_init()
+ma_context_uninit()
+*/
+ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext);
+
+/*
+Uninitializes a context.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. Do not call this function across multiple threads as some backends read and write to global state.
+
+
+Remarks
+-------
+Results are undefined if you call this while any device created by this context is still active.
+
+
+See Also
+--------
+ma_context_init()
+*/
+ma_result ma_context_uninit(ma_context* pContext);
+
+/*
+Enumerates over every device (both playback and capture).
+
+This is a lower-level enumeration function to the easier to use `ma_context_get_devices()`. Use `ma_context_enumerate_devices()` if you would rather not incur
+an internal heap allocation, or it simply suits your code better.
+
+Note that this only retrieves the ID and name/description of the device. The reason for only retrieving basic information is that it would otherwise require
+opening the backend device in order to probe it for more detailed information which can be inefficient. Consider using `ma_context_get_device_info()` for this,
+but don't call it from within the enumeration callback.
+
+Returning false from the callback will stop enumeration. Returning true will continue enumeration.
+
+
+Parameters
+----------
+pContext (in)
+ A pointer to the context performing the enumeration.
+
+callback (in)
+ The callback to fire for each enumerated device.
+
+pUserData (in)
+ A pointer to application-defined data passed to the callback.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Safe. This is guarded using a simple mutex lock.
+
+
+Remarks
+-------
+Do _not_ assume the first enumerated device of a given type is the default device.
+
+Some backends and platforms may only support default playback and capture devices.
+
+In general, you should not do anything complicated from within the callback. In particular, do not try initializing a device from within the callback. Also,
+do not try to call `ma_context_get_device_info()` from within the callback.
+
+Consider using `ma_context_get_devices()` for a simpler and safer API, albeit at the expense of an internal heap allocation.
+
+
+Example 1 - Simple Enumeration
+------------------------------
+ma_bool32 my_device_enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
+{
+ printf("Device Name: %s\n", pInfo->name);
+ return MA_TRUE;
+}
+
+ma_result result = ma_context_enumerate_devices(&context, my_device_enum_callback, pMyUserData);
+if (result != MA_SUCCESS) {
+ // Error.
+}
+
+
+See Also
+--------
+ma_context_get_devices()
+*/
+ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData);
+
+/*
+Retrieves basic information about every active playback and/or capture device.
+
+This function will allocate memory internally for the device lists and return a pointer to them through the `ppPlaybackDeviceInfos` and `ppCaptureDeviceInfos`
+parameters. If you do not want to incur the overhead of these allocations consider using `ma_context_enumerate_devices()` which will instead use a callback.
+
+
+Parameters
+----------
+pContext (in)
+ A pointer to the context performing the enumeration.
+
+ppPlaybackDeviceInfos (out)
+ A pointer to a pointer that will receive the address of a buffer containing the list of `ma_device_info` structures for playback devices.
+
+pPlaybackDeviceCount (out)
+ A pointer to an unsigned integer that will receive the number of playback devices.
+
+ppCaptureDeviceInfos (out)
+ A pointer to a pointer that will receive the address of a buffer containing the list of `ma_device_info` structures for capture devices.
+
+pCaptureDeviceCount (out)
+ A pointer to an unsigned integer that will receive the number of capture devices.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. Since each call to this function invalidates the pointers from the previous call, you should not be calling this simultaneously across multiple
+threads. Instead, you need to make a copy of the returned data with your own higher level synchronization.
+
+
+Remarks
+-------
+It is _not_ safe to assume the first device in the list is the default device.
+
+You can pass in NULL for the playback or capture lists in which case they'll be ignored.
+
+The returned pointers will become invalid upon the next call to this function, or when the context is uninitialized. Do not free the returned pointers.
+
+
+See Also
+--------
+ma_context_enumerate_devices()
+*/
+ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount);
+
+/*
+Retrieves information about a device of the given type, with the specified ID and share mode.
+
+
+Parameters
+----------
+pContext (in)
+ A pointer to the context performing the query.
+
+deviceType (in)
+ The type of the device being queried. Must be either `ma_device_type_playback` or `ma_device_type_capture`.
+
+pDeviceID (in)
+ The ID of the device being queried.
+
+shareMode (in)
+ The share mode to query for device capabilities. This should be set to whatever you're intending on using when initializing the device. If you're unsure,
+ set this to `ma_share_mode_shared`.
+
+pDeviceInfo (out)
+ A pointer to the `ma_device_info` structure that will receive the device information.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Safe. This is guarded using a simple mutex lock.
+
+
+Remarks
+-------
+Do _not_ call this from within the `ma_context_enumerate_devices()` callback.
+
+It's possible for a device to have different information and capabilities depending on whether or not it's opened in shared or exclusive mode. For example, in
+shared mode, WASAPI always uses floating point samples for mixing, but in exclusive mode it can be anything. Therefore, this function allows you to specify
+which share mode you want information for. Note that not all backends and devices support shared or exclusive mode, in which case this function will fail if
+the requested share mode is unsupported.
+
+This leaves pDeviceInfo unmodified in the event of an error.
+*/
+ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo);
+
+/*
+Determines if the given context supports loopback mode.
+
+
+Parameters
+----------
+pContext (in)
+ A pointer to the context getting queried.
+
+
+Return Value
+------------
+MA_TRUE if the context supports loopback mode; MA_FALSE otherwise.
+*/
+ma_bool32 ma_context_is_loopback_supported(ma_context* pContext);
+
+
+
+/*
+Initializes a device config with default settings.
+
+
+Parameters
+----------
+deviceType (in)
+ The type of the device this config is being initialized for. This must set to one of the following:
+
+ |-------------------------|
+ | Device Type |
+ |-------------------------|
+ | ma_device_type_playback |
+ | ma_device_type_capture |
+ | ma_device_type_duplex |
+ | ma_device_type_loopback |
+ |-------------------------|
+
+
+Return Value
+------------
+A new device config object with default settings. You will typically want to adjust the config after this function returns. See remarks.
+
+
+Thread Safety
+-------------
+Safe.
+
+
+Callback Safety
+---------------
+Safe, but don't try initializing a device in a callback.
+
+
+Remarks
+-------
+The returned config will be initialized to defaults. You will normally want to customize a few variables before initializing the device. See Example 1 for a
+typical configuration which sets the sample format, channel count, sample rate, data callback and user data. These are usually things you will want to change
+before initializing the device.
+
+See `ma_device_init()` for details on specific configuration options.
+
+
+Example 1 - Simple Configuration
+--------------------------------
+The example below is what a program will typically want to configure for each device at a minimum. Notice how `ma_device_config_init()` is called first, and
+then the returned object is modified directly. This is important because it ensures that your program continues to work as new configuration options are added
+to the `ma_device_config` structure.
+
+```c
+ma_device_config config = ma_device_config_init(ma_device_type_playback);
+config.playback.format = ma_format_f32;
+config.playback.channels = 2;
+config.sampleRate = 48000;
+config.dataCallback = ma_data_callback;
+config.pUserData = pMyUserData;
+```
+
+
+See Also
+--------
+ma_device_init()
+ma_device_init_ex()
+*/
+ma_device_config ma_device_config_init(ma_device_type deviceType);
+
+
+/*
+Initializes a device.
+
+A device represents a physical audio device. The idea is you send or receive audio data from the device to either play it back through a speaker, or capture it
+from a microphone. Whether or not you should send or receive data from the device (or both) depends on the type of device you are initializing which can be
+playback, capture, full-duplex or loopback. (Note that loopback mode is only supported on select backends.) Sending and receiving audio data to and from the
+device is done via a callback which is fired by miniaudio at periodic time intervals.
+
+The frequency at which data is delivered to and from a device depends on the size of its period which is defined by a buffer size and a period count. The size
+of the buffer can be defined in terms of PCM frames or milliseconds, whichever is more convenient. The size of a period is the size of this buffer, divided by
+the period count. Generally speaking, the smaller the period, the lower the latency at the expense of higher CPU usage and increased risk of glitching due to
+the more frequent and granular data delivery intervals. The size of a period will depend on your requirements, but miniaudio's defaults should work fine for
+most scenarios. If you're building a game you should leave this fairly small, whereas if you're building a simple media player you can make it larger. Note
+that the period size you request is actually just a hint - miniaudio will tell the backend what you want, but the backend is ultimately responsible for what it
+gives you. You cannot assume you will get exactly what you ask for.
+
+When delivering data to and from a device you need to make sure it's in the correct format which you can set through the device configuration. You just set the
+format that you want to use and miniaudio will perform all of the necessary conversion for you internally. When delivering data to and from the callback you
+can assume the format is the same as what you requested when you initialized the device. See Remarks for more details on miniaudio's data conversion pipeline.
+
+
+Parameters
+----------
+pContext (in, optional)
+ A pointer to the context that owns the device. This can be null, in which case it creates a default context internally.
+
+pConfig (in)
+ A pointer to the device configuration. Cannot be null. See remarks for details.
+
+pDevice (out)
+ A pointer to the device object being initialized.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. It is not safe to call this function simultaneously for different devices because some backends depend on and mutate global state. The same applies to
+calling this at the same time as `ma_device_uninit()`.
+
+
+Callback Safety
+---------------
+Unsafe. It is not safe to call this inside any callback.
+
+
+Remarks
+-------
+Setting `pContext` to NULL will result in miniaudio creating a default context internally and is equivalent to passing in a context initialized like so:
+
+ ```c
+ ma_context_init(NULL, 0, NULL, &context);
+ ```
+
+Do not set `pContext` to NULL if you are needing to open multiple devices. You can, however, use NULL when initializing the first device, and then use
+device.pContext for the initialization of other devices.
+
+The device can be configured via the `pConfig` argument. The config object is initialized with `ma_device_config_init()`. Individual configuration settings can
+then be set directly on the structure. Below are the members of the `ma_device_config` object.
+
+ deviceType
+        Must be `ma_device_type_playback`, `ma_device_type_capture`, `ma_device_type_duplex` or `ma_device_type_loopback`.
+
+ sampleRate
+ The sample rate, in hertz. The most common sample rates are 48000 and 44100. Setting this to 0 will use the device's native sample rate.
+
+ periodSizeInFrames
+ The desired size of a period in PCM frames. If this is 0, `periodSizeInMilliseconds` will be used instead. If both are 0 the default buffer size will
+ be used depending on the selected performance profile. This value affects latency. See below for details.
+
+ periodSizeInMilliseconds
+ The desired size of a period in milliseconds. If this is 0, `periodSizeInFrames` will be used instead. If both are 0 the default buffer size will be
+ used depending on the selected performance profile. The value affects latency. See below for details.
+
+ periods
+ The number of periods making up the device's entire buffer. The total buffer size is `periodSizeInFrames` or `periodSizeInMilliseconds` multiplied by
+ this value. This is just a hint as backends will be the ones who ultimately decide how your periods will be configured.
+
+ performanceProfile
+ A hint to miniaudio as to the performance requirements of your program. Can be either `ma_performance_profile_low_latency` (default) or
+        `ma_performance_profile_conservative`. This mainly affects the size of default buffers and can usually be left at its default value.
+
+ noPreZeroedOutputBuffer
+ When set to true, the contents of the output buffer passed into the data callback will be left undefined. When set to false (default), the contents of
+ the output buffer will be cleared to zero. You can use this to avoid the overhead of zeroing out the buffer if you can guarantee that your data
+ callback will write to every sample in the output buffer, or if you are doing your own clearing.
+
+ noClip
+ When set to true, the contents of the output buffer passed into the data callback will be clipped after returning. When set to false (default), the
+ contents of the output buffer are left alone after returning and it will be left up to the backend itself to decide whether or not to clip. This only
+ applies when the playback sample format is f32.
+
+ dataCallback
+ The callback to fire whenever data is ready to be delivered to or from the device.
+
+ stopCallback
+ The callback to fire whenever the device has stopped, either explicitly via `ma_device_stop()`, or implicitly due to things like the device being
+ disconnected.
+
+ pUserData
+ The user data pointer to use with the device. You can access this directly from the device object like `device.pUserData`.
+
+ resampling.algorithm
+ The resampling algorithm to use when miniaudio needs to perform resampling between the rate specified by `sampleRate` and the device's native rate. The
+ default value is `ma_resample_algorithm_linear`, and the quality can be configured with `resampling.linear.lpfOrder`.
+
+ resampling.linear.lpfOrder
+ The linear resampler applies a low-pass filter as part of its processing for anti-aliasing. This setting controls the order of the filter. The higher
+ the value, the better the quality, in general. Setting this to 0 will disable low-pass filtering altogether. The maximum value is
+ `MA_MAX_FILTER_ORDER`. The default value is `min(4, MA_MAX_FILTER_ORDER)`.
+
+ playback.pDeviceID
+ A pointer to a `ma_device_id` structure containing the ID of the playback device to initialize. Setting this NULL (default) will use the system's
+ default playback device. Retrieve the device ID from the `ma_device_info` structure, which can be retrieved using device enumeration.
+
+ playback.format
+ The sample format to use for playback. When set to `ma_format_unknown` the device's native format will be used. This can be retrieved after
+ initialization from the device object directly with `device.playback.format`.
+
+ playback.channels
+ The number of channels to use for playback. When set to 0 the device's native channel count will be used. This can be retrieved after initialization
+ from the device object directly with `device.playback.channels`.
+
+ playback.channelMap
+ The channel map to use for playback. When left empty, the device's native channel map will be used. This can be retrieved after initialization from the
+ device object directly with `device.playback.channelMap`.
+
+ playback.shareMode
+ The preferred share mode to use for playback. Can be either `ma_share_mode_shared` (default) or `ma_share_mode_exclusive`. Note that if you specify
+ exclusive mode, but it's not supported by the backend, initialization will fail. You can then fall back to shared mode if desired.
+
+ capture.pDeviceID
+ A pointer to a `ma_device_id` structure containing the ID of the capture device to initialize. Setting this NULL (default) will use the system's
+ default capture device. Retrieve the device ID from the `ma_device_info` structure, which can be retrieved using device enumeration.
+
+ capture.format
+ The sample format to use for capture. When set to `ma_format_unknown` the device's native format will be used. This can be retrieved after
+ initialization from the device object directly with `device.capture.format`.
+
+ capture.channels
+ The number of channels to use for capture. When set to 0 the device's native channel count will be used. This can be retrieved after initialization
+ from the device object directly with `device.capture.channels`.
+
+ capture.channelMap
+ The channel map to use for capture. When left empty, the device's native channel map will be used. This can be retrieved after initialization from the
+ device object directly with `device.capture.channelMap`.
+
+ capture.shareMode
+ The preferred share mode to use for capture. Can be either `ma_share_mode_shared` (default) or `ma_share_mode_exclusive`. Note that if you specify
+ exclusive mode, but it's not supported by the backend, initialization will fail. You can then fall back to shared mode if desired.
+
+ wasapi.noAutoConvertSRC
+ WASAPI only. When set to true, disables WASAPI's automatic resampling and forces the use of miniaudio's resampler. Defaults to false.
+
+ wasapi.noDefaultQualitySRC
+ WASAPI only. Only used when `wasapi.noAutoConvertSRC` is set to false. When set to true, disables the use of `AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY`.
+ You should usually leave this set to false, which is the default.
+
+ wasapi.noAutoStreamRouting
+ WASAPI only. When set to true, disables automatic stream routing on the WASAPI backend. Defaults to false.
+
+ wasapi.noHardwareOffloading
+ WASAPI only. When set to true, disables the use of WASAPI's hardware offloading feature. Defaults to false.
+
+ alsa.noMMap
+ ALSA only. When set to true, disables MMap mode. Defaults to false.
+
+ pulse.pStreamNamePlayback
+ PulseAudio only. Sets the stream name for playback.
+
+ pulse.pStreamNameCapture
+ PulseAudio only. Sets the stream name for capture.
+
+
+Once initialized, the device's config is immutable. If you need to change the config you will need to initialize a new device.
+
+If both `periodSizeInFrames` and `periodSizeInMilliseconds` are set to zero, it will default to `MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY` or
+`MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE`, depending on whether or not `performanceProfile` is set to `ma_performance_profile_low_latency` or
+`ma_performance_profile_conservative`.
+
+If you request exclusive mode and the backend does not support it an error will be returned. For robustness, you may want to first try initializing the device
+in exclusive mode, and then fall back to shared mode if required. Alternatively you can just request shared mode (the default if you leave it unset in the
+config) which is the most reliable option. Some backends do not have a practical way of choosing whether or not the device should be exclusive or not (ALSA,
+for example) in which case it just acts as a hint. Unless you have special requirements you should try avoiding exclusive mode as it's intrusive to the user.
+Starting with Windows 10, miniaudio will use low-latency shared mode where possible which may make exclusive mode unnecessary.
+
+After initializing the device it will be in a stopped state. To start it, use `ma_device_start()`.
+
+When sending or receiving data to/from a device, miniaudio will internally perform a format conversion to convert between the format specified by pConfig and
+the format used internally by the backend. If you pass in 0 for the sample format, channel count, sample rate _and_ channel map, data transmission will run on
+an optimized pass-through fast path. You can retrieve the format, channel count and sample rate by inspecting the `playback/capture.format`,
+`playback/capture.channels` and `sampleRate` members of the device object.
+
+When compiling for UWP you must ensure you call this function on the main UI thread because the operating system may need to present the user with a message
+asking for permissions. Please refer to the official documentation for ActivateAudioInterfaceAsync() for more information.
+
+ALSA Specific: When initializing the default device, requesting shared mode will try using the "dmix" device for playback and the "dsnoop" device for capture.
+If these fail it will try falling back to the "hw" device.
+
+
+Example 1 - Simple Initialization
+---------------------------------
+This example shows how to initialize a simple default playback device using a standard configuration. If you are just needing to do simple playback from the default
+playback device this is usually all you need.
+
+```c
+ma_device_config config = ma_device_config_init(ma_device_type_playback);
+config.playback.format = ma_format_f32;
+config.playback.channels = 2;
+config.sampleRate = 48000;
+config.dataCallback = ma_data_callback;
+config.pUserData = pMyUserData;
+
+ma_device device;
+ma_result result = ma_device_init(NULL, &config, &device);
+if (result != MA_SUCCESS) {
+ // Error
+}
+```
+
+
+Example 2 - Advanced Initialization
+-----------------------------------
+This example shows how you might do some more advanced initialization. In this hypothetical example we want to control the latency by setting the buffer size
+and period count. We also want to allow the user to be able to choose which device to output from which means we need a context so we can perform device
+enumeration.
+
+```c
+ma_context context;
+ma_result result = ma_context_init(NULL, 0, NULL, &context);
+if (result != MA_SUCCESS) {
+ // Error
+}
+
+ma_device_info* pPlaybackDeviceInfos;
+ma_uint32 playbackDeviceCount;
+result = ma_context_get_devices(&context, &pPlaybackDeviceInfos, &playbackDeviceCount, NULL, NULL);
+if (result != MA_SUCCESS) {
+ // Error
+}
+
+// ... choose a device from pPlaybackDeviceInfos ...
+
+ma_device_config config = ma_device_config_init(ma_device_type_playback);
+config.playback.pDeviceID = pMyChosenDeviceID; // <-- Get this from the `id` member of one of the `ma_device_info` objects returned by ma_context_get_devices().
+config.playback.format = ma_format_f32;
+config.playback.channels = 2;
+config.sampleRate = 48000;
+config.dataCallback = ma_data_callback;
+config.pUserData = pMyUserData;
+config.periodSizeInMilliseconds = 10;
+config.periods = 3;
+
+ma_device device;
+result = ma_device_init(&context, &config, &device);
+if (result != MA_SUCCESS) {
+ // Error
+}
+```
+
+
+See Also
+--------
+ma_device_config_init()
+ma_device_uninit()
+ma_device_start()
+ma_context_init()
+ma_context_get_devices()
+ma_context_enumerate_devices()
+*/
+ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice);
+
+/*
+Initializes a device without a context, with extra parameters for controlling the configuration of the internal self-managed context.
+
+This is the same as `ma_device_init()`, only instead of a context being passed in, the parameters from `ma_context_init()` are passed in instead. This function
+allows you to configure the internally created context.
+
+
+Parameters
+----------
+backends (in, optional)
+ A list of backends to try initializing, in priority order. Can be NULL, in which case it uses default priority order.
+
+backendCount (in, optional)
+ The number of items in `backends`. Ignored if `backends` is NULL.
+
+pContextConfig (in, optional)
+ The context configuration.
+
+pConfig (in)
+ A pointer to the device configuration. Cannot be null. See remarks for details.
+
+pDevice (out)
+ A pointer to the device object being initialized.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. It is not safe to call this function simultaneously for different devices because some backends depend on and mutate global state. The same applies to
+calling this at the same time as `ma_device_uninit()`.
+
+
+Callback Safety
+---------------
+Unsafe. It is not safe to call this inside any callback.
+
+
+Remarks
+-------
+You only need to use this function if you want to configure the context differently to its defaults. You should never use this function if you want to manage
+your own context.
+
+See the documentation for `ma_context_init()` for information on the different context configuration options.
+
+
+See Also
+--------
+ma_device_init()
+ma_device_uninit()
+ma_device_config_init()
+ma_context_init()
+*/
+ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice);
+
+/*
+Uninitializes a device.
+
+This will explicitly stop the device. You do not need to call `ma_device_stop()` beforehand, but it's harmless if you do.
+
+
+Parameters
+----------
+pDevice (in)
+ A pointer to the device to stop.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. As soon as this API is called the device should be considered undefined.
+
+
+Callback Safety
+---------------
+Unsafe. It is not safe to call this inside any callback. Doing this will result in a deadlock.
+
+
+See Also
+--------
+ma_device_init()
+ma_device_stop()
+*/
+void ma_device_uninit(ma_device* pDevice);
+
+/*
+Starts the device. For playback devices this begins playback. For capture devices it begins recording.
+
+Use `ma_device_stop()` to stop the device.
+
+
+Parameters
+----------
+pDevice (in)
+ A pointer to the device to start.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Safe. It's safe to call this from any thread with the exception of the callback thread.
+
+
+Callback Safety
+---------------
+Unsafe. It is not safe to call this inside any callback.
+
+
+Remarks
+-------
+For a playback device, this will retrieve an initial chunk of audio data from the client before returning. The reason for this is to ensure there is valid
+audio data in the buffer, which needs to be done before the device begins playback.
+
+This API waits until the backend device has been started for real by the worker thread. It also waits on a mutex for thread-safety.
+
+Do not call this in any callback.
+
+
+See Also
+--------
+ma_device_stop()
+*/
+ma_result ma_device_start(ma_device* pDevice);
+
+/*
+Stops the device. For playback devices this stops playback. For capture devices it stops recording.
+
+Use `ma_device_start()` to start the device again.
+
+
+Parameters
+----------
+pDevice (in)
+ A pointer to the device to stop.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Safe. It's safe to call this from any thread with the exception of the callback thread.
+
+
+Callback Safety
+---------------
+Unsafe. It is not safe to call this inside any callback. Doing this will result in a deadlock.
+
+
+Remarks
+-------
+This API needs to wait on the worker thread to stop the backend device properly before returning. It also waits on a mutex for thread-safety. In addition, some
+backends need to wait for the device to finish playback/recording of the current fragment which can take some time (usually proportionate to the buffer size
+that was specified at initialization time).
+
+Backends are required to either pause the stream in-place or drain the buffer if pausing is not possible. The reason for this is that stopping the device and
+then resuming it with ma_device_start() (which you might do when your program loses focus) may result in a situation where those samples are never output to the
+speakers or received from the microphone which can in turn result in de-syncs.
+
+Do not call this in any callback.
+
+This will be called implicitly by `ma_device_uninit()`.
+
+
+See Also
+--------
+ma_device_start()
+*/
+ma_result ma_device_stop(ma_device* pDevice);
+
+/*
+Determines whether or not the device is started.
+
+
+Parameters
+----------
+pDevice (in)
+ A pointer to the device whose start state is being retrieved.
+
+
+Return Value
+------------
+True if the device is started, false otherwise.
+
+
+Thread Safety
+-------------
+Safe. If another thread calls `ma_device_start()` or `ma_device_stop()` at the same time as this function is called, there's a very small chance the return
+value will be out of sync.
+
+
+Callback Safety
+---------------
+Safe. This is implemented as a simple accessor.
+
+
+See Also
+--------
+ma_device_start()
+ma_device_stop()
+*/
+ma_bool32 ma_device_is_started(ma_device* pDevice);
+
+/*
+Sets the master volume factor for the device.
+
+The volume factor must be between 0 (silence) and 1 (full volume). Use `ma_device_set_master_gain_db()` to use decibel notation, where 0 is full volume and
+values less than 0 decreases the volume.
+
+
+Parameters
+----------
+pDevice (in)
+ A pointer to the device whose volume is being set.
+
+volume (in)
+ The new volume factor. Must be within the range of [0, 1].
+
+
+Return Value
+------------
+MA_SUCCESS if the volume was set successfully.
+MA_INVALID_ARGS if pDevice is NULL.
+MA_INVALID_ARGS if the volume factor is not within the range of [0, 1].
+
+
+Thread Safety
+-------------
+Safe. This just sets a local member of the device object.
+
+
+Callback Safety
+---------------
+Safe. If you set the volume in the data callback, that data written to the output buffer will have the new volume applied.
+
+
+Remarks
+-------
+This applies the volume factor across all channels.
+
+This does not change the operating system's volume. It only affects the volume for the given `ma_device` object's audio stream.
+
+
+See Also
+--------
+ma_device_get_master_volume()
+ma_device_set_master_gain_db()
+ma_device_get_master_gain_db()
+*/
+ma_result ma_device_set_master_volume(ma_device* pDevice, float volume);
+
+/*
+Retrieves the master volume factor for the device.
+
+
+Parameters
+----------
+pDevice (in)
+ A pointer to the device whose volume factor is being retrieved.
+
+pVolume (in)
+ A pointer to the variable that will receive the volume factor. The returned value will be in the range of [0, 1].
+
+
+Return Value
+------------
+MA_SUCCESS if successful.
+MA_INVALID_ARGS if pDevice is NULL.
+MA_INVALID_ARGS if pVolume is NULL.
+
+
+Thread Safety
+-------------
+Safe. This is just a simple member retrieval.
+
+
+Callback Safety
+---------------
+Safe.
+
+
+Remarks
+-------
+If an error occurs, `*pVolume` will be set to 0.
+
+
+See Also
+--------
+ma_device_set_master_volume()
+ma_device_set_master_gain_db()
+ma_device_get_master_gain_db()
+*/
+ma_result ma_device_get_master_volume(ma_device* pDevice, float* pVolume);
+
+/*
+Sets the master volume for the device as gain in decibels.
+
+A gain of 0 is full volume, whereas a gain of < 0 will decrease the volume.
+
+
+Parameters
+----------
+pDevice (in)
+ A pointer to the device whose gain is being set.
+
+gainDB (in)
+ The new volume as gain in decibels. Must be less than or equal to 0, where 0 is full volume and anything less than 0 decreases the volume.
+
+
+Return Value
+------------
+MA_SUCCESS if the volume was set successfully.
+MA_INVALID_ARGS if pDevice is NULL.
+MA_INVALID_ARGS if the gain is > 0.
+
+
+Thread Safety
+-------------
+Safe. This just sets a local member of the device object.
+
+
+Callback Safety
+---------------
+Safe. If you set the volume in the data callback, that data written to the output buffer will have the new volume applied.
+
+
+Remarks
+-------
+This applies the gain across all channels.
+
+This does not change the operating system's volume. It only affects the volume for the given `ma_device` object's audio stream.
+
+
+See Also
+--------
+ma_device_get_master_gain_db()
+ma_device_set_master_volume()
+ma_device_get_master_volume()
+*/
+ma_result ma_device_set_master_gain_db(ma_device* pDevice, float gainDB);
+
+/*
+Retrieves the master gain in decibels.
+
+
+Parameters
+----------
+pDevice (in)
+ A pointer to the device whose gain is being retrieved.
+
+pGainDB (in)
+ A pointer to the variable that will receive the gain in decibels. The returned value will be <= 0.
+
+
+Return Value
+------------
+MA_SUCCESS if successful.
+MA_INVALID_ARGS if pDevice is NULL.
+MA_INVALID_ARGS if pGainDB is NULL.
+
+
+Thread Safety
+-------------
+Safe. This is just a simple member retrieval.
+
+
+Callback Safety
+---------------
+Safe.
+
+
+Remarks
+-------
+If an error occurs, `*pGainDB` will be set to 0.
+
+
+See Also
+--------
+ma_device_set_master_gain_db()
+ma_device_set_master_volume()
+ma_device_get_master_volume()
+*/
+ma_result ma_device_get_master_gain_db(ma_device* pDevice, float* pGainDB);
+
+
+
+/************************************************************************************************************************************************************
+
+Utilities
+
+************************************************************************************************************************************************************/
+
+/*
+Creates a mutex.
+
+A mutex must be created from a valid context. A mutex is initially unlocked.
+*/
+ma_result ma_mutex_init(ma_context* pContext, ma_mutex* pMutex);
+
+/*
+Deletes a mutex.
+*/
+void ma_mutex_uninit(ma_mutex* pMutex);
+
+/*
+Locks a mutex with an infinite timeout.
+*/
+void ma_mutex_lock(ma_mutex* pMutex);
+
+/*
+Unlocks a mutex.
+*/
+void ma_mutex_unlock(ma_mutex* pMutex);
+
+
+/*
+Retrieves a friendly name for a backend.
+*/
+const char* ma_get_backend_name(ma_backend backend);
+
+/*
+Determines whether or not loopback mode is supported by a backend.
+*/
+ma_bool32 ma_is_loopback_supported(ma_backend backend);
+
+
+/*
+Adjust buffer size based on a scaling factor.
+
+This just multiplies the base size by the scaling factor, making sure it's a size of at least 1.
+*/
+ma_uint32 ma_scale_buffer_size(ma_uint32 baseBufferSize, float scale);
+
+/*
+Calculates a buffer size in milliseconds from the specified number of frames and sample rate.
+*/
+ma_uint32 ma_calculate_buffer_size_in_milliseconds_from_frames(ma_uint32 bufferSizeInFrames, ma_uint32 sampleRate);
+
+/*
+Calculates a buffer size in frames from the specified number of milliseconds and sample rate.
+*/
+ma_uint32 ma_calculate_buffer_size_in_frames_from_milliseconds(ma_uint32 bufferSizeInMilliseconds, ma_uint32 sampleRate);
+
+/*
+Copies silent frames into the given buffer.
+*/
+void ma_zero_pcm_frames(void* p, ma_uint32 frameCount, ma_format format, ma_uint32 channels);
+
+/*
+Clips f32 samples.
+*/
+void ma_clip_samples_f32(float* p, ma_uint32 sampleCount);
+/* Clips interleaved f32 PCM frames in-place by forwarding to ma_clip_samples_f32() with sampleCount = frameCount*channels. */
+MA_INLINE void ma_clip_pcm_frames_f32(float* p, ma_uint32 frameCount, ma_uint32 channels) { ma_clip_samples_f32(p, frameCount*channels); }
+
+/*
+Helper for applying a volume factor to samples.
+
+Note that the source and destination buffers can be the same, in which case it'll perform the operation in-place.
+*/
+void ma_copy_and_apply_volume_factor_u8(ma_uint8* pSamplesOut, const ma_uint8* pSamplesIn, ma_uint32 sampleCount, float factor);
+void ma_copy_and_apply_volume_factor_s16(ma_int16* pSamplesOut, const ma_int16* pSamplesIn, ma_uint32 sampleCount, float factor);
+void ma_copy_and_apply_volume_factor_s24(void* pSamplesOut, const void* pSamplesIn, ma_uint32 sampleCount, float factor);
+void ma_copy_and_apply_volume_factor_s32(ma_int32* pSamplesOut, const ma_int32* pSamplesIn, ma_uint32 sampleCount, float factor);
+void ma_copy_and_apply_volume_factor_f32(float* pSamplesOut, const float* pSamplesIn, ma_uint32 sampleCount, float factor);
+
+void ma_apply_volume_factor_u8(ma_uint8* pSamples, ma_uint32 sampleCount, float factor);
+void ma_apply_volume_factor_s16(ma_int16* pSamples, ma_uint32 sampleCount, float factor);
+void ma_apply_volume_factor_s24(void* pSamples, ma_uint32 sampleCount, float factor);
+void ma_apply_volume_factor_s32(ma_int32* pSamples, ma_uint32 sampleCount, float factor);
+void ma_apply_volume_factor_f32(float* pSamples, ma_uint32 sampleCount, float factor);
+
+void ma_copy_and_apply_volume_factor_pcm_frames_u8(ma_uint8* pPCMFramesOut, const ma_uint8* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor);
+void ma_copy_and_apply_volume_factor_pcm_frames_s16(ma_int16* pPCMFramesOut, const ma_int16* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor);
+void ma_copy_and_apply_volume_factor_pcm_frames_s24(void* pPCMFramesOut, const void* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor);
+void ma_copy_and_apply_volume_factor_pcm_frames_s32(ma_int32* pPCMFramesOut, const ma_int32* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor);
+void ma_copy_and_apply_volume_factor_pcm_frames_f32(float* pPCMFramesOut, const float* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor);
+void ma_copy_and_apply_volume_factor_pcm_frames(void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount, ma_format format, ma_uint32 channels, float factor);
+
+void ma_apply_volume_factor_pcm_frames_u8(ma_uint8* pFrames, ma_uint32 frameCount, ma_uint32 channels, float factor);
+void ma_apply_volume_factor_pcm_frames_s16(ma_int16* pFrames, ma_uint32 frameCount, ma_uint32 channels, float factor);
+void ma_apply_volume_factor_pcm_frames_s24(void* pFrames, ma_uint32 frameCount, ma_uint32 channels, float factor);
+void ma_apply_volume_factor_pcm_frames_s32(ma_int32* pFrames, ma_uint32 frameCount, ma_uint32 channels, float factor);
+void ma_apply_volume_factor_pcm_frames_f32(float* pFrames, ma_uint32 frameCount, ma_uint32 channels, float factor);
+void ma_apply_volume_factor_pcm_frames(void* pFrames, ma_uint32 frameCount, ma_format format, ma_uint32 channels, float factor);
+
+
+/*
+Helper for converting a linear factor to gain in decibels.
+*/
+float ma_factor_to_gain_db(float factor);
+
+/*
+Helper for converting gain in decibels to a linear factor.
+*/
+float ma_gain_db_to_factor(float gain);
+
+#endif /* MA_NO_DEVICE_IO */
+
+
+#if !defined(MA_NO_DECODING) || !defined(MA_NO_ENCODING)
+/* Origin for a seek, as used by the decoder/encoder seek callbacks (see ma_decoder_seek_proc). */
+typedef enum
+{
+ ma_seek_origin_start,
+ ma_seek_origin_current
+} ma_seek_origin;
+
+/* Container format of an encoded resource. Only WAV is declared here. */
+typedef enum
+{
+ ma_resource_format_wav
+} ma_resource_format;
+#endif
+
+/************************************************************************************************************************************************************
+
+Decoding
+========
+
+Decoders are independent of the main device API. Decoding APIs can be called freely inside the device's data callback, but they are not thread safe unless
+you do your own synchronization.
+
+************************************************************************************************************************************************************/
+#ifndef MA_NO_DECODING
+typedef struct ma_decoder ma_decoder;
+
+typedef size_t (* ma_decoder_read_proc) (ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead); /* Returns the number of bytes read. */
+typedef ma_bool32 (* ma_decoder_seek_proc) (ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin);
+typedef ma_uint64 (* ma_decoder_read_pcm_frames_proc) (ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount); /* Returns the number of frames read. Output data is in internal format. */
+typedef ma_result (* ma_decoder_seek_to_pcm_frame_proc) (ma_decoder* pDecoder, ma_uint64 frameIndex);
+typedef ma_result (* ma_decoder_uninit_proc) (ma_decoder* pDecoder);
+typedef ma_uint64 (* ma_decoder_get_length_in_pcm_frames_proc)(ma_decoder* pDecoder);
+
+/* Decoder configuration. Initialize with ma_decoder_config_init() and then set individual members as required. */
+typedef struct
+{
+ ma_format format; /* Set to 0 or ma_format_unknown to use the stream's internal format. */
+ ma_uint32 channels; /* Set to 0 to use the stream's internal channels. */
+ ma_uint32 sampleRate; /* Set to 0 to use the stream's internal sample rate. */
+ ma_channel channelMap[MA_MAX_CHANNELS]; /* Output channel map. NOTE(review): presumably the stream's internal map is used when left empty - confirm. */
+ ma_channel_mix_mode channelMixMode; /* How channels are mixed when the output channel count differs from the internal count. */
+ ma_dither_mode ditherMode; /* Dithering applied during format conversion. */
+ struct
+ {
+ ma_resample_algorithm algorithm; /* Resampling algorithm used when sampleRate differs from the stream's internal rate. */
+ struct
+ {
+ ma_uint32 lpfOrder; /* Low-pass filter order for the linear resampler. 0 disables filtering. */
+ } linear;
+ struct
+ {
+ int quality; /* Speex resampler quality setting. */
+ } speex;
+ } resampling;
+ ma_allocation_callbacks allocationCallbacks; /* Custom allocators used for the decoder's internal allocations. */
+} ma_decoder_config;
+
+/* Decoder state. Initialize with one of the ma_decoder_init_*() functions and clean up with ma_decoder_uninit(). */
+struct ma_decoder
+{
+ ma_decoder_read_proc onRead; /* Callback for reading raw bytes from the backing stream. */
+ ma_decoder_seek_proc onSeek; /* Callback for seeking within the backing stream. */
+ void* pUserData; /* Application-defined data, retrievable from the decoder object inside callbacks. */
+ ma_uint64 readPointer; /* Used for returning back to a previous position after analysing the stream or whatnot. */
+ ma_format internalFormat; /* Format of the data produced by the backend decoder, before conversion through `converter`. */
+ ma_uint32 internalChannels;
+ ma_uint32 internalSampleRate;
+ ma_channel internalChannelMap[MA_MAX_CHANNELS];
+ ma_format outputFormat; /* Format delivered to the caller after conversion. */
+ ma_uint32 outputChannels;
+ ma_uint32 outputSampleRate;
+ ma_channel outputChannelMap[MA_MAX_CHANNELS];
+ ma_data_converter converter; /* <-- Data conversion is achieved by running frames through this. */
+ ma_allocation_callbacks allocationCallbacks;
+ ma_decoder_read_pcm_frames_proc onReadPCMFrames; /* Backend-specific frame reader. Output is in internal format. */
+ ma_decoder_seek_to_pcm_frame_proc onSeekToPCMFrame; /* Backend-specific seek-by-frame-index. */
+ ma_decoder_uninit_proc onUninit; /* Backend-specific cleanup. */
+ ma_decoder_get_length_in_pcm_frames_proc onGetLengthInPCMFrames; /* Backend-specific length query. May be expensive (see ma_decoder_get_length_in_pcm_frames()). */
+ void* pInternalDecoder; /* <-- The drwav/drflac/stb_vorbis/etc. objects. */
+ struct
+ {
+ const ma_uint8* pData;
+ size_t dataSize;
+ size_t currentReadPos;
+ } memory; /* Only used for decoders that were opened against a block of memory. */
+};
+
+ma_decoder_config ma_decoder_config_init(ma_format outputFormat, ma_uint32 outputChannels, ma_uint32 outputSampleRate);
+
+ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_wav(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_flac(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_vorbis(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_mp3(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_raw(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder);
+
+ma_result ma_decoder_init_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_memory_wav(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_memory_flac(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_memory_vorbis(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_memory_mp3(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_memory_raw(const void* pData, size_t dataSize, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder);
+
+#ifndef MA_NO_STDIO
+ma_result ma_decoder_init_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_file_wav(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_file_flac(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_file_vorbis(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_file_mp3(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+
+ma_result ma_decoder_init_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_file_wav_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_file_flac_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_file_vorbis_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+ma_result ma_decoder_init_file_mp3_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder);
+#endif
+
+ma_result ma_decoder_uninit(ma_decoder* pDecoder);
+
+/*
+Retrieves the length of the decoder in PCM frames.
+
+Do not call this on streams of an undefined length, such as internet radio.
+
+If the length is unknown or an error occurs, 0 will be returned.
+
+This will always return 0 for Vorbis decoders. This is due to a limitation with stb_vorbis in push mode which is what miniaudio
+uses internally.
+
+For MP3's, this will decode the entire file. Do not call this in time critical scenarios.
+
+This function is not thread safe without your own synchronization.
+*/
+ma_uint64 ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder);
+
+/*
+Reads PCM frames from the given decoder.
+
+This is not thread safe without your own synchronization.
+*/
+ma_uint64 ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount);
+
+/*
+Seeks to a PCM frame based on its absolute index.
+
+This is not thread safe without your own synchronization.
+*/
+ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex);
+
+/*
+Helper for opening and decoding a file into a heap allocated block of memory. Free the returned pointer with ma_free(). On input,
+pConfig should be set to what you want. On output it will be set to what you got.
+*/
+#ifndef MA_NO_STDIO
+ma_result ma_decode_file(const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppDataOut);
+#endif
+ma_result ma_decode_memory(const void* pData, size_t dataSize, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppDataOut);
+
+#endif /* MA_NO_DECODING */
+
+
+/************************************************************************************************************************************************************
+
+Encoding
+========
+
+Encoders do not perform any format conversion for you. If your target format does not support the format, an error will be returned.
+
+************************************************************************************************************************************************************/
+#ifndef MA_NO_ENCODING
+typedef struct ma_encoder ma_encoder;
+
+typedef size_t (* ma_encoder_write_proc) (ma_encoder* pEncoder, const void* pBufferIn, size_t bytesToWrite); /* Returns the number of bytes written. */
+typedef ma_bool32 (* ma_encoder_seek_proc) (ma_encoder* pEncoder, int byteOffset, ma_seek_origin origin);
+typedef ma_result (* ma_encoder_init_proc) (ma_encoder* pEncoder);
+typedef void (* ma_encoder_uninit_proc) (ma_encoder* pEncoder);
+typedef ma_uint64 (* ma_encoder_write_pcm_frames_proc)(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount);
+
+typedef struct
+{
+ ma_resource_format resourceFormat;
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ ma_allocation_callbacks allocationCallbacks;
+} ma_encoder_config;
+
+ma_encoder_config ma_encoder_config_init(ma_resource_format resourceFormat, ma_format format, ma_uint32 channels, ma_uint32 sampleRate);
+
+struct ma_encoder
+{
+ ma_encoder_config config;
+ ma_encoder_write_proc onWrite;
+ ma_encoder_seek_proc onSeek;
+ ma_encoder_init_proc onInit;
+ ma_encoder_uninit_proc onUninit;
+ ma_encoder_write_pcm_frames_proc onWritePCMFrames;
+ void* pUserData;
+ void* pInternalEncoder; /* <-- The drwav/drflac/stb_vorbis/etc. objects. */
+ void* pFile; /* FILE*. Only used when initialized with ma_encoder_init_file(). */
+};
+
+ma_result ma_encoder_init(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* pUserData, const ma_encoder_config* pConfig, ma_encoder* pEncoder);
+#ifndef MA_NO_STDIO
+ma_result ma_encoder_init_file(const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder);
+ma_result ma_encoder_init_file_w(const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder);
+#endif
+void ma_encoder_uninit(ma_encoder* pEncoder);
+ma_uint64 ma_encoder_write_pcm_frames(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount);
+
+#endif /* MA_NO_ENCODING */
+
+
+/************************************************************************************************************************************************************
+
+Generation
+
+************************************************************************************************************************************************************/
+typedef enum
+{
+ ma_waveform_type_sine,
+ ma_waveform_type_square,
+ ma_waveform_type_triangle,
+ ma_waveform_type_sawtooth
+} ma_waveform_type;
+
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ ma_waveform_type type;
+ double amplitude;
+ double frequency;
+} ma_waveform_config;
+
+ma_waveform_config ma_waveform_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_waveform_type type, double amplitude, double frequency);
+
+typedef struct
+{
+ ma_waveform_config config;
+ double advance;
+ double time;
+} ma_waveform;
+
+ma_result ma_waveform_init(const ma_waveform_config* pConfig, ma_waveform* pWaveform);
+ma_uint64 ma_waveform_read_pcm_frames(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount);
+ma_result ma_waveform_set_amplitude(ma_waveform* pWaveform, double amplitude);
+ma_result ma_waveform_set_frequency(ma_waveform* pWaveform, double frequency);
+ma_result ma_waveform_set_sample_rate(ma_waveform* pWaveform, ma_uint32 sampleRate);
+
+
+
+typedef struct
+{
+ ma_int32 state;
+} ma_lcg;
+
+typedef enum
+{
+ ma_noise_type_white,
+ ma_noise_type_pink,
+ ma_noise_type_brownian
+} ma_noise_type;
+
+typedef struct
+{
+ ma_format format;
+ ma_uint32 channels;
+ ma_noise_type type;
+ ma_int32 seed;
+ double amplitude;
+ ma_bool32 duplicateChannels;
+} ma_noise_config;
+
+ma_noise_config ma_noise_config_init(ma_format format, ma_uint32 channels, ma_noise_type type, ma_int32 seed, double amplitude);
+
+typedef struct
+{
+ ma_noise_config config;
+ ma_lcg lcg;
+ union
+ {
+ struct
+ {
+ double bin[MA_MAX_CHANNELS][16];
+ double accumulation[MA_MAX_CHANNELS];
+ ma_uint32 counter[MA_MAX_CHANNELS];
+ } pink;
+ struct
+ {
+ double accumulation[MA_MAX_CHANNELS];
+ } brownian;
+ } state;
+} ma_noise;
+
+ma_result ma_noise_init(const ma_noise_config* pConfig, ma_noise* pNoise);
+ma_uint64 ma_noise_read_pcm_frames(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* miniaudio_h */
+
+
+
+/************************************************************************************************************************************************************
+*************************************************************************************************************************************************************
+
+IMPLEMENTATION
+
+*************************************************************************************************************************************************************
+************************************************************************************************************************************************************/
+#if defined(MINIAUDIO_IMPLEMENTATION) || defined(MA_IMPLEMENTATION)
+#include <assert.h>
+#include <limits.h> /* For INT_MAX */
+#include <math.h>   /* sin(), etc. */
+
+#if !defined(MA_NO_STDIO) || defined(MA_DEBUG_OUTPUT)
+    #include <stdio.h>
+    #if !defined(_MSC_VER) && !defined(__DMC__)
+        #include <strings.h>    /* For strcasecmp(). */
+        #include <wchar.h>      /* For wcslen(), wcsrtombs() */
+ #endif
+#endif
+
+#ifdef MA_WIN32
+#include <windows.h>
+#include <objbase.h>
+#include <mmreg.h>
+#include <limits.h>
+#else
+#include <stdlib.h> /* For malloc(), free(), wcstombs(). */
+#include <string.h> /* For memset() */
+#endif
+
+#if defined(MA_APPLE) && (__MAC_OS_X_VERSION_MIN_REQUIRED < 101200)
+#include <mach/mach_time.h> /* For mach_absolute_time() */
+#endif
+
+#ifdef MA_POSIX
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <dlfcn.h>
+#endif
+
+#ifdef MA_EMSCRIPTEN
+#include <emscripten/emscripten.h>
+#endif
+
+#if !defined(MA_64BIT) && !defined(MA_32BIT)
+#ifdef _WIN32
+#ifdef _WIN64
+#define MA_64BIT
+#else
+#define MA_32BIT
+#endif
+#endif
+#endif
+
+#if !defined(MA_64BIT) && !defined(MA_32BIT)
+#ifdef __GNUC__
+#ifdef __LP64__
+#define MA_64BIT
+#else
+#define MA_32BIT
+#endif
+#endif
+#endif
+
+#if !defined(MA_64BIT) && !defined(MA_32BIT)
+#include <stdint.h>
+#if INTPTR_MAX == INT64_MAX
+#define MA_64BIT
+#else
+#define MA_32BIT
+#endif
+#endif
+
+/* Architecture Detection */
+#if defined(__x86_64__) || defined(_M_X64)
+#define MA_X64
+#elif defined(__i386) || defined(_M_IX86)
+#define MA_X86
+#elif defined(__arm__) || defined(_M_ARM)
+#define MA_ARM
+#endif
+
+/* Cannot currently support AVX-512 if AVX is disabled. */
+#if !defined(MA_NO_AVX512) && defined(MA_NO_AVX2)
+#define MA_NO_AVX512
+#endif
+
+/* Intrinsics Support */
+#if defined(MA_X64) || defined(MA_X86)
+ #if defined(_MSC_VER) && !defined(__clang__)
+ /* MSVC. */
+ #if _MSC_VER >= 1400 && !defined(MA_NO_SSE2) /* 2005 */
+ #define MA_SUPPORT_SSE2
+ #endif
+ /*#if _MSC_VER >= 1600 && !defined(MA_NO_AVX)*/ /* 2010 */
+ /* #define MA_SUPPORT_AVX*/
+ /*#endif*/
+ #if _MSC_VER >= 1700 && !defined(MA_NO_AVX2) /* 2012 */
+ #define MA_SUPPORT_AVX2
+ #endif
+ #if _MSC_VER >= 1910 && !defined(MA_NO_AVX512) /* 2017 */
+ #define MA_SUPPORT_AVX512
+ #endif
+ #else
+ /* Assume GNUC-style. */
+ #if defined(__SSE2__) && !defined(MA_NO_SSE2)
+ #define MA_SUPPORT_SSE2
+ #endif
+ /*#if defined(__AVX__) && !defined(MA_NO_AVX)*/
+ /* #define MA_SUPPORT_AVX*/
+ /*#endif*/
+ #if defined(__AVX2__) && !defined(MA_NO_AVX2)
+ #define MA_SUPPORT_AVX2
+ #endif
+ #if defined(__AVX512F__) && !defined(MA_NO_AVX512)
+ #define MA_SUPPORT_AVX512
+ #endif
+ #endif
+
+ /* If at this point we still haven't determined compiler support for the intrinsics just fall back to __has_include. */
+ #if !defined(__GNUC__) && !defined(__clang__) && defined(__has_include)
+        #if !defined(MA_SUPPORT_SSE2)   && !defined(MA_NO_SSE2)   && __has_include(<emmintrin.h>)
+ #define MA_SUPPORT_SSE2
+ #endif
+        /*#if !defined(MA_SUPPORT_AVX)    && !defined(MA_NO_AVX)    && __has_include(<immintrin.h>)*/
+ /* #define MA_SUPPORT_AVX*/
+ /*#endif*/
+        #if !defined(MA_SUPPORT_AVX2)   && !defined(MA_NO_AVX2)   && __has_include(<immintrin.h>)
+ #define MA_SUPPORT_AVX2
+ #endif
+        #if !defined(MA_SUPPORT_AVX512) && !defined(MA_NO_AVX512) && __has_include(<zmmintrin.h>)
+ #define MA_SUPPORT_AVX512
+ #endif
+ #endif
+
+ #if defined(MA_SUPPORT_AVX512)
+        #include <immintrin.h>  /* Not a mistake. Intentionally including <immintrin.h> instead of <zmmintrin.h> because otherwise the compiler will complain. */
+    #elif defined(MA_SUPPORT_AVX2) || defined(MA_SUPPORT_AVX)
+        #include <immintrin.h>
+    #elif defined(MA_SUPPORT_SSE2)
+        #include <emmintrin.h>
+ #endif
+#endif
+
+#if defined(MA_ARM)
+ #if !defined(MA_NO_NEON) && (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64))
+ #define MA_SUPPORT_NEON
+ #endif
+
+ /* Fall back to looking for the #include file. */
+ #if !defined(__GNUC__) && !defined(__clang__) && defined(__has_include)
+        #if !defined(MA_SUPPORT_NEON) && !defined(MA_NO_NEON) && __has_include(<arm_neon.h>)
+ #define MA_SUPPORT_NEON
+ #endif
+ #endif
+
+ #if defined(MA_SUPPORT_NEON)
+        #include <arm_neon.h>
+ #endif
+#endif
+
+/* Begin globally disabled warnings. */
+#if defined(_MSC_VER)
+ #pragma warning(push)
+ #pragma warning(disable:4752) /* found Intel(R) Advanced Vector Extensions; consider using /arch:AVX */
+#endif
+
+#if defined(MA_X64) || defined(MA_X86)
+ #if defined(_MSC_VER) && !defined(__clang__)
+ #if _MSC_VER >= 1400
+            #include <intrin.h>
+ static MA_INLINE void ma_cpuid(int info[4], int fid)
+ {
+ __cpuid(info, fid);
+ }
+ #else
+ #define MA_NO_CPUID
+ #endif
+
+ #if _MSC_VER >= 1600 && (defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 160040219)
+ static MA_INLINE unsigned __int64 ma_xgetbv(int reg)
+ {
+ return _xgetbv(reg);
+ }
+ #else
+ #define MA_NO_XGETBV
+ #endif
+ #elif (defined(__GNUC__) || defined(__clang__)) && !defined(MA_ANDROID)
+ static MA_INLINE void ma_cpuid(int info[4], int fid)
+ {
+ /*
+ It looks like the -fPIC option uses the ebx register which GCC complains about. We can work around this by just using a different register, the
+ specific register of which I'm letting the compiler decide on. The "k" prefix is used to specify a 32-bit register. The {...} syntax is for
+ supporting different assembly dialects.
+
+ What's basically happening is that we're saving and restoring the ebx register manually.
+ */
+ #if defined(DRFLAC_X86) && defined(__PIC__)
+ __asm__ __volatile__ (
+ "xchg{l} {%%}ebx, %k1;"
+ "cpuid;"
+ "xchg{l} {%%}ebx, %k1;"
+ : "=a"(info[0]), "=&r"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0)
+ );
+ #else
+ __asm__ __volatile__ (
+ "cpuid" : "=a"(info[0]), "=b"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0)
+ );
+ #endif
+ }
+
+ static MA_INLINE ma_uint64 ma_xgetbv(int reg)
+ {
+ unsigned int hi;
+ unsigned int lo;
+
+ __asm__ __volatile__ (
+ "xgetbv" : "=a"(lo), "=d"(hi) : "c"(reg)
+ );
+
+ return ((ma_uint64)hi << 32) | (ma_uint64)lo;
+ }
+ #else
+ #define MA_NO_CPUID
+ #define MA_NO_XGETBV
+ #endif
+#else
+ #define MA_NO_CPUID
+ #define MA_NO_XGETBV
+#endif
+
+static MA_INLINE ma_bool32 ma_has_sse2()
+{
+#if defined(MA_SUPPORT_SSE2)
+ #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_SSE2)
+ #if defined(MA_X64)
+ return MA_TRUE; /* 64-bit targets always support SSE2. */
+ #elif (defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(__SSE2__)
+ return MA_TRUE; /* If the compiler is allowed to freely generate SSE2 code we can assume support. */
+ #else
+ #if defined(MA_NO_CPUID)
+ return MA_FALSE;
+ #else
+ int info[4];
+ ma_cpuid(info, 1);
+ return (info[3] & (1 << 26)) != 0;
+ #endif
+ #endif
+ #else
+ return MA_FALSE; /* SSE2 is only supported on x86 and x64 architectures. */
+ #endif
+#else
+ return MA_FALSE; /* No compiler support. */
+#endif
+}
+
+#if 0
+static MA_INLINE ma_bool32 ma_has_avx()
+{
+#if defined(MA_SUPPORT_AVX)
+ #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX)
+ #if defined(_AVX_) || defined(__AVX__)
+ return MA_TRUE; /* If the compiler is allowed to freely generate AVX code we can assume support. */
+ #else
+ /* AVX requires both CPU and OS support. */
+ #if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV)
+ return MA_FALSE;
+ #else
+ int info[4];
+ ma_cpuid(info, 1);
+ if (((info[2] & (1 << 27)) != 0) && ((info[2] & (1 << 28)) != 0)) {
+ ma_uint64 xrc = ma_xgetbv(0);
+ if ((xrc & 0x06) == 0x06) {
+ return MA_TRUE;
+ } else {
+ return MA_FALSE;
+ }
+ } else {
+ return MA_FALSE;
+ }
+ #endif
+ #endif
+ #else
+ return MA_FALSE; /* AVX is only supported on x86 and x64 architectures. */
+ #endif
+#else
+ return MA_FALSE; /* No compiler support. */
+#endif
+}
+#endif
+
+static MA_INLINE ma_bool32 ma_has_avx2()
+{
+#if defined(MA_SUPPORT_AVX2)
+ #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX2)
+ #if defined(_AVX2_) || defined(__AVX2__)
+ return MA_TRUE; /* If the compiler is allowed to freely generate AVX2 code we can assume support. */
+ #else
+ /* AVX2 requires both CPU and OS support. */
+ #if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV)
+ return MA_FALSE;
+ #else
+ int info1[4];
+ int info7[4];
+ ma_cpuid(info1, 1);
+ ma_cpuid(info7, 7);
+ if (((info1[2] & (1 << 27)) != 0) && ((info7[1] & (1 << 5)) != 0)) {
+ ma_uint64 xrc = ma_xgetbv(0);
+ if ((xrc & 0x06) == 0x06) {
+ return MA_TRUE;
+ } else {
+ return MA_FALSE;
+ }
+ } else {
+ return MA_FALSE;
+ }
+ #endif
+ #endif
+ #else
+ return MA_FALSE; /* AVX2 is only supported on x86 and x64 architectures. */
+ #endif
+#else
+ return MA_FALSE; /* No compiler support. */
+#endif
+}
+
+static MA_INLINE ma_bool32 ma_has_avx512f()
+{
+#if defined(MA_SUPPORT_AVX512)
+ #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX512)
+ #if defined(__AVX512F__)
+ return MA_TRUE; /* If the compiler is allowed to freely generate AVX-512F code we can assume support. */
+ #else
+ /* AVX-512 requires both CPU and OS support. */
+ #if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV)
+ return MA_FALSE;
+ #else
+ int info1[4];
+ int info7[4];
+ ma_cpuid(info1, 1);
+ ma_cpuid(info7, 7);
+ if (((info1[2] & (1 << 27)) != 0) && ((info7[1] & (1 << 16)) != 0)) {
+ ma_uint64 xrc = ma_xgetbv(0);
+ if ((xrc & 0xE6) == 0xE6) {
+ return MA_TRUE;
+ } else {
+ return MA_FALSE;
+ }
+ } else {
+ return MA_FALSE;
+ }
+ #endif
+ #endif
+ #else
+ return MA_FALSE; /* AVX-512F is only supported on x86 and x64 architectures. */
+ #endif
+#else
+ return MA_FALSE; /* No compiler support. */
+#endif
+}
+
+static MA_INLINE ma_bool32 ma_has_neon()
+{
+#if defined(MA_SUPPORT_NEON)
+ #if defined(MA_ARM) && !defined(MA_NO_NEON)
+ #if (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64))
+ return MA_TRUE; /* If the compiler is allowed to freely generate NEON code we can assume support. */
+ #else
+ /* TODO: Runtime check. */
+ return MA_FALSE;
+ #endif
+ #else
+ return MA_FALSE; /* NEON is only supported on ARM architectures. */
+ #endif
+#else
+ return MA_FALSE; /* No compiler support. */
+#endif
+}
+
+#define MA_SIMD_NONE 0
+#define MA_SIMD_SSE2 1
+#define MA_SIMD_AVX2 2
+#define MA_SIMD_NEON 3
+
+#ifndef MA_PREFERRED_SIMD
+ # if defined(MA_SUPPORT_SSE2) && defined(MA_PREFER_SSE2)
+ #define MA_PREFERRED_SIMD MA_SIMD_SSE2
+ #elif defined(MA_SUPPORT_AVX2) && defined(MA_PREFER_AVX2)
+ #define MA_PREFERRED_SIMD MA_SIMD_AVX2
+ #elif defined(MA_SUPPORT_NEON) && defined(MA_PREFER_NEON)
+ #define MA_PREFERRED_SIMD MA_SIMD_NEON
+ #else
+ #define MA_PREFERRED_SIMD MA_SIMD_NONE
+ #endif
+#endif
+
+
+static MA_INLINE ma_bool32 ma_is_little_endian()
+{
+#if defined(MA_X86) || defined(MA_X64)
+ return MA_TRUE;
+#else
+ int n = 1;
+ return (*(char*)&n) == 1;
+#endif
+}
+
+static MA_INLINE ma_bool32 ma_is_big_endian()
+{
+ return !ma_is_little_endian();
+}
+
+
+#ifndef MA_COINIT_VALUE
+#define MA_COINIT_VALUE 0 /* 0 = COINIT_MULTITHREADED */
+#endif
+
+
+
+#ifndef MA_PI
+#define MA_PI 3.14159265358979323846264f
+#endif
+#ifndef MA_PI_D
+#define MA_PI_D 3.14159265358979323846264
+#endif
+#ifndef MA_TAU
+#define MA_TAU 6.28318530717958647693f
+#endif
+#ifndef MA_TAU_D
+#define MA_TAU_D 6.28318530717958647693
+#endif
+
+
+/* The default format when ma_format_unknown (0) is requested when initializing a device. */
+#ifndef MA_DEFAULT_FORMAT
+#define MA_DEFAULT_FORMAT ma_format_f32
+#endif
+
+/* The default channel count to use when 0 is used when initializing a device. */
+#ifndef MA_DEFAULT_CHANNELS
+#define MA_DEFAULT_CHANNELS 2
+#endif
+
+/* The default sample rate to use when 0 is used when initializing a device. */
+#ifndef MA_DEFAULT_SAMPLE_RATE
+#define MA_DEFAULT_SAMPLE_RATE 48000
+#endif
+
+/* Default periods when none is specified in ma_device_init(). More periods means more work on the CPU. */
+#ifndef MA_DEFAULT_PERIODS
+#define MA_DEFAULT_PERIODS 3
+#endif
+
+/* The default period size in milliseconds for low latency mode. */
+#ifndef MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY
+#define MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY 10
+#endif
+
+/* The default buffer size in milliseconds for conservative mode. */
+#ifndef MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE
+#define MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE 100
+#endif
+
+/* The default LPF filter order for linear resampling. Note that this is clamped to MA_MAX_FILTER_ORDER. */
+#ifndef MA_DEFAULT_RESAMPLER_LPF_ORDER
+ #if MA_MAX_FILTER_ORDER >= 4
+ #define MA_DEFAULT_RESAMPLER_LPF_ORDER 4
+ #else
+ #define MA_DEFAULT_RESAMPLER_LPF_ORDER MA_MAX_FILTER_ORDER
+ #endif
+#endif
+
+
+/* Standard sample rates, in order of priority. */
+ma_uint32 g_maStandardSampleRatePriorities[] = {
+ MA_SAMPLE_RATE_48000, /* Most common */
+ MA_SAMPLE_RATE_44100,
+
+ MA_SAMPLE_RATE_32000, /* Lows */
+ MA_SAMPLE_RATE_24000,
+ MA_SAMPLE_RATE_22050,
+
+ MA_SAMPLE_RATE_88200, /* Highs */
+ MA_SAMPLE_RATE_96000,
+ MA_SAMPLE_RATE_176400,
+ MA_SAMPLE_RATE_192000,
+
+ MA_SAMPLE_RATE_16000, /* Extreme lows */
+ MA_SAMPLE_RATE_11025,
+ MA_SAMPLE_RATE_8000,
+
+ MA_SAMPLE_RATE_352800, /* Extreme highs */
+ MA_SAMPLE_RATE_384000
+};
+
+ma_format g_maFormatPriorities[] = {
+ ma_format_s16, /* Most common */
+ ma_format_f32,
+
+ /*ma_format_s24_32,*/ /* Clean alignment */
+ ma_format_s32,
+
+ ma_format_s24, /* Unclean alignment */
+
+ ma_format_u8 /* Low quality */
+};
+
+
+
+/******************************************************************************
+
+Standard Library Stuff
+
+******************************************************************************/
+#ifndef MA_MALLOC
+#ifdef MA_WIN32
+#define MA_MALLOC(sz) HeapAlloc(GetProcessHeap(), 0, (sz))
+#else
+#define MA_MALLOC(sz) malloc((sz))
+#endif
+#endif
+
+#ifndef MA_REALLOC
+#ifdef MA_WIN32
+#define MA_REALLOC(p, sz) (((sz) > 0) ? ((p) ? HeapReAlloc(GetProcessHeap(), 0, (p), (sz)) : HeapAlloc(GetProcessHeap(), 0, (sz))) : ((VOID*)(size_t)(HeapFree(GetProcessHeap(), 0, (p)) & 0)))
+#else
+#define MA_REALLOC(p, sz) realloc((p), (sz))
+#endif
+#endif
+
+#ifndef MA_FREE
+#ifdef MA_WIN32
+#define MA_FREE(p) HeapFree(GetProcessHeap(), 0, (p))
+#else
+#define MA_FREE(p) free((p))
+#endif
+#endif
+
+#ifndef MA_ZERO_MEMORY
+#ifdef MA_WIN32
+#define MA_ZERO_MEMORY(p, sz) ZeroMemory((p), (sz))
+#else
+#define MA_ZERO_MEMORY(p, sz) memset((p), 0, (sz))
+#endif
+#endif
+
+#ifndef MA_COPY_MEMORY
+#ifdef MA_WIN32
+#define MA_COPY_MEMORY(dst, src, sz) CopyMemory((dst), (src), (sz))
+#else
+#define MA_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz))
+#endif
+#endif
+
+#ifndef MA_ASSERT
+#ifdef MA_WIN32
+#define MA_ASSERT(condition) assert(condition)
+#else
+#define MA_ASSERT(condition) assert(condition)
+#endif
+#endif
+
+#define MA_ZERO_OBJECT(p) MA_ZERO_MEMORY((p), sizeof(*(p)))
+
+#define ma_countof(x) (sizeof(x) / sizeof(x[0]))
+#define ma_max(x, y) (((x) > (y)) ? (x) : (y))
+#define ma_min(x, y) (((x) < (y)) ? (x) : (y))
+#define ma_abs(x) (((x) > 0) ? (x) : -(x))
+#define ma_clamp(x, lo, hi) (ma_max(lo, ma_min(x, hi)))
+#define ma_offset_ptr(p, offset) (((ma_uint8*)(p)) + (offset))
+
+#define ma_buffer_frame_capacity(buffer, channels, format) (sizeof(buffer) / ma_get_bytes_per_sample(format) / (channels))
+
+static MA_INLINE double ma_sin(double x)
+{
+ /* TODO: Implement custom sin(x). */
+ return sin(x);
+}
+
+static MA_INLINE double ma_exp(double x)
+{
+ /* TODO: Implement custom exp(x). */
+ return exp(x);
+}
+
+static MA_INLINE double ma_log(double x)
+{
+ /* TODO: Implement custom log(x). */
+ return log(x);
+}
+
+static MA_INLINE double ma_pow(double x, double y)
+{
+ /* TODO: Implement custom pow(x, y). */
+ return pow(x, y);
+}
+
+static MA_INLINE double ma_sqrt(double x)
+{
+ /* TODO: Implement custom sqrt(x). */
+ return sqrt(x);
+}
+
+
+static MA_INLINE double ma_cos(double x)
+{
+ return ma_sin((MA_PI*0.5) - x);
+}
+
+static MA_INLINE double ma_log10(double x)
+{
+ return ma_log(x) * 0.43429448190325182765;
+}
+
+static MA_INLINE float ma_powf(float x, float y)
+{
+ return (float)ma_pow((double)x, (double)y);
+}
+
+static MA_INLINE float ma_log10f(float x)
+{
+ return (float)ma_log10((double)x);
+}
+
+
+/*
+Return Values:
+ 0: Success
+ 22: EINVAL
+ 34: ERANGE
+
+Not using symbolic constants for errors because I want to avoid #including errno.h
+*/
+int ma_strcpy_s(char* dst, size_t dstSizeInBytes, const char* src)
+{
+ size_t i;
+
+ if (dst == 0) {
+ return 22;
+ }
+ if (dstSizeInBytes == 0) {
+ return 34;
+ }
+ if (src == 0) {
+ dst[0] = '\0';
+ return 22;
+ }
+
+ for (i = 0; i < dstSizeInBytes && src[i] != '\0'; ++i) {
+ dst[i] = src[i];
+ }
+
+ if (i < dstSizeInBytes) {
+ dst[i] = '\0';
+ return 0;
+ }
+
+ dst[0] = '\0';
+ return 34;
+}
+
+int ma_strncpy_s(char* dst, size_t dstSizeInBytes, const char* src, size_t count)
+{
+ size_t maxcount;
+ size_t i;
+
+ if (dst == 0) {
+ return 22;
+ }
+ if (dstSizeInBytes == 0) {
+ return 34;
+ }
+ if (src == 0) {
+ dst[0] = '\0';
+ return 22;
+ }
+
+ maxcount = count;
+ if (count == ((size_t)-1) || count >= dstSizeInBytes) { /* -1 = _TRUNCATE */
+ maxcount = dstSizeInBytes - 1;
+ }
+
+ for (i = 0; i < maxcount && src[i] != '\0'; ++i) {
+ dst[i] = src[i];
+ }
+
+ if (src[i] == '\0' || i == count || count == ((size_t)-1)) {
+ dst[i] = '\0';
+ return 0;
+ }
+
+ dst[0] = '\0';
+ return 34;
+}
+
+int ma_strcat_s(char* dst, size_t dstSizeInBytes, const char* src)
+{
+ char* dstorig;
+
+ if (dst == 0) {
+ return 22;
+ }
+ if (dstSizeInBytes == 0) {
+ return 34;
+ }
+ if (src == 0) {
+ dst[0] = '\0';
+ return 22;
+ }
+
+ dstorig = dst;
+
+ while (dstSizeInBytes > 0 && dst[0] != '\0') {
+ dst += 1;
+ dstSizeInBytes -= 1;
+ }
+
+ if (dstSizeInBytes == 0) {
+ return 22; /* Unterminated. */
+ }
+
+
+ while (dstSizeInBytes > 0 && src[0] != '\0') {
+ *dst++ = *src++;
+ dstSizeInBytes -= 1;
+ }
+
+ if (dstSizeInBytes > 0) {
+ dst[0] = '\0';
+ } else {
+ dstorig[0] = '\0';
+ return 34;
+ }
+
+ return 0;
+}
+
+int ma_strncat_s(char* dst, size_t dstSizeInBytes, const char* src, size_t count)
+{
+ char* dstorig;
+
+ if (dst == 0) {
+ return 22;
+ }
+ if (dstSizeInBytes == 0) {
+ return 34;
+ }
+ if (src == 0) {
+ return 22;
+ }
+
+ dstorig = dst;
+
+ while (dstSizeInBytes > 0 && dst[0] != '\0') {
+ dst += 1;
+ dstSizeInBytes -= 1;
+ }
+
+ if (dstSizeInBytes == 0) {
+ return 22; /* Unterminated. */
+ }
+
+
+ if (count == ((size_t)-1)) { /* _TRUNCATE */
+ count = dstSizeInBytes - 1;
+ }
+
+ while (dstSizeInBytes > 0 && src[0] != '\0' && count > 0) {
+ *dst++ = *src++;
+ dstSizeInBytes -= 1;
+ count -= 1;
+ }
+
+ if (dstSizeInBytes > 0) {
+ dst[0] = '\0';
+ } else {
+ dstorig[0] = '\0';
+ return 34;
+ }
+
+ return 0;
+}
+
+int ma_itoa_s(int value, char* dst, size_t dstSizeInBytes, int radix)
+{
+ int sign;
+ unsigned int valueU;
+ char* dstEnd;
+
+ if (dst == NULL || dstSizeInBytes == 0) {
+ return 22;
+ }
+ if (radix < 2 || radix > 36) {
+ dst[0] = '\0';
+ return 22;
+ }
+
+ sign = (value < 0 && radix == 10) ? -1 : 1; /* The negative sign is only used when the base is 10. */
+
+ if (value < 0) {
+ valueU = -value;
+ } else {
+ valueU = value;
+ }
+
+ dstEnd = dst;
+ do
+ {
+ int remainder = valueU % radix;
+ if (remainder > 9) {
+ *dstEnd = (char)((remainder - 10) + 'a');
+ } else {
+ *dstEnd = (char)(remainder + '0');
+ }
+
+ dstEnd += 1;
+ dstSizeInBytes -= 1;
+ valueU /= radix;
+ } while (dstSizeInBytes > 0 && valueU > 0);
+
+ if (dstSizeInBytes == 0) {
+ dst[0] = '\0';
+ return 22; /* Ran out of room in the output buffer. */
+ }
+
+ if (sign < 0) {
+ *dstEnd++ = '-';
+ dstSizeInBytes -= 1;
+ }
+
+ if (dstSizeInBytes == 0) {
+ dst[0] = '\0';
+ return 22; /* Ran out of room in the output buffer. */
+ }
+
+ *dstEnd = '\0';
+
+
+ /* At this point the string will be reversed. */
+ dstEnd -= 1;
+ while (dst < dstEnd) {
+ char temp = *dst;
+ *dst = *dstEnd;
+ *dstEnd = temp;
+
+ dst += 1;
+ dstEnd -= 1;
+ }
+
+ return 0;
+}
+
+int ma_strcmp(const char* str1, const char* str2)
+{
+ if (str1 == str2) return 0;
+
+ /* These checks differ from the standard implementation. It's not important, but I prefer it just for sanity. */
+ if (str1 == NULL) return -1;
+ if (str2 == NULL) return 1;
+
+ for (;;) {
+ if (str1[0] == '\0') {
+ break;
+ }
+ if (str1[0] != str2[0]) {
+ break;
+ }
+
+ str1 += 1;
+ str2 += 1;
+ }
+
+ return ((unsigned char*)str1)[0] - ((unsigned char*)str2)[0];
+}
int ma_strappend(char* dst, size_t dstSize, const char* srcA, const char* srcB)
{
- int result;
+ int result;
+
+ result = ma_strncpy_s(dst, dstSize, srcA, (size_t)-1);
+ if (result != 0) {
+ return result;
+ }
+
+ result = ma_strncat_s(dst, dstSize, srcB, (size_t)-1);
+ if (result != 0) {
+ return result;
+ }
+
+ return result;
+}
+
+char* ma_copy_string(const char* src, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+ size_t sz = strlen(src)+1;
+ char* dst = (char*)ma_malloc(sz, pAllocationCallbacks);
+ if (dst == NULL) {
+ return NULL;
+ }
+
+ ma_strcpy_s(dst, sz, src);
+
+ return dst;
+}
+
+
+#include <errno.h>
+static ma_result ma_result_from_errno(int e)
+{
+ switch (e)
+ {
+ case 0: return MA_SUCCESS;
+ #ifdef EPERM
+ case EPERM: return MA_INVALID_OPERATION;
+ #endif
+ #ifdef ENOENT
+ case ENOENT: return MA_DOES_NOT_EXIST;
+ #endif
+ #ifdef ESRCH
+ case ESRCH: return MA_DOES_NOT_EXIST;
+ #endif
+ #ifdef EINTR
+ case EINTR: return MA_INTERRUPT;
+ #endif
+ #ifdef EIO
+ case EIO: return MA_IO_ERROR;
+ #endif
+ #ifdef ENXIO
+ case ENXIO: return MA_DOES_NOT_EXIST;
+ #endif
+ #ifdef E2BIG
+ case E2BIG: return MA_INVALID_ARGS;
+ #endif
+ #ifdef ENOEXEC
+ case ENOEXEC: return MA_INVALID_FILE;
+ #endif
+ #ifdef EBADF
+ case EBADF: return MA_INVALID_FILE;
+ #endif
+ #ifdef ECHILD
+ case ECHILD: return MA_ERROR;
+ #endif
+ #ifdef EAGAIN
+ case EAGAIN: return MA_UNAVAILABLE;
+ #endif
+ #ifdef ENOMEM
+ case ENOMEM: return MA_OUT_OF_MEMORY;
+ #endif
+ #ifdef EACCES
+ case EACCES: return MA_ACCESS_DENIED;
+ #endif
+ #ifdef EFAULT
+ case EFAULT: return MA_BAD_ADDRESS;
+ #endif
+ #ifdef ENOTBLK
+ case ENOTBLK: return MA_ERROR;
+ #endif
+ #ifdef EBUSY
+ case EBUSY: return MA_BUSY;
+ #endif
+ #ifdef EEXIST
+ case EEXIST: return MA_ALREADY_EXISTS;
+ #endif
+ #ifdef EXDEV
+ case EXDEV: return MA_ERROR;
+ #endif
+ #ifdef ENODEV
+ case ENODEV: return MA_DOES_NOT_EXIST;
+ #endif
+ #ifdef ENOTDIR
+ case ENOTDIR: return MA_NOT_DIRECTORY;
+ #endif
+ #ifdef EISDIR
+ case EISDIR: return MA_IS_DIRECTORY;
+ #endif
+ #ifdef EINVAL
+ case EINVAL: return MA_INVALID_ARGS;
+ #endif
+ #ifdef ENFILE
+ case ENFILE: return MA_TOO_MANY_OPEN_FILES;
+ #endif
+ #ifdef EMFILE
+ case EMFILE: return MA_TOO_MANY_OPEN_FILES;
+ #endif
+ #ifdef ENOTTY
+ case ENOTTY: return MA_INVALID_OPERATION;
+ #endif
+ #ifdef ETXTBSY
+ case ETXTBSY: return MA_BUSY;
+ #endif
+ #ifdef EFBIG
+ case EFBIG: return MA_TOO_BIG;
+ #endif
+ #ifdef ENOSPC
+ case ENOSPC: return MA_NO_SPACE;
+ #endif
+ #ifdef ESPIPE
+ case ESPIPE: return MA_BAD_SEEK;
+ #endif
+ #ifdef EROFS
+ case EROFS: return MA_ACCESS_DENIED;
+ #endif
+ #ifdef EMLINK
+ case EMLINK: return MA_TOO_MANY_LINKS;
+ #endif
+ #ifdef EPIPE
+ case EPIPE: return MA_BAD_PIPE;
+ #endif
+ #ifdef EDOM
+ case EDOM: return MA_OUT_OF_RANGE;
+ #endif
+ #ifdef ERANGE
+ case ERANGE: return MA_OUT_OF_RANGE;
+ #endif
+ #ifdef EDEADLK
+ case EDEADLK: return MA_DEADLOCK;
+ #endif
+ #ifdef ENAMETOOLONG
+ case ENAMETOOLONG: return MA_PATH_TOO_LONG;
+ #endif
+ #ifdef ENOLCK
+ case ENOLCK: return MA_ERROR;
+ #endif
+ #ifdef ENOSYS
+ case ENOSYS: return MA_NOT_IMPLEMENTED;
+ #endif
+ #ifdef ENOTEMPTY
+ case ENOTEMPTY: return MA_DIRECTORY_NOT_EMPTY;
+ #endif
+ #ifdef ELOOP
+ case ELOOP: return MA_TOO_MANY_LINKS;
+ #endif
+ #ifdef ENOMSG
+ case ENOMSG: return MA_NO_MESSAGE;
+ #endif
+ #ifdef EIDRM
+ case EIDRM: return MA_ERROR;
+ #endif
+ #ifdef ECHRNG
+ case ECHRNG: return MA_ERROR;
+ #endif
+ #ifdef EL2NSYNC
+ case EL2NSYNC: return MA_ERROR;
+ #endif
+ #ifdef EL3HLT
+ case EL3HLT: return MA_ERROR;
+ #endif
+ #ifdef EL3RST
+ case EL3RST: return MA_ERROR;
+ #endif
+ #ifdef ELNRNG
+ case ELNRNG: return MA_OUT_OF_RANGE;
+ #endif
+ #ifdef EUNATCH
+ case EUNATCH: return MA_ERROR;
+ #endif
+ #ifdef ENOCSI
+ case ENOCSI: return MA_ERROR;
+ #endif
+ #ifdef EL2HLT
+ case EL2HLT: return MA_ERROR;
+ #endif
+ #ifdef EBADE
+ case EBADE: return MA_ERROR;
+ #endif
+ #ifdef EBADR
+ case EBADR: return MA_ERROR;
+ #endif
+ #ifdef EXFULL
+ case EXFULL: return MA_ERROR;
+ #endif
+ #ifdef ENOANO
+ case ENOANO: return MA_ERROR;
+ #endif
+ #ifdef EBADRQC
+ case EBADRQC: return MA_ERROR;
+ #endif
+ #ifdef EBADSLT
+ case EBADSLT: return MA_ERROR;
+ #endif
+ #ifdef EBFONT
+ case EBFONT: return MA_INVALID_FILE;
+ #endif
+ #ifdef ENOSTR
+ case ENOSTR: return MA_ERROR;
+ #endif
+ #ifdef ENODATA
+ case ENODATA: return MA_NO_DATA_AVAILABLE;
+ #endif
+ #ifdef ETIME
+ case ETIME: return MA_TIMEOUT;
+ #endif
+ #ifdef ENOSR
+ case ENOSR: return MA_NO_DATA_AVAILABLE;
+ #endif
+ #ifdef ENONET
+ case ENONET: return MA_NO_NETWORK;
+ #endif
+ #ifdef ENOPKG
+ case ENOPKG: return MA_ERROR;
+ #endif
+ #ifdef EREMOTE
+ case EREMOTE: return MA_ERROR;
+ #endif
+ #ifdef ENOLINK
+ case ENOLINK: return MA_ERROR;
+ #endif
+ #ifdef EADV
+ case EADV: return MA_ERROR;
+ #endif
+ #ifdef ESRMNT
+ case ESRMNT: return MA_ERROR;
+ #endif
+ #ifdef ECOMM
+ case ECOMM: return MA_ERROR;
+ #endif
+ #ifdef EPROTO
+ case EPROTO: return MA_ERROR;
+ #endif
+ #ifdef EMULTIHOP
+ case EMULTIHOP: return MA_ERROR;
+ #endif
+ #ifdef EDOTDOT
+ case EDOTDOT: return MA_ERROR;
+ #endif
+ #ifdef EBADMSG
+ case EBADMSG: return MA_BAD_MESSAGE;
+ #endif
+ #ifdef EOVERFLOW
+ case EOVERFLOW: return MA_TOO_BIG;
+ #endif
+ #ifdef ENOTUNIQ
+ case ENOTUNIQ: return MA_NOT_UNIQUE;
+ #endif
+ #ifdef EBADFD
+ case EBADFD: return MA_ERROR;
+ #endif
+ #ifdef EREMCHG
+ case EREMCHG: return MA_ERROR;
+ #endif
+ #ifdef ELIBACC
+ case ELIBACC: return MA_ACCESS_DENIED;
+ #endif
+ #ifdef ELIBBAD
+ case ELIBBAD: return MA_INVALID_FILE;
+ #endif
+ #ifdef ELIBSCN
+ case ELIBSCN: return MA_INVALID_FILE;
+ #endif
+ #ifdef ELIBMAX
+ case ELIBMAX: return MA_ERROR;
+ #endif
+ #ifdef ELIBEXEC
+ case ELIBEXEC: return MA_ERROR;
+ #endif
+ #ifdef EILSEQ
+ case EILSEQ: return MA_INVALID_DATA;
+ #endif
+ #ifdef ERESTART
+ case ERESTART: return MA_ERROR;
+ #endif
+ #ifdef ESTRPIPE
+ case ESTRPIPE: return MA_ERROR;
+ #endif
+ #ifdef EUSERS
+ case EUSERS: return MA_ERROR;
+ #endif
+ #ifdef ENOTSOCK
+ case ENOTSOCK: return MA_NOT_SOCKET;
+ #endif
+ #ifdef EDESTADDRREQ
+ case EDESTADDRREQ: return MA_NO_ADDRESS;
+ #endif
+ #ifdef EMSGSIZE
+ case EMSGSIZE: return MA_TOO_BIG;
+ #endif
+ #ifdef EPROTOTYPE
+ case EPROTOTYPE: return MA_BAD_PROTOCOL;
+ #endif
+ #ifdef ENOPROTOOPT
+ case ENOPROTOOPT: return MA_PROTOCOL_UNAVAILABLE;
+ #endif
+ #ifdef EPROTONOSUPPORT
+ case EPROTONOSUPPORT: return MA_PROTOCOL_NOT_SUPPORTED;
+ #endif
+ #ifdef ESOCKTNOSUPPORT
+ case ESOCKTNOSUPPORT: return MA_SOCKET_NOT_SUPPORTED;
+ #endif
+ #ifdef EOPNOTSUPP
+ case EOPNOTSUPP: return MA_INVALID_OPERATION;
+ #endif
+ #ifdef EPFNOSUPPORT
+ case EPFNOSUPPORT: return MA_PROTOCOL_FAMILY_NOT_SUPPORTED;
+ #endif
+ #ifdef EAFNOSUPPORT
+ case EAFNOSUPPORT: return MA_ADDRESS_FAMILY_NOT_SUPPORTED;
+ #endif
+ #ifdef EADDRINUSE
+ case EADDRINUSE: return MA_ALREADY_IN_USE;
+ #endif
+ #ifdef EADDRNOTAVAIL
+ case EADDRNOTAVAIL: return MA_ERROR;
+ #endif
+ #ifdef ENETDOWN
+ case ENETDOWN: return MA_NO_NETWORK;
+ #endif
+ #ifdef ENETUNREACH
+ case ENETUNREACH: return MA_NO_NETWORK;
+ #endif
+ #ifdef ENETRESET
+ case ENETRESET: return MA_NO_NETWORK;
+ #endif
+ #ifdef ECONNABORTED
+ case ECONNABORTED: return MA_NO_NETWORK;
+ #endif
+ #ifdef ECONNRESET
+ case ECONNRESET: return MA_CONNECTION_RESET;
+ #endif
+ #ifdef ENOBUFS
+ case ENOBUFS: return MA_NO_SPACE;
+ #endif
+ #ifdef EISCONN
+ case EISCONN: return MA_ALREADY_CONNECTED;
+ #endif
+ #ifdef ENOTCONN
+ case ENOTCONN: return MA_NOT_CONNECTED;
+ #endif
+ #ifdef ESHUTDOWN
+ case ESHUTDOWN: return MA_ERROR;
+ #endif
+ #ifdef ETOOMANYREFS
+ case ETOOMANYREFS: return MA_ERROR;
+ #endif
+ #ifdef ETIMEDOUT
+ case ETIMEDOUT: return MA_TIMEOUT;
+ #endif
+ #ifdef ECONNREFUSED
+ case ECONNREFUSED: return MA_CONNECTION_REFUSED;
+ #endif
+ #ifdef EHOSTDOWN
+ case EHOSTDOWN: return MA_NO_HOST;
+ #endif
+ #ifdef EHOSTUNREACH
+ case EHOSTUNREACH: return MA_NO_HOST;
+ #endif
+ #ifdef EALREADY
+ case EALREADY: return MA_IN_PROGRESS;
+ #endif
+ #ifdef EINPROGRESS
+ case EINPROGRESS: return MA_IN_PROGRESS;
+ #endif
+ #ifdef ESTALE
+ case ESTALE: return MA_INVALID_FILE;
+ #endif
+ #ifdef EUCLEAN
+ case EUCLEAN: return MA_ERROR;
+ #endif
+ #ifdef ENOTNAM
+ case ENOTNAM: return MA_ERROR;
+ #endif
+ #ifdef ENAVAIL
+ case ENAVAIL: return MA_ERROR;
+ #endif
+ #ifdef EISNAM
+ case EISNAM: return MA_ERROR;
+ #endif
+ #ifdef EREMOTEIO
+ case EREMOTEIO: return MA_IO_ERROR;
+ #endif
+ #ifdef EDQUOT
+ case EDQUOT: return MA_NO_SPACE;
+ #endif
+ #ifdef ENOMEDIUM
+ case ENOMEDIUM: return MA_DOES_NOT_EXIST;
+ #endif
+ #ifdef EMEDIUMTYPE
+ case EMEDIUMTYPE: return MA_ERROR;
+ #endif
+ #ifdef ECANCELED
+ case ECANCELED: return MA_CANCELLED;
+ #endif
+ #ifdef ENOKEY
+ case ENOKEY: return MA_ERROR;
+ #endif
+ #ifdef EKEYEXPIRED
+ case EKEYEXPIRED: return MA_ERROR;
+ #endif
+ #ifdef EKEYREVOKED
+ case EKEYREVOKED: return MA_ERROR;
+ #endif
+ #ifdef EKEYREJECTED
+ case EKEYREJECTED: return MA_ERROR;
+ #endif
+ #ifdef EOWNERDEAD
+ case EOWNERDEAD: return MA_ERROR;
+ #endif
+ #ifdef ENOTRECOVERABLE
+ case ENOTRECOVERABLE: return MA_ERROR;
+ #endif
+ #ifdef ERFKILL
+ case ERFKILL: return MA_ERROR;
+ #endif
+ #ifdef EHWPOISON
+ case EHWPOISON: return MA_ERROR;
+ #endif
+ default: return MA_ERROR;
+ }
+}
+
+/*
+Cross-platform fopen() wrapper.
+
+ppFile    [out] Receives the opened FILE handle. Always set to NULL on entry for safety.
+pFilePath [in]  Path to open. Must not be NULL.
+pOpenMode [in]  Standard fopen() mode string ("rb", "wb", etc). Must not be NULL.
+
+Returns MA_SUCCESS on success, MA_INVALID_ARGS for NULL arguments, otherwise an error translated from errno.
+On MSVC this routes through fopen_s(); on other Unix-likes it prefers fopen64() when large-file support is enabled.
+*/
+ma_result ma_fopen(FILE** ppFile, const char* pFilePath, const char* pOpenMode)
+{
+#if _MSC_VER
+    /* NOTE(review): fopen_s() requires VC2005+ -- confirm no older MSVC targets are supported, else gate on _MSC_VER >= 1400 as ma_wfopen() does. */
+    errno_t err;
+#endif
+
+    if (ppFile != NULL) {
+        *ppFile = NULL; /* Safety. */
+    }
+
+    if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+#if _MSC_VER
+    err = fopen_s(ppFile, pFilePath, pOpenMode);
+    if (err != 0) {
+        return ma_result_from_errno(err);
+    }
+#else
+#if defined(_WIN32) || defined(__APPLE__)
+    *ppFile = fopen(pFilePath, pOpenMode);
+#else
+    #if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS == 64 && defined(_LARGEFILE64_SOURCE)
+        *ppFile = fopen64(pFilePath, pOpenMode);    /* Explicit 64-bit offsets for large files on 32-bit Linux and friends. */
+    #else
+        *ppFile = fopen(pFilePath, pOpenMode);
+    #endif
+#endif
+    if (*ppFile == NULL) {
+        ma_result result = ma_result_from_errno(errno);
+        if (result == MA_SUCCESS) {
+            return MA_ERROR; /* Just a safety check to make sure we never ever return success when pFile == NULL. */
+        }
+        return result;
+    }
+#endif
+
+    return MA_SUCCESS;
+}
+
+
+
+/*
+_wfopen() isn't always available in all compilation environments.
+
+ * Windows only.
+ * MSVC seems to support it universally as far back as VC6 from what I can tell (haven't checked further back).
+ * MinGW-64 (both 32- and 64-bit) seems to support it.
+ * MinGW wraps it in !defined(__STRICT_ANSI__).
+
+This can be reviewed as compatibility issues arise. The preference is to use _wfopen_s() and _wfopen() as opposed to the wcsrtombs()
+fallback, so if you notice your compiler not detecting this properly I'm happy to look at adding support.
+*/
+#if defined(_WIN32)
+ #if defined(_MSC_VER) || defined(__MINGW64__) || !defined(__STRICT_ANSI__)
+ #define MA_HAS_WFOPEN
+ #endif
+#endif
+
+/*
+Wide-character fopen() wrapper.
+
+ppFile               [out] Receives the opened FILE handle. Always set to NULL on entry for safety.
+pFilePath            [in]  Wide-character path. Must not be NULL.
+pOpenMode            [in]  Wide-character mode string; expected to be ASCII-only. Must not be NULL.
+pAllocationCallbacks [in]  Used only on the wcsrtombs() fallback path to allocate the converted path. May be NULL.
+
+On Windows (when MA_HAS_WFOPEN is defined) this routes to _wfopen_s()/_wfopen(). Elsewhere the path is converted
+to the locale's multibyte encoding with wcsrtombs() and passed to fopen().
+*/
+ma_result ma_wfopen(FILE** ppFile, const wchar_t* pFilePath, const wchar_t* pOpenMode, ma_allocation_callbacks* pAllocationCallbacks)
+{
+#if _MSC_VER
+    errno_t err;
+#endif
+
+    if (ppFile != NULL) {
+        *ppFile = NULL; /* Safety. */
+    }
+
+    if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+#if defined(MA_HAS_WFOPEN)
+    (void)pAllocationCallbacks;
+
+    /* Use _wfopen() on Windows. */
+    #if defined(_MSC_VER) && _MSC_VER >= 1400
+        err = _wfopen_s(ppFile, pFilePath, pOpenMode);
+        if (err != 0) {
+            return ma_result_from_errno(err);
+        }
+    #else
+        *ppFile = _wfopen(pFilePath, pOpenMode);
+        if (*ppFile == NULL) {
+            return ma_result_from_errno(errno);
+        }
+    #endif
+#else
+    /*
+    Use fopen() on anything other than Windows. Requires a conversion. This is annoying because fopen() is locale specific. The only real way I can
+    think of to do this is with wcsrtombs(). Note that wcstombs() is apparently not thread-safe because it uses a static global mbstate_t object for
+    maintaining state. I've checked this with -std=c89 and it works, but if somebody gets a compiler error I'll look into improving compatibility.
+    */
+    {
+        mbstate_t mbs;
+        size_t lenMB;
+        const wchar_t* pFilePathTemp = pFilePath;
+        char* pFilePathMB = NULL;
+        char pOpenModeMB[32] = {0};
+
+        /* Get the length first. */
+        MA_ZERO_OBJECT(&mbs);
+        lenMB = wcsrtombs(NULL, &pFilePathTemp, 0, &mbs);
+        if (lenMB == (size_t)-1) {
+            return ma_result_from_errno(errno);
+        }
+
+        pFilePathMB = (char*)ma_malloc(lenMB + 1, pAllocationCallbacks);
+        if (pFilePathMB == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+
+        pFilePathTemp = pFilePath;
+        MA_ZERO_OBJECT(&mbs);
+        wcsrtombs(pFilePathMB, &pFilePathTemp, lenMB + 1, &mbs);
+
+        /*
+        The open mode should always consist of ASCII characters so we should be able to do a trivial conversion.
+        The copy is bounded to the destination buffer: the original loop had no bound and would overflow
+        pOpenModeMB for a (malformed) mode string longer than 31 characters.
+        */
+        {
+            size_t i = 0;
+            for (;;) {
+                if (pOpenMode[i] == 0 || i == sizeof(pOpenModeMB) - 1) {
+                    pOpenModeMB[i] = '\0';
+                    break;
+                }
+
+                pOpenModeMB[i] = (char)pOpenMode[i];
+                i += 1;
+            }
+        }
+
+        *ppFile = fopen(pFilePathMB, pOpenModeMB);
+
+        ma_free(pFilePathMB, pAllocationCallbacks);
+    }
+
+    if (*ppFile == NULL) {
+        return MA_ERROR;
+    }
+#endif
+
+    return MA_SUCCESS;
+}
+
+
+
+/* Copies sizeInBytes bytes from src to dst, chunking into size_t-sized pieces when ma_uint64 is wider than size_t. */
+static MA_INLINE void ma_copy_memory_64(void* dst, const void* src, ma_uint64 sizeInBytes)
+{
+#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX
+    MA_COPY_MEMORY(dst, src, (size_t)sizeInBytes);
+#else
+    ma_uint8* pDst8 = (ma_uint8*)dst;
+    const ma_uint8* pSrc8 = (const ma_uint8*)src;
+
+    while (sizeInBytes > 0) {
+        ma_uint64 chunk = sizeInBytes;
+        if (chunk > MA_SIZE_MAX) {
+            chunk = MA_SIZE_MAX;
+        }
+
+        MA_COPY_MEMORY(pDst8, pSrc8, (size_t)chunk); /* Cast is safe: chunk <= MA_SIZE_MAX. */
+
+        pDst8       += chunk;
+        pSrc8       += chunk;
+        sizeInBytes -= chunk;
+    }
+#endif
+}
+
+/* Zeroes sizeInBytes bytes at dst, chunking into size_t-sized pieces when ma_uint64 is wider than size_t. */
+static MA_INLINE void ma_zero_memory_64(void* dst, ma_uint64 sizeInBytes)
+{
+#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX
+    MA_ZERO_MEMORY(dst, (size_t)sizeInBytes);
+#else
+    ma_uint8* pDst8 = (ma_uint8*)dst;
+
+    while (sizeInBytes > 0) {
+        ma_uint64 chunk = sizeInBytes;
+        if (chunk > MA_SIZE_MAX) {
+            chunk = MA_SIZE_MAX;
+        }
+
+        MA_ZERO_MEMORY(pDst8, (size_t)chunk); /* Cast is safe: chunk <= MA_SIZE_MAX. */
+
+        pDst8       += chunk;
+        sizeInBytes -= chunk;
+    }
+#endif
+}
+
+
+/* Thanks to good old Bit Twiddling Hacks for this one: http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */
+/*
+Returns the smallest power of 2 that is >= x.
+Edge cases: returns 0 when x == 0 (the decrement wraps around, which is well-defined for unsigned) and
+also when x > 0x80000000, where the result does not fit in 32 bits. Callers pass in-range values.
+*/
+static MA_INLINE unsigned int ma_next_power_of_2(unsigned int x)
+{
+    x--;
+    x |= x >> 1;    /* Smear the highest set bit into every lower position... */
+    x |= x >> 2;
+    x |= x >> 4;
+    x |= x >> 8;
+    x |= x >> 16;
+    x++;            /* ...then carry up to the next power of 2. */
+
+    return x;
+}
+
+/* Returns half of ma_next_power_of_2(x), i.e. the power of 2 immediately below the rounded-up value. */
+static MA_INLINE unsigned int ma_prev_power_of_2(unsigned int x)
+{
+    unsigned int next = ma_next_power_of_2(x);
+    return next >> 1;
+}
+
+/* Rounds x to whichever of the surrounding powers of 2 is nearer, preferring the larger one on ties. */
+static MA_INLINE unsigned int ma_round_to_power_of_2(unsigned int x)
+{
+    unsigned int prev = ma_prev_power_of_2(x);
+    unsigned int next = ma_next_power_of_2(x);
+
+    return ((next - x) > (x - prev)) ? prev : next;
+}
+
+/* Population count: returns the number of set bits in x. Kernighan's method -- each iteration clears the lowest set bit. */
+static MA_INLINE unsigned int ma_count_set_bits(unsigned int x)
+{
+    unsigned int count;
+
+    for (count = 0; x != 0; x &= x - 1) {
+        count += 1;
+    }
+
+    return count;
+}
+
+
+
+/* Clamps an f32 sample to -1..1 */
+static MA_INLINE float ma_clip_f32(float x)
+{
+    if (x < -1) return -1;
+    if (x > +1) return +1;
+    return x;
+}
+
+/* Linear interpolation between x and y by factor a, in the numerically precise x*(1-a) + y*a form. */
+static MA_INLINE float ma_mix_f32(float x, float y, float a)
+{
+    return x*(1-a) + y*a;
+}
+/* Faster lerp using the x + (y-x)*a form. Slightly different rounding behavior from ma_mix_f32(). */
+static MA_INLINE float ma_mix_f32_fast(float x, float y, float a)
+{
+    float r0 = (y - x);
+    float r1 = r0*a;
+    return x + r1;
+    /*return x + (y - x)*a;*/
+}
+
+
+/* SIMD variants of ma_mix_f32_fast(), 4/8/16 lanes at a time. Same x + (y-x)*a formulation per lane. */
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE __m128 ma_mix_f32_fast__sse2(__m128 x, __m128 y, __m128 a)
+{
+    return _mm_add_ps(x, _mm_mul_ps(_mm_sub_ps(y, x), a));
+}
+#endif
+#if defined(MA_SUPPORT_AVX2)
+static MA_INLINE __m256 ma_mix_f32_fast__avx2(__m256 x, __m256 y, __m256 a)
+{
+    return _mm256_add_ps(x, _mm256_mul_ps(_mm256_sub_ps(y, x), a));
+}
+#endif
+#if defined(MA_SUPPORT_AVX512)
+static MA_INLINE __m512 ma_mix_f32_fast__avx512(__m512 x, __m512 y, __m512 a)
+{
+    return _mm512_add_ps(x, _mm512_mul_ps(_mm512_sub_ps(y, x), a));
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE float32x4_t ma_mix_f32_fast__neon(float32x4_t x, float32x4_t y, float32x4_t a)
+{
+    return vaddq_f32(x, vmulq_f32(vsubq_f32(y, x), a));
+}
+#endif
+
+
+/* Double-precision versions of the two lerp formulations above. */
+static MA_INLINE double ma_mix_f64(double x, double y, double a)
+{
+    return x*(1-a) + y*a;
+}
+static MA_INLINE double ma_mix_f64_fast(double x, double y, double a)
+{
+    return x + (y - x)*a;
+}
+
+/* Maps x in [0..1] to [lo..hi]. */
+static MA_INLINE float ma_scale_to_range_f32(float x, float lo, float hi)
+{
+    return lo + x*(hi-lo);
+}
+
+
+/*
+Greatest common factor of a and b using the iterative Euclidean algorithm.
+Note that ma_gcf_u32(x, 0) == x, so ma_gcf_u32(0, 0) == 0.
+*/
+static MA_INLINE ma_uint32 ma_gcf_u32(ma_uint32 a, ma_uint32 b)
+{
+    while (b != 0) {
+        ma_uint32 r = a % b;
+        a = b;
+        b = r;
+    }
+
+    return a;
+}
+
+
+/*
+Random Number Generation
+
+miniaudio uses the LCG random number generation algorithm. This is good enough for audio.
+
+Note that miniaudio's global LCG implementation uses global state which is _not_ thread-local. When this is called across
+multiple threads, results will be unpredictable. However, it won't crash and results will still be random enough for
+miniaudio's purposes.
+*/
+#ifndef MA_DEFAULT_LCG_SEED
+#define MA_DEFAULT_LCG_SEED 4321
+#endif
+
+#define MA_LCG_M 2147483647
+#define MA_LCG_A 48271
+#define MA_LCG_C 0
+
+static ma_lcg g_maLCG = {MA_DEFAULT_LCG_SEED}; /* Non-zero initial seed. Use ma_seed() to use an explicit seed. */
+
+/* Sets the LCG's internal state to an explicit seed. */
+static MA_INLINE void ma_lcg_seed(ma_lcg* pLCG, ma_int32 seed)
+{
+    MA_ASSERT(pLCG != NULL);
+    pLCG->state = seed;
+}
+
+/* Advances the generator one step: state = (A*state + C) mod M. Returns the new state. */
+static MA_INLINE ma_int32 ma_lcg_rand_s32(ma_lcg* pLCG)
+{
+    pLCG->state = (MA_LCG_A * pLCG->state + MA_LCG_C) % MA_LCG_M;
+    return pLCG->state;
+}
+
+/* Same as ma_lcg_rand_s32(), reinterpreted as unsigned. */
+static MA_INLINE ma_uint32 ma_lcg_rand_u32(ma_lcg* pLCG)
+{
+    return (ma_uint32)ma_lcg_rand_s32(pLCG);
+}
+
+/* Low 16 bits of the next generator output. */
+static MA_INLINE ma_int16 ma_lcg_rand_s16(ma_lcg* pLCG)
+{
+    return (ma_int16)(ma_lcg_rand_s32(pLCG) & 0xFFFF);
+}
+
+/* Next output scaled by 1/0x7FFFFFFF. Roughly [-1..1]; negative values only occur if the state goes negative (e.g. negative seed). */
+static MA_INLINE double ma_lcg_rand_f64(ma_lcg* pLCG)
+{
+    return ma_lcg_rand_s32(pLCG) / (double)0x7FFFFFFF;
+}
+
+static MA_INLINE float ma_lcg_rand_f32(ma_lcg* pLCG)
+{
+    return (float)ma_lcg_rand_f64(pLCG);
+}
+
+/* Random float scaled into [lo..hi]. */
+static MA_INLINE float ma_lcg_rand_range_f32(ma_lcg* pLCG, float lo, float hi)
+{
+    return ma_scale_to_range_f32(ma_lcg_rand_f32(pLCG), lo, hi);
+}
+
+/* Random integer in [lo..hi] via range partitioning. Assumes lo <= hi -- TODO confirm callers guarantee this. */
+static MA_INLINE ma_int32 ma_lcg_rand_range_s32(ma_lcg* pLCG, ma_int32 lo, ma_int32 hi)
+{
+    if (lo == hi) {
+        return lo;
+    }
+
+    return lo + ma_lcg_rand_u32(pLCG) / (0xFFFFFFFF / (hi - lo + 1) + 1);
+}
+
+
+
+/*
+Convenience wrappers over the global LCG (g_maLCG). Global state is shared and not thread-local, so results
+are unpredictable (but still random enough) when called from multiple threads.
+
+Note: parameterless functions are declared with (void). An empty parameter list `()` in a C definition is an
+old-style declaration with unspecified parameters, not a prototype; (void) matches the prototype style used
+throughout the rest of the file.
+*/
+static MA_INLINE void ma_seed(ma_int32 seed)
+{
+    ma_lcg_seed(&g_maLCG, seed);
+}
+
+static MA_INLINE ma_int32 ma_rand_s32(void)
+{
+    return ma_lcg_rand_s32(&g_maLCG);
+}
+
+static MA_INLINE ma_uint32 ma_rand_u32(void)
+{
+    return ma_lcg_rand_u32(&g_maLCG);
+}
+
+static MA_INLINE double ma_rand_f64(void)
+{
+    return ma_lcg_rand_f64(&g_maLCG);
+}
+
+static MA_INLINE float ma_rand_f32(void)
+{
+    return ma_lcg_rand_f32(&g_maLCG);
+}
+
+/* Random float in [lo..hi]. */
+static MA_INLINE float ma_rand_range_f32(float lo, float hi)
+{
+    return ma_lcg_rand_range_f32(&g_maLCG, lo, hi);
+}
+
+/* Random integer in [lo..hi]. */
+static MA_INLINE ma_int32 ma_rand_range_s32(ma_int32 lo, ma_int32 hi)
+{
+    return ma_lcg_rand_range_s32(&g_maLCG, lo, hi);
+}
+
+
+/* Rectangular (uniform) dither: a single uniform random value in [ditherMin..ditherMax]. */
+static MA_INLINE float ma_dither_f32_rectangle(float ditherMin, float ditherMax)
+{
+    return ma_rand_range_f32(ditherMin, ditherMax);
+}
+
+/* Triangular dither: the sum of two independent uniform random values, giving a triangular distribution. */
+static MA_INLINE float ma_dither_f32_triangle(float ditherMin, float ditherMax)
+{
+    float a = ma_rand_range_f32(ditherMin, 0);
+    float b = ma_rand_range_f32(0, ditherMax);
+    return a + b;
+}
+
+/* Dispatches to the requested dither mode. Returns 0 for any other mode (i.e. no dither). */
+static MA_INLINE float ma_dither_f32(ma_dither_mode ditherMode, float ditherMin, float ditherMax)
+{
+    if (ditherMode == ma_dither_mode_rectangle) {
+        return ma_dither_f32_rectangle(ditherMin, ditherMax);
+    }
+    if (ditherMode == ma_dither_mode_triangle) {
+        return ma_dither_f32_triangle(ditherMin, ditherMax);
+    }
+
+    return 0;
+}
+
+/* Integer variant of ma_dither_f32(). Returns 0 for any other mode. */
+static MA_INLINE ma_int32 ma_dither_s32(ma_dither_mode ditherMode, ma_int32 ditherMin, ma_int32 ditherMax)
+{
+    if (ditherMode == ma_dither_mode_rectangle) {
+        ma_int32 a = ma_rand_range_s32(ditherMin, ditherMax);
+        return a;
+    }
+    if (ditherMode == ma_dither_mode_triangle) {
+        ma_int32 a = ma_rand_range_s32(ditherMin, 0);
+        ma_int32 b = ma_rand_range_s32(0, ditherMax);
+        return a + b;
+    }
+
+    return 0;
+}
+
+
+/******************************************************************************
+
+Atomics
+
+******************************************************************************/
+#if defined(__clang__)
+    #if defined(__has_builtin)
+        #if __has_builtin(__sync_swap)
+            #define MA_HAS_SYNC_SWAP
+        #endif
+    #endif
+#elif defined(__GNUC__)
+    /*
+    The __atomic_* builtins were introduced in GCC 4.7. The minor version must be checked with
+    __GNUC_MINOR__; the previous condition compared __GNUC__ twice ((__GNUC__ == 4 && __GNUC__ >= 7)),
+    which can never be true, so GCC 4.7-4.9 silently fell back to the legacy __sync path.
+    */
+    #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
+        #define MA_HAS_GNUC_ATOMICS
+    #endif
+#endif
+
+#if defined(_WIN32) && !defined(__GNUC__) && !defined(__clang__)
+#define ma_memory_barrier() MemoryBarrier()
+#define ma_atomic_exchange_32(a, b) InterlockedExchange((LONG*)a, (LONG)b)
+#define ma_atomic_exchange_64(a, b) InterlockedExchange64((LONGLONG*)a, (LONGLONG)b)
+#define ma_atomic_increment_32(a) InterlockedIncrement((LONG*)a)
+#define ma_atomic_decrement_32(a) InterlockedDecrement((LONG*)a)
+#else
+#define ma_memory_barrier() __sync_synchronize()
+#if defined(MA_HAS_SYNC_SWAP)
+ #define ma_atomic_exchange_32(a, b) __sync_swap(a, b)
+ #define ma_atomic_exchange_64(a, b) __sync_swap(a, b)
+#elif defined(MA_HAS_GNUC_ATOMICS)
+ #define ma_atomic_exchange_32(a, b) (void)__atomic_exchange_n(a, b, __ATOMIC_ACQ_REL)
+ #define ma_atomic_exchange_64(a, b) (void)__atomic_exchange_n(a, b, __ATOMIC_ACQ_REL)
+#else
+ #define ma_atomic_exchange_32(a, b) __sync_synchronize(); (void)__sync_lock_test_and_set(a, b)
+ #define ma_atomic_exchange_64(a, b) __sync_synchronize(); (void)__sync_lock_test_and_set(a, b)
+#endif
+#define ma_atomic_increment_32(a) __sync_add_and_fetch(a, 1)
+#define ma_atomic_decrement_32(a) __sync_sub_and_fetch(a, 1)
+#endif
+
+#ifdef MA_64BIT
+#define ma_atomic_exchange_ptr ma_atomic_exchange_64
+#endif
+#ifdef MA_32BIT
+#define ma_atomic_exchange_ptr ma_atomic_exchange_32
+#endif
+
+
+/* Default allocation callbacks. These simply route to the MA_MALLOC/MA_REALLOC/MA_FREE macros; pUserData is unused. */
+static void* ma__malloc_default(size_t sz, void* pUserData)
+{
+    (void)pUserData;
+    return MA_MALLOC(sz);
+}
+
+static void* ma__realloc_default(void* p, size_t sz, void* pUserData)
+{
+    (void)pUserData;
+    return MA_REALLOC(p, sz);
+}
+
+static void ma__free_default(void* p, void* pUserData)
+{
+    (void)pUserData;
+    MA_FREE(p);
+}
+
+
+/* Allocates sz bytes using the given callbacks. Falls back to onRealloc(NULL, sz) when onMalloc is not set. Returns NULL on failure. */
+static void* ma__malloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pAllocationCallbacks != NULL) {
+        if (pAllocationCallbacks->onMalloc != NULL) {
+            return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData);
+        }
+
+        /* No onMalloc. realloc(NULL, sz) behaves like malloc(sz), so try that. */
+        if (pAllocationCallbacks->onRealloc != NULL) {
+            return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData);
+        }
+    }
+
+    return NULL;
+}
+
+/*
+Reallocates p to szNew bytes using the given callbacks. szOld is the previous allocation size, needed only when
+realloc() has to be emulated with onMalloc/onFree. Returns NULL on failure (the original block is left intact
+on the emulated path only if onMalloc fails before p is freed).
+*/
+static void* ma__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pAllocationCallbacks == NULL) {
+        return NULL;
+    }
+
+    if (pAllocationCallbacks->onRealloc != NULL) {
+        return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData);
+    }
+
+    /* Try emulating realloc() in terms of malloc()/free(). */
+    if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) {
+        void* p2;
+
+        p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData);
+        if (p2 == NULL) {
+            return NULL;
+        }
+
+        if (p != NULL) {
+            /* Copy at most szNew bytes. Copying the full szOld would overflow p2 when shrinking (szNew < szOld). */
+            MA_COPY_MEMORY(p2, p, (szOld < szNew) ? szOld : szNew);
+            pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
+        }
+
+        return p2;
+    }
+
+    return NULL;
+}
+
+/* Allocates sz bytes and zeroes them. Returns NULL on allocation failure. */
+static MA_INLINE void* ma__calloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    void* p = ma__malloc_from_callbacks(sz, pAllocationCallbacks);
+    if (p == NULL) {
+        return NULL;
+    }
+
+    MA_ZERO_MEMORY(p, sz);
+    return p;
+}
+
+/* Frees memory previously allocated with the same callbacks. No-op when p, the callbacks, or onFree is NULL. */
+static void ma__free_from_callbacks(void* p, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (p != NULL && pAllocationCallbacks != NULL && pAllocationCallbacks->onFree != NULL) {
+        pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
+    }
+}
+
+/*
+Returns a set of allocation callbacks wired to the MA_MALLOC/MA_REALLOC/MA_FREE defaults.
+Declared with (void): an empty `()` parameter list is an old-style non-prototype declaration in C.
+*/
+static ma_allocation_callbacks ma_allocation_callbacks_init_default(void)
+{
+    ma_allocation_callbacks callbacks;
+    callbacks.pUserData = NULL;
+    callbacks.onMalloc  = ma__malloc_default;
+    callbacks.onRealloc = ma__realloc_default;
+    callbacks.onFree    = ma__free_default;
+
+    return callbacks;
+}
+
+/*
+Copies pSrc into pDst, substituting the default callbacks when pSrc is NULL or entirely zeroed out.
+A partially-filled set (no onFree, or neither onMalloc nor onRealloc) is rejected with MA_INVALID_ARGS.
+*/
+static ma_result ma_allocation_callbacks_init_copy(ma_allocation_callbacks* pDst, const ma_allocation_callbacks* pSrc)
+{
+    if (pDst == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* A NULL or all-NULL source means "use the defaults". */
+    if (pSrc == NULL || (pSrc->pUserData == NULL && pSrc->onFree == NULL && pSrc->onMalloc == NULL && pSrc->onRealloc == NULL)) {
+        *pDst = ma_allocation_callbacks_init_default();
+        return MA_SUCCESS;
+    }
+
+    /* A non-empty set must be able to both allocate and free. */
+    if (pSrc->onFree == NULL || (pSrc->onMalloc == NULL && pSrc->onRealloc == NULL)) {
+        return MA_INVALID_ARGS; /* Invalid allocation callbacks. */
+    }
+
+    *pDst = *pSrc;
+    return MA_SUCCESS;
+}
+
+
+/*
+Calculates how many output frames resampling frameCountIn frames from sampleRateIn to sampleRateOut will produce.
+Returns 0 if the throwaway resampler fails to initialize (e.g. invalid rates).
+*/
+ma_uint64 ma_calculate_frame_count_after_resampling(ma_uint32 sampleRateOut, ma_uint32 sampleRateIn, ma_uint64 frameCountIn)
+{
+    /* For robustness we're going to use a resampler object to calculate this since that already has a way of calculating this. */
+    ma_result result;
+    ma_uint64 frameCountOut;
+    ma_resampler_config config;
+    ma_resampler resampler;
+
+    /* Format and channel count are irrelevant to the frame-count math, so a minimal mono s16 config is used. */
+    config = ma_resampler_config_init(ma_format_s16, 1, sampleRateIn, sampleRateOut, ma_resample_algorithm_linear);
+    result = ma_resampler_init(&config, &resampler);
+    if (result != MA_SUCCESS) {
+        return 0;
+    }
+
+    frameCountOut = ma_resampler_get_expected_output_frame_count(&resampler, frameCountIn);
+
+    ma_resampler_uninit(&resampler);
+    return frameCountOut;
+}
+
+#ifndef MA_DATA_CONVERTER_STACK_BUFFER_SIZE
+#define MA_DATA_CONVERTER_STACK_BUFFER_SIZE 4096
+#endif
+
+/************************************************************************************************************************************************************
+*************************************************************************************************************************************************************
+
+DEVICE I/O
+==========
+
+*************************************************************************************************************************************************************
+************************************************************************************************************************************************************/
+#ifndef MA_NO_DEVICE_IO
+/*
+Unfortunately using runtime linking for pthreads causes problems. This has occurred for me when testing on FreeBSD. When
+using runtime linking, deadlocks can occur (for me it happens when loading data from fread()). It turns out that doing
+compile-time linking fixes this. I'm not sure why this happens, but the safest way I can think of to fix this is to simply
+disable runtime linking by default. To enable runtime linking, #define this before the implementation of this file. I am
+not officially supporting this, but I'm leaving it here in case it's useful for somebody, somewhere.
+*/
+/*#define MA_USE_RUNTIME_LINKING_FOR_PTHREAD*/
+
+/* Disable run-time linking on certain backends. */
+#ifndef MA_NO_RUNTIME_LINKING
+ #if defined(MA_ANDROID) || defined(MA_EMSCRIPTEN)
+ #define MA_NO_RUNTIME_LINKING
+ #endif
+#endif
+
+/*
+Check if we have the necessary development packages for each backend at the top so we can use this to determine whether or not
+certain unused functions and variables can be excluded from the build to avoid warnings.
+*/
+#ifdef MA_ENABLE_WASAPI
+    #define MA_HAS_WASAPI      /* Every compiler should support WASAPI */
+#endif
+#ifdef MA_ENABLE_DSOUND
+    #define MA_HAS_DSOUND      /* Every compiler should support DirectSound. */
+#endif
+#ifdef MA_ENABLE_WINMM
+    #define MA_HAS_WINMM       /* Every compiler I'm aware of supports WinMM. */
+#endif
+#ifdef MA_ENABLE_ALSA
+    #define MA_HAS_ALSA
+    #ifdef MA_NO_RUNTIME_LINKING
+        #ifdef __has_include
+            /* The header arguments below had been stripped from the __has_include() checks, which is invalid preprocessing. Restored. */
+            #if !__has_include(<alsa/asoundlib.h>)
+                #undef MA_HAS_ALSA
+            #endif
+        #endif
+    #endif
+#endif
+#ifdef MA_ENABLE_PULSEAUDIO
+    #define MA_HAS_PULSEAUDIO
+    #ifdef MA_NO_RUNTIME_LINKING
+        #ifdef __has_include
+            #if !__has_include(<pulse/pulseaudio.h>)
+                #undef MA_HAS_PULSEAUDIO
+            #endif
+        #endif
+    #endif
+#endif
+#ifdef MA_ENABLE_JACK
+    #define MA_HAS_JACK
+    #ifdef MA_NO_RUNTIME_LINKING
+        #ifdef __has_include
+            #if !__has_include(<jack/jack.h>)
+                #undef MA_HAS_JACK
+            #endif
+        #endif
+    #endif
+#endif
+#ifdef MA_ENABLE_COREAUDIO
+    #define MA_HAS_COREAUDIO
+#endif
+#ifdef MA_ENABLE_SNDIO
+    #define MA_HAS_SNDIO
+#endif
+#ifdef MA_ENABLE_AUDIO4
+    #define MA_HAS_AUDIO4
+#endif
+#ifdef MA_ENABLE_OSS
+    #define MA_HAS_OSS
+#endif
+#ifdef MA_ENABLE_AAUDIO
+    #define MA_HAS_AAUDIO
+#endif
+#ifdef MA_ENABLE_OPENSL
+    #define MA_HAS_OPENSL
+#endif
+#ifdef MA_ENABLE_WEBAUDIO
+    #define MA_HAS_WEBAUDIO
+#endif
+#ifdef MA_ENABLE_NULL
+    #define MA_HAS_NULL    /* Everything supports the null backend. */
+#endif
+
+/* Returns a human-readable name for the given backend. Unknown values return "Unknown". The returned string is static. */
+const char* ma_get_backend_name(ma_backend backend)
+{
+    switch (backend)
+    {
+        case ma_backend_wasapi:     return "WASAPI";
+        case ma_backend_dsound:     return "DirectSound";
+        case ma_backend_winmm:      return "WinMM";
+        case ma_backend_coreaudio:  return "Core Audio";
+        case ma_backend_sndio:      return "sndio";
+        case ma_backend_audio4:     return "audio(4)";
+        case ma_backend_oss:        return "OSS";
+        case ma_backend_pulseaudio: return "PulseAudio";
+        case ma_backend_alsa:       return "ALSA";
+        case ma_backend_jack:       return "JACK";
+        case ma_backend_aaudio:     return "AAudio";
+        case ma_backend_opensl:     return "OpenSL|ES";
+        case ma_backend_webaudio:   return "Web Audio";
+        case ma_backend_null:       return "Null";
+        default:                    return "Unknown";
+    }
+}
+
+/* Reports whether the given backend supports loopback capture. WASAPI is currently the only one; every other value, including unknown backends, is MA_FALSE. */
+ma_bool32 ma_is_loopback_supported(ma_backend backend)
+{
+    return (backend == ma_backend_wasapi) ? MA_TRUE : MA_FALSE;
+}
+
+
+
+#ifdef MA_WIN32
+ #define MA_THREADCALL WINAPI
+ typedef unsigned long ma_thread_result;
+#else
+ #define MA_THREADCALL
+ typedef void* ma_thread_result;
+#endif
+typedef ma_thread_result (MA_THREADCALL * ma_thread_entry_proc)(void* pData);
+
+#ifdef MA_WIN32
+/* Translates a Win32 GetLastError() code to a ma_result. Codes without an explicit mapping fall through to MA_ERROR. */
+static ma_result ma_result_from_GetLastError(DWORD error)
+{
+    switch (error)
+    {
+        case ERROR_SUCCESS:             return MA_SUCCESS;
+        case ERROR_PATH_NOT_FOUND:      return MA_DOES_NOT_EXIST;
+        case ERROR_TOO_MANY_OPEN_FILES: return MA_TOO_MANY_OPEN_FILES;
+        case ERROR_NOT_ENOUGH_MEMORY:   return MA_OUT_OF_MEMORY;
+        case ERROR_DISK_FULL:           return MA_NO_SPACE;
+        case ERROR_HANDLE_EOF:          return MA_END_OF_FILE;
+        case ERROR_NEGATIVE_SEEK:       return MA_BAD_SEEK;
+        case ERROR_INVALID_PARAMETER:   return MA_INVALID_ARGS;
+        case ERROR_ACCESS_DENIED:       return MA_ACCESS_DENIED;
+        case ERROR_SEM_TIMEOUT:         return MA_TIMEOUT;
+        case ERROR_FILE_NOT_FOUND:      return MA_DOES_NOT_EXIST;
+        default: break;
+    }
+
+    return MA_ERROR;
+}
+
+/* WASAPI error codes. */
+#define MA_AUDCLNT_E_NOT_INITIALIZED ((HRESULT)0x88890001)
+#define MA_AUDCLNT_E_ALREADY_INITIALIZED ((HRESULT)0x88890002)
+#define MA_AUDCLNT_E_WRONG_ENDPOINT_TYPE ((HRESULT)0x88890003)
+#define MA_AUDCLNT_E_DEVICE_INVALIDATED ((HRESULT)0x88890004)
+#define MA_AUDCLNT_E_NOT_STOPPED ((HRESULT)0x88890005)
+#define MA_AUDCLNT_E_BUFFER_TOO_LARGE ((HRESULT)0x88890006)
+#define MA_AUDCLNT_E_OUT_OF_ORDER ((HRESULT)0x88890007)
+#define MA_AUDCLNT_E_UNSUPPORTED_FORMAT ((HRESULT)0x88890008)
+#define MA_AUDCLNT_E_INVALID_SIZE ((HRESULT)0x88890009)
+#define MA_AUDCLNT_E_DEVICE_IN_USE ((HRESULT)0x8889000A)
+#define MA_AUDCLNT_E_BUFFER_OPERATION_PENDING ((HRESULT)0x8889000B)
+#define MA_AUDCLNT_E_THREAD_NOT_REGISTERED ((HRESULT)0x8889000C)
+#define MA_AUDCLNT_E_NO_SINGLE_PROCESS ((HRESULT)0x8889000D)
+#define MA_AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED ((HRESULT)0x8889000E)
+#define MA_AUDCLNT_E_ENDPOINT_CREATE_FAILED ((HRESULT)0x8889000F)
+#define MA_AUDCLNT_E_SERVICE_NOT_RUNNING ((HRESULT)0x88890010)
+#define MA_AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED ((HRESULT)0x88890011)
+#define MA_AUDCLNT_E_EXCLUSIVE_MODE_ONLY ((HRESULT)0x88890012)
+#define MA_AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL ((HRESULT)0x88890013)
+#define MA_AUDCLNT_E_EVENTHANDLE_NOT_SET ((HRESULT)0x88890014)
+#define MA_AUDCLNT_E_INCORRECT_BUFFER_SIZE ((HRESULT)0x88890015)
+#define MA_AUDCLNT_E_BUFFER_SIZE_ERROR ((HRESULT)0x88890016)
+#define MA_AUDCLNT_E_CPUUSAGE_EXCEEDED ((HRESULT)0x88890017)
+#define MA_AUDCLNT_E_BUFFER_ERROR ((HRESULT)0x88890018)
+#define MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED ((HRESULT)0x88890019)
+#define MA_AUDCLNT_E_INVALID_DEVICE_PERIOD ((HRESULT)0x88890020)
+#define MA_AUDCLNT_E_INVALID_STREAM_FLAG ((HRESULT)0x88890021)
+#define MA_AUDCLNT_E_ENDPOINT_OFFLOAD_NOT_CAPABLE ((HRESULT)0x88890022)
+#define MA_AUDCLNT_E_OUT_OF_OFFLOAD_RESOURCES ((HRESULT)0x88890023)
+#define MA_AUDCLNT_E_OFFLOAD_MODE_ONLY ((HRESULT)0x88890024)
+#define MA_AUDCLNT_E_NONOFFLOAD_MODE_ONLY ((HRESULT)0x88890025)
+#define MA_AUDCLNT_E_RESOURCES_INVALIDATED ((HRESULT)0x88890026)
+#define MA_AUDCLNT_E_RAW_MODE_UNSUPPORTED ((HRESULT)0x88890027)
+#define MA_AUDCLNT_E_ENGINE_PERIODICITY_LOCKED ((HRESULT)0x88890028)
+#define MA_AUDCLNT_E_ENGINE_FORMAT_LOCKED ((HRESULT)0x88890029)
+#define MA_AUDCLNT_E_HEADTRACKING_ENABLED ((HRESULT)0x88890030)
+#define MA_AUDCLNT_E_HEADTRACKING_UNSUPPORTED ((HRESULT)0x88890040)
+#define MA_AUDCLNT_S_BUFFER_EMPTY ((HRESULT)0x08890001)
+#define MA_AUDCLNT_S_THREAD_ALREADY_REGISTERED ((HRESULT)0x08890002)
+#define MA_AUDCLNT_S_POSITION_STALLED ((HRESULT)0x08890003)
+
+#define MA_DS_OK ((HRESULT)0)
+#define MA_DS_NO_VIRTUALIZATION ((HRESULT)0x0878000A)
+#define MA_DSERR_ALLOCATED ((HRESULT)0x8878000A)
+#define MA_DSERR_CONTROLUNAVAIL ((HRESULT)0x8878001E)
+#define MA_DSERR_INVALIDPARAM ((HRESULT)0x80070057) /*E_INVALIDARG*/
+#define MA_DSERR_INVALIDCALL ((HRESULT)0x88780032)
+#define MA_DSERR_GENERIC ((HRESULT)0x80004005) /*E_FAIL*/
+#define MA_DSERR_PRIOLEVELNEEDED ((HRESULT)0x88780046)
+#define MA_DSERR_OUTOFMEMORY ((HRESULT)0x8007000E) /*E_OUTOFMEMORY*/
+#define MA_DSERR_BADFORMAT ((HRESULT)0x88780064)
+#define MA_DSERR_UNSUPPORTED ((HRESULT)0x80004001) /*E_NOTIMPL*/
+#define MA_DSERR_NODRIVER ((HRESULT)0x88780078)
+#define MA_DSERR_ALREADYINITIALIZED ((HRESULT)0x88780082)
+#define MA_DSERR_NOAGGREGATION ((HRESULT)0x80040110) /*CLASS_E_NOAGGREGATION*/
+#define MA_DSERR_BUFFERLOST ((HRESULT)0x88780096)
+#define MA_DSERR_OTHERAPPHASPRIO ((HRESULT)0x887800A0)
+#define MA_DSERR_UNINITIALIZED ((HRESULT)0x887800AA)
+#define MA_DSERR_NOINTERFACE ((HRESULT)0x80004002) /*E_NOINTERFACE*/
+#define MA_DSERR_ACCESSDENIED ((HRESULT)0x80070005) /*E_ACCESSDENIED*/
+#define MA_DSERR_BUFFERTOOSMALL ((HRESULT)0x887800B4)
+#define MA_DSERR_DS8_REQUIRED ((HRESULT)0x887800BE)
+#define MA_DSERR_SENDLOOP ((HRESULT)0x887800C8)
+#define MA_DSERR_BADSENDBUFFERGUID ((HRESULT)0x887800D2)
+#define MA_DSERR_OBJECTNOTFOUND ((HRESULT)0x88781161)
+#define MA_DSERR_FXUNAVAILABLE ((HRESULT)0x887800DC)
+
+/* Converts a Win32/COM HRESULT to a miniaudio result code. Unrecognized codes map to MA_ERROR. */
+static ma_result ma_result_from_HRESULT(HRESULT hr)
+{
+    switch (hr)
+    {
+        case NOERROR: return MA_SUCCESS;
+        /*case S_OK: return MA_SUCCESS;*/
+
+        case E_POINTER: return MA_INVALID_ARGS;
+        case E_UNEXPECTED: return MA_ERROR;
+        case E_NOTIMPL: return MA_NOT_IMPLEMENTED;
+        case E_OUTOFMEMORY: return MA_OUT_OF_MEMORY;
+        case E_INVALIDARG: return MA_INVALID_ARGS;
+        case E_NOINTERFACE: return MA_API_NOT_FOUND;
+        case E_HANDLE: return MA_INVALID_ARGS;
+        case E_ABORT: return MA_ERROR;
+        case E_FAIL: return MA_ERROR;
+        case E_ACCESSDENIED: return MA_ACCESS_DENIED;
+
+        /* WASAPI */
+        case MA_AUDCLNT_E_NOT_INITIALIZED: return MA_DEVICE_NOT_INITIALIZED;
+        case MA_AUDCLNT_E_ALREADY_INITIALIZED: return MA_DEVICE_ALREADY_INITIALIZED;
+        case MA_AUDCLNT_E_WRONG_ENDPOINT_TYPE: return MA_INVALID_ARGS;
+        case MA_AUDCLNT_E_DEVICE_INVALIDATED: return MA_UNAVAILABLE;
+        case MA_AUDCLNT_E_NOT_STOPPED: return MA_DEVICE_NOT_STOPPED;
+        case MA_AUDCLNT_E_BUFFER_TOO_LARGE: return MA_TOO_BIG;
+        case MA_AUDCLNT_E_OUT_OF_ORDER: return MA_INVALID_OPERATION;
+        case MA_AUDCLNT_E_UNSUPPORTED_FORMAT: return MA_FORMAT_NOT_SUPPORTED;
+        case MA_AUDCLNT_E_INVALID_SIZE: return MA_INVALID_ARGS;
+        case MA_AUDCLNT_E_DEVICE_IN_USE: return MA_BUSY;
+        case MA_AUDCLNT_E_BUFFER_OPERATION_PENDING: return MA_INVALID_OPERATION;
+        case MA_AUDCLNT_E_THREAD_NOT_REGISTERED: return MA_DOES_NOT_EXIST;
+        case MA_AUDCLNT_E_NO_SINGLE_PROCESS: return MA_INVALID_OPERATION;
+        case MA_AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED: return MA_SHARE_MODE_NOT_SUPPORTED;
+        case MA_AUDCLNT_E_ENDPOINT_CREATE_FAILED: return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+        case MA_AUDCLNT_E_SERVICE_NOT_RUNNING: return MA_NOT_CONNECTED;
+        case MA_AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED: return MA_INVALID_ARGS;
+        case MA_AUDCLNT_E_EXCLUSIVE_MODE_ONLY: return MA_SHARE_MODE_NOT_SUPPORTED;
+        case MA_AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL: return MA_INVALID_ARGS;
+        case MA_AUDCLNT_E_EVENTHANDLE_NOT_SET: return MA_INVALID_ARGS;
+        case MA_AUDCLNT_E_INCORRECT_BUFFER_SIZE: return MA_INVALID_ARGS;
+        case MA_AUDCLNT_E_BUFFER_SIZE_ERROR: return MA_INVALID_ARGS;
+        case MA_AUDCLNT_E_CPUUSAGE_EXCEEDED: return MA_ERROR;
+        case MA_AUDCLNT_E_BUFFER_ERROR: return MA_ERROR;
+        case MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED: return MA_INVALID_ARGS;
+        case MA_AUDCLNT_E_INVALID_DEVICE_PERIOD: return MA_INVALID_ARGS;
+        case MA_AUDCLNT_E_INVALID_STREAM_FLAG: return MA_INVALID_ARGS;
+        case MA_AUDCLNT_E_ENDPOINT_OFFLOAD_NOT_CAPABLE: return MA_INVALID_OPERATION;
+        case MA_AUDCLNT_E_OUT_OF_OFFLOAD_RESOURCES: return MA_OUT_OF_MEMORY;
+        case MA_AUDCLNT_E_OFFLOAD_MODE_ONLY: return MA_INVALID_OPERATION;
+        case MA_AUDCLNT_E_NONOFFLOAD_MODE_ONLY: return MA_INVALID_OPERATION;
+        case MA_AUDCLNT_E_RESOURCES_INVALIDATED: return MA_INVALID_DATA;
+        case MA_AUDCLNT_E_RAW_MODE_UNSUPPORTED: return MA_INVALID_OPERATION;
+        case MA_AUDCLNT_E_ENGINE_PERIODICITY_LOCKED: return MA_INVALID_OPERATION;
+        case MA_AUDCLNT_E_ENGINE_FORMAT_LOCKED: return MA_INVALID_OPERATION;
+        case MA_AUDCLNT_E_HEADTRACKING_ENABLED: return MA_INVALID_OPERATION;
+        case MA_AUDCLNT_E_HEADTRACKING_UNSUPPORTED: return MA_INVALID_OPERATION;
+        case MA_AUDCLNT_S_BUFFER_EMPTY: return MA_NO_SPACE;
+        case MA_AUDCLNT_S_THREAD_ALREADY_REGISTERED: return MA_ALREADY_EXISTS;
+        case MA_AUDCLNT_S_POSITION_STALLED: return MA_ERROR;
+
+        /* DirectSound */
+        /*case MA_DS_OK: return MA_SUCCESS;*/ /* S_OK */
+        case MA_DS_NO_VIRTUALIZATION: return MA_SUCCESS;
+        case MA_DSERR_ALLOCATED: return MA_ALREADY_IN_USE;
+        case MA_DSERR_CONTROLUNAVAIL: return MA_INVALID_OPERATION;
+        /*case MA_DSERR_INVALIDPARAM: return MA_INVALID_ARGS;*/ /* E_INVALIDARG */
+        case MA_DSERR_INVALIDCALL: return MA_INVALID_OPERATION;
+        /*case MA_DSERR_GENERIC: return MA_ERROR;*/ /* E_FAIL */
+        case MA_DSERR_PRIOLEVELNEEDED: return MA_INVALID_OPERATION;
+        /*case MA_DSERR_OUTOFMEMORY: return MA_OUT_OF_MEMORY;*/ /* E_OUTOFMEMORY */
+        case MA_DSERR_BADFORMAT: return MA_FORMAT_NOT_SUPPORTED;
+        /*case MA_DSERR_UNSUPPORTED: return MA_NOT_IMPLEMENTED;*/ /* E_NOTIMPL */
+        case MA_DSERR_NODRIVER: return MA_FAILED_TO_INIT_BACKEND;
+        case MA_DSERR_ALREADYINITIALIZED: return MA_DEVICE_ALREADY_INITIALIZED;
+        case MA_DSERR_NOAGGREGATION: return MA_ERROR;
+        case MA_DSERR_BUFFERLOST: return MA_UNAVAILABLE;
+        case MA_DSERR_OTHERAPPHASPRIO: return MA_ACCESS_DENIED;
+        case MA_DSERR_UNINITIALIZED: return MA_DEVICE_NOT_INITIALIZED;
+        /*case MA_DSERR_NOINTERFACE: return MA_API_NOT_FOUND;*/ /* E_NOINTERFACE */
+        /*case MA_DSERR_ACCESSDENIED: return MA_ACCESS_DENIED;*/ /* E_ACCESSDENIED */
+        case MA_DSERR_BUFFERTOOSMALL: return MA_NO_SPACE;
+        case MA_DSERR_DS8_REQUIRED: return MA_INVALID_OPERATION;
+        case MA_DSERR_SENDLOOP: return MA_DEADLOCK;
+        case MA_DSERR_BADSENDBUFFERGUID: return MA_INVALID_ARGS;
+        case MA_DSERR_OBJECTNOTFOUND: return MA_NO_DEVICE;
+        case MA_DSERR_FXUNAVAILABLE: return MA_UNAVAILABLE;
+
+        default: return MA_ERROR;
+    }
+}
+
+/* Dynamically-resolved Win32/COM entry points. These are looked up at runtime so the corresponding DLLs are not hard link-time dependencies. */
+typedef HRESULT (WINAPI * MA_PFN_CoInitializeEx)(LPVOID pvReserved, DWORD dwCoInit);
+typedef void (WINAPI * MA_PFN_CoUninitialize)();
+typedef HRESULT (WINAPI * MA_PFN_CoCreateInstance)(REFCLSID rclsid, LPUNKNOWN pUnkOuter, DWORD dwClsContext, REFIID riid, LPVOID *ppv);
+typedef void (WINAPI * MA_PFN_CoTaskMemFree)(LPVOID pv);
+typedef HRESULT (WINAPI * MA_PFN_PropVariantClear)(PROPVARIANT *pvar);
+typedef int (WINAPI * MA_PFN_StringFromGUID2)(const GUID* const rguid, LPOLESTR lpsz, int cchMax);
+
+typedef HWND (WINAPI * MA_PFN_GetForegroundWindow)();
+typedef HWND (WINAPI * MA_PFN_GetDesktopWindow)();
+
+/* Microsoft documents these APIs as returning LSTATUS, but the Win32 API shipping with some compilers do not define it. It's just a LONG. */
+typedef LONG (WINAPI * MA_PFN_RegOpenKeyExA)(HKEY hKey, LPCSTR lpSubKey, DWORD ulOptions, REGSAM samDesired, PHKEY phkResult);
+typedef LONG (WINAPI * MA_PFN_RegCloseKey)(HKEY hKey);
+typedef LONG (WINAPI * MA_PFN_RegQueryValueExA)(HKEY hKey, LPCSTR lpValueName, LPDWORD lpReserved, LPDWORD lpType, LPBYTE lpData, LPDWORD lpcbData);
+#endif
+
+
+/* Device state machine values. Transitions are STOPPED -> STARTING -> STARTED -> STOPPING -> STOPPED. */
+#define MA_STATE_UNINITIALIZED 0
+#define MA_STATE_STOPPED 1 /* The device's default state after initialization. */
+#define MA_STATE_STARTED 2 /* The worker thread is in it's main loop waiting for the driver to request or deliver audio data. */
+#define MA_STATE_STARTING 3 /* Transitioning from a stopped state to started. */
+#define MA_STATE_STOPPING 4 /* Transitioning from a started state to stopped. */
+
+#define MA_DEFAULT_PLAYBACK_DEVICE_NAME "Default Playback Device"
+#define MA_DEFAULT_CAPTURE_DEVICE_NAME "Default Capture Device"
+
+
+/* Maps a log level to a short human-readable label for log output. */
+const char* ma_log_level_to_string(ma_uint32 logLevel)
+{
+    if (logLevel == MA_LOG_LEVEL_VERBOSE) {
+        return "";
+    }
+    if (logLevel == MA_LOG_LEVEL_INFO) {
+        return "INFO";
+    }
+    if (logLevel == MA_LOG_LEVEL_WARNING) {
+        return "WARNING";
+    }
+
+    /* MA_LOG_LEVEL_ERROR and anything unrecognized. */
+    return "ERROR";
+}
+
+/* Posts a log message to the context's log callback. Compiled to a no-op unless MA_LOG_LEVEL is defined and logLevel is within it. */
+static void ma_post_log_message(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message)
+{
+    if (pContext == NULL) {
+        return;
+    }
+
+#if defined(MA_LOG_LEVEL)
+    if (logLevel <= MA_LOG_LEVEL) {
+        ma_log_proc onLog;
+
+    #if defined(MA_DEBUG_OUTPUT)
+        if (logLevel <= MA_LOG_LEVEL) {
+            printf("%s: %s\n", ma_log_level_to_string(logLevel), message);
+        }
+    #endif
+
+        onLog = pContext->logCallback;
+        if (onLog) {
+            onLog(pContext, pDevice, logLevel, message);
+        }
+    }
+#endif
+}
+
+/* Posts an log message. Throw a breakpoint in here if you're needing to debug. The return value is always "resultCode". */
+static ma_result ma_context_post_error(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message, ma_result resultCode)
+{
+    /* Derive the context from the device if necessary. */
+    if (pContext == NULL) {
+        if (pDevice != NULL) {
+            pContext = pDevice->pContext;
+        }
+    }
+
+    ma_post_log_message(pContext, pDevice, logLevel, message);
+    return resultCode;
+}
+
+/* Convenience wrapper: posts an error against a device, deriving the context from it. */
+static ma_result ma_post_error(ma_device* pDevice, ma_uint32 logLevel, const char* message, ma_result resultCode)
+{
+    return ma_context_post_error(NULL, pDevice, logLevel, message, resultCode);
+}
+
+
+/*******************************************************************************
+
+Timing
+
+*******************************************************************************/
+#ifdef MA_WIN32
+LARGE_INTEGER g_ma_TimerFrequency = {{0}};
+static void ma_timer_init(ma_timer* pTimer)
+{
+    LARGE_INTEGER counter;
+
+    /* The QPC frequency is fixed at boot, so it only needs to be queried once. */
+    if (g_ma_TimerFrequency.QuadPart == 0) {
+        QueryPerformanceFrequency(&g_ma_TimerFrequency);
+    }
+
+    QueryPerformanceCounter(&counter);
+    pTimer->counter = counter.QuadPart;
+}
+
+static double ma_timer_get_time_in_seconds(ma_timer* pTimer)
+{
+    LARGE_INTEGER counter;
+    if (!QueryPerformanceCounter(&counter)) {
+        return 0;
+    }
+
+    return (double)(counter.QuadPart - pTimer->counter) / g_ma_TimerFrequency.QuadPart;
+}
+#elif defined(MA_APPLE) && (__MAC_OS_X_VERSION_MIN_REQUIRED < 101200)
+ma_uint64 g_ma_TimerFrequency = 0;
+static void ma_timer_init(ma_timer* pTimer)
+{
+    mach_timebase_info_data_t baseTime;
+    mach_timebase_info(&baseTime);
+    g_ma_TimerFrequency = (baseTime.denom * 1e9) / baseTime.numer;  /* NOTE(review): truncated to an integer tick rate - acceptable for nanosecond-scale timebases. */
+
+    pTimer->counter = mach_absolute_time();
+}
+
+static double ma_timer_get_time_in_seconds(ma_timer* pTimer)
+{
+    ma_uint64 newTimeCounter = mach_absolute_time();
+    ma_uint64 oldTimeCounter = pTimer->counter;
+
+    /* Cast to double before dividing. The previous unsigned integer division truncated the result to whole seconds. */
+    return (double)(newTimeCounter - oldTimeCounter) / g_ma_TimerFrequency;
+}
+#elif defined(MA_EMSCRIPTEN)
+static MA_INLINE void ma_timer_init(ma_timer* pTimer)
+{
+    pTimer->counterD = emscripten_get_now();
+}
+
+static MA_INLINE double ma_timer_get_time_in_seconds(ma_timer* pTimer)
+{
+    return (emscripten_get_now() - pTimer->counterD) / 1000; /* Emscripten is in milliseconds. */
+}
+#else
+#if _POSIX_C_SOURCE >= 199309L
+#if defined(CLOCK_MONOTONIC)
+    #define MA_CLOCK_ID CLOCK_MONOTONIC
+#else
+    #define MA_CLOCK_ID CLOCK_REALTIME
+#endif
+
+static void ma_timer_init(ma_timer* pTimer)
+{
+    struct timespec newTime;
+    clock_gettime(MA_CLOCK_ID, &newTime);
+
+    /* Widen tv_sec before multiplying; with a 32-bit time_t the old expression overflowed. */
+    pTimer->counter = ((ma_uint64)newTime.tv_sec * 1000000000) + newTime.tv_nsec;
+}
+
+static double ma_timer_get_time_in_seconds(ma_timer* pTimer)
+{
+    ma_uint64 newTimeCounter;
+    ma_uint64 oldTimeCounter;
+
+    struct timespec newTime;
+    clock_gettime(MA_CLOCK_ID, &newTime);
+
+    newTimeCounter = ((ma_uint64)newTime.tv_sec * 1000000000) + newTime.tv_nsec;
+    oldTimeCounter = pTimer->counter;
+
+    return (newTimeCounter - oldTimeCounter) / 1000000000.0;
+}
+#else
+static void ma_timer_init(ma_timer* pTimer)
+{
+    struct timeval newTime;
+    gettimeofday(&newTime, NULL);
+
+    /* Widen tv_sec before multiplying to avoid overflow on 32-bit time_t. */
+    pTimer->counter = ((ma_uint64)newTime.tv_sec * 1000000) + newTime.tv_usec;
+}
+
+static double ma_timer_get_time_in_seconds(ma_timer* pTimer)
+{
+    ma_uint64 newTimeCounter;
+    ma_uint64 oldTimeCounter;
+
+    struct timeval newTime;
+    gettimeofday(&newTime, NULL);
+
+    newTimeCounter = ((ma_uint64)newTime.tv_sec * 1000000) + newTime.tv_usec;
+    oldTimeCounter = pTimer->counter;
+
+    return (newTimeCounter - oldTimeCounter) / 1000000.0;
+}
+#endif
+#endif
+
+
+/*******************************************************************************
+
+Dynamic Linking
+
+*******************************************************************************/
+ma_handle ma_dlopen(ma_context* pContext, const char* filename)
+{
+ ma_handle handle;
+
+#if MA_LOG_LEVEL >= MA_LOG_LEVEL_VERBOSE
+ if (pContext != NULL) {
+ char message[256];
+ ma_strappend(message, sizeof(message), "Loading library: ", filename);
+ ma_post_log_message(pContext, NULL, MA_LOG_LEVEL_VERBOSE, message);
+ }
+#endif
+
+#ifdef _WIN32
+#ifdef MA_WIN32_DESKTOP
+ handle = (ma_handle)LoadLibraryA(filename);
+#else
+ /* *sigh* It appears there is no ANSI version of LoadPackagedLibrary()... */
+ WCHAR filenameW[4096];
+ if (MultiByteToWideChar(CP_UTF8, 0, filename, -1, filenameW, sizeof(filenameW)) == 0) {
+ handle = NULL;
+ } else {
+ handle = (ma_handle)LoadPackagedLibrary(filenameW, 0);
+ }
+#endif
+#else
+ handle = (ma_handle)dlopen(filename, RTLD_NOW);
+#endif
+
+ /*
+ I'm not considering failure to load a library an error nor a warning because seamlessly falling through to a lower-priority
+ backend is a deliberate design choice. Instead I'm logging it as an informational message.
+ */
+#if MA_LOG_LEVEL >= MA_LOG_LEVEL_INFO
+ if (handle == NULL) {
+ char message[256];
+ ma_strappend(message, sizeof(message), "Failed to load library: ", filename);
+ ma_post_log_message(pContext, NULL, MA_LOG_LEVEL_INFO, message);
+ }
+#endif
+
+ (void)pContext; /* It's possible for pContext to be unused. */
+ return handle;
+}
+
+/* Unloads a library previously loaded with ma_dlopen(). */
+void ma_dlclose(ma_context* pContext, ma_handle handle)
+{
+    (void)pContext;    /* Unused; kept for symmetry with ma_dlopen()/ma_dlsym(). */
+
+#ifdef _WIN32
+    FreeLibrary((HMODULE)handle);
+#else
+    dlclose((void*)handle);
+#endif
+}
+
+/* Resolves a symbol from a library loaded with ma_dlopen(). Returns NULL (and logs a warning) if the symbol is not found. */
+ma_proc ma_dlsym(ma_context* pContext, ma_handle handle, const char* symbol)
+{
+    ma_proc proc;
+
+#if MA_LOG_LEVEL >= MA_LOG_LEVEL_VERBOSE
+    if (pContext != NULL) {
+        char message[256];
+        ma_strappend(message, sizeof(message), "Loading symbol: ", symbol);
+        ma_post_log_message(pContext, NULL, MA_LOG_LEVEL_VERBOSE, message);
+    }
+#endif
+
+#ifdef _WIN32
+    proc = (ma_proc)GetProcAddress((HMODULE)handle, symbol);
+#else
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+    proc = (ma_proc)dlsym((void*)handle, symbol);
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+    #pragma GCC diagnostic pop
+#endif
+#endif
+
+#if MA_LOG_LEVEL >= MA_LOG_LEVEL_WARNING
+    /* The failure being reported is the symbol lookup, so check the resolved proc. The old code checked the library handle instead. */
+    if (proc == NULL) {
+        char message[256];
+        ma_strappend(message, sizeof(message), "Failed to load symbol: ", symbol);
+        ma_post_log_message(pContext, NULL, MA_LOG_LEVEL_WARNING, message);
+    }
+#endif
+
+    (void)pContext; /* It's possible for pContext to be unused. */
+    return proc;
+}
+
+
+/*******************************************************************************
+
+Threading
+
+*******************************************************************************/
+#ifdef MA_WIN32
+/* Win32 implementations of the threading primitives. */
+static int ma_thread_priority_to_win32(ma_thread_priority priority)
+{
+    switch (priority) {
+        case ma_thread_priority_idle:     return THREAD_PRIORITY_IDLE;
+        case ma_thread_priority_lowest:   return THREAD_PRIORITY_LOWEST;
+        case ma_thread_priority_low:      return THREAD_PRIORITY_BELOW_NORMAL;
+        case ma_thread_priority_normal:   return THREAD_PRIORITY_NORMAL;
+        case ma_thread_priority_high:     return THREAD_PRIORITY_ABOVE_NORMAL;
+        case ma_thread_priority_highest:  return THREAD_PRIORITY_HIGHEST;
+        case ma_thread_priority_realtime: return THREAD_PRIORITY_TIME_CRITICAL;
+        default: return THREAD_PRIORITY_NORMAL;
+    }
+}
+
+static ma_result ma_thread_create__win32(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData)
+{
+    pThread->win32.hThread = CreateThread(NULL, 0, entryProc, pData, 0, NULL);
+    if (pThread->win32.hThread == NULL) {
+        return ma_result_from_GetLastError(GetLastError());
+    }
+
+    /* Failure to set the priority is non-fatal; the result is deliberately ignored. */
+    SetThreadPriority((HANDLE)pThread->win32.hThread, ma_thread_priority_to_win32(pContext->threadPriority));
+
+    return MA_SUCCESS;
+}
+
+static void ma_thread_wait__win32(ma_thread* pThread)
+{
+    WaitForSingleObject(pThread->win32.hThread, INFINITE);
+}
+
+static void ma_sleep__win32(ma_uint32 milliseconds)
+{
+    Sleep((DWORD)milliseconds);
+}
+
+
+/*
+The "mutex" is implemented as an auto-reset event rather than a Win32 mutex: created signaled
+(unlocked), lock = wait (consumes the signal), unlock = set. Note this is not recursive.
+*/
+static ma_result ma_mutex_init__win32(ma_context* pContext, ma_mutex* pMutex)
+{
+    (void)pContext;
+
+    pMutex->win32.hMutex = CreateEventW(NULL, FALSE, TRUE, NULL);
+    if (pMutex->win32.hMutex == NULL) {
+        return ma_result_from_GetLastError(GetLastError());
+    }
+
+    return MA_SUCCESS;
+}
+
+static void ma_mutex_uninit__win32(ma_mutex* pMutex)
+{
+    CloseHandle(pMutex->win32.hMutex);
+}
+
+static void ma_mutex_lock__win32(ma_mutex* pMutex)
+{
+    WaitForSingleObject(pMutex->win32.hMutex, INFINITE);
+}
+
+static void ma_mutex_unlock__win32(ma_mutex* pMutex)
+{
+    SetEvent(pMutex->win32.hMutex);
+}
+
+
+/* Auto-reset event: wait blocks until signaled, then automatically resets. */
+static ma_result ma_event_init__win32(ma_context* pContext, ma_event* pEvent)
+{
+    (void)pContext;
+
+    pEvent->win32.hEvent = CreateEventW(NULL, FALSE, FALSE, NULL);
+    if (pEvent->win32.hEvent == NULL) {
+        return ma_result_from_GetLastError(GetLastError());
+    }
+
+    return MA_SUCCESS;
+}
+
+static void ma_event_uninit__win32(ma_event* pEvent)
+{
+    CloseHandle(pEvent->win32.hEvent);
+}
+
+static ma_bool32 ma_event_wait__win32(ma_event* pEvent)
+{
+    return WaitForSingleObject(pEvent->win32.hEvent, INFINITE) == WAIT_OBJECT_0;
+}
+
+static ma_bool32 ma_event_signal__win32(ma_event* pEvent)
+{
+    return SetEvent(pEvent->win32.hEvent);
+}
+
+
+static ma_result ma_semaphore_init__win32(ma_context* pContext, int initialValue, ma_semaphore* pSemaphore)
+{
+    (void)pContext;
+
+    pSemaphore->win32.hSemaphore = CreateSemaphoreW(NULL, (LONG)initialValue, LONG_MAX, NULL);
+    if (pSemaphore->win32.hSemaphore == NULL) {
+        return ma_result_from_GetLastError(GetLastError());
+    }
+
+    return MA_SUCCESS;
+}
+
+static void ma_semaphore_uninit__win32(ma_semaphore* pSemaphore)
+{
+    CloseHandle((HANDLE)pSemaphore->win32.hSemaphore);
+}
+
+static ma_bool32 ma_semaphore_wait__win32(ma_semaphore* pSemaphore)
+{
+    return WaitForSingleObject((HANDLE)pSemaphore->win32.hSemaphore, INFINITE) == WAIT_OBJECT_0;
+}
+
+static ma_bool32 ma_semaphore_release__win32(ma_semaphore* pSemaphore)
+{
+    return ReleaseSemaphore((HANDLE)pSemaphore->win32.hSemaphore, 1, NULL) != 0;
+}
+#endif
+
+
+#ifdef MA_POSIX
+#include <sched.h>  /* NOTE(review): the include target was lost in this hunk; <sched.h> provides sched_getscheduler(), sched_get_priority_min()/max() and SCHED_* used below - confirm against upstream. */
+
+/* pthread entry points are resolved at runtime and stored as untyped pointers; these typedefs give them their real signatures at the call sites. */
+typedef int (* ma_pthread_create_proc)(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
+typedef int (* ma_pthread_join_proc)(pthread_t thread, void **retval);
+typedef int (* ma_pthread_mutex_init_proc)(pthread_mutex_t *__mutex, const pthread_mutexattr_t *__mutexattr);
+typedef int (* ma_pthread_mutex_destroy_proc)(pthread_mutex_t *__mutex);
+typedef int (* ma_pthread_mutex_lock_proc)(pthread_mutex_t *__mutex);
+typedef int (* ma_pthread_mutex_unlock_proc)(pthread_mutex_t *__mutex);
+typedef int (* ma_pthread_cond_init_proc)(pthread_cond_t *__restrict __cond, const pthread_condattr_t *__restrict __cond_attr);
+typedef int (* ma_pthread_cond_destroy_proc)(pthread_cond_t *__cond);
+typedef int (* ma_pthread_cond_signal_proc)(pthread_cond_t *__cond);
+typedef int (* ma_pthread_cond_wait_proc)(pthread_cond_t *__restrict __cond, pthread_mutex_t *__restrict __mutex);
+typedef int (* ma_pthread_attr_init_proc)(pthread_attr_t *attr);
+typedef int (* ma_pthread_attr_destroy_proc)(pthread_attr_t *attr);
+typedef int (* ma_pthread_attr_setschedpolicy_proc)(pthread_attr_t *attr, int policy);
+typedef int (* ma_pthread_attr_getschedparam_proc)(const pthread_attr_t *attr, struct sched_param *param);
+typedef int (* ma_pthread_attr_setschedparam_proc)(pthread_attr_t *attr, const struct sched_param *param);
+
+/*
+Creates a thread with pthreads, attempting (best-effort) to translate the context's
+ma_thread_priority into a scheduler policy and priority via a pthread_attr_t.
+*/
+static ma_result ma_thread_create__posix(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData)
+{
+    int result;
+    pthread_attr_t* pAttr = NULL;
+
+#if !defined(__EMSCRIPTEN__)
+    /* Try setting the thread priority. It's not critical if anything fails here. */
+    pthread_attr_t attr;
+    if (((ma_pthread_attr_init_proc)pContext->posix.pthread_attr_init)(&attr) == 0) {
+        int scheduler = -1;
+        if (pContext->threadPriority == ma_thread_priority_idle) {
+#ifdef SCHED_IDLE
+            if (((ma_pthread_attr_setschedpolicy_proc)pContext->posix.pthread_attr_setschedpolicy)(&attr, SCHED_IDLE) == 0) {
+                scheduler = SCHED_IDLE;
+            }
+#endif
+        } else if (pContext->threadPriority == ma_thread_priority_realtime) {
+#ifdef SCHED_FIFO
+            if (((ma_pthread_attr_setschedpolicy_proc)pContext->posix.pthread_attr_setschedpolicy)(&attr, SCHED_FIFO) == 0) {
+                scheduler = SCHED_FIFO;
+            }
+#endif
+#ifdef MA_LINUX
+        /* On Linux only, everything in between uses the process's current scheduler. Note the closing brace below is shared with the #ifdef'd else. */
+        } else {
+            scheduler = sched_getscheduler(0);
+#endif
+        }
+
+        if (scheduler != -1) {
+            int priorityMin = sched_get_priority_min(scheduler);
+            int priorityMax = sched_get_priority_max(scheduler);
+            int priorityStep = (priorityMax - priorityMin) / 7; /* 7 = number of priorities supported by miniaudio. */
+
+            struct sched_param sched;
+            if (((ma_pthread_attr_getschedparam_proc)pContext->posix.pthread_attr_getschedparam)(&attr, &sched) == 0) {
+                if (pContext->threadPriority == ma_thread_priority_idle) {
+                    sched.sched_priority = priorityMin;
+                } else if (pContext->threadPriority == ma_thread_priority_realtime) {
+                    sched.sched_priority = priorityMax;
+                } else {
+                    sched.sched_priority += ((int)pContext->threadPriority + 5) * priorityStep; /* +5 because the lowest priority is -5. */
+                    if (sched.sched_priority < priorityMin) {
+                        sched.sched_priority = priorityMin;
+                    }
+                    if (sched.sched_priority > priorityMax) {
+                        sched.sched_priority = priorityMax;
+                    }
+                }
+
+                /* Only use the attr for creation if the priority was successfully applied. */
+                if (((ma_pthread_attr_setschedparam_proc)pContext->posix.pthread_attr_setschedparam)(&attr, &sched) == 0) {
+                    pAttr = &attr;
+                }
+            }
+        }
+
+        /* NOTE(review): attr is destroyed here yet may still be referenced by pAttr below; pthread_create permits this on common implementations - confirm. */
+        ((ma_pthread_attr_destroy_proc)pContext->posix.pthread_attr_destroy)(&attr);
+    }
+#endif
+
+    result = ((ma_pthread_create_proc)pContext->posix.pthread_create)(&pThread->posix.thread, pAttr, entryProc, pData);
+    if (result != 0) {
+        return ma_result_from_errno(result);
+    }
+
+    return MA_SUCCESS;
+}
+
+/* Blocks until the thread terminates. The join result is ignored. */
+static void ma_thread_wait__posix(ma_thread* pThread)
+{
+    ((ma_pthread_join_proc)pThread->pContext->posix.pthread_join)(pThread->posix.thread, NULL);
+}
+
+#if !defined(MA_EMSCRIPTEN)
+/* Sleeps for the given number of milliseconds. */
+static void ma_sleep__posix(ma_uint32 milliseconds)
+{
+    #if _POSIX_C_SOURCE >= 199309L
+        /*
+        The previous conversion used milliseconds/1000000 for tv_sec and left tv_nsec >= 1 second
+        for any sleep of 1000ms or more, which makes nanosleep() fail with EINVAL and not sleep
+        at all. tv_nsec must be in [0, 999999999].
+        */
+        struct timespec ts;
+        ts.tv_sec  = milliseconds / 1000;
+        ts.tv_nsec = (milliseconds % 1000) * 1000000;
+        nanosleep(&ts, NULL);
+    #else
+        struct timeval tv;
+        tv.tv_sec  = milliseconds / 1000;
+        tv.tv_usec = (milliseconds % 1000) * 1000;
+        select(0, NULL, NULL, NULL, &tv);
+    #endif
+}
+#endif /* MA_EMSCRIPTEN */
+
+
+/* POSIX mutex: a thin wrapper around pthread_mutex_t. */
+static ma_result ma_mutex_init__posix(ma_context* pContext, ma_mutex* pMutex)
+{
+    int result = ((ma_pthread_mutex_init_proc)pContext->posix.pthread_mutex_init)(&pMutex->posix.mutex, NULL);
+    if (result != 0) {
+        return ma_result_from_errno(result);
+    }
+
+    return MA_SUCCESS;
+}
+
+static void ma_mutex_uninit__posix(ma_mutex* pMutex)
+{
+    ((ma_pthread_mutex_destroy_proc)pMutex->pContext->posix.pthread_mutex_destroy)(&pMutex->posix.mutex);
+}
+
+static void ma_mutex_lock__posix(ma_mutex* pMutex)
+{
+    ((ma_pthread_mutex_lock_proc)pMutex->pContext->posix.pthread_mutex_lock)(&pMutex->posix.mutex);
+}
+
+static void ma_mutex_unlock__posix(ma_mutex* pMutex)
+{
+    ((ma_pthread_mutex_unlock_proc)pMutex->pContext->posix.pthread_mutex_unlock)(&pMutex->posix.mutex);
+}
+
+
+/* POSIX event: an auto-reset event built from a mutex, a condition variable and a flag. */
+static ma_result ma_event_init__posix(ma_context* pContext, ma_event* pEvent)
+{
+    int result;
+
+    result = ((ma_pthread_mutex_init_proc)pContext->posix.pthread_mutex_init)(&pEvent->posix.mutex, NULL);
+    if (result != 0) {
+        return ma_result_from_errno(result);
+    }
+
+    result = ((ma_pthread_cond_init_proc)pContext->posix.pthread_cond_init)(&pEvent->posix.condition, NULL);
+    if (result != 0) {
+        /* Undo the mutex initialization so a failed event leaves nothing allocated. */
+        ((ma_pthread_mutex_destroy_proc)pEvent->pContext->posix.pthread_mutex_destroy)(&pEvent->posix.mutex);
+        return ma_result_from_errno(result);
+    }
+
+    pEvent->posix.value = 0;
+    return MA_SUCCESS;
+}
+
+static void ma_event_uninit__posix(ma_event* pEvent)
+{
+    ((ma_pthread_cond_destroy_proc)pEvent->pContext->posix.pthread_cond_destroy)(&pEvent->posix.condition);
+    ((ma_pthread_mutex_destroy_proc)pEvent->pContext->posix.pthread_mutex_destroy)(&pEvent->posix.mutex);
+}
+
+static ma_bool32 ma_event_wait__posix(ma_event* pEvent)
+{
+    ((ma_pthread_mutex_lock_proc)pEvent->pContext->posix.pthread_mutex_lock)(&pEvent->posix.mutex);
+    {
+        /* Loop to guard against spurious condition-variable wakeups. */
+        while (pEvent->posix.value == 0) {
+            ((ma_pthread_cond_wait_proc)pEvent->pContext->posix.pthread_cond_wait)(&pEvent->posix.condition, &pEvent->posix.mutex);
+        }
+        pEvent->posix.value = 0;  /* Auto-reset. */
+    }
+    ((ma_pthread_mutex_unlock_proc)pEvent->pContext->posix.pthread_mutex_unlock)(&pEvent->posix.mutex);
+
+    return MA_TRUE;
+}
+
+static ma_bool32 ma_event_signal__posix(ma_event* pEvent)
+{
+    ((ma_pthread_mutex_lock_proc)pEvent->pContext->posix.pthread_mutex_lock)(&pEvent->posix.mutex);
+    {
+        pEvent->posix.value = 1;
+        ((ma_pthread_cond_signal_proc)pEvent->pContext->posix.pthread_cond_signal)(&pEvent->posix.condition);
+    }
+    ((ma_pthread_mutex_unlock_proc)pEvent->pContext->posix.pthread_mutex_unlock)(&pEvent->posix.mutex);
+
+    return MA_TRUE;
+}
+
+
+/* POSIX unnamed semaphore wrappers. */
+static ma_result ma_semaphore_init__posix(ma_context* pContext, int initialValue, ma_semaphore* pSemaphore)
+{
+    (void)pContext;
+
+#if defined(MA_APPLE)
+    /* Not yet implemented for Apple platforms since sem_init() is deprecated. Need to use a named semaphore via sem_open() instead. */
+    return MA_INVALID_OPERATION;
+#else
+    /* sem_init() returns -1 on failure. The previous check was inverted and reported an error on success. */
+    if (sem_init(&pSemaphore->posix.semaphore, 0, (unsigned int)initialValue) == -1) {
+        return ma_result_from_errno(errno);
+    }
+#endif
+
+    return MA_SUCCESS;
+}
+
+static void ma_semaphore_uninit__posix(ma_semaphore* pSemaphore)
+{
+    /* sem_destroy() is the counterpart to sem_init(); sem_close() is only for named semaphores opened with sem_open(). */
+    sem_destroy(&pSemaphore->posix.semaphore);
+}
+
+static ma_bool32 ma_semaphore_wait__posix(ma_semaphore* pSemaphore)
+{
+    return sem_wait(&pSemaphore->posix.semaphore) != -1;
+}
+
+static ma_bool32 ma_semaphore_release__posix(ma_semaphore* pSemaphore)
+{
+    return sem_post(&pSemaphore->posix.semaphore) != -1;
+}
+#endif
+
+/* Cross-platform thread creation. Stores the context on the thread object, then dispatches to the platform implementation. */
+static ma_result ma_thread_create(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData)
+{
+    if (pContext == NULL || pThread == NULL || entryProc == NULL) {
+        return MA_INVALID_ARGS;    /* Was MA_FALSE, which is 0 and therefore indistinguishable from MA_SUCCESS. */
+    }
+
+    pThread->pContext = pContext;
+
+#ifdef MA_WIN32
+    return ma_thread_create__win32(pContext, pThread, entryProc, pData);
+#endif
+#ifdef MA_POSIX
+    return ma_thread_create__posix(pContext, pThread, entryProc, pData);
+#endif
+}
+
+/* Blocks until the given thread terminates. No-op for NULL. */
+static void ma_thread_wait(ma_thread* pThread)
+{
+    if (pThread == NULL) {
+        return;
+    }
+
+#ifdef MA_WIN32
+    ma_thread_wait__win32(pThread);
+#endif
+#ifdef MA_POSIX
+    ma_thread_wait__posix(pThread);
+#endif
+}
+
+#if !defined(MA_EMSCRIPTEN)
+/* Sleeps for the given number of milliseconds. Not available on Emscripten. */
+static void ma_sleep(ma_uint32 milliseconds)
+{
+#ifdef MA_WIN32
+    ma_sleep__win32(milliseconds);
+#endif
+#ifdef MA_POSIX
+    ma_sleep__posix(milliseconds);
+#endif
+}
+#endif
+
+
+/*
+Cross-platform mutex API. Each function validates its arguments and forwards to the
+platform-specific implementation selected at compile time.
+*/
+ma_result ma_mutex_init(ma_context* pContext, ma_mutex* pMutex)
+{
+    if (pMutex == NULL || pContext == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    pMutex->pContext = pContext;
+
+#ifdef MA_WIN32
+    return ma_mutex_init__win32(pContext, pMutex);
+#endif
+#ifdef MA_POSIX
+    return ma_mutex_init__posix(pContext, pMutex);
+#endif
+}
+
+void ma_mutex_uninit(ma_mutex* pMutex)
+{
+    if (pMutex == NULL) {
+        return;
+    }
+    if (pMutex->pContext == NULL) {
+        return;    /* Never initialized. */
+    }
+
+#ifdef MA_WIN32
+    ma_mutex_uninit__win32(pMutex);
+#endif
+#ifdef MA_POSIX
+    ma_mutex_uninit__posix(pMutex);
+#endif
+}
+
+void ma_mutex_lock(ma_mutex* pMutex)
+{
+    if (pMutex == NULL) {
+        return;
+    }
+    if (pMutex->pContext == NULL) {
+        return;
+    }
+
+#ifdef MA_WIN32
+    ma_mutex_lock__win32(pMutex);
+#endif
+#ifdef MA_POSIX
+    ma_mutex_lock__posix(pMutex);
+#endif
+}
+
+void ma_mutex_unlock(ma_mutex* pMutex)
+{
+    if (pMutex == NULL) {
+        return;
+    }
+    if (pMutex->pContext == NULL) {
+        return;
+    }
+
+#ifdef MA_WIN32
+    ma_mutex_unlock__win32(pMutex);
+#endif
+#ifdef MA_POSIX
+    ma_mutex_unlock__posix(pMutex);
+#endif
+}
+
+
+/* Initializes an auto-reset event. */
+ma_result ma_event_init(ma_context* pContext, ma_event* pEvent)
+{
+    if (pContext == NULL || pEvent == NULL) {
+        return MA_INVALID_ARGS;    /* Was MA_FALSE, which is 0 and therefore indistinguishable from MA_SUCCESS. */
+    }
+
+    pEvent->pContext = pContext;
+
+#ifdef MA_WIN32
+    return ma_event_init__win32(pContext, pEvent);
+#endif
+#ifdef MA_POSIX
+    return ma_event_init__posix(pContext, pEvent);
+#endif
+}
+
+/* Destroys an event. No-op for NULL or uninitialized events. */
+void ma_event_uninit(ma_event* pEvent)
+{
+    if (pEvent == NULL || pEvent->pContext == NULL) {
+        return;
+    }
+
+#ifdef MA_WIN32
+    ma_event_uninit__win32(pEvent);
+#endif
+#ifdef MA_POSIX
+    ma_event_uninit__posix(pEvent);
+#endif
+}
+
+/* Blocks until the event is signaled, then resets it. Returns MA_FALSE for invalid arguments. */
+ma_bool32 ma_event_wait(ma_event* pEvent)
+{
+    if (pEvent == NULL || pEvent->pContext == NULL) {
+        return MA_FALSE;
+    }
+
+#ifdef MA_WIN32
+    return ma_event_wait__win32(pEvent);
+#endif
+#ifdef MA_POSIX
+    return ma_event_wait__posix(pEvent);
+#endif
+}
+
+/* Signals the event, waking one waiter. Returns MA_FALSE for invalid arguments. */
+ma_bool32 ma_event_signal(ma_event* pEvent)
+{
+    if (pEvent == NULL || pEvent->pContext == NULL) {
+        return MA_FALSE;
+    }
+
+#ifdef MA_WIN32
+    return ma_event_signal__win32(pEvent);
+#endif
+#ifdef MA_POSIX
+    return ma_event_signal__posix(pEvent);
+#endif
+}
+
+
+/* Cross-platform counting semaphore API. */
+ma_result ma_semaphore_init(ma_context* pContext, int initialValue, ma_semaphore* pSemaphore)
+{
+    if (pContext == NULL || pSemaphore == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+#ifdef MA_WIN32
+    return ma_semaphore_init__win32(pContext, initialValue, pSemaphore);
+#endif
+#ifdef MA_POSIX
+    return ma_semaphore_init__posix(pContext, initialValue, pSemaphore);
+#endif
+}
+
+void ma_semaphore_uninit(ma_semaphore* pSemaphore)
+{
+    if (pSemaphore == NULL) {
+        return;
+    }
+
+#ifdef MA_WIN32
+    ma_semaphore_uninit__win32(pSemaphore);
+#endif
+#ifdef MA_POSIX
+    ma_semaphore_uninit__posix(pSemaphore);
+#endif
+}
+
+/* Decrements the semaphore, blocking while the count is zero. */
+ma_bool32 ma_semaphore_wait(ma_semaphore* pSemaphore)
+{
+    if (pSemaphore == NULL) {
+        return MA_FALSE;
+    }
+
+#ifdef MA_WIN32
+    return ma_semaphore_wait__win32(pSemaphore);
+#endif
+#ifdef MA_POSIX
+    return ma_semaphore_wait__posix(pSemaphore);
+#endif
+}
+
+/* Increments the semaphore, potentially waking a waiter. */
+ma_bool32 ma_semaphore_release(ma_semaphore* pSemaphore)
+{
+    if (pSemaphore == NULL) {
+        return MA_FALSE;
+    }
+
+#ifdef MA_WIN32
+    return ma_semaphore_release__win32(pSemaphore);
+#endif
+#ifdef MA_POSIX
+    return ma_semaphore_release__posix(pSemaphore);
+#endif
+}
+
+
+#if 0
+/* NOTE: compiled out. Returns the standard sample rate closest to sampleRateIn, preferring an exact match. */
+ma_uint32 ma_get_closest_standard_sample_rate(ma_uint32 sampleRateIn)
+{
+    ma_uint32 closestRate = 0;
+    ma_uint32 closestDiff = 0xFFFFFFFF;
+    size_t iStandardRate;
+
+    for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) {
+        ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate];
+        ma_uint32 diff;
+
+        if (sampleRateIn > standardRate) {
+            diff = sampleRateIn - standardRate;
+        } else {
+            diff = standardRate - sampleRateIn;
+        }
+
+        if (diff == 0) {
+            return standardRate;  /* The input sample rate is a standard rate. */
+        }
+
+        if (closestDiff > diff) {
+            closestDiff = diff;
+            closestRate = standardRate;
+        }
+    }
+
+    return closestRate;
+}
+#endif
+
+/* Scales a buffer size by the given factor, clamping the result to a minimum of 1. */
+ma_uint32 ma_scale_buffer_size(ma_uint32 baseBufferSize, float scale)
+{
+    ma_uint32 scaledSize = (ma_uint32)(baseBufferSize*scale);
+    if (scaledSize < 1) {
+        scaledSize = 1;
+    }
+
+    return scaledSize;
+}
+
+/*
+Converts a buffer size in frames to milliseconds. The previous implementation divided by
+(sampleRate/1000) unconditionally, which is a division by zero for sample rates below 1000Hz.
+Such rates are now reported as 0 milliseconds rather than invoking undefined behavior.
+*/
+ma_uint32 ma_calculate_buffer_size_in_milliseconds_from_frames(ma_uint32 bufferSizeInFrames, ma_uint32 sampleRate)
+{
+    if (sampleRate < 1000) {
+        return 0;
+    }
+
+    return bufferSizeInFrames / (sampleRate/1000);
+}
+
+/* Converts a buffer size in milliseconds to frames. Note sampleRate/1000 truncates for non-multiple-of-1000 rates. */
+ma_uint32 ma_calculate_buffer_size_in_frames_from_milliseconds(ma_uint32 bufferSizeInMilliseconds, ma_uint32 sampleRate)
+{
+    ma_uint32 framesPerMillisecond = sampleRate/1000;
+    return framesPerMillisecond * bufferSizeInMilliseconds;
+}
+
+/* Zeroes out frameCount frames starting at p, i.e. frameCount * bytes-per-frame bytes. */
+void ma_zero_pcm_frames(void* p, ma_uint32 frameCount, ma_format format, ma_uint32 channels)
+{
+    MA_ZERO_MEMORY(p, frameCount * ma_get_bytes_per_frame(format, channels));
+}
+
+/* Clips each f32 sample in place. TODO: Research a branchless SSE implementation. */
+void ma_clip_samples_f32(float* p, ma_uint32 sampleCount)
+{
+    float* pRunning = p;
+    float* pEnd     = p + sampleCount;
+
+    while (pRunning < pEnd) {
+        *pRunning = ma_clip_f32(*pRunning);
+        pRunning += 1;
+    }
+}
+
+
+/* Copies u8 samples from pSamplesIn to pSamplesOut, scaling each by factor. In-place operation is supported. */
+void ma_copy_and_apply_volume_factor_u8(ma_uint8* pSamplesOut, const ma_uint8* pSamplesIn, ma_uint32 sampleCount, float factor)
+{
+    ma_uint32 i;
+
+    if (pSamplesIn == NULL || pSamplesOut == NULL) {
+        return;
+    }
+
+    for (i = 0; i < sampleCount; ++i) {
+        pSamplesOut[i] = (ma_uint8)(pSamplesIn[i] * factor);
+    }
+}
+
+/* Copies s16 samples from pSamplesIn to pSamplesOut, scaling each by factor. In-place operation is supported. */
+void ma_copy_and_apply_volume_factor_s16(ma_int16* pSamplesOut, const ma_int16* pSamplesIn, ma_uint32 sampleCount, float factor)
+{
+    ma_uint32 i;
+
+    if (pSamplesIn == NULL || pSamplesOut == NULL) {
+        return;
+    }
+
+    for (i = 0; i < sampleCount; ++i) {
+        pSamplesOut[i] = (ma_int16)(pSamplesIn[i] * factor);
+    }
+}
+
+/* Copies packed 24-bit samples from pSamplesIn to pSamplesOut, scaling each by factor. In-place operation is supported. */
+void ma_copy_and_apply_volume_factor_s24(void* pSamplesOut, const void* pSamplesIn, ma_uint32 sampleCount, float factor)
+{
+    ma_uint32 iSample;
+    ma_uint8* pSamplesOut8;
+    const ma_uint8* pSamplesIn8;
+
+    if (pSamplesOut == NULL || pSamplesIn == NULL) {
+        return;
+    }
+
+    pSamplesOut8 = (ma_uint8*)pSamplesOut;
+    pSamplesIn8  = (const ma_uint8*)pSamplesIn;    /* Was cast to a non-const pointer, silently discarding the const qualifier. */
+
+    for (iSample = 0; iSample < sampleCount; iSample += 1) {
+        ma_int32 sampleS32;
+
+        /* Unpack the little-endian 24-bit sample into the top 3 bytes of a 32-bit int so the sign is preserved. */
+        sampleS32 = (ma_int32)(((ma_uint32)(pSamplesIn8[iSample*3+0]) << 8) | ((ma_uint32)(pSamplesIn8[iSample*3+1]) << 16) | ((ma_uint32)(pSamplesIn8[iSample*3+2])) << 24);
+        sampleS32 = (ma_int32)(sampleS32 * factor);
+
+        pSamplesOut8[iSample*3+0] = (ma_uint8)(((ma_uint32)sampleS32 & 0x0000FF00) >>  8);
+        pSamplesOut8[iSample*3+1] = (ma_uint8)(((ma_uint32)sampleS32 & 0x00FF0000) >> 16);
+        pSamplesOut8[iSample*3+2] = (ma_uint8)(((ma_uint32)sampleS32 & 0xFF000000) >> 24);
+    }
+}
+
+/* Copies s32 samples from pSamplesIn to pSamplesOut, scaling each by factor. In-place operation is supported. */
+void ma_copy_and_apply_volume_factor_s32(ma_int32* pSamplesOut, const ma_int32* pSamplesIn, ma_uint32 sampleCount, float factor)
+{
+    ma_uint32 i;
+
+    if (pSamplesIn == NULL || pSamplesOut == NULL) {
+        return;
+    }
+
+    for (i = 0; i < sampleCount; ++i) {
+        pSamplesOut[i] = (ma_int32)(pSamplesIn[i] * factor);
+    }
+}
+
+/* Copies f32 samples from pSamplesIn to pSamplesOut, scaling each by factor. In-place operation is supported. */
+void ma_copy_and_apply_volume_factor_f32(float* pSamplesOut, const float* pSamplesIn, ma_uint32 sampleCount, float factor)
+{
+    ma_uint32 i;
+
+    if (pSamplesIn == NULL || pSamplesOut == NULL) {
+        return;
+    }
+
+    for (i = 0; i < sampleCount; ++i) {
+        pSamplesOut[i] = pSamplesIn[i] * factor;
+    }
+}
+
+/* In-place variants: each forwards to the copy-and-apply version with the same buffer for input and output. */
+void ma_apply_volume_factor_u8(ma_uint8* pSamples, ma_uint32 sampleCount, float factor)
+{
+    ma_copy_and_apply_volume_factor_u8(pSamples, pSamples, sampleCount, factor);
+}
+
+void ma_apply_volume_factor_s16(ma_int16* pSamples, ma_uint32 sampleCount, float factor)
+{
+    ma_copy_and_apply_volume_factor_s16(pSamples, pSamples, sampleCount, factor);
+}
+
+void ma_apply_volume_factor_s24(void* pSamples, ma_uint32 sampleCount, float factor)
+{
+    ma_copy_and_apply_volume_factor_s24(pSamples, pSamples, sampleCount, factor);
+}
+
+void ma_apply_volume_factor_s32(ma_int32* pSamples, ma_uint32 sampleCount, float factor)
+{
+    ma_copy_and_apply_volume_factor_s32(pSamples, pSamples, sampleCount, factor);
+}
+
+void ma_apply_volume_factor_f32(float* pSamples, ma_uint32 sampleCount, float factor)
+{
+    ma_copy_and_apply_volume_factor_f32(pSamples, pSamples, sampleCount, factor);
+}
+
/*
Frame-based wrappers around the sample-based volume scaling routines above. A PCM
frame holds one sample per channel, so the total sample count is frameCount*channels.
*/
void ma_copy_and_apply_volume_factor_pcm_frames_u8(ma_uint8* pPCMFramesOut, const ma_uint8* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor)
{
    ma_copy_and_apply_volume_factor_u8(pPCMFramesOut, pPCMFramesIn, frameCount*channels, factor);
}

void ma_copy_and_apply_volume_factor_pcm_frames_s16(ma_int16* pPCMFramesOut, const ma_int16* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor)
{
    ma_copy_and_apply_volume_factor_s16(pPCMFramesOut, pPCMFramesIn, frameCount*channels, factor);
}

/* s24 data is passed as void* because 3-byte samples have no native C type. */
void ma_copy_and_apply_volume_factor_pcm_frames_s24(void* pPCMFramesOut, const void* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor)
{
    ma_copy_and_apply_volume_factor_s24(pPCMFramesOut, pPCMFramesIn, frameCount*channels, factor);
}

void ma_copy_and_apply_volume_factor_pcm_frames_s32(ma_int32* pPCMFramesOut, const ma_int32* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor)
{
    ma_copy_and_apply_volume_factor_s32(pPCMFramesOut, pPCMFramesIn, frameCount*channels, factor);
}

void ma_copy_and_apply_volume_factor_pcm_frames_f32(float* pPCMFramesOut, const float* pPCMFramesIn, ma_uint32 frameCount, ma_uint32 channels, float factor)
{
    ma_copy_and_apply_volume_factor_f32(pPCMFramesOut, pPCMFramesIn, frameCount*channels, factor);
}
+
+void ma_copy_and_apply_volume_factor_pcm_frames(void* pPCMFramesOut, const void* pPCMFramesIn, ma_uint32 frameCount, ma_format format, ma_uint32 channels, float factor)
+{
+ switch (format)
+ {
+ case ma_format_u8: ma_copy_and_apply_volume_factor_pcm_frames_u8 ((ma_uint8*)pPCMFramesOut, (const ma_uint8*)pPCMFramesIn, frameCount, channels, factor); return;
+ case ma_format_s16: ma_copy_and_apply_volume_factor_pcm_frames_s16((ma_int16*)pPCMFramesOut, (const ma_int16*)pPCMFramesIn, frameCount, channels, factor); return;
+ case ma_format_s24: ma_copy_and_apply_volume_factor_pcm_frames_s24( pPCMFramesOut, pPCMFramesIn, frameCount, channels, factor); return;
+ case ma_format_s32: ma_copy_and_apply_volume_factor_pcm_frames_s32((ma_int32*)pPCMFramesOut, (const ma_int32*)pPCMFramesIn, frameCount, channels, factor); return;
+ case ma_format_f32: ma_copy_and_apply_volume_factor_pcm_frames_f32( (float*)pPCMFramesOut, (const float*)pPCMFramesIn, frameCount, channels, factor); return;
+ default: return; /* Do nothing. */
+ }
+}
+
/*
In-place, frame-based variants. Each forwards to the corresponding copy routine with
the same pointer used for both input and output.
*/
void ma_apply_volume_factor_pcm_frames_u8(ma_uint8* pPCMFrames, ma_uint32 frameCount, ma_uint32 channels, float factor)
{
    ma_copy_and_apply_volume_factor_pcm_frames_u8(pPCMFrames, pPCMFrames, frameCount, channels, factor);
}

void ma_apply_volume_factor_pcm_frames_s16(ma_int16* pPCMFrames, ma_uint32 frameCount, ma_uint32 channels, float factor)
{
    ma_copy_and_apply_volume_factor_pcm_frames_s16(pPCMFrames, pPCMFrames, frameCount, channels, factor);
}

void ma_apply_volume_factor_pcm_frames_s24(void* pPCMFrames, ma_uint32 frameCount, ma_uint32 channels, float factor)
{
    ma_copy_and_apply_volume_factor_pcm_frames_s24(pPCMFrames, pPCMFrames, frameCount, channels, factor);
}

void ma_apply_volume_factor_pcm_frames_s32(ma_int32* pPCMFrames, ma_uint32 frameCount, ma_uint32 channels, float factor)
{
    ma_copy_and_apply_volume_factor_pcm_frames_s32(pPCMFrames, pPCMFrames, frameCount, channels, factor);
}

void ma_apply_volume_factor_pcm_frames_f32(float* pPCMFrames, ma_uint32 frameCount, ma_uint32 channels, float factor)
{
    ma_copy_and_apply_volume_factor_pcm_frames_f32(pPCMFrames, pPCMFrames, frameCount, channels, factor);
}

/* Format-generic in-place variant. Unknown formats are silently ignored. */
void ma_apply_volume_factor_pcm_frames(void* pPCMFrames, ma_uint32 frameCount, ma_format format, ma_uint32 channels, float factor)
{
    ma_copy_and_apply_volume_factor_pcm_frames(pPCMFrames, pPCMFrames, frameCount, format, channels, factor);
}
+
+
/* Converts a linear amplitude factor to gain in decibels: dB = 20*log10(factor). */
float ma_factor_to_gain_db(float factor)
{
    return (float)(20*ma_log10f(factor));
}
+
/* Converts gain in decibels to a linear amplitude factor: factor = 10^(dB/20). */
float ma_gain_db_to_factor(float gain)
{
    return (float)ma_powf(10, gain/20.0f);
}
+
+
/*
Central wrapper around the user's data callback (pDevice->onData). Applies the
master volume factor to capture input before the callback and to playback output
after it, optionally pre-zeroes the output buffer, and clips f32 playback output.

pFramesOut is the playback buffer (NULL for capture-only), pFramesIn the capture
buffer (NULL for playback-only). Buffer sizes are derived from the device's
capture/playback format and channel count, so the buffers are assumed to be in
those formats.
*/
static void ma_device__on_data(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount)
{
    float masterVolumeFactor;

    masterVolumeFactor = pDevice->masterVolumeFactor;

    if (pDevice->onData) {
        /* Zero the output up front unless the client opted out via noPreZeroedOutputBuffer. */
        if (!pDevice->noPreZeroedOutputBuffer && pFramesOut != NULL) {
            ma_zero_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels);
        }

        /* Volume control of input makes things a bit awkward because the input buffer is read-only. We'll need to use a temp buffer and loop in this case. */
        if (pFramesIn != NULL && masterVolumeFactor < 1) {
            ma_uint8 tempFramesIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
            ma_uint32 bpfCapture  = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
            ma_uint32 bpfPlayback = ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
            ma_uint32 totalFramesProcessed = 0;
            while (totalFramesProcessed < frameCount) {
                /* Process in chunks no larger than the stack-allocated temp buffer. */
                ma_uint32 framesToProcessThisIteration = frameCount - totalFramesProcessed;
                if (framesToProcessThisIteration > sizeof(tempFramesIn)/bpfCapture) {
                    framesToProcessThisIteration = sizeof(tempFramesIn)/bpfCapture;
                }

                ma_copy_and_apply_volume_factor_pcm_frames(tempFramesIn, ma_offset_ptr(pFramesIn, totalFramesProcessed*bpfCapture), framesToProcessThisIteration, pDevice->capture.format, pDevice->capture.channels, masterVolumeFactor);

                pDevice->onData(pDevice, ma_offset_ptr(pFramesOut, totalFramesProcessed*bpfPlayback), tempFramesIn, framesToProcessThisIteration);

                totalFramesProcessed += framesToProcessThisIteration;
            }
        } else {
            pDevice->onData(pDevice, pFramesOut, pFramesIn, frameCount);
        }

        /* Volume control and clipping for playback devices. */
        if (pFramesOut != NULL) {
            if (masterVolumeFactor < 1) {
                if (pFramesIn == NULL) {   /* <-- In full-duplex situations, the volume will have been applied to the input samples before the data callback. Applying it again post-callback will incorrectly compound it. */
                    ma_apply_volume_factor_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels, masterVolumeFactor);
                }
            }

            /* Only f32 output is clipped; other formats are left untouched here. */
            if (!pDevice->noClip && pDevice->playback.format == ma_format_f32) {
                ma_clip_pcm_frames_f32((float*)pFramesOut, frameCount, pDevice->playback.channels);
            }
        }
    }
}
+
+
+
+/* A helper function for reading sample data from the client. */
/*
A helper function for reading sample data from the client.

Fills pFramesOut with frameCount frames for the device. When the playback converter
is a passthrough the client callback writes straight into pFramesOut; otherwise the
client is asked for data in its own format (pDevice->playback.format/channels) via a
stack intermediary buffer which is then converted to the device's internal format
(the output pointer is advanced using internalFormat/internalChannels frame sizes).
*/
static void ma_device__read_frames_from_client(ma_device* pDevice, ma_uint32 frameCount, void* pFramesOut)
{
    MA_ASSERT(pDevice != NULL);
    MA_ASSERT(frameCount > 0);
    MA_ASSERT(pFramesOut != NULL);

    if (pDevice->playback.converter.isPassthrough) {
        ma_device__on_data(pDevice, pFramesOut, NULL, frameCount);
    } else {
        ma_result result;
        ma_uint64 totalFramesReadOut;
        ma_uint64 totalFramesReadIn;
        void* pRunningFramesOut;

        totalFramesReadOut = 0;
        totalFramesReadIn  = 0;
        pRunningFramesOut  = pFramesOut;

        while (totalFramesReadOut < frameCount) {
            ma_uint8 pIntermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];  /* In client format. */
            ma_uint64 intermediaryBufferCap = sizeof(pIntermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
            ma_uint64 framesToReadThisIterationIn;
            ma_uint64 framesReadThisIterationIn;
            ma_uint64 framesToReadThisIterationOut;
            ma_uint64 framesReadThisIterationOut;
            ma_uint64 requiredInputFrameCount;

            framesToReadThisIterationOut = (frameCount - totalFramesReadOut);
            framesToReadThisIterationIn = framesToReadThisIterationOut;
            if (framesToReadThisIterationIn > intermediaryBufferCap) {
                framesToReadThisIterationIn = intermediaryBufferCap;
            }

            /* NOTE(review): this is computed from the total frameCount rather than the frames
               remaining this iteration, so it only acts as a loose upper bound — confirm
               against upstream whether the remaining count was intended. */
            requiredInputFrameCount = ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, frameCount);
            if (framesToReadThisIterationIn > requiredInputFrameCount) {
                framesToReadThisIterationIn = requiredInputFrameCount;
            }

            if (framesToReadThisIterationIn > 0) {
                ma_device__on_data(pDevice, pIntermediaryBuffer, NULL, (ma_uint32)framesToReadThisIterationIn);
                totalFramesReadIn += framesToReadThisIterationIn;
            }

            /*
            At this point we have our decoded data in input format and now we need to convert to output format. Note that even if we didn't read any
            input frames, we still want to try processing frames because there may some output frames generated from cached input data.
            */
            framesReadThisIterationIn  = framesToReadThisIterationIn;
            framesReadThisIterationOut = framesToReadThisIterationOut;
            result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pIntermediaryBuffer, &framesReadThisIterationIn, pRunningFramesOut, &framesReadThisIterationOut);
            if (result != MA_SUCCESS) {
                break;
            }

            totalFramesReadOut += framesReadThisIterationOut;
            pRunningFramesOut   = ma_offset_ptr(pRunningFramesOut, framesReadThisIterationOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));

            if (framesReadThisIterationIn == 0 && framesReadThisIterationOut == 0) {
                break;  /* We're done. */
            }
        }
    }
}
+
+/* A helper for sending sample data to the client. */
+static void ma_device__send_frames_to_client(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat)
+{
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(frameCountInDeviceFormat > 0);
+ MA_ASSERT(pFramesInDeviceFormat != NULL);
+
+ if (pDevice->capture.converter.isPassthrough) {
+ ma_device__on_data(pDevice, NULL, pFramesInDeviceFormat, frameCountInDeviceFormat);
+ } else {
+ ma_result result;
+ ma_uint8 pFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint64 framesInClientFormatCap = sizeof(pFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+ ma_uint64 totalDeviceFramesProcessed = 0;
+ ma_uint64 totalClientFramesProcessed = 0;
+ const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat;
+
+ /* We just keep going until we've exhaused all of our input frames and cannot generate any more output frames. */
+ for (;;) {
+ ma_uint64 deviceFramesProcessedThisIteration;
+ ma_uint64 clientFramesProcessedThisIteration;
+
+ deviceFramesProcessedThisIteration = (frameCountInDeviceFormat - totalDeviceFramesProcessed);
+ clientFramesProcessedThisIteration = framesInClientFormatCap;
+
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &deviceFramesProcessedThisIteration, pFramesInClientFormat, &clientFramesProcessedThisIteration);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ if (clientFramesProcessedThisIteration > 0) {
+ ma_device__on_data(pDevice, NULL, pFramesInClientFormat, (ma_uint32)clientFramesProcessedThisIteration); /* Safe cast. */
+ }
+
+ pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, deviceFramesProcessedThisIteration * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+ totalDeviceFramesProcessed += deviceFramesProcessedThisIteration;
+ totalClientFramesProcessed += clientFramesProcessedThisIteration;
+
+ if (deviceFramesProcessedThisIteration == 0 && clientFramesProcessedThisIteration == 0) {
+ break; /* We're done. */
+ }
+ }
+ }
+}
+
+
+/* We only want to expose ma_device__handle_duplex_callback_capture() and ma_device__handle_duplex_callback_playback() if we have an asynchronous backend enabled. */
+#if defined(MA_HAS_JACK) || \
+ defined(MA_HAS_COREAUDIO) || \
+ defined(MA_HAS_AAUDIO) || \
+ defined(MA_HAS_OPENSL) || \
+ defined(MA_HAS_WEBAUDIO)
+static ma_result ma_device__handle_duplex_callback_capture(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat, ma_pcm_rb* pRB)
+{
+ ma_result result;
+ ma_uint32 totalDeviceFramesProcessed = 0;
+ const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat;
+
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(frameCountInDeviceFormat > 0);
+ MA_ASSERT(pFramesInDeviceFormat != NULL);
+ MA_ASSERT(pRB != NULL);
+
+ /* Write to the ring buffer. The ring buffer is in the client format which means we need to convert. */
+ for (;;) {
+ ma_uint32 framesToProcessInDeviceFormat = (frameCountInDeviceFormat - totalDeviceFramesProcessed);
+ ma_uint32 framesToProcessInClientFormat = MA_DATA_CONVERTER_STACK_BUFFER_SIZE / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+ ma_uint64 framesProcessedInDeviceFormat;
+ ma_uint64 framesProcessedInClientFormat;
+ void* pFramesInClientFormat;
+
+ result = ma_pcm_rb_acquire_write(pRB, &framesToProcessInClientFormat, &pFramesInClientFormat);
+ if (result != MA_SUCCESS) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to acquire capture PCM frames from ring buffer.", result);
+ break;
+ }
+
+ if (framesToProcessInClientFormat == 0) {
+ if (ma_pcm_rb_pointer_distance(pRB) == (ma_int32)ma_pcm_rb_get_subbuffer_size(pRB)) {
+ break; /* Overrun. Not enough room in the ring buffer for input frame. Excess frames are dropped. */
+ }
+ }
+
+ /* Convert. */
+ framesProcessedInDeviceFormat = framesToProcessInDeviceFormat;
+ framesProcessedInClientFormat = framesToProcessInClientFormat;
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &framesProcessedInDeviceFormat, pFramesInClientFormat, &framesProcessedInClientFormat);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ result = ma_pcm_rb_commit_write(pRB, (ma_uint32)framesProcessedInDeviceFormat, pFramesInClientFormat); /* Safe cast. */
+ if (result != MA_SUCCESS) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to commit capture PCM frames to ring buffer.", result);
+ break;
+ }
+
+ pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, framesProcessedInDeviceFormat * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+ totalDeviceFramesProcessed += (ma_uint32)framesProcessedInDeviceFormat; /* Safe cast. */
+
+ /* We're done when we're unable to process any client nor device frames. */
+ if (framesProcessedInClientFormat == 0 && framesProcessedInDeviceFormat == 0) {
+ break; /* Done. */
+ }
+ }
+
+ return MA_SUCCESS;
+}
+
/*
Duplex playback path for asynchronous backends: reads captured client-format frames
from the duplex ring buffer, feeds them to the client callback together with a
playback output buffer, then converts the client's playback output to the device's
internal format and writes it into pFramesInInternalFormat. When the ring buffer has
no data available, silence is substituted for the input.
*/
static ma_result ma_device__handle_duplex_callback_playback(ma_device* pDevice, ma_uint32 frameCount, void* pFramesInInternalFormat, ma_pcm_rb* pRB)
{
    ma_result result;
    ma_uint8 playbackFramesInExternalFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
    ma_uint8 silentInputFrames[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
    ma_uint32 totalFramesToReadFromClient;
    ma_uint32 totalFramesReadFromClient;
    ma_uint32 totalFramesReadOut = 0;

    MA_ASSERT(pDevice != NULL);
    MA_ASSERT(frameCount > 0);
    MA_ASSERT(pFramesInInternalFormat != NULL);
    MA_ASSERT(pRB != NULL);

    /*
    Sitting in the ring buffer should be captured data from the capture callback in external format. If there's not enough data in there for
    the whole frameCount frames we just use silence instead for the input data.
    */
    MA_ZERO_MEMORY(silentInputFrames, sizeof(silentInputFrames));

    /* We need to calculate how many output frames are required to be read from the client to completely fill frameCount internal frames. */
    totalFramesToReadFromClient = (ma_uint32)ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, frameCount);
    totalFramesReadFromClient = 0;
    while (totalFramesReadFromClient < totalFramesToReadFromClient && ma_device_is_started(pDevice)) {
        ma_uint32 framesRemainingFromClient;
        ma_uint32 framesToProcessFromClient;
        ma_uint32 inputFrameCount;
        void* pInputFrames;

        /* Cap each chunk to what fits in the stack buffer in client format. */
        framesRemainingFromClient = (totalFramesToReadFromClient - totalFramesReadFromClient);
        framesToProcessFromClient = sizeof(playbackFramesInExternalFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
        if (framesToProcessFromClient > framesRemainingFromClient) {
            framesToProcessFromClient = framesRemainingFromClient;
        }

        /* We need to grab captured samples before firing the callback. If there's not enough input samples we just pass silence. */
        inputFrameCount = framesToProcessFromClient;
        result = ma_pcm_rb_acquire_read(pRB, &inputFrameCount, &pInputFrames);
        if (result == MA_SUCCESS) {
            if (inputFrameCount > 0) {
                /* Use actual input frames. */
                ma_device__on_data(pDevice, playbackFramesInExternalFormat, pInputFrames, inputFrameCount);
            } else {
                if (ma_pcm_rb_pointer_distance(pRB) == 0) {
                    break;  /* Underrun. */
                }
            }

            /* We're done with the captured samples. */
            result = ma_pcm_rb_commit_read(pRB, inputFrameCount, pInputFrames);
            if (result != MA_SUCCESS) {
                break; /* Don't know what to do here... Just abandon ship. */
            }
        } else {
            /* Use silent input frames. */
            inputFrameCount = ma_min(
                sizeof(playbackFramesInExternalFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels),
                sizeof(silentInputFrames)              / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)
            );

            ma_device__on_data(pDevice, playbackFramesInExternalFormat, silentInputFrames, inputFrameCount);
        }

        /* We have samples in external format so now we need to convert to internal format and output to the device. */
        {
            ma_uint64 framesConvertedIn  = inputFrameCount;
            ma_uint64 framesConvertedOut = (frameCount - totalFramesReadOut);
            ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackFramesInExternalFormat, &framesConvertedIn, pFramesInInternalFormat, &framesConvertedOut);

            totalFramesReadFromClient += (ma_uint32)framesConvertedIn;  /* Safe cast. */
            totalFramesReadOut        += (ma_uint32)framesConvertedOut; /* Safe cast. */
            pFramesInInternalFormat    = ma_offset_ptr(pFramesInInternalFormat, framesConvertedOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
        }
    }

    return MA_SUCCESS;
}
+#endif /* Asynchronous backends. */
+
+/* A helper for changing the state of the device. */
+static MA_INLINE void ma_device__set_state(ma_device* pDevice, ma_uint32 newState)
+{
+ ma_atomic_exchange_32(&pDevice->state, newState);
+}
+
+/* A helper for getting the state of the device. */
+static MA_INLINE ma_uint32 ma_device__get_state(ma_device* pDevice)
+{
+ return pDevice->state;
+}
+
+
#ifdef MA_WIN32
    /* Local copies of the Windows KSDATAFORMAT_SUBTYPE_* wave format GUIDs so we don't depend on the platform headers defining them. */
    GUID MA_GUID_KSDATAFORMAT_SUBTYPE_PCM        = {0x00000001, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
    GUID MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT = {0x00000003, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
    /*GUID MA_GUID_KSDATAFORMAT_SUBTYPE_ALAW       = {0x00000006, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/
    /*GUID MA_GUID_KSDATAFORMAT_SUBTYPE_MULAW      = {0x00000007, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/
#endif
+
+
/* State shared with the enumeration callback used by ma_context__try_get_device_name_by_id(). */
typedef struct
{
    ma_device_type deviceType;      /* Type of device being searched for (playback/capture). */
    const ma_device_id* pDeviceID;  /* The ID to match against enumerated devices. */
    char* pName;                    /* Output buffer for the matched device's name. */
    size_t nameBufferSize;          /* Capacity of pName in bytes. */
    ma_bool32 foundDevice;          /* Set to MA_TRUE once a matching device has been found. */
} ma_context__try_get_device_name_by_id__enum_callback_data;
+
+static ma_bool32 ma_context__try_get_device_name_by_id__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData)
+{
+ ma_context__try_get_device_name_by_id__enum_callback_data* pData = (ma_context__try_get_device_name_by_id__enum_callback_data*)pUserData;
+ MA_ASSERT(pData != NULL);
+
+ if (pData->deviceType == deviceType) {
+ if (pContext->onDeviceIDEqual(pContext, pData->pDeviceID, &pDeviceInfo->id)) {
+ ma_strncpy_s(pData->pName, pData->nameBufferSize, pDeviceInfo->name, (size_t)-1);
+ pData->foundDevice = MA_TRUE;
+ }
+ }
+
+ return !pData->foundDevice;
+}
+
+/*
+Generic function for retrieving the name of a device by it's ID.
+
+This function simply enumerates every device and then retrieves the name of the first device that has the same ID.
+*/
+static ma_result ma_context__try_get_device_name_by_id(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, char* pName, size_t nameBufferSize)
+{
+ ma_result result;
+ ma_context__try_get_device_name_by_id__enum_callback_data data;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pName != NULL);
+
+ if (pDeviceID == NULL) {
+ return MA_NO_DEVICE;
+ }
+
+ data.deviceType = deviceType;
+ data.pDeviceID = pDeviceID;
+ data.pName = pName;
+ data.nameBufferSize = nameBufferSize;
+ data.foundDevice = MA_FALSE;
+ result = ma_context_enumerate_devices(pContext, ma_context__try_get_device_name_by_id__enum_callback, &data);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ if (!data.foundDevice) {
+ return MA_NO_DEVICE;
+ } else {
+ return MA_SUCCESS;
+ }
+}
+
+
+ma_uint32 ma_get_format_priority_index(ma_format format) /* Lower = better. */
+{
+ ma_uint32 i;
+ for (i = 0; i < ma_countof(g_maFormatPriorities); ++i) {
+ if (g_maFormatPriorities[i] == format) {
+ return i;
+ }
+ }
+
+ /* Getting here means the format could not be found or is equal to ma_format_unknown. */
+ return (ma_uint32)-1;
+}
+
+static ma_result ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType);
+
+
+/*******************************************************************************
+
+Null Backend
+
+*******************************************************************************/
+#ifdef MA_HAS_NULL
+
/* Operation codes posted to the null backend's worker thread (see ma_device_thread__null / ma_device_do_operation__null). */
#define MA_DEVICE_OP_NONE__NULL    0
#define MA_DEVICE_OP_START__NULL   1
#define MA_DEVICE_OP_SUSPEND__NULL 2
#define MA_DEVICE_OP_KILL__NULL    3
+
/*
Worker thread for the null backend. Sleeps on operationEvent until an operation code
(MA_DEVICE_OP_*__NULL) is posted via ma_device_do_operation__null(), handles it,
publishes the result in operationResult and signals operationCompletionEvent. The
thread only exits on MA_DEVICE_OP_KILL__NULL.
*/
static ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData)
{
    ma_device* pDevice = (ma_device*)pData;
    MA_ASSERT(pDevice != NULL);

    for (;;) {  /* Keep the thread alive until the device is uninitialized. */
        /* Wait for an operation to be requested. */
        ma_event_wait(&pDevice->null_device.operationEvent);

        /* At this point an event should have been triggered. */

        /* Starting the device needs to put the thread into a loop. */
        if (pDevice->null_device.operation == MA_DEVICE_OP_START__NULL) {
            ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);

            /* Reset the timer just in case. */
            ma_timer_init(&pDevice->null_device.timer);

            /* Keep looping until an operation has been requested. */
            /* NOTE(review): operation was just exchanged to MA_DEVICE_OP_NONE__NULL above, so
               this condition is false on the first check and the loop exits immediately (unless
               another thread posts an operation in between). The comment and the one below
               suggest a blocking wait may have been intended — confirm against upstream. */
            while (pDevice->null_device.operation != MA_DEVICE_OP_NONE__NULL && pDevice->null_device.operation != MA_DEVICE_OP_START__NULL) {
                ma_sleep(10); /* Don't hog the CPU. */
            }

            /* Getting here means a suspend or kill operation has been requested. */
            ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
            ma_event_signal(&pDevice->null_device.operationCompletionEvent);
            continue;
        }

        /* Suspending the device means we need to stop the timer and just continue the loop. */
        if (pDevice->null_device.operation == MA_DEVICE_OP_SUSPEND__NULL) {
            ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);

            /* We need to add the current run time to the prior run time, then reset the timer. */
            pDevice->null_device.priorRunTime += ma_timer_get_time_in_seconds(&pDevice->null_device.timer);
            ma_timer_init(&pDevice->null_device.timer);

            /* We're done. */
            ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
            ma_event_signal(&pDevice->null_device.operationCompletionEvent);
            continue;
        }

        /* Killing the device means we need to get out of this loop so that this thread can terminate. */
        if (pDevice->null_device.operation == MA_DEVICE_OP_KILL__NULL) {
            ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);
            ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
            ma_event_signal(&pDevice->null_device.operationCompletionEvent);
            break;
        }

        /* Getting a signal on a "none" operation probably means an error. Return invalid operation. */
        if (pDevice->null_device.operation == MA_DEVICE_OP_NONE__NULL) {
            MA_ASSERT(MA_FALSE);  /* <-- Trigger this in debug mode to ensure developers are aware they're doing something wrong (or there's a bug in a miniaudio). */
            ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_INVALID_OPERATION);
            ma_event_signal(&pDevice->null_device.operationCompletionEvent);
            continue;   /* Continue the loop. Don't terminate. */
        }
    }

    return (ma_thread_result)0;
}
+
/*
Posts an operation code to the null backend's worker thread and blocks until the
thread signals completion. Returns the result published by the worker thread, or
MA_ERROR if signalling/waiting on the events fails.
*/
static ma_result ma_device_do_operation__null(ma_device* pDevice, ma_uint32 operation)
{
    ma_atomic_exchange_32(&pDevice->null_device.operation, operation);
    if (!ma_event_signal(&pDevice->null_device.operationEvent)) {
        return MA_ERROR;
    }

    if (!ma_event_wait(&pDevice->null_device.operationCompletionEvent)) {
        return MA_ERROR;
    }

    return pDevice->null_device.operationResult;
}
+
+static ma_uint64 ma_device_get_total_run_time_in_frames__null(ma_device* pDevice)
+{
+ ma_uint32 internalSampleRate;
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ internalSampleRate = pDevice->capture.internalSampleRate;
+ } else {
+ internalSampleRate = pDevice->playback.internalSampleRate;
+ }
+
+
+ return (ma_uint64)((pDevice->null_device.priorRunTime + ma_timer_get_time_in_seconds(&pDevice->null_device.timer)) * internalSampleRate);
+}
+
+static ma_bool32 ma_context_is_device_id_equal__null(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext;
+
+ return pID0->nullbackend == pID1->nullbackend;
+}
+
+static ma_result ma_context_enumerate_devices__null(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ ma_bool32 cbResult = MA_TRUE;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
+
+ /* Playback. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Playback Device", (size_t)-1);
+ cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ }
+
+ /* Capture. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Capture Device", (size_t)-1);
+ cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_get_device_info__null(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+{
+ ma_uint32 iFormat;
+
+ MA_ASSERT(pContext != NULL);
+
+ if (pDeviceID != NULL && pDeviceID->nullbackend != 0) {
+ return MA_NO_DEVICE; /* Don't know the device. */
+ }
+
+ /* Name / Description */
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Playback Device", (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Capture Device", (size_t)-1);
+ }
+
+ /* Support everything on the null backend. */
+ pDeviceInfo->formatCount = ma_format_count - 1; /* Minus one because we don't want to include ma_format_unknown. */
+ for (iFormat = 0; iFormat < pDeviceInfo->formatCount; ++iFormat) {
+ pDeviceInfo->formats[iFormat] = (ma_format)(iFormat + 1); /* +1 to skip over ma_format_unknown. */
+ }
+
+ pDeviceInfo->minChannels = 1;
+ pDeviceInfo->maxChannels = MA_MAX_CHANNELS;
+ pDeviceInfo->minSampleRate = MA_SAMPLE_RATE_8000;
+ pDeviceInfo->maxSampleRate = MA_SAMPLE_RATE_384000;
+
+ (void)pContext;
+ (void)shareMode;
+ return MA_SUCCESS;
+}
+
+
/*
Uninitializes a null-backend device: asks the worker thread to terminate, waits for
it to acknowledge, then destroys the synchronization events.
*/
static void ma_device_uninit__null(ma_device* pDevice)
{
    MA_ASSERT(pDevice != NULL);

    /* Keep it clean and wait for the device thread to finish before returning. */
    ma_device_do_operation__null(pDevice, MA_DEVICE_OP_KILL__NULL);

    /* At this point the loop in the device thread is as good as terminated so we can uninitialize our events. */
    ma_event_uninit(&pDevice->null_device.operationCompletionEvent);
    ma_event_uninit(&pDevice->null_device.operationEvent);
}
+
+static ma_result ma_device_init__null(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+{
+ ma_result result;
+ ma_uint32 periodSizeInFrames;
+
+ MA_ASSERT(pDevice != NULL);
+
+ MA_ZERO_OBJECT(&pDevice->null_device);
+
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
+ }
+
+ periodSizeInFrames = pConfig->periodSizeInFrames;
+ if (periodSizeInFrames == 0) {
+ periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pConfig->sampleRate);
+ }
+
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), "NULL Capture Device", (size_t)-1);
+ pDevice->capture.internalFormat = pConfig->capture.format;
+ pDevice->capture.internalChannels = pConfig->capture.channels;
+ ma_channel_map_copy(pDevice->capture.internalChannelMap, pConfig->capture.channelMap, pConfig->capture.channels);
+ pDevice->capture.internalPeriodSizeInFrames = periodSizeInFrames;
+ pDevice->capture.internalPeriods = pConfig->periods;
+ }
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), "NULL Playback Device", (size_t)-1);
+ pDevice->playback.internalFormat = pConfig->playback.format;
+ pDevice->playback.internalChannels = pConfig->playback.channels;
+ ma_channel_map_copy(pDevice->playback.internalChannelMap, pConfig->playback.channelMap, pConfig->playback.channels);
+ pDevice->playback.internalPeriodSizeInFrames = periodSizeInFrames;
+ pDevice->playback.internalPeriods = pConfig->periods;
+ }
+
+ /*
+ In order to get timing right, we need to create a thread that does nothing but keeps track of the timer. This timer is started when the
+ first period is "written" to it, and then stopped in ma_device_stop__null().
+ */
+ result = ma_event_init(pContext, &pDevice->null_device.operationEvent);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ result = ma_event_init(pContext, &pDevice->null_device.operationCompletionEvent);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ result = ma_thread_create(pContext, &pDevice->thread, ma_device_thread__null, pDevice);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ return MA_SUCCESS;
+}
+
/* Starts the null device by posting MA_DEVICE_OP_START__NULL to the worker thread and flagging the device as started. */
static ma_result ma_device_start__null(ma_device* pDevice)
{
    MA_ASSERT(pDevice != NULL);

    ma_device_do_operation__null(pDevice, MA_DEVICE_OP_START__NULL);

    ma_atomic_exchange_32(&pDevice->null_device.isStarted, MA_TRUE);
    return MA_SUCCESS;
}
+
/* Stops the null device by posting MA_DEVICE_OP_SUSPEND__NULL (which accumulates run time and resets the timer) and clearing the started flag. */
static ma_result ma_device_stop__null(ma_device* pDevice)
{
    MA_ASSERT(pDevice != NULL);

    ma_device_do_operation__null(pDevice, MA_DEVICE_OP_SUSPEND__NULL);

    ma_atomic_exchange_32(&pDevice->null_device.isStarted, MA_FALSE);
    return MA_SUCCESS;
}
+
/*
"Writes" frames to the null playback device. The sample data itself is discarded;
the function only simulates real-time pacing by consuming period-sized chunks and
sleeping until the wall-clock timer (see ma_device_get_total_run_time_in_frames__null)
catches up to the next period boundary. The device is auto-started on the first
consumed period if it wasn't started when this call was entered.
*/
static ma_result ma_device_write__null(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
{
    ma_result result = MA_SUCCESS;
    ma_uint32 totalPCMFramesProcessed;
    ma_bool32 wasStartedOnEntry;

    if (pFramesWritten != NULL) {
        *pFramesWritten = 0;
    }

    wasStartedOnEntry = pDevice->null_device.isStarted;

    /* Keep going until everything has been read. */
    totalPCMFramesProcessed = 0;
    while (totalPCMFramesProcessed < frameCount) {
        ma_uint64 targetFrame;

        /* If there are any frames remaining in the current period, consume those first. */
        if (pDevice->null_device.currentPeriodFramesRemainingPlayback > 0) {
            ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed);
            ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingPlayback;
            if (framesToProcess > framesRemaining) {
                framesToProcess = framesRemaining;
            }

            /* We don't actually do anything with pPCMFrames, so just mark it as unused to prevent a warning. */
            (void)pPCMFrames;

            pDevice->null_device.currentPeriodFramesRemainingPlayback -= framesToProcess;
            totalPCMFramesProcessed += framesToProcess;
        }

        /* If we've consumed the current period we'll need to mark it as such an ensure the device is started if it's not already. */
        if (pDevice->null_device.currentPeriodFramesRemainingPlayback == 0) {
            pDevice->null_device.currentPeriodFramesRemainingPlayback = 0;  /* Already zero here; redundant assignment kept as-is. */

            /* Auto-start on first write, but only if the caller didn't start the device themselves before calling. */
            if (!pDevice->null_device.isStarted && !wasStartedOnEntry) {
                result = ma_device_start__null(pDevice);
                if (result != MA_SUCCESS) {
                    break;
                }
            }
        }

        /* If we've consumed the whole buffer we can return now. */
        MA_ASSERT(totalPCMFramesProcessed <= frameCount);
        if (totalPCMFramesProcessed == frameCount) {
            break;
        }

        /* Getting here means we've still got more frames to consume, we but need to wait for it to become available. */
        targetFrame = pDevice->null_device.lastProcessedFramePlayback;
        for (;;) {
            ma_uint64 currentFrame;

            /* Stop waiting if the device has been stopped. */
            if (!pDevice->null_device.isStarted) {
                break;
            }

            currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice);
            if (currentFrame >= targetFrame) {
                break;  /* Caught up to the period boundary. */
            }

            /* Getting here means we haven't yet reached the target sample, so continue waiting. */
            ma_sleep(10);
        }

        /* Advance to the next period. */
        pDevice->null_device.lastProcessedFramePlayback          += pDevice->playback.internalPeriodSizeInFrames;
        pDevice->null_device.currentPeriodFramesRemainingPlayback = pDevice->playback.internalPeriodSizeInFrames;
    }

    if (pFramesWritten != NULL) {
        *pFramesWritten = totalPCMFramesProcessed;
    }

    return result;
}
+
+static ma_result ma_device_read__null(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) /* Simulates a capture read: fills the buffer with silence, paced against the device's virtual timer. */
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint32 totalPCMFramesProcessed;
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = 0;
+    }
+
+    /* Keep going until everything has been read. */
+    totalPCMFramesProcessed = 0;
+    while (totalPCMFramesProcessed < frameCount) {
+        ma_uint64 targetFrame;
+
+        /* If there are any frames remaining in the current period, consume those first. */
+        if (pDevice->null_device.currentPeriodFramesRemainingCapture > 0) {
+            ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+            ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed);
+            ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingCapture;
+            if (framesToProcess > framesRemaining) {
+                framesToProcess = framesRemaining;
+            }
+
+            /* We need to ensure the output buffer is zeroed (the null device captures silence). */
+            MA_ZERO_MEMORY(ma_offset_ptr(pPCMFrames, totalPCMFramesProcessed*bpf), framesToProcess*bpf);
+
+            pDevice->null_device.currentPeriodFramesRemainingCapture -= framesToProcess;
+            totalPCMFramesProcessed += framesToProcess;
+        }
+
+        /* If we've consumed the current period we'll need to mark it as such and ensure the device is started if it's not already. */
+        if (pDevice->null_device.currentPeriodFramesRemainingCapture == 0) {
+            pDevice->null_device.currentPeriodFramesRemainingCapture = 0; /* NOTE(review): redundant assignment — the counter is already 0 on this path. */
+        }
+
+        /* If we've consumed the whole buffer we can return now. */
+        MA_ASSERT(totalPCMFramesProcessed <= frameCount);
+        if (totalPCMFramesProcessed == frameCount) {
+            break;
+        }
+
+        /* Getting here means we've still got more frames to consume, but we need to wait for more to become available. */
+        targetFrame = pDevice->null_device.lastProcessedFrameCapture + pDevice->capture.internalPeriodSizeInFrames;
+        for (;;) {
+            ma_uint64 currentFrame;
+
+            /* Stop waiting if the device has been stopped. */
+            if (!pDevice->null_device.isStarted) {
+                break;
+            }
+
+            currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice);
+            if (currentFrame >= targetFrame) {
+                break;
+            }
+
+            /* Getting here means we haven't yet reached the target sample, so continue waiting. */
+            ma_sleep(10);
+        }
+
+        pDevice->null_device.lastProcessedFrameCapture += pDevice->capture.internalPeriodSizeInFrames;
+        pDevice->null_device.currentPeriodFramesRemainingCapture = pDevice->capture.internalPeriodSizeInFrames;
+    }
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = totalPCMFramesProcessed;
+    }
+
+    return result;
+}
+
+static ma_result ma_device_main_loop__null(ma_device* pDevice) /* Worker loop for the null backend: pumps data between the client callback and the virtual device until the device leaves the STARTED state. */
+{
+    ma_result result = MA_SUCCESS;
+    ma_bool32 exitLoop = MA_FALSE;
+
+    MA_ASSERT(pDevice != NULL);
+
+    /* The capture device needs to be started immediately. */
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        result = ma_device_start__null(pDevice);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) {
+        switch (pDevice->type)
+        {
+            case ma_device_type_duplex:
+            {
+                /* The process is: device_read -> convert -> callback -> convert -> device_write */
+                ma_uint32 totalCapturedDeviceFramesProcessed = 0;
+                ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames);
+
+                while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) {
+                    ma_uint8  capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+                    ma_uint8  playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+                    ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+                    ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+                    ma_uint32 capturedDeviceFramesRemaining;
+                    ma_uint32 capturedDeviceFramesProcessed;
+                    ma_uint32 capturedDeviceFramesToProcess;
+                    ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed;
+                    if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) {
+                        capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames;
+                    }
+
+                    result = ma_device_read__null(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess);
+                    if (result != MA_SUCCESS) {
+                        exitLoop = MA_TRUE;
+                        break;
+                    }
+
+                    capturedDeviceFramesRemaining = capturedDeviceFramesToProcess;
+                    capturedDeviceFramesProcessed = 0;
+
+                    /* At this point we have our captured data in device format and we now need to convert it to client format. */
+                    for (;;) {
+                        ma_uint8  capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+                        ma_uint8  playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+                        ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+                        ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+                        ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames);
+                        ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining;
+                        ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+
+                        /* Convert capture data from device format to client format. */
+                        result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration);
+                        if (result != MA_SUCCESS) {
+                            break;
+                        }
+
+                        /*
+                        If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small
+                        which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE.
+                        */
+                        if (capturedClientFramesToProcessThisIteration == 0) {
+                            break;
+                        }
+
+                        ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration);  /* Safe cast. */
+
+                        capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+                        capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+
+                        /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */
+                        for (;;) {
+                            ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration;
+                            ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames;
+                            result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount);
+                            if (result != MA_SUCCESS) {
+                                break;
+                            }
+
+                            result = ma_device_write__null(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL);  /* Safe cast. */
+                            if (result != MA_SUCCESS) {
+                                exitLoop = MA_TRUE;
+                                break;
+                            }
+
+                            capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount;  /* Safe cast. */
+                            if (capturedClientFramesToProcessThisIteration == 0) {
+                                break;
+                            }
+                        }
+
+                        /* In case an error happened from ma_device_write__null()... */
+                        if (result != MA_SUCCESS) {
+                            exitLoop = MA_TRUE;
+                            break;
+                        }
+                    }
+
+                    totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed;
+                }
+            } break;
+
+            case ma_device_type_capture:
+            {
+                /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+                ma_uint8 intermediaryBuffer[8192];
+                ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+                ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames;
+                ma_uint32 framesReadThisPeriod = 0;
+                while (framesReadThisPeriod < periodSizeInFrames) {
+                    ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod;
+                    ma_uint32 framesProcessed;
+                    ma_uint32 framesToReadThisIteration = framesRemainingInPeriod;
+                    if (framesToReadThisIteration > intermediaryBufferSizeInFrames) {
+                        framesToReadThisIteration = intermediaryBufferSizeInFrames;
+                    }
+
+                    result = ma_device_read__null(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed);
+                    if (result != MA_SUCCESS) {
+                        exitLoop = MA_TRUE;
+                        break;
+                    }
+
+                    ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer);
+
+                    framesReadThisPeriod += framesProcessed;
+                }
+            } break;
+
+            case ma_device_type_playback:
+            {
+                /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+                ma_uint8 intermediaryBuffer[8192];
+                ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+                ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
+                ma_uint32 framesWrittenThisPeriod = 0;
+                while (framesWrittenThisPeriod < periodSizeInFrames) {
+                    ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod;
+                    ma_uint32 framesProcessed;
+                    ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod;
+                    if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) {
+                        framesToWriteThisIteration = intermediaryBufferSizeInFrames;
+                    }
+
+                    ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer);
+
+                    result = ma_device_write__null(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed);
+                    if (result != MA_SUCCESS) {
+                        exitLoop = MA_TRUE;
+                        break;
+                    }
+
+                    framesWrittenThisPeriod += framesProcessed;
+                }
+            } break;
+
+            /* To silence a warning. Will never hit this. */
+            case ma_device_type_loopback:
+            default: break;
+        }
+    }
+
+
+    /* Here is where the device is stopped. */
+    ma_device_stop__null(pDevice);
+
+    return result;
+}
+
+static ma_result ma_context_uninit__null(ma_context* pContext) /* The null backend holds no context resources, so there is nothing to release. */
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_null);
+
+    (void)pContext;  /* Unused in release builds (only referenced by the asserts above). */
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__null(const ma_context_config* pConfig, ma_context* pContext) /* Wires up the null backend's callback table on the context. */
+{
+    MA_ASSERT(pContext != NULL);
+
+    (void)pConfig;  /* The null backend has no context configuration to apply. */
+
+    pContext->onUninit         = ma_context_uninit__null;
+    pContext->onDeviceIDEqual  = ma_context_is_device_id_equal__null;
+    pContext->onEnumDevices    = ma_context_enumerate_devices__null;
+    pContext->onGetDeviceInfo  = ma_context_get_device_info__null;
+    pContext->onDeviceInit     = ma_device_init__null;
+    pContext->onDeviceUninit   = ma_device_uninit__null;
+    pContext->onDeviceStart    = NULL; /* Not required for synchronous backends. */
+    pContext->onDeviceStop     = NULL; /* Not required for synchronous backends. */
+    pContext->onDeviceMainLoop = ma_device_main_loop__null;
+
+    /* The null backend always works. */
+    return MA_SUCCESS;
+}
+#endif
+
+
+/*******************************************************************************
+
+WIN32 COMMON
+
+*******************************************************************************/
+#if defined(MA_WIN32)
+#if defined(MA_WIN32_DESKTOP)
+ #define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) ((MA_PFN_CoInitializeEx)pContext->win32.CoInitializeEx)(pvReserved, dwCoInit)
+ #define ma_CoUninitialize(pContext) ((MA_PFN_CoUninitialize)pContext->win32.CoUninitialize)()
+ #define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) ((MA_PFN_CoCreateInstance)pContext->win32.CoCreateInstance)(rclsid, pUnkOuter, dwClsContext, riid, ppv)
+ #define ma_CoTaskMemFree(pContext, pv) ((MA_PFN_CoTaskMemFree)pContext->win32.CoTaskMemFree)(pv)
+ #define ma_PropVariantClear(pContext, pvar) ((MA_PFN_PropVariantClear)pContext->win32.PropVariantClear)(pvar)
+#else
+ #define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) CoInitializeEx(pvReserved, dwCoInit)
+ #define ma_CoUninitialize(pContext) CoUninitialize()
+ #define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) CoCreateInstance(rclsid, pUnkOuter, dwClsContext, riid, ppv)
+ #define ma_CoTaskMemFree(pContext, pv) CoTaskMemFree(pv)
+ #define ma_PropVariantClear(pContext, pvar) PropVariantClear(pvar)
+#endif
+
+#if !defined(MAXULONG_PTR)
+typedef size_t DWORD_PTR;
+#endif
+
+#if !defined(WAVE_FORMAT_44M08)
+#define WAVE_FORMAT_44M08 0x00000100
+#define WAVE_FORMAT_44S08 0x00000200
+#define WAVE_FORMAT_44M16 0x00000400
+#define WAVE_FORMAT_44S16 0x00000800
+#define WAVE_FORMAT_48M08 0x00001000
+#define WAVE_FORMAT_48S08 0x00002000
+#define WAVE_FORMAT_48M16 0x00004000
+#define WAVE_FORMAT_48S16 0x00008000
+#define WAVE_FORMAT_96M08 0x00010000
+#define WAVE_FORMAT_96S08 0x00020000
+#define WAVE_FORMAT_96M16 0x00040000
+#define WAVE_FORMAT_96S16 0x00080000
+#endif
+
+#ifndef SPEAKER_FRONT_LEFT
+#define SPEAKER_FRONT_LEFT 0x1
+#define SPEAKER_FRONT_RIGHT 0x2
+#define SPEAKER_FRONT_CENTER 0x4
+#define SPEAKER_LOW_FREQUENCY 0x8
+#define SPEAKER_BACK_LEFT 0x10
+#define SPEAKER_BACK_RIGHT 0x20
+#define SPEAKER_FRONT_LEFT_OF_CENTER 0x40
+#define SPEAKER_FRONT_RIGHT_OF_CENTER 0x80
+#define SPEAKER_BACK_CENTER 0x100
+#define SPEAKER_SIDE_LEFT 0x200
+#define SPEAKER_SIDE_RIGHT 0x400
+#define SPEAKER_TOP_CENTER 0x800
+#define SPEAKER_TOP_FRONT_LEFT 0x1000
+#define SPEAKER_TOP_FRONT_CENTER 0x2000
+#define SPEAKER_TOP_FRONT_RIGHT 0x4000
+#define SPEAKER_TOP_BACK_LEFT 0x8000
+#define SPEAKER_TOP_BACK_CENTER 0x10000
+#define SPEAKER_TOP_BACK_RIGHT 0x20000
+#endif
+
+/*
+The SDK that comes with old versions of MSVC (VC6, for example) does not appear to define WAVEFORMATEXTENSIBLE. We
+define our own implementation in this case.
+*/
+#if (defined(_MSC_VER) && !defined(_WAVEFORMATEXTENSIBLE_)) || defined(__DMC__)
+typedef struct
+{
+ WAVEFORMATEX Format;
+ union
+ {
+ WORD wValidBitsPerSample;
+ WORD wSamplesPerBlock;
+ WORD wReserved;
+ } Samples;
+ DWORD dwChannelMask;
+ GUID SubFormat;
+} WAVEFORMATEXTENSIBLE;
+#endif
+
+#ifndef WAVE_FORMAT_EXTENSIBLE
+#define WAVE_FORMAT_EXTENSIBLE 0xFFFE
+#endif
+
+#ifndef WAVE_FORMAT_IEEE_FLOAT
+#define WAVE_FORMAT_IEEE_FLOAT 0x0003
+#endif
+
+static GUID MA_GUID_NULL = {0x00000000, 0x0000, 0x0000, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}};
+
+/* Converts an individual Win32-style channel identifier (SPEAKER_FRONT_LEFT, etc.) to miniaudio. */
+/* Converts an individual Win32-style channel identifier (SPEAKER_FRONT_LEFT, etc.) to miniaudio. Expects a single SPEAKER_* bit; unrecognized bits map to 0 (MA_CHANNEL_NONE). */
+static ma_uint8 ma_channel_id_to_ma__win32(DWORD id)
+{
+    switch (id)
+    {
+        case SPEAKER_FRONT_LEFT:            return MA_CHANNEL_FRONT_LEFT;
+        case SPEAKER_FRONT_RIGHT:           return MA_CHANNEL_FRONT_RIGHT;
+        case SPEAKER_FRONT_CENTER:          return MA_CHANNEL_FRONT_CENTER;
+        case SPEAKER_LOW_FREQUENCY:         return MA_CHANNEL_LFE;
+        case SPEAKER_BACK_LEFT:             return MA_CHANNEL_BACK_LEFT;
+        case SPEAKER_BACK_RIGHT:            return MA_CHANNEL_BACK_RIGHT;
+        case SPEAKER_FRONT_LEFT_OF_CENTER:  return MA_CHANNEL_FRONT_LEFT_CENTER;
+        case SPEAKER_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER;
+        case SPEAKER_BACK_CENTER:           return MA_CHANNEL_BACK_CENTER;
+        case SPEAKER_SIDE_LEFT:             return MA_CHANNEL_SIDE_LEFT;
+        case SPEAKER_SIDE_RIGHT:            return MA_CHANNEL_SIDE_RIGHT;
+        case SPEAKER_TOP_CENTER:            return MA_CHANNEL_TOP_CENTER;
+        case SPEAKER_TOP_FRONT_LEFT:        return MA_CHANNEL_TOP_FRONT_LEFT;
+        case SPEAKER_TOP_FRONT_CENTER:      return MA_CHANNEL_TOP_FRONT_CENTER;
+        case SPEAKER_TOP_FRONT_RIGHT:       return MA_CHANNEL_TOP_FRONT_RIGHT;
+        case SPEAKER_TOP_BACK_LEFT:         return MA_CHANNEL_TOP_BACK_LEFT;
+        case SPEAKER_TOP_BACK_CENTER:       return MA_CHANNEL_TOP_BACK_CENTER;
+        case SPEAKER_TOP_BACK_RIGHT:        return MA_CHANNEL_TOP_BACK_RIGHT;
+        default: return 0;
+    }
+}
+
+/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to Win32-style. */
+/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to Win32-style. MA_CHANNEL_MONO maps to SPEAKER_FRONT_CENTER; unmapped channels return 0 (no bit set). */
+static DWORD ma_channel_id_to_win32(DWORD id)
+{
+    switch (id)
+    {
+        case MA_CHANNEL_MONO:               return SPEAKER_FRONT_CENTER;
+        case MA_CHANNEL_FRONT_LEFT:         return SPEAKER_FRONT_LEFT;
+        case MA_CHANNEL_FRONT_RIGHT:        return SPEAKER_FRONT_RIGHT;
+        case MA_CHANNEL_FRONT_CENTER:       return SPEAKER_FRONT_CENTER;
+        case MA_CHANNEL_LFE:                return SPEAKER_LOW_FREQUENCY;
+        case MA_CHANNEL_BACK_LEFT:          return SPEAKER_BACK_LEFT;
+        case MA_CHANNEL_BACK_RIGHT:         return SPEAKER_BACK_RIGHT;
+        case MA_CHANNEL_FRONT_LEFT_CENTER:  return SPEAKER_FRONT_LEFT_OF_CENTER;
+        case MA_CHANNEL_FRONT_RIGHT_CENTER: return SPEAKER_FRONT_RIGHT_OF_CENTER;
+        case MA_CHANNEL_BACK_CENTER:        return SPEAKER_BACK_CENTER;
+        case MA_CHANNEL_SIDE_LEFT:          return SPEAKER_SIDE_LEFT;
+        case MA_CHANNEL_SIDE_RIGHT:         return SPEAKER_SIDE_RIGHT;
+        case MA_CHANNEL_TOP_CENTER:         return SPEAKER_TOP_CENTER;
+        case MA_CHANNEL_TOP_FRONT_LEFT:     return SPEAKER_TOP_FRONT_LEFT;
+        case MA_CHANNEL_TOP_FRONT_CENTER:   return SPEAKER_TOP_FRONT_CENTER;
+        case MA_CHANNEL_TOP_FRONT_RIGHT:    return SPEAKER_TOP_FRONT_RIGHT;
+        case MA_CHANNEL_TOP_BACK_LEFT:      return SPEAKER_TOP_BACK_LEFT;
+        case MA_CHANNEL_TOP_BACK_CENTER:    return SPEAKER_TOP_BACK_CENTER;
+        case MA_CHANNEL_TOP_BACK_RIGHT:     return SPEAKER_TOP_BACK_RIGHT;
+        default: return 0;
+    }
+}
+
+/* Converts a channel mapping to a Win32-style channel mask. */
+/* Converts a channel mapping to a Win32-style channel mask by OR-ing the per-channel SPEAKER_* bits. Duplicate or unmapped channels simply contribute the same bit or nothing. */
+static DWORD ma_channel_map_to_channel_mask__win32(const ma_channel channelMap[MA_MAX_CHANNELS], ma_uint32 channels)
+{
+    DWORD dwChannelMask = 0;
+    ma_uint32 iChannel;
+
+    for (iChannel = 0; iChannel < channels; ++iChannel) {
+        dwChannelMask |= ma_channel_id_to_win32(channelMap[iChannel]);
+    }
+
+    return dwChannelMask;
+}
+
+/* Converts a Win32-style channel mask to a miniaudio channel map. */
+/* Converts a Win32-style channel mask to a miniaudio channel map. Mono/stereo with no mask get sensible defaults; otherwise set bits are assigned in ascending bit order. */
+static void ma_channel_mask_to_channel_map__win32(DWORD dwChannelMask, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+{
+    if (channels == 1 && dwChannelMask == 0) {
+        channelMap[0] = MA_CHANNEL_MONO;
+    } else if (channels == 2 && dwChannelMask == 0) {
+        channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+        channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+    } else {
+        if (channels == 1 && (dwChannelMask & SPEAKER_FRONT_CENTER) != 0) {
+            channelMap[0] = MA_CHANNEL_MONO;
+        } else {
+            /* Just iterate over each bit. */
+            ma_uint32 iChannel = 0;
+            ma_uint32 iBit;
+
+            for (iBit = 0; iBit < 32; ++iBit) {
+                DWORD bitValue = (dwChannelMask & (1UL << iBit));
+                if (bitValue != 0) {
+                    /* The bit is set. NOTE(review): iChannel is not clamped against 'channels'/MA_MAX_CHANNELS — a mask with more set bits than channels would write past the expected entries; confirm callers pass a consistent mask. */
+                    channelMap[iChannel] = ma_channel_id_to_ma__win32(bitValue);
+                    iChannel += 1;
+                }
+            }
+        }
+    }
+}
+
+/* GUID equality helper. C++'s IsEqualGUID takes references while C's takes pointers, hence the two variants. */
+#ifdef __cplusplus
+static ma_bool32 ma_is_guid_equal(const void* a, const void* b)
+{
+    return IsEqualGUID(*(const GUID*)a, *(const GUID*)b);
+}
+#else
+#define ma_is_guid_equal(a, b) IsEqualGUID((const GUID*)a, (const GUID*)b)
+#endif
+
+static ma_format ma_format_from_WAVEFORMATEX(const WAVEFORMATEX* pWF) /* Maps a WAVEFORMATEX(TENSIBLE) description to the closest miniaudio format, or ma_format_unknown. */
+{
+    MA_ASSERT(pWF != NULL);
+
+    if (pWF->wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
+        const WAVEFORMATEXTENSIBLE* pWFEX = (const WAVEFORMATEXTENSIBLE*)pWF;
+        if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_PCM)) {
+            if (pWFEX->Samples.wValidBitsPerSample == 32) {
+                return ma_format_s32;
+            }
+            if (pWFEX->Samples.wValidBitsPerSample == 24) {
+                if (pWFEX->Format.wBitsPerSample == 32) {
+                    /*return ma_format_s24_32;*/    /* 24-in-32 not supported; falls through to ma_format_unknown. */
+                }
+                if (pWFEX->Format.wBitsPerSample == 24) {
+                    return ma_format_s24;
+                }
+            }
+            if (pWFEX->Samples.wValidBitsPerSample == 16) {
+                return ma_format_s16;
+            }
+            if (pWFEX->Samples.wValidBitsPerSample == 8) {
+                return ma_format_u8;
+            }
+        }
+        if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)) {
+            if (pWFEX->Samples.wValidBitsPerSample == 32) {
+                return ma_format_f32;
+            }
+            /*
+            if (pWFEX->Samples.wValidBitsPerSample == 64) {
+                return ma_format_f64;
+            }
+            */
+        }
+    } else {
+        if (pWF->wFormatTag == WAVE_FORMAT_PCM) {
+            if (pWF->wBitsPerSample == 32) {
+                return ma_format_s32;
+            }
+            if (pWF->wBitsPerSample == 24) {
+                return ma_format_s24;
+            }
+            if (pWF->wBitsPerSample == 16) {
+                return ma_format_s16;
+            }
+            if (pWF->wBitsPerSample == 8) {
+                return ma_format_u8;
+            }
+        }
+        if (pWF->wFormatTag == WAVE_FORMAT_IEEE_FLOAT) {
+            if (pWF->wBitsPerSample == 32) {
+                return ma_format_f32;
+            }
+            if (pWF->wBitsPerSample == 64) {
+                /*return ma_format_f64;*/    /* 64-bit float not supported; falls through to ma_format_unknown. */
+            }
+        }
+    }
+
+    return ma_format_unknown;
+}
+#endif
+
+
+/*******************************************************************************
+
+WASAPI Backend
+
+*******************************************************************************/
+#ifdef MA_HAS_WASAPI
+#if 0
+#if defined(_MSC_VER)
+ #pragma warning(push)
+ #pragma warning(disable:4091) /* 'typedef ': ignored on left of '' when no variable is declared */
+#endif
+#include
+#include
+#if defined(_MSC_VER)
+ #pragma warning(pop)
+#endif
+#endif /* 0 */
+
+/* Some compilers don't define VerifyVersionInfoW. Need to write this ourselves. */
+#define MA_WIN32_WINNT_VISTA 0x0600
+#define MA_VER_MINORVERSION 0x01
+#define MA_VER_MAJORVERSION 0x02
+#define MA_VER_SERVICEPACKMAJOR 0x20
+#define MA_VER_GREATER_EQUAL 0x03
+
+typedef struct {
+ DWORD dwOSVersionInfoSize;
+ DWORD dwMajorVersion;
+ DWORD dwMinorVersion;
+ DWORD dwBuildNumber;
+ DWORD dwPlatformId;
+ WCHAR szCSDVersion[128];
+ WORD wServicePackMajor;
+ WORD wServicePackMinor;
+ WORD wSuiteMask;
+ BYTE wProductType;
+ BYTE wReserved;
+} ma_OSVERSIONINFOEXW;
+
+typedef BOOL (WINAPI * ma_PFNVerifyVersionInfoW) (ma_OSVERSIONINFOEXW* lpVersionInfo, DWORD dwTypeMask, DWORDLONG dwlConditionMask);
+typedef ULONGLONG (WINAPI * ma_PFNVerSetConditionMask)(ULONGLONG dwlConditionMask, DWORD dwTypeBitMask, BYTE dwConditionMask);
+
+
+#ifndef PROPERTYKEY_DEFINED
+#define PROPERTYKEY_DEFINED
+typedef struct
+{
+ GUID fmtid;
+ DWORD pid;
+} PROPERTYKEY;
+#endif
+
+/* Some compilers don't define PropVariantInit(). We just do this ourselves since it's just a memset(). */
+static MA_INLINE void ma_PropVariantInit(PROPVARIANT* pProp) /* Zero-initializes a PROPVARIANT, matching the behaviour of the SDK's PropVariantInit macro. */
+{
+    MA_ZERO_OBJECT(pProp);
+}
+
+
+static const PROPERTYKEY MA_PKEY_Device_FriendlyName = {{0xA45C254E, 0xDF1C, 0x4EFD, {0x80, 0x20, 0x67, 0xD1, 0x46, 0xA8, 0x50, 0xE0}}, 14};
+static const PROPERTYKEY MA_PKEY_AudioEngine_DeviceFormat = {{0xF19F064D, 0x82C, 0x4E27, {0xBC, 0x73, 0x68, 0x82, 0xA1, 0xBB, 0x8E, 0x4C}}, 0};
+
+static const IID MA_IID_IUnknown = {0x00000000, 0x0000, 0x0000, {0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}; /* 00000000-0000-0000-C000-000000000046 */
+static const IID MA_IID_IAgileObject = {0x94EA2B94, 0xE9CC, 0x49E0, {0xC0, 0xFF, 0xEE, 0x64, 0xCA, 0x8F, 0x5B, 0x90}}; /* 94EA2B94-E9CC-49E0-C0FF-EE64CA8F5B90 */
+
+static const IID MA_IID_IAudioClient = {0x1CB9AD4C, 0xDBFA, 0x4C32, {0xB1, 0x78, 0xC2, 0xF5, 0x68, 0xA7, 0x03, 0xB2}}; /* 1CB9AD4C-DBFA-4C32-B178-C2F568A703B2 = __uuidof(IAudioClient) */
+static const IID MA_IID_IAudioClient2 = {0x726778CD, 0xF60A, 0x4EDA, {0x82, 0xDE, 0xE4, 0x76, 0x10, 0xCD, 0x78, 0xAA}}; /* 726778CD-F60A-4EDA-82DE-E47610CD78AA = __uuidof(IAudioClient2) */
+static const IID MA_IID_IAudioClient3 = {0x7ED4EE07, 0x8E67, 0x4CD4, {0x8C, 0x1A, 0x2B, 0x7A, 0x59, 0x87, 0xAD, 0x42}}; /* 7ED4EE07-8E67-4CD4-8C1A-2B7A5987AD42 = __uuidof(IAudioClient3) */
+static const IID MA_IID_IAudioRenderClient = {0xF294ACFC, 0x3146, 0x4483, {0xA7, 0xBF, 0xAD, 0xDC, 0xA7, 0xC2, 0x60, 0xE2}}; /* F294ACFC-3146-4483-A7BF-ADDCA7C260E2 = __uuidof(IAudioRenderClient) */
+static const IID MA_IID_IAudioCaptureClient = {0xC8ADBD64, 0xE71E, 0x48A0, {0xA4, 0xDE, 0x18, 0x5C, 0x39, 0x5C, 0xD3, 0x17}}; /* C8ADBD64-E71E-48A0-A4DE-185C395CD317 = __uuidof(IAudioCaptureClient) */
+static const IID MA_IID_IMMNotificationClient = {0x7991EEC9, 0x7E89, 0x4D85, {0x83, 0x90, 0x6C, 0x70, 0x3C, 0xEC, 0x60, 0xC0}}; /* 7991EEC9-7E89-4D85-8390-6C703CEC60C0 = __uuidof(IMMNotificationClient) */
+#ifndef MA_WIN32_DESKTOP
+static const IID MA_IID_DEVINTERFACE_AUDIO_RENDER = {0xE6327CAD, 0xDCEC, 0x4949, {0xAE, 0x8A, 0x99, 0x1E, 0x97, 0x6A, 0x79, 0xD2}}; /* E6327CAD-DCEC-4949-AE8A-991E976A79D2 */
+static const IID MA_IID_DEVINTERFACE_AUDIO_CAPTURE = {0x2EEF81BE, 0x33FA, 0x4800, {0x96, 0x70, 0x1C, 0xD4, 0x74, 0x97, 0x2C, 0x3F}}; /* 2EEF81BE-33FA-4800-9670-1CD474972C3F */
+static const IID MA_IID_IActivateAudioInterfaceCompletionHandler = {0x41D949AB, 0x9862, 0x444A, {0x80, 0xF6, 0xC2, 0x61, 0x33, 0x4D, 0xA5, 0xEB}}; /* 41D949AB-9862-444A-80F6-C261334DA5EB */
+#endif
+
+static const IID MA_CLSID_MMDeviceEnumerator_Instance = {0xBCDE0395, 0xE52F, 0x467C, {0x8E, 0x3D, 0xC4, 0x57, 0x92, 0x91, 0x69, 0x2E}}; /* BCDE0395-E52F-467C-8E3D-C4579291692E = __uuidof(MMDeviceEnumerator) */
+static const IID MA_IID_IMMDeviceEnumerator_Instance = {0xA95664D2, 0x9614, 0x4F35, {0xA7, 0x46, 0xDE, 0x8D, 0xB6, 0x36, 0x17, 0xE6}}; /* A95664D2-9614-4F35-A746-DE8DB63617E6 = __uuidof(IMMDeviceEnumerator) */
+#ifdef __cplusplus
+#define MA_CLSID_MMDeviceEnumerator MA_CLSID_MMDeviceEnumerator_Instance
+#define MA_IID_IMMDeviceEnumerator MA_IID_IMMDeviceEnumerator_Instance
+#else
+#define MA_CLSID_MMDeviceEnumerator &MA_CLSID_MMDeviceEnumerator_Instance
+#define MA_IID_IMMDeviceEnumerator &MA_IID_IMMDeviceEnumerator_Instance
+#endif
+
+typedef struct ma_IUnknown ma_IUnknown;
+#ifdef MA_WIN32_DESKTOP
+#define MA_MM_DEVICE_STATE_ACTIVE 1
+#define MA_MM_DEVICE_STATE_DISABLED 2
+#define MA_MM_DEVICE_STATE_NOTPRESENT 4
+#define MA_MM_DEVICE_STATE_UNPLUGGED 8
+
+typedef struct ma_IMMDeviceEnumerator ma_IMMDeviceEnumerator;
+typedef struct ma_IMMDeviceCollection ma_IMMDeviceCollection;
+typedef struct ma_IMMDevice ma_IMMDevice;
+#else
+typedef struct ma_IActivateAudioInterfaceCompletionHandler ma_IActivateAudioInterfaceCompletionHandler;
+typedef struct ma_IActivateAudioInterfaceAsyncOperation ma_IActivateAudioInterfaceAsyncOperation;
+#endif
+typedef struct ma_IPropertyStore ma_IPropertyStore;
+typedef struct ma_IAudioClient ma_IAudioClient;
+typedef struct ma_IAudioClient2 ma_IAudioClient2;
+typedef struct ma_IAudioClient3 ma_IAudioClient3;
+typedef struct ma_IAudioRenderClient ma_IAudioRenderClient;
+typedef struct ma_IAudioCaptureClient ma_IAudioCaptureClient;
+
+typedef ma_int64 MA_REFERENCE_TIME;
+
+#define MA_AUDCLNT_STREAMFLAGS_CROSSPROCESS 0x00010000
+#define MA_AUDCLNT_STREAMFLAGS_LOOPBACK 0x00020000
+#define MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK 0x00040000
+#define MA_AUDCLNT_STREAMFLAGS_NOPERSIST 0x00080000
+#define MA_AUDCLNT_STREAMFLAGS_RATEADJUST 0x00100000
+#define MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000
+#define MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000
+#define MA_AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED 0x10000000
+#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDE 0x20000000
+#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED 0x40000000
+
+/* Buffer flags. */
+#define MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY 1
+#define MA_AUDCLNT_BUFFERFLAGS_SILENT 2
+#define MA_AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR 4
+
+typedef enum
+{
+ ma_eRender = 0,
+ ma_eCapture = 1,
+ ma_eAll = 2
+} ma_EDataFlow;
+
+typedef enum
+{
+ ma_eConsole = 0,
+ ma_eMultimedia = 1,
+ ma_eCommunications = 2
+} ma_ERole;
+
+typedef enum
+{
+ MA_AUDCLNT_SHAREMODE_SHARED,
+ MA_AUDCLNT_SHAREMODE_EXCLUSIVE
+} MA_AUDCLNT_SHAREMODE;
+
+typedef enum
+{
+ MA_AudioCategory_Other = 0 /* <-- miniaudio is only caring about Other. */
+} MA_AUDIO_STREAM_CATEGORY;
+
+typedef struct
+{
+ UINT32 cbSize;
+ BOOL bIsOffload;
+ MA_AUDIO_STREAM_CATEGORY eCategory;
+} ma_AudioClientProperties;
+
+/* IUnknown */
+typedef struct
+{
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IUnknown* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IUnknown* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IUnknown* pThis);
+} ma_IUnknownVtbl;
+struct ma_IUnknown
+{
+ ma_IUnknownVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IUnknown_QueryInterface(ma_IUnknown* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IUnknown_AddRef(ma_IUnknown* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IUnknown_Release(ma_IUnknown* pThis) { return pThis->lpVtbl->Release(pThis); }
+
+#ifdef MA_WIN32_DESKTOP
+ /* IMMNotificationClient */
+ typedef struct
+ {
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMNotificationClient* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IMMNotificationClient* pThis);
+
+ /* IMMNotificationClient */
+ HRESULT (STDMETHODCALLTYPE * OnDeviceStateChanged) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState);
+ HRESULT (STDMETHODCALLTYPE * OnDeviceAdded) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID);
+ HRESULT (STDMETHODCALLTYPE * OnDeviceRemoved) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID);
+ HRESULT (STDMETHODCALLTYPE * OnDefaultDeviceChanged)(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID);
+ HRESULT (STDMETHODCALLTYPE * OnPropertyValueChanged)(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key);
+ } ma_IMMNotificationClientVtbl;
+
+ /* IMMDeviceEnumerator */
+ typedef struct
+ {
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceEnumerator* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceEnumerator* pThis);
+
+ /* IMMDeviceEnumerator */
+ HRESULT (STDMETHODCALLTYPE * EnumAudioEndpoints) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices);
+ HRESULT (STDMETHODCALLTYPE * GetDefaultAudioEndpoint) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint);
+ HRESULT (STDMETHODCALLTYPE * GetDevice) (ma_IMMDeviceEnumerator* pThis, LPCWSTR pID, ma_IMMDevice** ppDevice);
+ HRESULT (STDMETHODCALLTYPE * RegisterEndpointNotificationCallback) (ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient);
+ HRESULT (STDMETHODCALLTYPE * UnregisterEndpointNotificationCallback)(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient);
+ } ma_IMMDeviceEnumeratorVtbl;
+ struct ma_IMMDeviceEnumerator
+ {
+ ma_IMMDeviceEnumeratorVtbl* lpVtbl;
+ };
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_QueryInterface(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+ static MA_INLINE ULONG ma_IMMDeviceEnumerator_AddRef(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+ static MA_INLINE ULONG ma_IMMDeviceEnumerator_Release(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->Release(pThis); }
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_EnumAudioEndpoints(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices) { return pThis->lpVtbl->EnumAudioEndpoints(pThis, dataFlow, dwStateMask, ppDevices); }
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint) { return pThis->lpVtbl->GetDefaultAudioEndpoint(pThis, dataFlow, role, ppEndpoint); }
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_GetDevice(ma_IMMDeviceEnumerator* pThis, LPCWSTR pID, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->GetDevice(pThis, pID, ppDevice); }
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_RegisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->RegisterEndpointNotificationCallback(pThis, pClient); }
+ static MA_INLINE HRESULT ma_IMMDeviceEnumerator_UnregisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->UnregisterEndpointNotificationCallback(pThis, pClient); }
+
+
+ /* IMMDeviceCollection */
+ typedef struct
+ {
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceCollection* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceCollection* pThis);
+
+ /* IMMDeviceCollection */
+ HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IMMDeviceCollection* pThis, UINT* pDevices);
+ HRESULT (STDMETHODCALLTYPE * Item) (ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice);
+ } ma_IMMDeviceCollectionVtbl;
+ struct ma_IMMDeviceCollection
+ {
+ ma_IMMDeviceCollectionVtbl* lpVtbl;
+ };
+ static MA_INLINE HRESULT ma_IMMDeviceCollection_QueryInterface(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+ static MA_INLINE ULONG ma_IMMDeviceCollection_AddRef(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+ static MA_INLINE ULONG ma_IMMDeviceCollection_Release(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->Release(pThis); }
+ static MA_INLINE HRESULT ma_IMMDeviceCollection_GetCount(ma_IMMDeviceCollection* pThis, UINT* pDevices) { return pThis->lpVtbl->GetCount(pThis, pDevices); }
+ static MA_INLINE HRESULT ma_IMMDeviceCollection_Item(ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->Item(pThis, nDevice, ppDevice); }
+
+
+ /* IMMDevice */
+ typedef struct
+ {
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDevice* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDevice* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDevice* pThis);
+
+ /* IMMDevice */
+ HRESULT (STDMETHODCALLTYPE * Activate) (ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, PROPVARIANT* pActivationParams, void** ppInterface);
+ HRESULT (STDMETHODCALLTYPE * OpenPropertyStore)(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties);
+ HRESULT (STDMETHODCALLTYPE * GetId) (ma_IMMDevice* pThis, LPWSTR *pID);
+ HRESULT (STDMETHODCALLTYPE * GetState) (ma_IMMDevice* pThis, DWORD *pState);
+ } ma_IMMDeviceVtbl;
+ struct ma_IMMDevice
+ {
+ ma_IMMDeviceVtbl* lpVtbl;
+ };
+ static MA_INLINE HRESULT ma_IMMDevice_QueryInterface(ma_IMMDevice* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+ static MA_INLINE ULONG ma_IMMDevice_AddRef(ma_IMMDevice* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+ static MA_INLINE ULONG ma_IMMDevice_Release(ma_IMMDevice* pThis) { return pThis->lpVtbl->Release(pThis); }
+ static MA_INLINE HRESULT ma_IMMDevice_Activate(ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, PROPVARIANT* pActivationParams, void** ppInterface) { return pThis->lpVtbl->Activate(pThis, iid, dwClsCtx, pActivationParams, ppInterface); }
+ static MA_INLINE HRESULT ma_IMMDevice_OpenPropertyStore(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties) { return pThis->lpVtbl->OpenPropertyStore(pThis, stgmAccess, ppProperties); }
+ static MA_INLINE HRESULT ma_IMMDevice_GetId(ma_IMMDevice* pThis, LPWSTR *pID) { return pThis->lpVtbl->GetId(pThis, pID); }
+ static MA_INLINE HRESULT ma_IMMDevice_GetState(ma_IMMDevice* pThis, DWORD *pState) { return pThis->lpVtbl->GetState(pThis, pState); }
+#else
+ /* IActivateAudioInterfaceAsyncOperation */
+ typedef struct
+ {
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IActivateAudioInterfaceAsyncOperation* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IActivateAudioInterfaceAsyncOperation* pThis);
+
+ /* IActivateAudioInterfaceAsyncOperation */
+ HRESULT (STDMETHODCALLTYPE * GetActivateResult)(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface);
+ } ma_IActivateAudioInterfaceAsyncOperationVtbl;
+ struct ma_IActivateAudioInterfaceAsyncOperation
+ {
+ ma_IActivateAudioInterfaceAsyncOperationVtbl* lpVtbl;
+ };
+ static MA_INLINE HRESULT ma_IActivateAudioInterfaceAsyncOperation_QueryInterface(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+ static MA_INLINE ULONG ma_IActivateAudioInterfaceAsyncOperation_AddRef(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+ static MA_INLINE ULONG ma_IActivateAudioInterfaceAsyncOperation_Release(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->Release(pThis); }
+ static MA_INLINE HRESULT ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface) { return pThis->lpVtbl->GetActivateResult(pThis, pActivateResult, ppActivatedInterface); }
+#endif
+
+/* IPropertyStore */
+typedef struct
+{
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IPropertyStore* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IPropertyStore* pThis);
- result = ma_strncpy_s(dst, dstSize, srcA, (size_t)-1);
- if (result != 0) {
- return result;
+ /* IPropertyStore */
+ HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IPropertyStore* pThis, DWORD* pPropCount);
+ HRESULT (STDMETHODCALLTYPE * GetAt) (ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey);
+ HRESULT (STDMETHODCALLTYPE * GetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, PROPVARIANT* pPropVar);
+ HRESULT (STDMETHODCALLTYPE * SetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const PROPVARIANT* const pPropVar);
+ HRESULT (STDMETHODCALLTYPE * Commit) (ma_IPropertyStore* pThis);
+} ma_IPropertyStoreVtbl;
+struct ma_IPropertyStore
+{
+ ma_IPropertyStoreVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IPropertyStore_QueryInterface(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IPropertyStore_AddRef(ma_IPropertyStore* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IPropertyStore_Release(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IPropertyStore_GetCount(ma_IPropertyStore* pThis, DWORD* pPropCount) { return pThis->lpVtbl->GetCount(pThis, pPropCount); }
+static MA_INLINE HRESULT ma_IPropertyStore_GetAt(ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey) { return pThis->lpVtbl->GetAt(pThis, propIndex, pPropKey); }
+static MA_INLINE HRESULT ma_IPropertyStore_GetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, PROPVARIANT* pPropVar) { return pThis->lpVtbl->GetValue(pThis, pKey, pPropVar); }
+static MA_INLINE HRESULT ma_IPropertyStore_SetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const PROPVARIANT* const pPropVar) { return pThis->lpVtbl->SetValue(pThis, pKey, pPropVar); }
+static MA_INLINE HRESULT ma_IPropertyStore_Commit(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Commit(pThis); }
+
+
+/* IAudioClient */
+typedef struct
+{
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient* pThis);
+
+ /* IAudioClient */
+ HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
+ HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames);
+ HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency);
+ HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames);
+ HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch);
+ HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient* pThis, WAVEFORMATEX** ppDeviceFormat);
+ HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod);
+ HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient* pThis);
+ HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient* pThis);
+ HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient* pThis);
+ HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient* pThis, HANDLE eventHandle);
+ HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient* pThis, const IID* const riid, void** pp);
+} ma_IAudioClientVtbl;
+struct ma_IAudioClient
+{
+ ma_IAudioClientVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IAudioClient_QueryInterface(ma_IAudioClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IAudioClient_AddRef(ma_IAudioClient* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IAudioClient_Release(ma_IAudioClient* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient_Initialize(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); }
+static MA_INLINE HRESULT ma_IAudioClient_GetBufferSize(ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); }
+static MA_INLINE HRESULT ma_IAudioClient_GetStreamLatency(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); }
+static MA_INLINE HRESULT ma_IAudioClient_GetCurrentPadding(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); }
+static MA_INLINE HRESULT ma_IAudioClient_IsFormatSupported(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); }
+static MA_INLINE HRESULT ma_IAudioClient_GetMixFormat(ma_IAudioClient* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); }
+static MA_INLINE HRESULT ma_IAudioClient_GetDevicePeriod(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); }
+static MA_INLINE HRESULT ma_IAudioClient_Start(ma_IAudioClient* pThis) { return pThis->lpVtbl->Start(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient_Stop(ma_IAudioClient* pThis) { return pThis->lpVtbl->Stop(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient_Reset(ma_IAudioClient* pThis) { return pThis->lpVtbl->Reset(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient_SetEventHandle(ma_IAudioClient* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); }
+static MA_INLINE HRESULT ma_IAudioClient_GetService(ma_IAudioClient* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); }
+
+/* IAudioClient2 */
+typedef struct
+{
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient2* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient2* pThis);
+
+ /* IAudioClient */
+ HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
+ HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames);
+ HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency);
+ HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames);
+ HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch);
+ HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient2* pThis, WAVEFORMATEX** ppDeviceFormat);
+ HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod);
+ HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient2* pThis);
+ HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient2* pThis);
+ HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient2* pThis);
+ HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient2* pThis, HANDLE eventHandle);
+ HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient2* pThis, const IID* const riid, void** pp);
+
+ /* IAudioClient2 */
+ HRESULT (STDMETHODCALLTYPE * IsOffloadCapable) (ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable);
+ HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties);
+ HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient2* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration);
+} ma_IAudioClient2Vtbl;
+struct ma_IAudioClient2
+{
+ ma_IAudioClient2Vtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IAudioClient2_QueryInterface(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IAudioClient2_AddRef(ma_IAudioClient2* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IAudioClient2_Release(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient2_Initialize(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetBufferSize(ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetStreamLatency(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetCurrentPadding(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); }
+static MA_INLINE HRESULT ma_IAudioClient2_IsFormatSupported(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetMixFormat(ma_IAudioClient2* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetDevicePeriod(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); }
+static MA_INLINE HRESULT ma_IAudioClient2_Start(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Start(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient2_Stop(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Stop(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient2_Reset(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Reset(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient2_SetEventHandle(ma_IAudioClient2* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetService(ma_IAudioClient2* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); }
+static MA_INLINE HRESULT ma_IAudioClient2_IsOffloadCapable(ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); }
+static MA_INLINE HRESULT ma_IAudioClient2_SetClientProperties(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); }
+static MA_INLINE HRESULT ma_IAudioClient2_GetBufferSizeLimits(ma_IAudioClient2* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); }
+
+
+/* IAudioClient3 */
+typedef struct
+{
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient3* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient3* pThis);
+
+ /* IAudioClient */
+ HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
+ HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames);
+ HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency);
+ HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames);
+ HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch);
+ HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient3* pThis, WAVEFORMATEX** ppDeviceFormat);
+ HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod);
+ HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient3* pThis);
+ HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient3* pThis);
+ HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient3* pThis);
+ HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient3* pThis, HANDLE eventHandle);
+ HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient3* pThis, const IID* const riid, void** pp);
+
+ /* IAudioClient2 */
+ HRESULT (STDMETHODCALLTYPE * IsOffloadCapable) (ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable);
+ HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties);
+ HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration);
+
+ /* IAudioClient3 */
+ HRESULT (STDMETHODCALLTYPE * GetSharedModeEnginePeriod) (ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, UINT32* pDefaultPeriodInFrames, UINT32* pFundamentalPeriodInFrames, UINT32* pMinPeriodInFrames, UINT32* pMaxPeriodInFrames);
+ HRESULT (STDMETHODCALLTYPE * GetCurrentSharedModeEnginePeriod)(ma_IAudioClient3* pThis, WAVEFORMATEX** ppFormat, UINT32* pCurrentPeriodInFrames);
+ HRESULT (STDMETHODCALLTYPE * InitializeSharedAudioStream) (ma_IAudioClient3* pThis, DWORD streamFlags, UINT32 periodInFrames, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
+} ma_IAudioClient3Vtbl;
+struct ma_IAudioClient3
+{
+ ma_IAudioClient3Vtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IAudioClient3_QueryInterface(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IAudioClient3_AddRef(ma_IAudioClient3* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IAudioClient3_Release(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient3_Initialize(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetBufferSize(ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetStreamLatency(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetCurrentPadding(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); }
+static MA_INLINE HRESULT ma_IAudioClient3_IsFormatSupported(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetMixFormat(ma_IAudioClient3* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetDevicePeriod(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); }
+static MA_INLINE HRESULT ma_IAudioClient3_Start(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Start(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient3_Stop(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Stop(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient3_Reset(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Reset(pThis); }
+static MA_INLINE HRESULT ma_IAudioClient3_SetEventHandle(ma_IAudioClient3* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetService(ma_IAudioClient3* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); }
+static MA_INLINE HRESULT ma_IAudioClient3_IsOffloadCapable(ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); }
+static MA_INLINE HRESULT ma_IAudioClient3_SetClientProperties(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetBufferSizeLimits(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetSharedModeEnginePeriod(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, UINT32* pDefaultPeriodInFrames, UINT32* pFundamentalPeriodInFrames, UINT32* pMinPeriodInFrames, UINT32* pMaxPeriodInFrames) { return pThis->lpVtbl->GetSharedModeEnginePeriod(pThis, pFormat, pDefaultPeriodInFrames, pFundamentalPeriodInFrames, pMinPeriodInFrames, pMaxPeriodInFrames); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetCurrentSharedModeEnginePeriod(ma_IAudioClient3* pThis, WAVEFORMATEX** ppFormat, UINT32* pCurrentPeriodInFrames) { return pThis->lpVtbl->GetCurrentSharedModeEnginePeriod(pThis, ppFormat, pCurrentPeriodInFrames); }
+static MA_INLINE HRESULT ma_IAudioClient3_InitializeSharedAudioStream(ma_IAudioClient3* pThis, DWORD streamFlags, UINT32 periodInFrames, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGUID) { return pThis->lpVtbl->InitializeSharedAudioStream(pThis, streamFlags, periodInFrames, pFormat, pAudioSessionGUID); }
+
+
+/* IAudioRenderClient */
+typedef struct
+{
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioRenderClient* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioRenderClient* pThis);
+
+ /* IAudioRenderClient */
+ HRESULT (STDMETHODCALLTYPE * GetBuffer) (ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData);
+ HRESULT (STDMETHODCALLTYPE * ReleaseBuffer)(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags);
+} ma_IAudioRenderClientVtbl;
+struct ma_IAudioRenderClient
+{
+ ma_IAudioRenderClientVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IAudioRenderClient_QueryInterface(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IAudioRenderClient_AddRef(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IAudioRenderClient_Release(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioRenderClient_GetBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData) { return pThis->lpVtbl->GetBuffer(pThis, numFramesRequested, ppData); }
+static MA_INLINE HRESULT ma_IAudioRenderClient_ReleaseBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesWritten, dwFlags); }
+
+
+/* IAudioCaptureClient */
+typedef struct
+{
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioCaptureClient* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioCaptureClient* pThis);
+
+    /* IAudioCaptureClient */
+ HRESULT (STDMETHODCALLTYPE * GetBuffer) (ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition);
+ HRESULT (STDMETHODCALLTYPE * ReleaseBuffer) (ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead);
+ HRESULT (STDMETHODCALLTYPE * GetNextPacketSize)(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket);
+} ma_IAudioCaptureClientVtbl;
+struct ma_IAudioCaptureClient
+{
+ ma_IAudioCaptureClientVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IAudioCaptureClient_QueryInterface(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IAudioCaptureClient_AddRef(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IAudioCaptureClient_Release(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioCaptureClient_GetBuffer(ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition) { return pThis->lpVtbl->GetBuffer(pThis, ppData, pNumFramesToRead, pFlags, pDevicePosition, pQPCPosition); }
+static MA_INLINE HRESULT ma_IAudioCaptureClient_ReleaseBuffer(ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesRead); }
+static MA_INLINE HRESULT ma_IAudioCaptureClient_GetNextPacketSize(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket) { return pThis->lpVtbl->GetNextPacketSize(pThis, pNumFramesInNextPacket); }
+
+#ifndef MA_WIN32_DESKTOP
+#include <mmdeviceapi.h>
+typedef struct ma_completion_handler_uwp ma_completion_handler_uwp;
+
+typedef struct
+{
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_completion_handler_uwp* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_completion_handler_uwp* pThis);
+
+ /* IActivateAudioInterfaceCompletionHandler */
+ HRESULT (STDMETHODCALLTYPE * ActivateCompleted)(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation);
+} ma_completion_handler_uwp_vtbl;
+struct ma_completion_handler_uwp
+{
+ ma_completion_handler_uwp_vtbl* lpVtbl;
+ ma_uint32 counter;
+ HANDLE hEvent;
+};
+
+static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_QueryInterface(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject)
+{
+ /*
+ We need to "implement" IAgileObject which is just an indicator that's used internally by WASAPI for some multithreading management. To
+ "implement" this, we just make sure we return pThis when the IAgileObject is requested.
+ */
+ if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IActivateAudioInterfaceCompletionHandler) && !ma_is_guid_equal(riid, &MA_IID_IAgileObject)) {
+ *ppObject = NULL;
+ return E_NOINTERFACE;
}
- result = ma_strncat_s(dst, dstSize, srcB, (size_t)-1);
- if (result != 0) {
- return result;
+    /* Getting here means the IID is IUnknown, IActivateAudioInterfaceCompletionHandler or IAgileObject. */
+ *ppObject = (void*)pThis;
+ ((ma_completion_handler_uwp_vtbl*)pThis->lpVtbl)->AddRef(pThis);
+ return S_OK;
+}
+
+static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_AddRef(ma_completion_handler_uwp* pThis)
+{
+ return (ULONG)ma_atomic_increment_32(&pThis->counter);
+}
+
+static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_Release(ma_completion_handler_uwp* pThis)
+{
+ ma_uint32 newRefCount = ma_atomic_decrement_32(&pThis->counter);
+ if (newRefCount == 0) {
+ return 0; /* We don't free anything here because we never allocate the object on the heap. */
}
- return result;
+ return (ULONG)newRefCount;
+}
+
+static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_ActivateCompleted(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation)
+{
+ (void)pActivateOperation;
+ SetEvent(pThis->hEvent);
+ return S_OK;
+}
+
+
+static ma_completion_handler_uwp_vtbl g_maCompletionHandlerVtblInstance = {
+ ma_completion_handler_uwp_QueryInterface,
+ ma_completion_handler_uwp_AddRef,
+ ma_completion_handler_uwp_Release,
+ ma_completion_handler_uwp_ActivateCompleted
+};
+
+static ma_result ma_completion_handler_uwp_init(ma_completion_handler_uwp* pHandler)
+{
+ MA_ASSERT(pHandler != NULL);
+ MA_ZERO_OBJECT(pHandler);
+
+ pHandler->lpVtbl = &g_maCompletionHandlerVtblInstance;
+ pHandler->counter = 1;
+ pHandler->hEvent = CreateEventW(NULL, FALSE, FALSE, NULL);
+ if (pHandler->hEvent == NULL) {
+ return ma_result_from_GetLastError(GetLastError());
+ }
+
+ return MA_SUCCESS;
}
-char* ma_copy_string(const char* src)
+static void ma_completion_handler_uwp_uninit(ma_completion_handler_uwp* pHandler)
{
- size_t sz = strlen(src)+1;
- char* dst = (char*)ma_malloc(sz);
- if (dst == NULL) {
- return NULL;
+ if (pHandler->hEvent != NULL) {
+ CloseHandle(pHandler->hEvent);
}
-
- ma_strcpy_s(dst, sz, src);
-
- return dst;
}
-
-/* Thanks to good old Bit Twiddling Hacks for this one: http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */
-static MA_INLINE unsigned int ma_next_power_of_2(unsigned int x)
+static void ma_completion_handler_uwp_wait(ma_completion_handler_uwp* pHandler)
{
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
- x++;
-
- return x;
+ WaitForSingleObject(pHandler->hEvent, INFINITE);
}
+#endif /* !MA_WIN32_DESKTOP */
-static MA_INLINE unsigned int ma_prev_power_of_2(unsigned int x)
+/* We need a virtual table for our notification client object that's used for detecting changes to the default device. */
+#ifdef MA_WIN32_DESKTOP
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_QueryInterface(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject)
{
- return ma_next_power_of_2(x) >> 1;
+ /*
+ We care about two interfaces - IUnknown and IMMNotificationClient. If the requested IID is something else
+ we just return E_NOINTERFACE. Otherwise we need to increment the reference counter and return S_OK.
+ */
+ if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IMMNotificationClient)) {
+ *ppObject = NULL;
+ return E_NOINTERFACE;
+ }
+
+ /* Getting here means the IID is IUnknown or IMMNotificationClient. */
+ *ppObject = (void*)pThis;
+ ((ma_IMMNotificationClientVtbl*)pThis->lpVtbl)->AddRef(pThis);
+ return S_OK;
}
-static MA_INLINE unsigned int ma_round_to_power_of_2(unsigned int x)
+static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_AddRef(ma_IMMNotificationClient* pThis)
{
- unsigned int prev = ma_prev_power_of_2(x);
- unsigned int next = ma_next_power_of_2(x);
- if ((next - x) > (x - prev)) {
- return prev;
- } else {
- return next;
- }
+ return (ULONG)ma_atomic_increment_32(&pThis->counter);
}
-static MA_INLINE unsigned int ma_count_set_bits(unsigned int x)
+static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_Release(ma_IMMNotificationClient* pThis)
{
- unsigned int count = 0;
- while (x != 0) {
- if (x & 1) {
- count += 1;
- }
-
- x = x >> 1;
+ ma_uint32 newRefCount = ma_atomic_decrement_32(&pThis->counter);
+ if (newRefCount == 0) {
+ return 0; /* We don't free anything here because we never allocate the object on the heap. */
}
-
- return count;
-}
+ return (ULONG)newRefCount;
+}
-/* Clamps an f32 sample to -1..1 */
-static MA_INLINE float ma_clip_f32(float x)
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceStateChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState)
{
- if (x < -1) return -1;
- if (x > +1) return +1;
- return x;
+#ifdef MA_DEBUG_OUTPUT
+ printf("IMMNotificationClient_OnDeviceStateChanged(pDeviceID=%S, dwNewState=%u)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)", (unsigned int)dwNewState);
+#endif
+
+ (void)pThis;
+ (void)pDeviceID;
+ (void)dwNewState;
+ return S_OK;
}
-static MA_INLINE float ma_mix_f32(float x, float y, float a)
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceAdded(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID)
{
- return x*(1-a) + y*a;
+#ifdef MA_DEBUG_OUTPUT
+ printf("IMMNotificationClient_OnDeviceAdded(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");
+#endif
+
+ /* We don't need to worry about this event for our purposes. */
+ (void)pThis;
+ (void)pDeviceID;
+ return S_OK;
}
-static MA_INLINE float ma_mix_f32_fast(float x, float y, float a)
+
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceRemoved(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID)
{
- float r0 = (y - x);
- float r1 = r0*a;
- return x + r1;
- /*return x + (y - x)*a;*/
+#ifdef MA_DEBUG_OUTPUT
+ printf("IMMNotificationClient_OnDeviceRemoved(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");
+#endif
+
+ /* We don't need to worry about this event for our purposes. */
+ (void)pThis;
+ (void)pDeviceID;
+ return S_OK;
}
-#if defined(MA_SUPPORT_SSE2)
-static MA_INLINE __m128 ma_mix_f32_fast__sse2(__m128 x, __m128 y, __m128 a)
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID)
{
- return _mm_add_ps(x, _mm_mul_ps(_mm_sub_ps(y, x), a));
-}
+#ifdef MA_DEBUG_OUTPUT
+ printf("IMMNotificationClient_OnDefaultDeviceChanged(dataFlow=%d, role=%d, pDefaultDeviceID=%S)\n", dataFlow, role, (pDefaultDeviceID != NULL) ? pDefaultDeviceID : L"(NULL)");
#endif
-#if defined(MA_SUPPORT_AVX2)
-static MA_INLINE __m256 ma_mix_f32_fast__avx2(__m256 x, __m256 y, __m256 a)
-{
- return _mm256_add_ps(x, _mm256_mul_ps(_mm256_sub_ps(y, x), a));
+
+ /* We only ever use the eConsole role in miniaudio. */
+ if (role != ma_eConsole) {
+ return S_OK;
+ }
+
+ /* We only care about devices with the same data flow and role as the current device. */
+ if ((pThis->pDevice->type == ma_device_type_playback && dataFlow != ma_eRender) ||
+ (pThis->pDevice->type == ma_device_type_capture && dataFlow != ma_eCapture)) {
+ return S_OK;
+ }
+
+ /* Don't do automatic stream routing if we're not allowed. */
+ if ((dataFlow == ma_eRender && pThis->pDevice->wasapi.allowPlaybackAutoStreamRouting == MA_FALSE) ||
+ (dataFlow == ma_eCapture && pThis->pDevice->wasapi.allowCaptureAutoStreamRouting == MA_FALSE)) {
+ return S_OK;
+ }
+
+ /*
+ Not currently supporting automatic stream routing in exclusive mode. This is not working correctly on my machine due to
+ AUDCLNT_E_DEVICE_IN_USE errors when reinitializing the device. If this is a bug in miniaudio, we can try re-enabling this once
+ it's fixed.
+ */
+ if ((dataFlow == ma_eRender && pThis->pDevice->playback.shareMode == ma_share_mode_exclusive) ||
+ (dataFlow == ma_eCapture && pThis->pDevice->capture.shareMode == ma_share_mode_exclusive)) {
+ return S_OK;
+ }
+
+ /*
+ We don't change the device here - we change it in the worker thread to keep synchronization simple. To do this I'm just setting a flag to
+ indicate that the default device has changed. Loopback devices are treated as capture devices so we need to do a bit of a dance to handle
+ that properly.
+ */
+ if (dataFlow == ma_eRender && pThis->pDevice->type != ma_device_type_loopback) {
+ ma_atomic_exchange_32(&pThis->pDevice->wasapi.hasDefaultPlaybackDeviceChanged, MA_TRUE);
+ }
+ if (dataFlow == ma_eCapture || pThis->pDevice->type == ma_device_type_loopback) {
+ ma_atomic_exchange_32(&pThis->pDevice->wasapi.hasDefaultCaptureDeviceChanged, MA_TRUE);
+ }
+
+ (void)pDefaultDeviceID;
+ return S_OK;
}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-static MA_INLINE __m512 ma_mix_f32_fast__avx512(__m512 x, __m512 y, __m512 a)
+
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnPropertyValueChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key)
{
- return _mm512_add_ps(x, _mm512_mul_ps(_mm512_sub_ps(y, x), a));
-}
+#ifdef MA_DEBUG_OUTPUT
+ printf("IMMNotificationClient_OnPropertyValueChanged(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");
#endif
-#if defined(MA_SUPPORT_NEON)
-static MA_INLINE float32x4_t ma_mix_f32_fast__neon(float32x4_t x, float32x4_t y, float32x4_t a)
-{
- return vaddq_f32(x, vmulq_f32(vsubq_f32(y, x), a));
+
+ (void)pThis;
+ (void)pDeviceID;
+ (void)key;
+ return S_OK;
}
+
+static ma_IMMNotificationClientVtbl g_maNotificationCientVtbl = {
+ ma_IMMNotificationClient_QueryInterface,
+ ma_IMMNotificationClient_AddRef,
+ ma_IMMNotificationClient_Release,
+ ma_IMMNotificationClient_OnDeviceStateChanged,
+ ma_IMMNotificationClient_OnDeviceAdded,
+ ma_IMMNotificationClient_OnDeviceRemoved,
+ ma_IMMNotificationClient_OnDefaultDeviceChanged,
+ ma_IMMNotificationClient_OnPropertyValueChanged
+};
+#endif /* MA_WIN32_DESKTOP */
+
+#ifdef MA_WIN32_DESKTOP
+typedef ma_IMMDevice ma_WASAPIDeviceInterface;
+#else
+typedef ma_IUnknown ma_WASAPIDeviceInterface;
#endif
-static MA_INLINE double ma_mix_f64(double x, double y, double a)
+
+static ma_bool32 ma_context_is_device_id_equal__wasapi(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
{
- return x*(1-a) + y*a;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext;
+
+ return memcmp(pID0->wasapi, pID1->wasapi, sizeof(pID0->wasapi)) == 0;
}
-static MA_INLINE double ma_mix_f64_fast(double x, double y, double a)
+
+static void ma_set_device_info_from_WAVEFORMATEX(const WAVEFORMATEX* pWF, ma_device_info* pInfo)
{
- return x + (y - x)*a;
+ MA_ASSERT(pWF != NULL);
+ MA_ASSERT(pInfo != NULL);
+
+ pInfo->formatCount = 1;
+ pInfo->formats[0] = ma_format_from_WAVEFORMATEX(pWF);
+ pInfo->minChannels = pWF->nChannels;
+ pInfo->maxChannels = pWF->nChannels;
+ pInfo->minSampleRate = pWF->nSamplesPerSec;
+ pInfo->maxSampleRate = pWF->nSamplesPerSec;
}
-static MA_INLINE float ma_scale_to_range_f32(float x, float lo, float hi)
+static ma_result ma_context_get_device_info_from_IAudioClient__wasapi(ma_context* pContext, /*ma_IMMDevice**/void* pMMDevice, ma_IAudioClient* pAudioClient, ma_share_mode shareMode, ma_device_info* pInfo)
{
- return lo + x*(hi-lo);
-}
+ MA_ASSERT(pAudioClient != NULL);
+ MA_ASSERT(pInfo != NULL);
+ /* We use a different technique to retrieve the device information depending on whether or not we are using shared or exclusive mode. */
+ if (shareMode == ma_share_mode_shared) {
+ /* Shared Mode. We use GetMixFormat() here. */
+ WAVEFORMATEX* pWF = NULL;
+ HRESULT hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pAudioClient, (WAVEFORMATEX**)&pWF);
+ if (SUCCEEDED(hr)) {
+ ma_set_device_info_from_WAVEFORMATEX(pWF, pInfo);
+ return MA_SUCCESS;
+ } else {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve mix format for device info retrieval.", ma_result_from_HRESULT(hr));
+ }
+ } else {
+        /* Exclusive Mode. We repeatedly call IsFormatSupported() here. This is not currently supported on UWP. */
+#ifdef MA_WIN32_DESKTOP
+ /*
+ The first thing to do is get the format from PKEY_AudioEngine_DeviceFormat. This should give us a channel count we assume is
+ correct which will simplify our searching.
+ */
+ ma_IPropertyStore *pProperties;
+ HRESULT hr = ma_IMMDevice_OpenPropertyStore((ma_IMMDevice*)pMMDevice, STGM_READ, &pProperties);
+ if (SUCCEEDED(hr)) {
+ PROPVARIANT var;
+ ma_PropVariantInit(&var);
-/*
-Random Number Generation
+ hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_AudioEngine_DeviceFormat, &var);
+ if (SUCCEEDED(hr)) {
+ WAVEFORMATEX* pWF = (WAVEFORMATEX*)var.blob.pBlobData;
+ ma_set_device_info_from_WAVEFORMATEX(pWF, pInfo);
-miniaudio uses the LCG random number generation algorithm. This is good enough for audio.
+ /*
+ In my testing, the format returned by PKEY_AudioEngine_DeviceFormat is suitable for exclusive mode so we check this format
+ first. If this fails, fall back to a search.
+ */
+ hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pWF, NULL);
+ ma_PropVariantClear(pContext, &var);
-Note that miniaudio's LCG implementation uses global state which is _not_ thread-local. When this is called across
-multiple threads, results will be unpredictable. However, it won't crash and results will still be random enough
-for miniaudio's purposes.
-*/
-#define MA_LCG_M 2147483647
-#define MA_LCG_A 48271
-#define MA_LCG_C 0
-static ma_int32 g_maLCG;
+ if (FAILED(hr)) {
+ /*
+ The format returned by PKEY_AudioEngine_DeviceFormat is not supported, so fall back to a search. We assume the channel
+ count returned by MA_PKEY_AudioEngine_DeviceFormat is valid and correct. For simplicity we're only returning one format.
+ */
+ ma_uint32 channels = pInfo->minChannels;
+ ma_format formatsToSearch[] = {
+ ma_format_s16,
+ ma_format_s24,
+ /*ma_format_s24_32,*/
+ ma_format_f32,
+ ma_format_s32,
+ ma_format_u8
+ };
+ ma_channel defaultChannelMap[MA_MAX_CHANNELS];
+ WAVEFORMATEXTENSIBLE wf;
+ ma_bool32 found;
+ ma_uint32 iFormat;
-void ma_seed(ma_int32 seed)
-{
- g_maLCG = seed;
-}
+ ma_get_standard_channel_map(ma_standard_channel_map_microsoft, channels, defaultChannelMap);
-ma_int32 ma_rand_s32()
-{
- ma_int32 lcg = g_maLCG;
- ma_int32 r = (MA_LCG_A * lcg + MA_LCG_C) % MA_LCG_M;
- g_maLCG = r;
- return r;
-}
+ MA_ZERO_OBJECT(&wf);
+ wf.Format.cbSize = sizeof(wf);
+ wf.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ wf.Format.nChannels = (WORD)channels;
+ wf.dwChannelMask = ma_channel_map_to_channel_mask__win32(defaultChannelMap, channels);
-double ma_rand_f64()
-{
- return (ma_rand_s32() + 0x80000000) / (double)0x7FFFFFFF;
-}
+ found = MA_FALSE;
+ for (iFormat = 0; iFormat < ma_countof(formatsToSearch); ++iFormat) {
+ ma_format format = formatsToSearch[iFormat];
+ ma_uint32 iSampleRate;
-float ma_rand_f32()
-{
- return (float)ma_rand_f64();
-}
+ wf.Format.wBitsPerSample = (WORD)ma_get_bytes_per_sample(format)*8;
+ wf.Format.nBlockAlign = (wf.Format.nChannels * wf.Format.wBitsPerSample) / 8;
+ wf.Format.nAvgBytesPerSec = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec;
+ wf.Samples.wValidBitsPerSample = /*(format == ma_format_s24_32) ? 24 :*/ wf.Format.wBitsPerSample;
+ if (format == ma_format_f32) {
+ wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ } else {
+ wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;
+ }
-static MA_INLINE float ma_rand_range_f32(float lo, float hi)
-{
- return ma_scale_to_range_f32(ma_rand_f32(), lo, hi);
-}
+ for (iSampleRate = 0; iSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iSampleRate) {
+ wf.Format.nSamplesPerSec = g_maStandardSampleRatePriorities[iSampleRate];
-static MA_INLINE ma_int32 ma_rand_range_s32(ma_int32 lo, ma_int32 hi)
-{
- double x = ma_rand_f64();
- return lo + (ma_int32)(x*(hi-lo));
-}
+ hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, (WAVEFORMATEX*)&wf, NULL);
+ if (SUCCEEDED(hr)) {
+ ma_set_device_info_from_WAVEFORMATEX((WAVEFORMATEX*)&wf, pInfo);
+ found = MA_TRUE;
+ break;
+ }
+ }
+ if (found) {
+ break;
+ }
+ }
-static MA_INLINE float ma_dither_f32_rectangle(float ditherMin, float ditherMax)
-{
- return ma_rand_range_f32(ditherMin, ditherMax);
-}
+ if (!found) {
+ ma_IPropertyStore_Release(pProperties);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to find suitable device format for device info retrieval.", MA_FORMAT_NOT_SUPPORTED);
+ }
+ }
+ } else {
+ ma_IPropertyStore_Release(pProperties);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve device format for device info retrieval.", ma_result_from_HRESULT(hr));
+ }
-static MA_INLINE float ma_dither_f32_triangle(float ditherMin, float ditherMax)
-{
- float a = ma_rand_range_f32(ditherMin, 0);
- float b = ma_rand_range_f32(0, ditherMax);
- return a + b;
-}
+ ma_IPropertyStore_Release(pProperties);
+ } else {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to open property store for device info retrieval.", ma_result_from_HRESULT(hr));
+ }
-static MA_INLINE float ma_dither_f32(ma_dither_mode ditherMode, float ditherMin, float ditherMax)
-{
- if (ditherMode == ma_dither_mode_rectangle) {
- return ma_dither_f32_rectangle(ditherMin, ditherMax);
- }
- if (ditherMode == ma_dither_mode_triangle) {
- return ma_dither_f32_triangle(ditherMin, ditherMax);
+ return MA_SUCCESS;
+#else
+ /* Exclusive mode not fully supported in UWP right now. */
+ return MA_ERROR;
+#endif
}
-
- return 0;
}
-static MA_INLINE ma_int32 ma_dither_s32(ma_dither_mode ditherMode, ma_int32 ditherMin, ma_int32 ditherMax)
+#ifdef MA_WIN32_DESKTOP
+static ma_EDataFlow ma_device_type_to_EDataFlow(ma_device_type deviceType)
{
- if (ditherMode == ma_dither_mode_rectangle) {
- ma_int32 a = ma_rand_range_s32(ditherMin, ditherMax);
- return a;
- }
- if (ditherMode == ma_dither_mode_triangle) {
- ma_int32 a = ma_rand_range_s32(ditherMin, 0);
- ma_int32 b = ma_rand_range_s32(0, ditherMax);
- return a + b;
+ if (deviceType == ma_device_type_playback) {
+ return ma_eRender;
+ } else if (deviceType == ma_device_type_capture) {
+ return ma_eCapture;
+ } else {
+ MA_ASSERT(MA_FALSE);
+ return ma_eRender; /* Should never hit this. */
}
-
- return 0;
}
-
-/*
-Splits a buffer into parts of equal length and of the given alignment. The returned size of the split buffers will be a
-multiple of the alignment. The alignment must be a power of 2.
-*/
-void ma_split_buffer(void* pBuffer, size_t bufferSize, size_t splitCount, size_t alignment, void** ppBuffersOut, size_t* pSplitSizeOut)
+static ma_result ma_context_create_IMMDeviceEnumerator__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator** ppDeviceEnumerator)
{
- ma_uintptr pBufferUnaligned;
- ma_uintptr pBufferAligned;
- size_t unalignedBytes;
- size_t splitSize;
-
- if (pSplitSizeOut) {
- *pSplitSizeOut = 0;
- }
-
- if (pBuffer == NULL || bufferSize == 0 || splitCount == 0) {
- return;
- }
-
- if (alignment == 0) {
- alignment = 1;
- }
+ HRESULT hr;
+ ma_IMMDeviceEnumerator* pDeviceEnumerator;
- pBufferUnaligned = (ma_uintptr)pBuffer;
- pBufferAligned = (pBufferUnaligned + (alignment-1)) & ~(alignment-1);
- unalignedBytes = (size_t)(pBufferAligned - pBufferUnaligned);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(ppDeviceEnumerator != NULL);
- splitSize = 0;
- if (bufferSize >= unalignedBytes) {
- splitSize = (bufferSize - unalignedBytes) / splitCount;
- splitSize = splitSize & ~(alignment-1);
+ hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator);
+ if (FAILED(hr)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.", ma_result_from_HRESULT(hr));
}
- if (ppBuffersOut != NULL) {
- size_t i;
- for (i = 0; i < splitCount; ++i) {
- ppBuffersOut[i] = (ma_uint8*)(pBufferAligned + (splitSize*i));
- }
- }
+ *ppDeviceEnumerator = pDeviceEnumerator;
- if (pSplitSizeOut) {
- *pSplitSizeOut = splitSize;
- }
+ return MA_SUCCESS;
}
+static LPWSTR ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator* pDeviceEnumerator, ma_device_type deviceType)
+{
+ HRESULT hr;
+ ma_IMMDevice* pMMDefaultDevice = NULL;
+ LPWSTR pDefaultDeviceID = NULL;
+ ma_EDataFlow dataFlow;
+ ma_ERole role;
-/******************************************************************************
-
-Atomics
-
-******************************************************************************/
-#if defined(_WIN32) && !defined(__GNUC__)
-#define ma_memory_barrier() MemoryBarrier()
-#define ma_atomic_exchange_32(a, b) InterlockedExchange((LONG*)a, (LONG)b)
-#define ma_atomic_exchange_64(a, b) InterlockedExchange64((LONGLONG*)a, (LONGLONG)b)
-#define ma_atomic_increment_32(a) InterlockedIncrement((LONG*)a)
-#define ma_atomic_decrement_32(a) InterlockedDecrement((LONG*)a)
-#else
-#define ma_memory_barrier() __sync_synchronize()
-#define ma_atomic_exchange_32(a, b) (void)__sync_lock_test_and_set(a, b); __sync_synchronize()
-#define ma_atomic_exchange_64(a, b) (void)__sync_lock_test_and_set(a, b); __sync_synchronize()
-#define ma_atomic_increment_32(a) __sync_add_and_fetch(a, 1)
-#define ma_atomic_decrement_32(a) __sync_sub_and_fetch(a, 1)
-#endif
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pDeviceEnumerator != NULL);
-#ifdef MA_64BIT
-#define ma_atomic_exchange_ptr ma_atomic_exchange_64
-#endif
-#ifdef MA_32BIT
-#define ma_atomic_exchange_ptr ma_atomic_exchange_32
-#endif
+ /* Grab the EDataFlow type from the device type. */
+ dataFlow = ma_device_type_to_EDataFlow(deviceType);
+ /* The role is always eConsole, but we may make this configurable later. */
+ role = ma_eConsole;
-ma_uint32 ma_get_standard_sample_rate_priority_index(ma_uint32 sampleRate) /* Lower = higher priority */
-{
- ma_uint32 i;
- for (i = 0; i < ma_countof(g_maStandardSampleRatePriorities); ++i) {
- if (g_maStandardSampleRatePriorities[i] == sampleRate) {
- return i;
- }
+ hr = ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(pDeviceEnumerator, dataFlow, role, &pMMDefaultDevice);
+ if (FAILED(hr)) {
+ return NULL;
}
- return (ma_uint32)-1;
-}
+ hr = ma_IMMDevice_GetId(pMMDefaultDevice, &pDefaultDeviceID);
-ma_uint64 ma_calculate_frame_count_after_src(ma_uint32 sampleRateOut, ma_uint32 sampleRateIn, ma_uint64 frameCountIn)
-{
- double srcRatio = (double)sampleRateOut / sampleRateIn;
- double frameCountOutF = (ma_int64)frameCountIn * srcRatio; /* Cast to int64 required for VC6. */
- ma_uint64 frameCountOut = (ma_uint64)frameCountOutF;
+ ma_IMMDevice_Release(pMMDefaultDevice);
+ pMMDefaultDevice = NULL;
- /* If the output frame count is fractional, make sure we add an extra frame to ensure there's enough room for that last sample. */
- if ((frameCountOutF - (ma_int64)frameCountOut) > 0.0) {
- frameCountOut += 1;
+ if (FAILED(hr)) {
+ return NULL;
}
- return frameCountOut;
+ return pDefaultDeviceID;
}
-
-/************************************************************************************************************************************************************
-*************************************************************************************************************************************************************
-
-DEVICE I/O
-==========
-
-*************************************************************************************************************************************************************
-************************************************************************************************************************************************************/
-#ifndef MA_NO_DEVICE_IO
-/*
-Unfortunately using runtime linking for pthreads causes problems. This has occurred for me when testing on FreeBSD. When
-using runtime linking, deadlocks can occur (for me it happens when loading data from fread()). It turns out that doing
-compile-time linking fixes this. I'm not sure why this happens, but the safest way I can think of to fix this is to simply
-disable runtime linking by default. To enable runtime linking, #define this before the implementation of this file. I am
-not officially supporting this, but I'm leaving it here in case it's useful for somebody, somewhere.
-*/
-/*#define MA_USE_RUNTIME_LINKING_FOR_PTHREAD*/
-
-/* Disable run-time linking on certain backends. */
-#ifndef MA_NO_RUNTIME_LINKING
- #if defined(MA_ANDROID) || defined(MA_EMSCRIPTEN)
- #define MA_NO_RUNTIME_LINKING
- #endif
-#endif
-
-/*
-Check if we have the necessary development packages for each backend at the top so we can use this to determine whether or not
-certain unused functions and variables can be excluded from the build to avoid warnings.
-*/
-#ifdef MA_ENABLE_WASAPI
- #define MA_HAS_WASAPI /* Every compiler should support WASAPI */
-#endif
-#ifdef MA_ENABLE_DSOUND
- #define MA_HAS_DSOUND /* Every compiler should support DirectSound. */
-#endif
-#ifdef MA_ENABLE_WINMM
- #define MA_HAS_WINMM /* Every compiler I'm aware of supports WinMM. */
-#endif
-#ifdef MA_ENABLE_ALSA
- #define MA_HAS_ALSA
- #ifdef MA_NO_RUNTIME_LINKING
- #ifdef __has_include
- #if !__has_include()
- #undef MA_HAS_ALSA
- #endif
- #endif
- #endif
-#endif
-#ifdef MA_ENABLE_PULSEAUDIO
- #define MA_HAS_PULSEAUDIO
- #ifdef MA_NO_RUNTIME_LINKING
- #ifdef __has_include
- #if !__has_include()
- #undef MA_HAS_PULSEAUDIO
- #endif
- #endif
- #endif
-#endif
-#ifdef MA_ENABLE_JACK
- #define MA_HAS_JACK
- #ifdef MA_NO_RUNTIME_LINKING
- #ifdef __has_include
- #if !__has_include()
- #undef MA_HAS_JACK
- #endif
- #endif
- #endif
-#endif
-#ifdef MA_ENABLE_COREAUDIO
- #define MA_HAS_COREAUDIO
-#endif
-#ifdef MA_ENABLE_SNDIO
- #define MA_HAS_SNDIO
-#endif
-#ifdef MA_ENABLE_AUDIO4
- #define MA_HAS_AUDIO4
-#endif
-#ifdef MA_ENABLE_OSS
- #define MA_HAS_OSS
-#endif
-#ifdef MA_ENABLE_AAUDIO
- #define MA_HAS_AAUDIO
-#endif
-#ifdef MA_ENABLE_OPENSL
- #define MA_HAS_OPENSL
-#endif
-#ifdef MA_ENABLE_WEBAUDIO
- #define MA_HAS_WEBAUDIO
-#endif
-#ifdef MA_ENABLE_NULL
- #define MA_HAS_NULL /* Everything supports the null backend. */
-#endif
-
-const char* ma_get_backend_name(ma_backend backend)
+static LPWSTR ma_context_get_default_device_id__wasapi(ma_context* pContext, ma_device_type deviceType) /* Free the returned pointer with ma_CoTaskMemFree() */
{
- switch (backend)
- {
- case ma_backend_wasapi: return "WASAPI";
- case ma_backend_dsound: return "DirectSound";
- case ma_backend_winmm: return "WinMM";
- case ma_backend_coreaudio: return "Core Audio";
- case ma_backend_sndio: return "sndio";
- case ma_backend_audio4: return "audio(4)";
- case ma_backend_oss: return "OSS";
- case ma_backend_pulseaudio: return "PulseAudio";
- case ma_backend_alsa: return "ALSA";
- case ma_backend_jack: return "JACK";
- case ma_backend_aaudio: return "AAudio";
- case ma_backend_opensl: return "OpenSL|ES";
- case ma_backend_webaudio: return "Web Audio";
- case ma_backend_null: return "Null";
- default: return "Unknown";
+ ma_result result;
+ ma_IMMDeviceEnumerator* pDeviceEnumerator;
+ LPWSTR pDefaultDeviceID = NULL;
+
+ MA_ASSERT(pContext != NULL);
+
+ result = ma_context_create_IMMDeviceEnumerator__wasapi(pContext, &pDeviceEnumerator);
+ if (result != MA_SUCCESS) {
+ return NULL;
}
+
+ pDefaultDeviceID = ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(pContext, pDeviceEnumerator, deviceType);
+
+ ma_IMMDeviceEnumerator_Release(pDeviceEnumerator);
+ return pDefaultDeviceID;
}
+static ma_result ma_context_get_MMDevice__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IMMDevice** ppMMDevice)
+{
+ ma_IMMDeviceEnumerator* pDeviceEnumerator;
+ HRESULT hr;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(ppMMDevice != NULL);
-#ifdef MA_WIN32
- #define MA_THREADCALL WINAPI
- typedef unsigned long ma_thread_result;
-#else
- #define MA_THREADCALL
- typedef void* ma_thread_result;
-#endif
-typedef ma_thread_result (MA_THREADCALL * ma_thread_entry_proc)(void* pData);
+ hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator);
+ if (FAILED(hr)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create IMMDeviceEnumerator.", ma_result_from_HRESULT(hr));
+ }
-#ifdef MA_WIN32
-typedef HRESULT (WINAPI * MA_PFN_CoInitializeEx)(LPVOID pvReserved, DWORD dwCoInit);
-typedef void (WINAPI * MA_PFN_CoUninitialize)();
-typedef HRESULT (WINAPI * MA_PFN_CoCreateInstance)(REFCLSID rclsid, LPUNKNOWN pUnkOuter, DWORD dwClsContext, REFIID riid, LPVOID *ppv);
-typedef void (WINAPI * MA_PFN_CoTaskMemFree)(LPVOID pv);
-typedef HRESULT (WINAPI * MA_PFN_PropVariantClear)(PROPVARIANT *pvar);
-typedef int (WINAPI * MA_PFN_StringFromGUID2)(const GUID* const rguid, LPOLESTR lpsz, int cchMax);
+ if (pDeviceID == NULL) {
+ hr = ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(pDeviceEnumerator, (deviceType == ma_device_type_capture) ? ma_eCapture : ma_eRender, ma_eConsole, ppMMDevice);
+ } else {
+ hr = ma_IMMDeviceEnumerator_GetDevice(pDeviceEnumerator, pDeviceID->wasapi, ppMMDevice);
+ }
-typedef HWND (WINAPI * MA_PFN_GetForegroundWindow)();
-typedef HWND (WINAPI * MA_PFN_GetDesktopWindow)();
+ ma_IMMDeviceEnumerator_Release(pDeviceEnumerator);
+ if (FAILED(hr)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve IMMDevice.", ma_result_from_HRESULT(hr));
+ }
-/* Microsoft documents these APIs as returning LSTATUS, but the Win32 API shipping with some compilers do not define it. It's just a LONG. */
-typedef LONG (WINAPI * MA_PFN_RegOpenKeyExA)(HKEY hKey, LPCSTR lpSubKey, DWORD ulOptions, REGSAM samDesired, PHKEY phkResult);
-typedef LONG (WINAPI * MA_PFN_RegCloseKey)(HKEY hKey);
-typedef LONG (WINAPI * MA_PFN_RegQueryValueExA)(HKEY hKey, LPCSTR lpValueName, LPDWORD lpReserved, LPDWORD lpType, LPBYTE lpData, LPDWORD lpcbData);
-#endif
+ return MA_SUCCESS;
+}
+static ma_result ma_context_get_device_info_from_MMDevice__wasapi(ma_context* pContext, ma_IMMDevice* pMMDevice, ma_share_mode shareMode, LPWSTR pDefaultDeviceID, ma_bool32 onlySimpleInfo, ma_device_info* pInfo)
+{
+ LPWSTR pDeviceID;
+ HRESULT hr;
-#define MA_STATE_UNINITIALIZED 0
-#define MA_STATE_STOPPED 1 /* The device's default state after initialization. */
-#define MA_STATE_STARTED 2 /* The worker thread is in it's main loop waiting for the driver to request or deliver audio data. */
-#define MA_STATE_STARTING 3 /* Transitioning from a stopped state to started. */
-#define MA_STATE_STOPPING 4 /* Transitioning from a started state to stopped. */
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pMMDevice != NULL);
+ MA_ASSERT(pInfo != NULL);
-#define MA_DEFAULT_PLAYBACK_DEVICE_NAME "Default Playback Device"
-#define MA_DEFAULT_CAPTURE_DEVICE_NAME "Default Capture Device"
+ /* ID. */
+ hr = ma_IMMDevice_GetId(pMMDevice, &pDeviceID);
+ if (SUCCEEDED(hr)) {
+ size_t idlen = wcslen(pDeviceID);
+ if (idlen+1 > ma_countof(pInfo->id.wasapi)) {
+ ma_CoTaskMemFree(pContext, pDeviceID);
+            MA_ASSERT(MA_FALSE);  /* NOTE: If this is triggered, please report it. It means the format of the ID must have changed and is too long to fit in our fixed-size buffer. */
+ return MA_ERROR;
+ }
+ MA_COPY_MEMORY(pInfo->id.wasapi, pDeviceID, idlen * sizeof(wchar_t));
+ pInfo->id.wasapi[idlen] = '\0';
+
+ if (pDefaultDeviceID != NULL) {
+ if (wcscmp(pDeviceID, pDefaultDeviceID) == 0) {
+ /* It's a default device. */
+ pInfo->_private.isDefault = MA_TRUE;
+ }
+ }
+
+ ma_CoTaskMemFree(pContext, pDeviceID);
+ }
-const char* ma_log_level_to_string(ma_uint32 logLevel)
-{
- switch (logLevel)
{
- case MA_LOG_LEVEL_VERBOSE: return "";
- case MA_LOG_LEVEL_INFO: return "INFO";
- case MA_LOG_LEVEL_WARNING: return "WARNING";
- case MA_LOG_LEVEL_ERROR: return "ERROR";
- default: return "ERROR";
+ ma_IPropertyStore *pProperties;
+ hr = ma_IMMDevice_OpenPropertyStore(pMMDevice, STGM_READ, &pProperties);
+ if (SUCCEEDED(hr)) {
+ PROPVARIANT var;
+
+ /* Description / Friendly Name */
+ ma_PropVariantInit(&var);
+ hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &var);
+ if (SUCCEEDED(hr)) {
+ WideCharToMultiByte(CP_UTF8, 0, var.pwszVal, -1, pInfo->name, sizeof(pInfo->name), 0, FALSE);
+ ma_PropVariantClear(pContext, &var);
+ }
+
+ ma_IPropertyStore_Release(pProperties);
+ }
+ }
+
+ /* Format */
+ if (!onlySimpleInfo) {
+ ma_IAudioClient* pAudioClient;
+ hr = ma_IMMDevice_Activate(pMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient);
+ if (SUCCEEDED(hr)) {
+ ma_result result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, pMMDevice, pAudioClient, shareMode, pInfo);
+
+ ma_IAudioClient_Release(pAudioClient);
+ return result;
+ } else {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate audio client for device info retrieval.", ma_result_from_HRESULT(hr));
+ }
}
+
+ return MA_SUCCESS;
}
-/* Posts a log message. */
-void ma_log(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message)
+static ma_result ma_context_enumerate_devices_by_type__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator* pDeviceEnumerator, ma_device_type deviceType, ma_enum_devices_callback_proc callback, void* pUserData)
{
- if (pContext == NULL) {
- return;
- }
+ ma_result result = MA_SUCCESS;
+ UINT deviceCount;
+ HRESULT hr;
+ ma_uint32 iDevice;
+ LPWSTR pDefaultDeviceID = NULL;
+ ma_IMMDeviceCollection* pDeviceCollection = NULL;
-#if defined(MA_LOG_LEVEL)
- if (logLevel <= MA_LOG_LEVEL) {
- ma_log_proc onLog;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
- #if defined(MA_DEBUG_OUTPUT)
- if (logLevel <= MA_LOG_LEVEL) {
- printf("%s: %s\n", ma_log_level_to_string(logLevel), message);
+ /* Grab the default device. We use this to know whether or not to flag the returned device info as being the default. */
+ pDefaultDeviceID = ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(pContext, pDeviceEnumerator, deviceType);
+
+ /* We need to enumerate the devices which returns a device collection. */
+ hr = ma_IMMDeviceEnumerator_EnumAudioEndpoints(pDeviceEnumerator, ma_device_type_to_EDataFlow(deviceType), MA_MM_DEVICE_STATE_ACTIVE, &pDeviceCollection);
+ if (SUCCEEDED(hr)) {
+ hr = ma_IMMDeviceCollection_GetCount(pDeviceCollection, &deviceCount);
+ if (FAILED(hr)) {
+ result = ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to get device count.", ma_result_from_HRESULT(hr));
+ goto done;
}
- #endif
-
- onLog = pContext->logCallback;
- if (onLog) {
- onLog(pContext, pDevice, logLevel, message);
+
+ for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
+ ma_device_info deviceInfo;
+ ma_IMMDevice* pMMDevice;
+
+ MA_ZERO_OBJECT(&deviceInfo);
+
+ hr = ma_IMMDeviceCollection_Item(pDeviceCollection, iDevice, &pMMDevice);
+ if (SUCCEEDED(hr)) {
+ result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, ma_share_mode_shared, pDefaultDeviceID, MA_TRUE, &deviceInfo); /* MA_TRUE = onlySimpleInfo. */
+
+ ma_IMMDevice_Release(pMMDevice);
+ if (result == MA_SUCCESS) {
+ ma_bool32 cbResult = callback(pContext, deviceType, &deviceInfo, pUserData);
+ if (cbResult == MA_FALSE) {
+ break;
+ }
+ }
+ }
}
}
-#endif
-}
-/* Posts an log message. Throw a breakpoint in here if you're needing to debug. The return value is always "resultCode". */
-ma_result ma_context_post_error(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message, ma_result resultCode)
-{
- /* Derive the context from the device if necessary. */
- if (pContext == NULL) {
- if (pDevice != NULL) {
- pContext = pDevice->pContext;
- }
+done:
+ if (pDefaultDeviceID != NULL) {
+ ma_CoTaskMemFree(pContext, pDefaultDeviceID);
+ pDefaultDeviceID = NULL;
}
- ma_log(pContext, pDevice, logLevel, message);
- return resultCode;
+ if (pDeviceCollection != NULL) {
+ ma_IMMDeviceCollection_Release(pDeviceCollection);
+ pDeviceCollection = NULL;
+ }
+
+ return result;
}
-ma_result ma_post_error(ma_device* pDevice, ma_uint32 logLevel, const char* message, ma_result resultCode)
+static ma_result ma_context_get_IAudioClient_Desktop__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IMMDevice** ppMMDevice)
{
- return ma_context_post_error(NULL, pDevice, logLevel, message, resultCode);
-}
+ ma_result result;
+ HRESULT hr;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(ppAudioClient != NULL);
+ MA_ASSERT(ppMMDevice != NULL);
-/*******************************************************************************
+ result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, ppMMDevice);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-Timing
+ hr = ma_IMMDevice_Activate(*ppMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)ppAudioClient);
+ if (FAILED(hr)) {
+ return ma_result_from_HRESULT(hr);
+ }
-*******************************************************************************/
-#ifdef MA_WIN32
-LARGE_INTEGER g_ma_TimerFrequency = {{0}};
-void ma_timer_init(ma_timer* pTimer)
+ return MA_SUCCESS;
+}
+#else
+static ma_result ma_context_get_IAudioClient_UWP__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IUnknown** ppActivatedInterface)
{
- LARGE_INTEGER counter;
+ ma_IActivateAudioInterfaceAsyncOperation *pAsyncOp = NULL;
+ ma_completion_handler_uwp completionHandler;
+ IID iid;
+ LPOLESTR iidStr;
+ HRESULT hr;
+ ma_result result;
+ HRESULT activateResult;
+ ma_IUnknown* pActivatedInterface;
- if (g_ma_TimerFrequency.QuadPart == 0) {
- QueryPerformanceFrequency(&g_ma_TimerFrequency);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(ppAudioClient != NULL);
+
+ if (pDeviceID != NULL) {
+ MA_COPY_MEMORY(&iid, pDeviceID->wasapi, sizeof(iid));
+ } else {
+ if (deviceType == ma_device_type_playback) {
+ iid = MA_IID_DEVINTERFACE_AUDIO_RENDER;
+ } else {
+ iid = MA_IID_DEVINTERFACE_AUDIO_CAPTURE;
+ }
}
- QueryPerformanceCounter(&counter);
- pTimer->counter = counter.QuadPart;
-}
+#if defined(__cplusplus)
+ hr = StringFromIID(iid, &iidStr);
+#else
+ hr = StringFromIID(&iid, &iidStr);
+#endif
+ if (FAILED(hr)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to convert device IID to string for ActivateAudioInterfaceAsync(). Out of memory.", ma_result_from_HRESULT(hr));
+ }
-double ma_timer_get_time_in_seconds(ma_timer* pTimer)
-{
- LARGE_INTEGER counter;
- if (!QueryPerformanceCounter(&counter)) {
- return 0;
+ result = ma_completion_handler_uwp_init(&completionHandler);
+ if (result != MA_SUCCESS) {
+ ma_CoTaskMemFree(pContext, iidStr);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for waiting for ActivateAudioInterfaceAsync().", result);
}
- return (double)(counter.QuadPart - pTimer->counter) / g_ma_TimerFrequency.QuadPart;
-}
-#elif defined(MA_APPLE) && (__MAC_OS_X_VERSION_MIN_REQUIRED < 101200)
-ma_uint64 g_ma_TimerFrequency = 0;
-void ma_timer_init(ma_timer* pTimer)
-{
- mach_timebase_info_data_t baseTime;
- mach_timebase_info(&baseTime);
- g_ma_TimerFrequency = (baseTime.denom * 1e9) / baseTime.numer;
+#if defined(__cplusplus)
+ hr = ActivateAudioInterfaceAsync(iidStr, MA_IID_IAudioClient, NULL, (IActivateAudioInterfaceCompletionHandler*)&completionHandler, (IActivateAudioInterfaceAsyncOperation**)&pAsyncOp);
+#else
+ hr = ActivateAudioInterfaceAsync(iidStr, &MA_IID_IAudioClient, NULL, (IActivateAudioInterfaceCompletionHandler*)&completionHandler, (IActivateAudioInterfaceAsyncOperation**)&pAsyncOp);
+#endif
+ if (FAILED(hr)) {
+ ma_completion_handler_uwp_uninit(&completionHandler);
+ ma_CoTaskMemFree(pContext, iidStr);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] ActivateAudioInterfaceAsync() failed.", ma_result_from_HRESULT(hr));
+ }
- pTimer->counter = mach_absolute_time();
-}
+ ma_CoTaskMemFree(pContext, iidStr);
-double ma_timer_get_time_in_seconds(ma_timer* pTimer)
-{
- ma_uint64 newTimeCounter = mach_absolute_time();
- ma_uint64 oldTimeCounter = pTimer->counter;
+ /* Wait for the async operation to finish. */
+ ma_completion_handler_uwp_wait(&completionHandler);
+ ma_completion_handler_uwp_uninit(&completionHandler);
- return (newTimeCounter - oldTimeCounter) / g_ma_TimerFrequency;
-}
-#elif defined(MA_EMSCRIPTEN)
-void ma_timer_init(ma_timer* pTimer)
-{
- pTimer->counterD = emscripten_get_now();
+ hr = ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(pAsyncOp, &activateResult, &pActivatedInterface);
+ ma_IActivateAudioInterfaceAsyncOperation_Release(pAsyncOp);
+
+ if (FAILED(hr) || FAILED(activateResult)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate device.", FAILED(hr) ? ma_result_from_HRESULT(hr) : ma_result_from_HRESULT(activateResult));
+ }
+
+ /* Here is where we grab the IAudioClient interface. */
+ hr = ma_IUnknown_QueryInterface(pActivatedInterface, &MA_IID_IAudioClient, (void**)ppAudioClient);
+ if (FAILED(hr)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to query IAudioClient interface.", ma_result_from_HRESULT(hr));
+ }
+
+ if (ppActivatedInterface) {
+ *ppActivatedInterface = pActivatedInterface;
+ } else {
+ ma_IUnknown_Release(pActivatedInterface);
+ }
+
+ return MA_SUCCESS;
}
+#endif
-double ma_timer_get_time_in_seconds(ma_timer* pTimer)
+static ma_result ma_context_get_IAudioClient__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_WASAPIDeviceInterface** ppDeviceInterface)
{
- return (emscripten_get_now() - pTimer->counterD) / 1000; /* Emscripten is in milliseconds. */
-}
-#else
-#if _POSIX_C_SOURCE >= 199309L
-#if defined(CLOCK_MONOTONIC)
- #define MA_CLOCK_ID CLOCK_MONOTONIC
+#ifdef MA_WIN32_DESKTOP
+ return ma_context_get_IAudioClient_Desktop__wasapi(pContext, deviceType, pDeviceID, ppAudioClient, ppDeviceInterface);
#else
- #define MA_CLOCK_ID CLOCK_REALTIME
+ return ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, ppAudioClient, ppDeviceInterface);
#endif
-
-void ma_timer_init(ma_timer* pTimer)
-{
- struct timespec newTime;
- clock_gettime(MA_CLOCK_ID, &newTime);
-
- pTimer->counter = (newTime.tv_sec * 1000000000) + newTime.tv_nsec;
}
-double ma_timer_get_time_in_seconds(ma_timer* pTimer)
-{
- ma_uint64 newTimeCounter;
- ma_uint64 oldTimeCounter;
-
- struct timespec newTime;
- clock_gettime(MA_CLOCK_ID, &newTime);
-
- newTimeCounter = (newTime.tv_sec * 1000000000) + newTime.tv_nsec;
- oldTimeCounter = pTimer->counter;
- return (newTimeCounter - oldTimeCounter) / 1000000000.0;
-}
-#else
-void ma_timer_init(ma_timer* pTimer)
+static ma_result ma_context_enumerate_devices__wasapi(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
{
- struct timeval newTime;
- gettimeofday(&newTime, NULL);
+ /* Different enumeration for desktop and UWP. */
+#ifdef MA_WIN32_DESKTOP
+ /* Desktop */
+ HRESULT hr;
+ ma_IMMDeviceEnumerator* pDeviceEnumerator;
- pTimer->counter = (newTime.tv_sec * 1000000) + newTime.tv_usec;
-}
+ hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator);
+ if (FAILED(hr)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.", ma_result_from_HRESULT(hr));
+ }
-double ma_timer_get_time_in_seconds(ma_timer* pTimer)
-{
- ma_uint64 newTimeCounter;
- ma_uint64 oldTimeCounter;
+ ma_context_enumerate_devices_by_type__wasapi(pContext, pDeviceEnumerator, ma_device_type_playback, callback, pUserData);
+ ma_context_enumerate_devices_by_type__wasapi(pContext, pDeviceEnumerator, ma_device_type_capture, callback, pUserData);
- struct timeval newTime;
- gettimeofday(&newTime, NULL);
+ ma_IMMDeviceEnumerator_Release(pDeviceEnumerator);
+#else
+ /*
+ UWP
+
+ The MMDevice API is only supported on desktop applications. For now, while I'm still figuring out how to properly enumerate
+ over devices without using MMDevice, I'm restricting devices to defaults.
+
+ Hint: DeviceInformation::FindAllAsync() with DeviceClass.AudioCapture/AudioRender. https://blogs.windows.com/buildingapps/2014/05/15/real-time-audio-in-windows-store-and-windows-phone-apps/
+ */
+ if (callback) {
+ ma_bool32 cbResult = MA_TRUE;
- newTimeCounter = (newTime.tv_sec * 1000000) + newTime.tv_usec;
- oldTimeCounter = pTimer->counter;
+ /* Playback. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ deviceInfo._private.isDefault = MA_TRUE;
+ cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ }
- return (newTimeCounter - oldTimeCounter) / 1000000.0;
-}
-#endif
+ /* Capture. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ deviceInfo._private.isDefault = MA_TRUE;
+ cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+ }
+ }
#endif
+ return MA_SUCCESS;
+}
-/*******************************************************************************
+static ma_result ma_context_get_device_info__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+{
+#ifdef MA_WIN32_DESKTOP
+ ma_result result;
+ ma_IMMDevice* pMMDevice = NULL;
+ LPWSTR pDefaultDeviceID = NULL;
+
+ result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, &pMMDevice);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-Dynamic Linking
+ /* We need the default device ID so we can set the isDefault flag in the device info. */
+ pDefaultDeviceID = ma_context_get_default_device_id__wasapi(pContext, deviceType);
-*******************************************************************************/
-ma_handle ma_dlopen(ma_context* pContext, const char* filename)
-{
- ma_handle handle;
+ result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, shareMode, pDefaultDeviceID, MA_FALSE, pDeviceInfo); /* MA_FALSE = !onlySimpleInfo. */
-#if MA_LOG_LEVEL >= MA_LOG_LEVEL_VERBOSE
- if (pContext != NULL) {
- char message[256];
- ma_strappend(message, sizeof(message), "Loading library: ", filename);
- ma_log(pContext, NULL, MA_LOG_LEVEL_VERBOSE, message);
+ if (pDefaultDeviceID != NULL) {
+ ma_CoTaskMemFree(pContext, pDefaultDeviceID);
+ pDefaultDeviceID = NULL;
}
-#endif
-#ifdef _WIN32
-#ifdef MA_WIN32_DESKTOP
- handle = (ma_handle)LoadLibraryA(filename);
+ ma_IMMDevice_Release(pMMDevice);
+
+ return result;
#else
- /* *sigh* It appears there is no ANSI version of LoadPackagedLibrary()... */
- WCHAR filenameW[4096];
- if (MultiByteToWideChar(CP_UTF8, 0, filename, -1, filenameW, sizeof(filenameW)) == 0) {
- handle = NULL;
+ ma_IAudioClient* pAudioClient;
+ ma_result result;
+
+ /* UWP currently only uses default devices. */
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
} else {
- handle = (ma_handle)LoadPackagedLibrary(filenameW, 0);
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
}
-#endif
-#else
- handle = (ma_handle)dlopen(filename, RTLD_NOW);
-#endif
- /*
- I'm not considering failure to load a library an error nor a warning because seamlessly falling through to a lower-priority
- backend is a deliberate design choice. Instead I'm logging it as an informational message.
- */
-#if MA_LOG_LEVEL >= MA_LOG_LEVEL_INFO
- if (handle == NULL) {
- char message[256];
- ma_strappend(message, sizeof(message), "Failed to load library: ", filename);
- ma_log(pContext, NULL, MA_LOG_LEVEL_INFO, message);
+ /* Not currently supporting exclusive mode on UWP. */
+ if (shareMode == ma_share_mode_exclusive) {
+ return MA_ERROR;
}
-#endif
- (void)pContext; /* It's possible for pContext to be unused. */
- return handle;
-}
+ result = ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, &pAudioClient, NULL);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-void ma_dlclose(ma_context* pContext, ma_handle handle)
-{
-#ifdef _WIN32
- FreeLibrary((HMODULE)handle);
-#else
- dlclose((void*)handle);
-#endif
+ result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, NULL, pAudioClient, shareMode, pDeviceInfo);
- (void)pContext;
+ pDeviceInfo->_private.isDefault = MA_TRUE; /* UWP only supports default devices. */
+
+ ma_IAudioClient_Release(pAudioClient);
+ return result;
+#endif
}
-ma_proc ma_dlsym(ma_context* pContext, ma_handle handle, const char* symbol)
+static void ma_device_uninit__wasapi(ma_device* pDevice)
{
- ma_proc proc;
+ MA_ASSERT(pDevice != NULL);
-#if MA_LOG_LEVEL >= MA_LOG_LEVEL_VERBOSE
- if (pContext != NULL) {
- char message[256];
- ma_strappend(message, sizeof(message), "Loading symbol: ", symbol);
- ma_log(pContext, NULL, MA_LOG_LEVEL_VERBOSE, message);
+#ifdef MA_WIN32_DESKTOP
+ if (pDevice->wasapi.pDeviceEnumerator) {
+ ((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator)->lpVtbl->UnregisterEndpointNotificationCallback((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator, &pDevice->wasapi.notificationClient);
+ ma_IMMDeviceEnumerator_Release((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator);
}
#endif
-#ifdef _WIN32
- proc = (ma_proc)GetProcAddress((HMODULE)handle, symbol);
-#else
-#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
- #pragma GCC diagnostic push
- #pragma GCC diagnostic ignored "-Wpedantic"
-#endif
- proc = (ma_proc)dlsym((void*)handle, symbol);
-#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
- #pragma GCC diagnostic pop
-#endif
-#endif
+ if (pDevice->wasapi.pRenderClient) {
+ ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient);
+ }
+ if (pDevice->wasapi.pCaptureClient) {
+ ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+ }
-#if MA_LOG_LEVEL >= MA_LOG_LEVEL_WARNING
- if (handle == NULL) {
- char message[256];
- ma_strappend(message, sizeof(message), "Failed to load symbol: ", symbol);
- ma_log(pContext, NULL, MA_LOG_LEVEL_WARNING, message);
+ if (pDevice->wasapi.pAudioClientPlayback) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+ }
+ if (pDevice->wasapi.pAudioClientCapture) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
}
-#endif
- (void)pContext; /* It's possible for pContext to be unused. */
- return proc;
+ if (pDevice->wasapi.hEventPlayback) {
+ CloseHandle(pDevice->wasapi.hEventPlayback);
+ }
+ if (pDevice->wasapi.hEventCapture) {
+ CloseHandle(pDevice->wasapi.hEventCapture);
+ }
}
-/*******************************************************************************
+typedef struct
+{
+ /* Input. */
+ ma_format formatIn;
+ ma_uint32 channelsIn;
+ ma_uint32 sampleRateIn;
+ ma_channel channelMapIn[MA_MAX_CHANNELS];
+ ma_uint32 periodSizeInFramesIn;
+ ma_uint32 periodSizeInMillisecondsIn;
+ ma_uint32 periodsIn;
+ ma_bool32 usingDefaultFormat;
+ ma_bool32 usingDefaultChannels;
+ ma_bool32 usingDefaultSampleRate;
+ ma_bool32 usingDefaultChannelMap;
+ ma_share_mode shareMode;
+ ma_bool32 noAutoConvertSRC;
+ ma_bool32 noDefaultQualitySRC;
+ ma_bool32 noHardwareOffloading;
-Threading
+ /* Output. */
+ ma_IAudioClient* pAudioClient;
+ ma_IAudioRenderClient* pRenderClient;
+ ma_IAudioCaptureClient* pCaptureClient;
+ ma_format formatOut;
+ ma_uint32 channelsOut;
+ ma_uint32 sampleRateOut;
+ ma_channel channelMapOut[MA_MAX_CHANNELS];
+ ma_uint32 periodSizeInFramesOut;
+ ma_uint32 periodsOut;
+ ma_bool32 usingAudioClient3;
+ char deviceName[256];
+} ma_device_init_internal_data__wasapi;
-*******************************************************************************/
-#ifdef MA_WIN32
-int ma_thread_priority_to_win32(ma_thread_priority priority)
+static ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__wasapi* pData)
{
- switch (priority) {
- case ma_thread_priority_idle: return THREAD_PRIORITY_IDLE;
- case ma_thread_priority_lowest: return THREAD_PRIORITY_LOWEST;
- case ma_thread_priority_low: return THREAD_PRIORITY_BELOW_NORMAL;
- case ma_thread_priority_normal: return THREAD_PRIORITY_NORMAL;
- case ma_thread_priority_high: return THREAD_PRIORITY_ABOVE_NORMAL;
- case ma_thread_priority_highest: return THREAD_PRIORITY_HIGHEST;
- case ma_thread_priority_realtime: return THREAD_PRIORITY_TIME_CRITICAL;
- default: return THREAD_PRIORITY_NORMAL;
- }
-}
+ HRESULT hr;
+ ma_result result = MA_SUCCESS;
+ const char* errorMsg = "";
+ MA_AUDCLNT_SHAREMODE shareMode = MA_AUDCLNT_SHAREMODE_SHARED;
+ DWORD streamFlags = 0;
+ MA_REFERENCE_TIME periodDurationInMicroseconds;
+ ma_bool32 wasInitializedUsingIAudioClient3 = MA_FALSE;
+ WAVEFORMATEXTENSIBLE wf = {0};
+ ma_WASAPIDeviceInterface* pDeviceInterface = NULL;
+ ma_IAudioClient2* pAudioClient2;
+ ma_uint32 nativeSampleRate;
-ma_result ma_thread_create__win32(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData)
-{
- pThread->win32.hThread = CreateThread(NULL, 0, entryProc, pData, 0, NULL);
- if (pThread->win32.hThread == NULL) {
- return MA_FAILED_TO_CREATE_THREAD;
- }
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pData != NULL);
- SetThreadPriority((HANDLE)pThread->win32.hThread, ma_thread_priority_to_win32(pContext->threadPriority));
+ /* This function is only used to initialize one device type: either playback, capture or loopback. Never full-duplex. */
+ if (deviceType == ma_device_type_duplex) {
+ return MA_INVALID_ARGS;
+ }
- return MA_SUCCESS;
-}
+ pData->pAudioClient = NULL;
+ pData->pRenderClient = NULL;
+ pData->pCaptureClient = NULL;
-void ma_thread_wait__win32(ma_thread* pThread)
-{
- WaitForSingleObject(pThread->win32.hThread, INFINITE);
-}
+ streamFlags = MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+ if (!pData->noAutoConvertSRC && !pData->usingDefaultSampleRate && pData->shareMode != ma_share_mode_exclusive) { /* <-- Exclusive streams must use the native sample rate. */
+ streamFlags |= MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM;
+ }
+ if (!pData->noDefaultQualitySRC && !pData->usingDefaultSampleRate && (streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) != 0) {
+ streamFlags |= MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
+ }
+ if (deviceType == ma_device_type_loopback) {
+ streamFlags |= MA_AUDCLNT_STREAMFLAGS_LOOPBACK;
+ }
-void ma_sleep__win32(ma_uint32 milliseconds)
-{
- Sleep((DWORD)milliseconds);
-}
+ result = ma_context_get_IAudioClient__wasapi(pContext, deviceType, pDeviceID, &pData->pAudioClient, &pDeviceInterface);
+ if (result != MA_SUCCESS) {
+ goto done;
+ }
-ma_result ma_mutex_init__win32(ma_context* pContext, ma_mutex* pMutex)
-{
- (void)pContext;
+ /* Try enabling hardware offloading. */
+ if (!pData->noHardwareOffloading) {
+ hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient2, (void**)&pAudioClient2);
+ if (SUCCEEDED(hr)) {
+ BOOL isHardwareOffloadingSupported = 0;
+ hr = ma_IAudioClient2_IsOffloadCapable(pAudioClient2, MA_AudioCategory_Other, &isHardwareOffloadingSupported);
+ if (SUCCEEDED(hr) && isHardwareOffloadingSupported) {
+ ma_AudioClientProperties clientProperties;
+ MA_ZERO_OBJECT(&clientProperties);
+ clientProperties.cbSize = sizeof(clientProperties);
+ clientProperties.bIsOffload = 1;
+ clientProperties.eCategory = MA_AudioCategory_Other;
+ ma_IAudioClient2_SetClientProperties(pAudioClient2, &clientProperties);
+ }
- pMutex->win32.hMutex = CreateEventA(NULL, FALSE, TRUE, NULL);
- if (pMutex->win32.hMutex == NULL) {
- return MA_FAILED_TO_CREATE_MUTEX;
+ pAudioClient2->lpVtbl->Release(pAudioClient2);
+ }
}
- return MA_SUCCESS;
-}
+ /* Here is where we try to determine the best format to use with the device. If the client is wanting exclusive mode, first try finding the best format for that. If this fails, fall back to shared mode. */
+ result = MA_FORMAT_NOT_SUPPORTED;
+ if (pData->shareMode == ma_share_mode_exclusive) {
+ #ifdef MA_WIN32_DESKTOP
+ /* In exclusive mode on desktop we always use the backend's native format. */
+ ma_IPropertyStore* pStore = NULL;
+ hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pStore);
+ if (SUCCEEDED(hr)) {
+ PROPVARIANT prop;
+ ma_PropVariantInit(&prop);
+ hr = ma_IPropertyStore_GetValue(pStore, &MA_PKEY_AudioEngine_DeviceFormat, &prop);
+ if (SUCCEEDED(hr)) {
+ WAVEFORMATEX* pActualFormat = (WAVEFORMATEX*)prop.blob.pBlobData;
+ hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pData->pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pActualFormat, NULL);
+ if (SUCCEEDED(hr)) {
+ MA_COPY_MEMORY(&wf, pActualFormat, sizeof(WAVEFORMATEXTENSIBLE));
+ }
-void ma_mutex_uninit__win32(ma_mutex* pMutex)
-{
- CloseHandle(pMutex->win32.hMutex);
-}
+ ma_PropVariantClear(pContext, &prop);
+ }
-void ma_mutex_lock__win32(ma_mutex* pMutex)
-{
- WaitForSingleObject(pMutex->win32.hMutex, INFINITE);
-}
+ ma_IPropertyStore_Release(pStore);
+ }
+ #else
+ /*
+ I do not know how to query the device's native format on UWP so for now I'm just disabling support for
+ exclusive mode. The alternative is to enumerate over different formats and check IsFormatSupported()
+ until you find one that works.
+
+ TODO: Add support for exclusive mode to UWP.
+ */
+ hr = S_FALSE;
+ #endif
-void ma_mutex_unlock__win32(ma_mutex* pMutex)
-{
- SetEvent(pMutex->win32.hMutex);
-}
+ if (hr == S_OK) {
+ shareMode = MA_AUDCLNT_SHAREMODE_EXCLUSIVE;
+ result = MA_SUCCESS;
+ } else {
+ result = MA_SHARE_MODE_NOT_SUPPORTED;
+ }
+ } else {
+ /* In shared mode we are always using the format reported by the operating system. */
+ WAVEFORMATEXTENSIBLE* pNativeFormat = NULL;
+ hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pData->pAudioClient, (WAVEFORMATEX**)&pNativeFormat);
+ if (hr != S_OK) {
+ result = MA_FORMAT_NOT_SUPPORTED;
+ } else {
+ MA_COPY_MEMORY(&wf, pNativeFormat, sizeof(wf));
+ result = MA_SUCCESS;
+ }
+ ma_CoTaskMemFree(pContext, pNativeFormat);
-ma_result ma_event_init__win32(ma_context* pContext, ma_event* pEvent)
-{
- (void)pContext;
+ shareMode = MA_AUDCLNT_SHAREMODE_SHARED;
+ }
- pEvent->win32.hEvent = CreateEventW(NULL, FALSE, FALSE, NULL);
- if (pEvent->win32.hEvent == NULL) {
- return MA_FAILED_TO_CREATE_EVENT;
+ /* Return an error if we still haven't found a format. */
+ if (result != MA_SUCCESS) {
+ errorMsg = "[WASAPI] Failed to find best device mix format.";
+ goto done;
}
- return MA_SUCCESS;
-}
+ /*
+ Override the native sample rate with the one requested by the caller, but only if we're not using the default sample rate. We'll use
+ WASAPI to perform the sample rate conversion.
+ */
+ nativeSampleRate = wf.Format.nSamplesPerSec;
+ if (streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) {
+ wf.Format.nSamplesPerSec = pData->sampleRateIn;
+ wf.Format.nAvgBytesPerSec = wf.Format.nSamplesPerSec * wf.Format.nBlockAlign;
+ }
-void ma_event_uninit__win32(ma_event* pEvent)
-{
- CloseHandle(pEvent->win32.hEvent);
-}
+ pData->formatOut = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)&wf);
+ pData->channelsOut = wf.Format.nChannels;
+ pData->sampleRateOut = wf.Format.nSamplesPerSec;
-ma_bool32 ma_event_wait__win32(ma_event* pEvent)
-{
- return WaitForSingleObject(pEvent->win32.hEvent, INFINITE) == WAIT_OBJECT_0;
-}
+ /* Get the internal channel map based on the channel mask. */
+ ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pData->channelsOut, pData->channelMapOut);
-ma_bool32 ma_event_signal__win32(ma_event* pEvent)
-{
- return SetEvent(pEvent->win32.hEvent);
-}
-#endif
+ /* Period size. */
+ pData->periodsOut = pData->periodsIn;
+ pData->periodSizeInFramesOut = pData->periodSizeInFramesIn;
+ if (pData->periodSizeInFramesOut == 0) {
+ pData->periodSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->periodSizeInMillisecondsIn, wf.Format.nSamplesPerSec);
+ }
+ periodDurationInMicroseconds = ((ma_uint64)pData->periodSizeInFramesOut * 1000 * 1000) / wf.Format.nSamplesPerSec;
-#ifdef MA_POSIX
-#include
-typedef int (* ma_pthread_create_proc)(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
-typedef int (* ma_pthread_join_proc)(pthread_t thread, void **retval);
-typedef int (* ma_pthread_mutex_init_proc)(pthread_mutex_t *__mutex, const pthread_mutexattr_t *__mutexattr);
-typedef int (* ma_pthread_mutex_destroy_proc)(pthread_mutex_t *__mutex);
-typedef int (* ma_pthread_mutex_lock_proc)(pthread_mutex_t *__mutex);
-typedef int (* ma_pthread_mutex_unlock_proc)(pthread_mutex_t *__mutex);
-typedef int (* ma_pthread_cond_init_proc)(pthread_cond_t *__restrict __cond, const pthread_condattr_t *__restrict __cond_attr);
-typedef int (* ma_pthread_cond_destroy_proc)(pthread_cond_t *__cond);
-typedef int (* ma_pthread_cond_signal_proc)(pthread_cond_t *__cond);
-typedef int (* ma_pthread_cond_wait_proc)(pthread_cond_t *__restrict __cond, pthread_mutex_t *__restrict __mutex);
-typedef int (* ma_pthread_attr_init_proc)(pthread_attr_t *attr);
-typedef int (* ma_pthread_attr_destroy_proc)(pthread_attr_t *attr);
-typedef int (* ma_pthread_attr_setschedpolicy_proc)(pthread_attr_t *attr, int policy);
-typedef int (* ma_pthread_attr_getschedparam_proc)(const pthread_attr_t *attr, struct sched_param *param);
-typedef int (* ma_pthread_attr_setschedparam_proc)(pthread_attr_t *attr, const struct sched_param *param);
+ /* Slightly different initialization for shared and exclusive modes. We try exclusive mode first, and if it fails, fall back to shared mode. */
+ if (shareMode == MA_AUDCLNT_SHAREMODE_EXCLUSIVE) {
+ MA_REFERENCE_TIME bufferDuration = periodDurationInMicroseconds * 10;
-ma_result ma_thread_create__posix(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData)
-{
- int result;
- pthread_attr_t* pAttr = NULL;
+ /*
+ If the periodicity is too small, Initialize() will fail with AUDCLNT_E_INVALID_DEVICE_PERIOD. In this case we should just keep increasing
+ it and trying it again.
+ */
+ hr = E_FAIL;
+ for (;;) {
+ hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, bufferDuration, (WAVEFORMATEX*)&wf, NULL);
+ if (hr == MA_AUDCLNT_E_INVALID_DEVICE_PERIOD) {
+ if (bufferDuration > 500*10000) {
+ break;
+ } else {
+ if (bufferDuration == 0) { /* <-- Just a sanity check to prevent an infinite loop. Should never happen, but it makes me feel better. */
+ break;
+ }
-#if !defined(__EMSCRIPTEN__)
- /* Try setting the thread priority. It's not critical if anything fails here. */
- pthread_attr_t attr;
- if (((ma_pthread_attr_init_proc)pContext->posix.pthread_attr_init)(&attr) == 0) {
- int scheduler = -1;
- if (pContext->threadPriority == ma_thread_priority_idle) {
-#ifdef SCHED_IDLE
- if (((ma_pthread_attr_setschedpolicy_proc)pContext->posix.pthread_attr_setschedpolicy)(&attr, SCHED_IDLE) == 0) {
- scheduler = SCHED_IDLE;
- }
-#endif
- } else if (pContext->threadPriority == ma_thread_priority_realtime) {
-#ifdef SCHED_FIFO
- if (((ma_pthread_attr_setschedpolicy_proc)pContext->posix.pthread_attr_setschedpolicy)(&attr, SCHED_FIFO) == 0) {
- scheduler = SCHED_FIFO;
+ bufferDuration = bufferDuration * 2;
+ continue;
+ }
+ } else {
+ break;
}
-#endif
-#ifdef MA_LINUX
- } else {
- scheduler = sched_getscheduler(0);
-#endif
}
+
+ if (hr == MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
+ ma_uint32 bufferSizeInFrames;
+ hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &bufferSizeInFrames);
+ if (SUCCEEDED(hr)) {
+ bufferDuration = (MA_REFERENCE_TIME)((10000.0 * 1000 / wf.Format.nSamplesPerSec * bufferSizeInFrames) + 0.5);
- if (scheduler != -1) {
- int priorityMin = sched_get_priority_min(scheduler);
- int priorityMax = sched_get_priority_max(scheduler);
- int priorityStep = (priorityMax - priorityMin) / 7; /* 7 = number of priorities supported by miniaudio. */
+ /* Unfortunately we need to release and re-acquire the audio client according to MSDN. Seems silly - why not just call IAudioClient_Initialize() again?! */
+ ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient);
- struct sched_param sched;
- if (((ma_pthread_attr_getschedparam_proc)pContext->posix.pthread_attr_getschedparam)(&attr, &sched) == 0) {
- if (pContext->threadPriority == ma_thread_priority_idle) {
- sched.sched_priority = priorityMin;
- } else if (pContext->threadPriority == ma_thread_priority_realtime) {
- sched.sched_priority = priorityMax;
- } else {
- sched.sched_priority += ((int)pContext->threadPriority + 5) * priorityStep; /* +5 because the lowest priority is -5. */
- if (sched.sched_priority < priorityMin) {
- sched.sched_priority = priorityMin;
- }
- if (sched.sched_priority > priorityMax) {
- sched.sched_priority = priorityMax;
- }
- }
+ #ifdef MA_WIN32_DESKTOP
+ hr = ma_IMMDevice_Activate(pDeviceInterface, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pData->pAudioClient);
+ #else
+ hr = ma_IUnknown_QueryInterface(pDeviceInterface, &MA_IID_IAudioClient, (void**)&pData->pAudioClient);
+ #endif
- if (((ma_pthread_attr_setschedparam_proc)pContext->posix.pthread_attr_setschedparam)(&attr, &sched) == 0) {
- pAttr = &attr;
+ if (SUCCEEDED(hr)) {
+ hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, bufferDuration, (WAVEFORMATEX*)&wf, NULL);
}
}
}
- ((ma_pthread_attr_destroy_proc)pContext->posix.pthread_attr_destroy)(&attr);
- }
-#endif
-
- result = ((ma_pthread_create_proc)pContext->posix.pthread_create)(&pThread->posix.thread, pAttr, entryProc, pData);
- if (result != 0) {
- return MA_FAILED_TO_CREATE_THREAD;
+ if (FAILED(hr)) {
+ /* Failed to initialize in exclusive mode. Don't fall back to shared mode - instead tell the client about it. They can reinitialize in shared mode if they want. */
+ if (hr == E_ACCESSDENIED) {
+ errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Access denied.", result = MA_ACCESS_DENIED;
+ } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) {
+ errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Device in use.", result = MA_BUSY;
+ } else {
+ errorMsg = "[WASAPI] Failed to initialize device in exclusive mode."; result = ma_result_from_HRESULT(hr);
+ }
+ goto done;
+ }
}
- return MA_SUCCESS;
-}
-
-void ma_thread_wait__posix(ma_thread* pThread)
-{
- ((ma_pthread_join_proc)pThread->pContext->posix.pthread_join)(pThread->posix.thread, NULL);
-}
-
-void ma_sleep__posix(ma_uint32 milliseconds)
-{
-#ifdef MA_EMSCRIPTEN
- (void)milliseconds;
- ma_assert(MA_FALSE); /* The Emscripten build should never sleep. */
-#else
- #if _POSIX_C_SOURCE >= 199309L
- struct timespec ts;
- ts.tv_sec = milliseconds / 1000000;
- ts.tv_nsec = milliseconds % 1000000 * 1000000;
- nanosleep(&ts, NULL);
- #else
- struct timeval tv;
- tv.tv_sec = milliseconds / 1000;
- tv.tv_usec = milliseconds % 1000 * 1000;
- select(0, NULL, NULL, NULL, &tv);
+ if (shareMode == MA_AUDCLNT_SHAREMODE_SHARED) {
+ /*
+ Low latency shared mode via IAudioClient3.
+
+ NOTE
+ ====
+ Contrary to the documentation on MSDN (https://docs.microsoft.com/en-us/windows/win32/api/audioclient/nf-audioclient-iaudioclient3-initializesharedaudiostream), the
+ use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM and AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY with IAudioClient3_InitializeSharedAudioStream() absolutely does not work. Using
+ any of these flags will result in HRESULT code 0x88890021. The other problem is that calling IAudioClient3_GetSharedModeEnginePeriod() with a sample rate different to
+ that returned by IAudioClient_GetMixFormat() also results in an error. I'm therefore disabling low-latency shared mode with AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM.
+ */
+#ifndef MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE
+ if ((streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) == 0 || nativeSampleRate == wf.Format.nSamplesPerSec) {
+ ma_IAudioClient3* pAudioClient3 = NULL;
+ hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient3, (void**)&pAudioClient3);
+ if (SUCCEEDED(hr)) {
+ UINT32 defaultPeriodInFrames;
+ UINT32 fundamentalPeriodInFrames;
+ UINT32 minPeriodInFrames;
+ UINT32 maxPeriodInFrames;
+ hr = ma_IAudioClient3_GetSharedModeEnginePeriod(pAudioClient3, (WAVEFORMATEX*)&wf, &defaultPeriodInFrames, &fundamentalPeriodInFrames, &minPeriodInFrames, &maxPeriodInFrames);
+ if (SUCCEEDED(hr)) {
+ UINT32 desiredPeriodInFrames = pData->periodSizeInFramesOut;
+ UINT32 actualPeriodInFrames = desiredPeriodInFrames;
+
+ /* Make sure the period size is a multiple of fundamentalPeriodInFrames. */
+ actualPeriodInFrames = actualPeriodInFrames / fundamentalPeriodInFrames;
+ actualPeriodInFrames = actualPeriodInFrames * fundamentalPeriodInFrames;
+
+ /* The period needs to be clamped between minPeriodInFrames and maxPeriodInFrames. */
+ actualPeriodInFrames = ma_clamp(actualPeriodInFrames, minPeriodInFrames, maxPeriodInFrames);
+
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[WASAPI] Trying IAudioClient3_InitializeSharedAudioStream(actualPeriodInFrames=%d)\n", actualPeriodInFrames);
+ printf(" defaultPeriodInFrames=%d\n", defaultPeriodInFrames);
+ printf(" fundamentalPeriodInFrames=%d\n", fundamentalPeriodInFrames);
+ printf(" minPeriodInFrames=%d\n", minPeriodInFrames);
+ printf(" maxPeriodInFrames=%d\n", maxPeriodInFrames);
+ #endif
+
+                    /* If the client requested a largish buffer then we don't actually want to use low latency shared mode because it forces small buffers. */
+ if (actualPeriodInFrames >= desiredPeriodInFrames) {
+ /*
+ MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY must not be in the stream flags. If either of these are specified,
+ IAudioClient3_InitializeSharedAudioStream() will fail.
+ */
+ hr = ma_IAudioClient3_InitializeSharedAudioStream(pAudioClient3, streamFlags & ~(MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY), actualPeriodInFrames, (WAVEFORMATEX*)&wf, NULL);
+ if (SUCCEEDED(hr)) {
+ wasInitializedUsingIAudioClient3 = MA_TRUE;
+ pData->periodSizeInFramesOut = actualPeriodInFrames;
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[WASAPI] Using IAudioClient3\n");
+ printf(" periodSizeInFramesOut=%d\n", pData->periodSizeInFramesOut);
+ #endif
+ } else {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[WASAPI] IAudioClient3_InitializeSharedAudioStream failed. Falling back to IAudioClient.\n");
+ #endif
+ }
+ } else {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[WASAPI] Not using IAudioClient3 because the desired period size is larger than the maximum supported by IAudioClient3.\n");
+ #endif
+ }
+ } else {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[WASAPI] IAudioClient3_GetSharedModeEnginePeriod failed. Falling back to IAudioClient.\n");
+ #endif
+ }
+
+ ma_IAudioClient3_Release(pAudioClient3);
+ pAudioClient3 = NULL;
+ }
+ }
+#else
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[WASAPI] Not using IAudioClient3 because MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE is enabled.\n");
#endif
#endif
-}
+ /* If we don't have an IAudioClient3 then we need to use the normal initialization routine. */
+ if (!wasInitializedUsingIAudioClient3) {
+ MA_REFERENCE_TIME bufferDuration = periodDurationInMicroseconds * pData->periodsOut * 10; /* <-- Multiply by 10 for microseconds to 100-nanoseconds. */
+ hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, 0, (WAVEFORMATEX*)&wf, NULL);
+ if (FAILED(hr)) {
+ if (hr == E_ACCESSDENIED) {
+ errorMsg = "[WASAPI] Failed to initialize device. Access denied.", result = MA_ACCESS_DENIED;
+ } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) {
+ errorMsg = "[WASAPI] Failed to initialize device. Device in use.", result = MA_BUSY;
+ } else {
+ errorMsg = "[WASAPI] Failed to initialize device.", result = ma_result_from_HRESULT(hr);
+ }
-ma_result ma_mutex_init__posix(ma_context* pContext, ma_mutex* pMutex)
-{
- int result = ((ma_pthread_mutex_init_proc)pContext->posix.pthread_mutex_init)(&pMutex->posix.mutex, NULL);
- if (result != 0) {
- return MA_FAILED_TO_CREATE_MUTEX;
+ goto done;
+ }
+ }
}
- return MA_SUCCESS;
-}
-
-void ma_mutex_uninit__posix(ma_mutex* pMutex)
-{
- ((ma_pthread_mutex_destroy_proc)pMutex->pContext->posix.pthread_mutex_destroy)(&pMutex->posix.mutex);
-}
-
-void ma_mutex_lock__posix(ma_mutex* pMutex)
-{
- ((ma_pthread_mutex_lock_proc)pMutex->pContext->posix.pthread_mutex_lock)(&pMutex->posix.mutex);
-}
+ if (!wasInitializedUsingIAudioClient3) {
+ ma_uint32 bufferSizeInFrames;
+ hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &bufferSizeInFrames);
+ if (FAILED(hr)) {
+ errorMsg = "[WASAPI] Failed to get audio client's actual buffer size.", result = ma_result_from_HRESULT(hr);
+ goto done;
+ }
-void ma_mutex_unlock__posix(ma_mutex* pMutex)
-{
- ((ma_pthread_mutex_unlock_proc)pMutex->pContext->posix.pthread_mutex_unlock)(&pMutex->posix.mutex);
-}
+ pData->periodSizeInFramesOut = bufferSizeInFrames / pData->periodsOut;
+ }
+ pData->usingAudioClient3 = wasInitializedUsingIAudioClient3;
-ma_result ma_event_init__posix(ma_context* pContext, ma_event* pEvent)
-{
- if (((ma_pthread_mutex_init_proc)pContext->posix.pthread_mutex_init)(&pEvent->posix.mutex, NULL) != 0) {
- return MA_FAILED_TO_CREATE_MUTEX;
+ if (deviceType == ma_device_type_playback) {
+ hr = ma_IAudioClient_GetService((ma_IAudioClient*)pData->pAudioClient, &MA_IID_IAudioRenderClient, (void**)&pData->pRenderClient);
+ } else {
+ hr = ma_IAudioClient_GetService((ma_IAudioClient*)pData->pAudioClient, &MA_IID_IAudioCaptureClient, (void**)&pData->pCaptureClient);
}
- if (((ma_pthread_cond_init_proc)pContext->posix.pthread_cond_init)(&pEvent->posix.condition, NULL) != 0) {
- return MA_FAILED_TO_CREATE_EVENT;
+ if (FAILED(hr)) {
+ errorMsg = "[WASAPI] Failed to get audio client service.", result = ma_result_from_HRESULT(hr);
+ goto done;
}
- pEvent->posix.value = 0;
- return MA_SUCCESS;
-}
-
-void ma_event_uninit__posix(ma_event* pEvent)
-{
- ((ma_pthread_cond_destroy_proc)pEvent->pContext->posix.pthread_cond_destroy)(&pEvent->posix.condition);
- ((ma_pthread_mutex_destroy_proc)pEvent->pContext->posix.pthread_mutex_destroy)(&pEvent->posix.mutex);
-}
-ma_bool32 ma_event_wait__posix(ma_event* pEvent)
-{
- ((ma_pthread_mutex_lock_proc)pEvent->pContext->posix.pthread_mutex_lock)(&pEvent->posix.mutex);
+ /* Grab the name of the device. */
+#ifdef MA_WIN32_DESKTOP
{
- while (pEvent->posix.value == 0) {
- ((ma_pthread_cond_wait_proc)pEvent->pContext->posix.pthread_cond_wait)(&pEvent->posix.condition, &pEvent->posix.mutex);
- }
- pEvent->posix.value = 0; /* Auto-reset. */
- }
- ((ma_pthread_mutex_unlock_proc)pEvent->pContext->posix.pthread_mutex_unlock)(&pEvent->posix.mutex);
-
- return MA_TRUE;
-}
+ ma_IPropertyStore *pProperties;
+ hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pProperties);
+ if (SUCCEEDED(hr)) {
+ PROPVARIANT varName;
+ ma_PropVariantInit(&varName);
+ hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &varName);
+ if (SUCCEEDED(hr)) {
+ WideCharToMultiByte(CP_UTF8, 0, varName.pwszVal, -1, pData->deviceName, sizeof(pData->deviceName), 0, FALSE);
+ ma_PropVariantClear(pContext, &varName);
+ }
-ma_bool32 ma_event_signal__posix(ma_event* pEvent)
-{
- ((ma_pthread_mutex_lock_proc)pEvent->pContext->posix.pthread_mutex_lock)(&pEvent->posix.mutex);
- {
- pEvent->posix.value = 1;
- ((ma_pthread_cond_signal_proc)pEvent->pContext->posix.pthread_cond_signal)(&pEvent->posix.condition);
+ ma_IPropertyStore_Release(pProperties);
+ }
}
- ((ma_pthread_mutex_unlock_proc)pEvent->pContext->posix.pthread_mutex_unlock)(&pEvent->posix.mutex);
-
- return MA_TRUE;
-}
#endif
-ma_result ma_thread_create(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData)
-{
- if (pContext == NULL || pThread == NULL || entryProc == NULL) {
- return MA_FALSE;
+done:
+ /* Clean up. */
+#ifdef MA_WIN32_DESKTOP
+ if (pDeviceInterface != NULL) {
+ ma_IMMDevice_Release(pDeviceInterface);
+ }
+#else
+ if (pDeviceInterface != NULL) {
+ ma_IUnknown_Release(pDeviceInterface);
}
+#endif
- pThread->pContext = pContext;
+ if (result != MA_SUCCESS) {
+ if (pData->pRenderClient) {
+ ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pData->pRenderClient);
+ pData->pRenderClient = NULL;
+ }
+ if (pData->pCaptureClient) {
+ ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pData->pCaptureClient);
+ pData->pCaptureClient = NULL;
+ }
+ if (pData->pAudioClient) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient);
+ pData->pAudioClient = NULL;
+ }
-#ifdef MA_WIN32
- return ma_thread_create__win32(pContext, pThread, entryProc, pData);
-#endif
-#ifdef MA_POSIX
- return ma_thread_create__posix(pContext, pThread, entryProc, pData);
-#endif
-}
+ if (errorMsg != NULL && errorMsg[0] != '\0') {
+ ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, errorMsg, result);
+ }
-void ma_thread_wait(ma_thread* pThread)
-{
- if (pThread == NULL) {
- return;
+ return result;
+ } else {
+ return MA_SUCCESS;
}
-
-#ifdef MA_WIN32
- ma_thread_wait__win32(pThread);
-#endif
-#ifdef MA_POSIX
- ma_thread_wait__posix(pThread);
-#endif
}
-void ma_sleep(ma_uint32 milliseconds)
+static ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType)
{
-#ifdef MA_WIN32
- ma_sleep__win32(milliseconds);
-#endif
-#ifdef MA_POSIX
- ma_sleep__posix(milliseconds);
-#endif
-}
+ ma_device_init_internal_data__wasapi data;
+ ma_result result;
+ MA_ASSERT(pDevice != NULL);
-ma_result ma_mutex_init(ma_context* pContext, ma_mutex* pMutex)
-{
- if (pContext == NULL || pMutex == NULL) {
+ /* We only re-initialize the playback or capture device. Never a full-duplex device. */
+ if (deviceType == ma_device_type_duplex) {
return MA_INVALID_ARGS;
}
- pMutex->pContext = pContext;
-
-#ifdef MA_WIN32
- return ma_mutex_init__win32(pContext, pMutex);
-#endif
-#ifdef MA_POSIX
- return ma_mutex_init__posix(pContext, pMutex);
-#endif
-}
-
-void ma_mutex_uninit(ma_mutex* pMutex)
-{
- if (pMutex == NULL || pMutex->pContext == NULL) {
- return;
+ if (deviceType == ma_device_type_playback) {
+ data.formatIn = pDevice->playback.format;
+ data.channelsIn = pDevice->playback.channels;
+ MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap));
+ data.shareMode = pDevice->playback.shareMode;
+ data.usingDefaultFormat = pDevice->playback.usingDefaultFormat;
+ data.usingDefaultChannels = pDevice->playback.usingDefaultChannels;
+ data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap;
+ } else {
+ data.formatIn = pDevice->capture.format;
+ data.channelsIn = pDevice->capture.channels;
+ MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap));
+ data.shareMode = pDevice->capture.shareMode;
+ data.usingDefaultFormat = pDevice->capture.usingDefaultFormat;
+ data.usingDefaultChannels = pDevice->capture.usingDefaultChannels;
+ data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap;
}
-
-#ifdef MA_WIN32
- ma_mutex_uninit__win32(pMutex);
-#endif
-#ifdef MA_POSIX
- ma_mutex_uninit__posix(pMutex);
-#endif
-}
-
-void ma_mutex_lock(ma_mutex* pMutex)
-{
- if (pMutex == NULL || pMutex->pContext == NULL) {
- return;
+
+ data.sampleRateIn = pDevice->sampleRate;
+ data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
+ data.periodSizeInFramesIn = pDevice->wasapi.originalPeriodSizeInFrames;
+ data.periodSizeInMillisecondsIn = pDevice->wasapi.originalPeriodSizeInMilliseconds;
+ data.periodsIn = pDevice->wasapi.originalPeriods;
+ data.noAutoConvertSRC = pDevice->wasapi.noAutoConvertSRC;
+ data.noDefaultQualitySRC = pDevice->wasapi.noDefaultQualitySRC;
+ data.noHardwareOffloading = pDevice->wasapi.noHardwareOffloading;
+ result = ma_device_init_internal__wasapi(pDevice->pContext, deviceType, NULL, &data);
+ if (result != MA_SUCCESS) {
+ return result;
}
-#ifdef MA_WIN32
- ma_mutex_lock__win32(pMutex);
-#endif
-#ifdef MA_POSIX
- ma_mutex_lock__posix(pMutex);
-#endif
-}
-
-void ma_mutex_unlock(ma_mutex* pMutex)
-{
- if (pMutex == NULL || pMutex->pContext == NULL) {
- return;
-}
-
-#ifdef MA_WIN32
- ma_mutex_unlock__win32(pMutex);
-#endif
-#ifdef MA_POSIX
- ma_mutex_unlock__posix(pMutex);
-#endif
-}
-
+ /* At this point we have some new objects ready to go. We need to uninitialize the previous ones and then set the new ones. */
+ if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) {
+ if (pDevice->wasapi.pCaptureClient) {
+ ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+ pDevice->wasapi.pCaptureClient = NULL;
+ }
-ma_result ma_event_init(ma_context* pContext, ma_event* pEvent)
-{
- if (pContext == NULL || pEvent == NULL) {
- return MA_FALSE;
- }
+ if (pDevice->wasapi.pAudioClientCapture) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+ pDevice->wasapi.pAudioClientCapture = NULL;
+ }
- pEvent->pContext = pContext;
+ pDevice->wasapi.pAudioClientCapture = data.pAudioClient;
+ pDevice->wasapi.pCaptureClient = data.pCaptureClient;
-#ifdef MA_WIN32
- return ma_event_init__win32(pContext, pEvent);
-#endif
-#ifdef MA_POSIX
- return ma_event_init__posix(pContext, pEvent);
-#endif
-}
+ pDevice->capture.internalFormat = data.formatOut;
+ pDevice->capture.internalChannels = data.channelsOut;
+ pDevice->capture.internalSampleRate = data.sampleRateOut;
+ MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+ pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+ pDevice->capture.internalPeriods = data.periodsOut;
+ ma_strcpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), data.deviceName);
-void ma_event_uninit(ma_event* pEvent)
-{
- if (pEvent == NULL || pEvent->pContext == NULL) {
- return;
- }
+ ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, pDevice->wasapi.hEventCapture);
-#ifdef MA_WIN32
- ma_event_uninit__win32(pEvent);
-#endif
-#ifdef MA_POSIX
- ma_event_uninit__posix(pEvent);
-#endif
-}
+ pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut;
+ ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualPeriodSizeInFramesCapture);
-ma_bool32 ma_event_wait(ma_event* pEvent)
-{
- if (pEvent == NULL || pEvent->pContext == NULL) {
- return MA_FALSE;
+ /* The device may be in a started state. If so we need to immediately restart it. */
+ if (pDevice->wasapi.isStartedCapture) {
+ HRESULT hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal capture device after reinitialization.", ma_result_from_HRESULT(hr));
+ }
+ }
}
-#ifdef MA_WIN32
- return ma_event_wait__win32(pEvent);
-#endif
-#ifdef MA_POSIX
- return ma_event_wait__posix(pEvent);
-#endif
-}
+ if (deviceType == ma_device_type_playback) {
+ if (pDevice->wasapi.pRenderClient) {
+ ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient);
+ pDevice->wasapi.pRenderClient = NULL;
+ }
-ma_bool32 ma_event_signal(ma_event* pEvent)
-{
- if (pEvent == NULL || pEvent->pContext == NULL) {
- return MA_FALSE;
- }
+ if (pDevice->wasapi.pAudioClientPlayback) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+ pDevice->wasapi.pAudioClientPlayback = NULL;
+ }
-#ifdef MA_WIN32
- return ma_event_signal__win32(pEvent);
-#endif
-#ifdef MA_POSIX
- return ma_event_signal__posix(pEvent);
-#endif
-}
+ pDevice->wasapi.pAudioClientPlayback = data.pAudioClient;
+ pDevice->wasapi.pRenderClient = data.pRenderClient;
+ pDevice->playback.internalFormat = data.formatOut;
+ pDevice->playback.internalChannels = data.channelsOut;
+ pDevice->playback.internalSampleRate = data.sampleRateOut;
+ MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+ pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+ pDevice->playback.internalPeriods = data.periodsOut;
+ ma_strcpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), data.deviceName);
-ma_uint32 ma_get_best_sample_rate_within_range(ma_uint32 sampleRateMin, ma_uint32 sampleRateMax)
-{
- /* Normalize the range in case we were given something stupid. */
- if (sampleRateMin < MA_MIN_SAMPLE_RATE) {
- sampleRateMin = MA_MIN_SAMPLE_RATE;
- }
- if (sampleRateMax > MA_MAX_SAMPLE_RATE) {
- sampleRateMax = MA_MAX_SAMPLE_RATE;
- }
- if (sampleRateMin > sampleRateMax) {
- sampleRateMin = sampleRateMax;
- }
+ ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, pDevice->wasapi.hEventPlayback);
- if (sampleRateMin == sampleRateMax) {
- return sampleRateMax;
- } else {
- size_t iStandardRate;
- for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) {
- ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate];
- if (standardRate >= sampleRateMin && standardRate <= sampleRateMax) {
- return standardRate;
+ pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut;
+ ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualPeriodSizeInFramesPlayback);
+
+ /* The device may be in a started state. If so we need to immediately restart it. */
+ if (pDevice->wasapi.isStartedPlayback) {
+ HRESULT hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device after reinitialization.", ma_result_from_HRESULT(hr));
}
}
}
- /* Should never get here. */
- ma_assert(MA_FALSE);
- return 0;
+ return MA_SUCCESS;
}
-ma_uint32 ma_get_closest_standard_sample_rate(ma_uint32 sampleRateIn)
+static ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
{
- ma_uint32 closestRate = 0;
- ma_uint32 closestDiff = 0xFFFFFFFF;
- size_t iStandardRate;
+ ma_result result = MA_SUCCESS;
- for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) {
- ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate];
- ma_uint32 diff;
+ (void)pContext;
- if (sampleRateIn > standardRate) {
- diff = sampleRateIn - standardRate;
- } else {
- diff = standardRate - sampleRateIn;
- }
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pDevice != NULL);
- if (diff == 0) {
- return standardRate; /* The input sample rate is a standard rate. */
- }
+ MA_ZERO_OBJECT(&pDevice->wasapi);
+ pDevice->wasapi.originalPeriodSizeInFrames = pConfig->periodSizeInFrames;
+ pDevice->wasapi.originalPeriodSizeInMilliseconds = pConfig->periodSizeInMilliseconds;
+ pDevice->wasapi.originalPeriods = pConfig->periods;
+ pDevice->wasapi.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC;
+ pDevice->wasapi.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC;
+ pDevice->wasapi.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading;
- if (closestDiff > diff) {
- closestDiff = diff;
- closestRate = standardRate;
- }
+ /* Exclusive mode is not allowed with loopback. */
+ if (pConfig->deviceType == ma_device_type_loopback && pConfig->playback.shareMode == ma_share_mode_exclusive) {
+ return MA_INVALID_DEVICE_CONFIG;
}
- return closestRate;
-}
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) {
+ ma_device_init_internal_data__wasapi data;
+ data.formatIn = pConfig->capture.format;
+ data.channelsIn = pConfig->capture.channels;
+ data.sampleRateIn = pConfig->sampleRate;
+ MA_COPY_MEMORY(data.channelMapIn, pConfig->capture.channelMap, sizeof(pConfig->capture.channelMap));
+ data.usingDefaultFormat = pDevice->capture.usingDefaultFormat;
+ data.usingDefaultChannels = pDevice->capture.usingDefaultChannels;
+ data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
+ data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap;
+ data.shareMode = pConfig->capture.shareMode;
+ data.periodSizeInFramesIn = pConfig->periodSizeInFrames;
+ data.periodSizeInMillisecondsIn = pConfig->periodSizeInMilliseconds;
+ data.periodsIn = pConfig->periods;
+ data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC;
+ data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC;
+ data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading;
+ result = ma_device_init_internal__wasapi(pDevice->pContext, (pConfig->deviceType == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture, pConfig->capture.pDeviceID, &data);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-ma_uint32 ma_scale_buffer_size(ma_uint32 baseBufferSize, float scale)
-{
- return ma_max(1, (ma_uint32)(baseBufferSize*scale));
-}
+ pDevice->wasapi.pAudioClientCapture = data.pAudioClient;
+ pDevice->wasapi.pCaptureClient = data.pCaptureClient;
-ma_uint32 ma_calculate_buffer_size_in_milliseconds_from_frames(ma_uint32 bufferSizeInFrames, ma_uint32 sampleRate)
-{
- return bufferSizeInFrames / (sampleRate/1000);
-}
+ pDevice->capture.internalFormat = data.formatOut;
+ pDevice->capture.internalChannels = data.channelsOut;
+ pDevice->capture.internalSampleRate = data.sampleRateOut;
+ MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+ pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+ pDevice->capture.internalPeriods = data.periodsOut;
+ ma_strcpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), data.deviceName);
-ma_uint32 ma_calculate_buffer_size_in_frames_from_milliseconds(ma_uint32 bufferSizeInMilliseconds, ma_uint32 sampleRate)
-{
- return bufferSizeInMilliseconds * (sampleRate/1000);
-}
+ /*
+ The event for capture needs to be manual reset for the same reason as playback. We keep the initial state set to unsignaled,
+ however, because we want to block until we actually have something for the first call to ma_device_read().
+ */
+ pDevice->wasapi.hEventCapture = CreateEventW(NULL, FALSE, FALSE, NULL); /* Auto reset, unsignaled by default. */
+ if (pDevice->wasapi.hEventCapture == NULL) {
+ result = ma_result_from_GetLastError(GetLastError());
-ma_uint32 ma_get_default_buffer_size_in_milliseconds(ma_performance_profile performanceProfile)
-{
- if (performanceProfile == ma_performance_profile_low_latency) {
- return MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_LOW_LATENCY;
- } else {
- return MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_CONSERVATIVE;
- }
-}
+ if (pDevice->wasapi.pCaptureClient != NULL) {
+ ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+ pDevice->wasapi.pCaptureClient = NULL;
+ }
+ if (pDevice->wasapi.pAudioClientCapture != NULL) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+ pDevice->wasapi.pAudioClientCapture = NULL;
+ }
-ma_uint32 ma_get_default_buffer_size_in_frames(ma_performance_profile performanceProfile, ma_uint32 sampleRate)
-{
- ma_uint32 bufferSizeInMilliseconds;
- ma_uint32 sampleRateMS;
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for capture.", result);
+ }
+ ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, pDevice->wasapi.hEventCapture);
- bufferSizeInMilliseconds = ma_get_default_buffer_size_in_milliseconds(performanceProfile);
- if (bufferSizeInMilliseconds == 0) {
- bufferSizeInMilliseconds = 1;
+ pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut;
+ ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualPeriodSizeInFramesCapture);
}
- sampleRateMS = (sampleRate/1000);
- if (sampleRateMS == 0) {
- sampleRateMS = 1;
- }
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ma_device_init_internal_data__wasapi data;
+ data.formatIn = pConfig->playback.format;
+ data.channelsIn = pConfig->playback.channels;
+ data.sampleRateIn = pConfig->sampleRate;
+ MA_COPY_MEMORY(data.channelMapIn, pConfig->playback.channelMap, sizeof(pConfig->playback.channelMap));
+ data.usingDefaultFormat = pDevice->playback.usingDefaultFormat;
+ data.usingDefaultChannels = pDevice->playback.usingDefaultChannels;
+ data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
+ data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap;
+ data.shareMode = pConfig->playback.shareMode;
+ data.periodSizeInFramesIn = pConfig->periodSizeInFrames;
+ data.periodSizeInMillisecondsIn = pConfig->periodSizeInMilliseconds;
+ data.periodsIn = pConfig->periods;
+ data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC;
+ data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC;
+ data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading;
- return bufferSizeInMilliseconds * sampleRateMS;
-}
+ result = ma_device_init_internal__wasapi(pDevice->pContext, ma_device_type_playback, pConfig->playback.pDeviceID, &data);
+ if (result != MA_SUCCESS) {
+ if (pConfig->deviceType == ma_device_type_duplex) {
+ if (pDevice->wasapi.pCaptureClient != NULL) {
+ ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+ pDevice->wasapi.pCaptureClient = NULL;
+ }
+ if (pDevice->wasapi.pAudioClientCapture != NULL) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+ pDevice->wasapi.pAudioClientCapture = NULL;
+ }
-ma_uint32 ma_get_fragment_size_in_bytes(ma_uint32 bufferSizeInFrames, ma_uint32 periods, ma_format format, ma_uint32 channels)
-{
- ma_uint32 fragmentSizeInFrames = bufferSizeInFrames / periods;
- return fragmentSizeInFrames * ma_get_bytes_per_frame(format, channels);
-}
+ CloseHandle(pDevice->wasapi.hEventCapture);
+ pDevice->wasapi.hEventCapture = NULL;
+ }
+ return result;
+ }
-void ma_zero_pcm_frames(void* p, ma_uint32 frameCount, ma_format format, ma_uint32 channels)
-{
- ma_zero_memory(p, frameCount * ma_get_bytes_per_frame(format, channels));
-}
+ pDevice->wasapi.pAudioClientPlayback = data.pAudioClient;
+ pDevice->wasapi.pRenderClient = data.pRenderClient;
+ pDevice->playback.internalFormat = data.formatOut;
+ pDevice->playback.internalChannels = data.channelsOut;
+ pDevice->playback.internalSampleRate = data.sampleRateOut;
+ MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+ pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+ pDevice->playback.internalPeriods = data.periodsOut;
+ ma_strcpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), data.deviceName);
+ /*
+        The event for playback needs to be manual reset because we want to explicitly control the fact that it becomes signalled
+ only after the whole available space has been filled, never before.
-/* The callback for reading from the client -> DSP -> device. */
-ma_uint32 ma_device__on_read_from_client(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint32 frameCount, void* pUserData)
-{
- ma_device* pDevice = (ma_device*)pUserData;
- ma_device_callback_proc onData;
+ The playback event also needs to be initially set to a signaled state so that the first call to ma_device_write() is able
+        to get past WaitForMultipleObjects().
+ */
+ pDevice->wasapi.hEventPlayback = CreateEventW(NULL, FALSE, TRUE, NULL); /* Auto reset, signaled by default. */
+ if (pDevice->wasapi.hEventPlayback == NULL) {
+ result = ma_result_from_GetLastError(GetLastError());
- ma_assert(pDevice != NULL);
+ if (pConfig->deviceType == ma_device_type_duplex) {
+ if (pDevice->wasapi.pCaptureClient != NULL) {
+ ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+ pDevice->wasapi.pCaptureClient = NULL;
+ }
+ if (pDevice->wasapi.pAudioClientCapture != NULL) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+ pDevice->wasapi.pAudioClientCapture = NULL;
+ }
- ma_zero_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels);
+ CloseHandle(pDevice->wasapi.hEventCapture);
+ pDevice->wasapi.hEventCapture = NULL;
+ }
- onData = pDevice->onData;
- if (onData) {
- onData(pDevice, pFramesOut, NULL, frameCount);
- return frameCount;
- }
+ if (pDevice->wasapi.pRenderClient != NULL) {
+ ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient);
+ pDevice->wasapi.pRenderClient = NULL;
+ }
+ if (pDevice->wasapi.pAudioClientPlayback != NULL) {
+ ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+ pDevice->wasapi.pAudioClientPlayback = NULL;
+ }
- (void)pDSP;
- return 0;
-}
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for playback.", result);
+ }
+ ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, pDevice->wasapi.hEventPlayback);
-/* The PCM converter callback for reading from a buffer. */
-ma_uint32 ma_device__pcm_converter__on_read_from_buffer_capture(ma_pcm_converter* pConverter, void* pFramesOut, ma_uint32 frameCount, void* pUserData)
-{
- ma_device* pDevice = (ma_device*)pUserData;
- ma_uint32 framesToRead;
- ma_uint32 bytesToRead;
+ pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut;
+ ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualPeriodSizeInFramesPlayback);
+ }
+
+ /*
+ We need to get notifications of when the default device changes. We do this through a device enumerator by
+ registering a IMMNotificationClient with it. We only care about this if it's the default device.
+ */
+#ifdef MA_WIN32_DESKTOP
+ if (pConfig->wasapi.noAutoStreamRouting == MA_FALSE) {
+ if ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.pDeviceID == NULL) {
+ pDevice->wasapi.allowCaptureAutoStreamRouting = MA_TRUE;
+ }
+ if ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID == NULL) {
+ pDevice->wasapi.allowPlaybackAutoStreamRouting = MA_TRUE;
+ }
- ma_assert(pDevice != NULL);
+ if (pDevice->wasapi.allowCaptureAutoStreamRouting || pDevice->wasapi.allowPlaybackAutoStreamRouting) {
+ ma_IMMDeviceEnumerator* pDeviceEnumerator;
+ HRESULT hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator);
+ if (FAILED(hr)) {
+ ma_device_uninit__wasapi(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.", ma_result_from_HRESULT(hr));
+ }
- if (pDevice->capture._dspFrameCount == 0) {
- return 0; /* Nothing left. */
- }
+ pDevice->wasapi.notificationClient.lpVtbl = (void*)&g_maNotificationCientVtbl;
+ pDevice->wasapi.notificationClient.counter = 1;
+ pDevice->wasapi.notificationClient.pDevice = pDevice;
- framesToRead = frameCount;
- if (framesToRead > pDevice->capture._dspFrameCount) {
- framesToRead = pDevice->capture._dspFrameCount;
+ hr = pDeviceEnumerator->lpVtbl->RegisterEndpointNotificationCallback(pDeviceEnumerator, &pDevice->wasapi.notificationClient);
+ if (SUCCEEDED(hr)) {
+ pDevice->wasapi.pDeviceEnumerator = (ma_ptr)pDeviceEnumerator;
+ } else {
+ /* Not the end of the world if we fail to register the notification callback. We just won't support automatic stream routing. */
+ ma_IMMDeviceEnumerator_Release(pDeviceEnumerator);
+ }
+ }
}
+#endif
- bytesToRead = framesToRead * ma_get_bytes_per_frame(pConverter->formatConverterIn.config.formatIn, pConverter->channelRouter.config.channelsIn);
- ma_copy_memory(pFramesOut, pDevice->capture._dspFrames, bytesToRead);
- pDevice->capture._dspFrameCount -= framesToRead;
- pDevice->capture._dspFrames += bytesToRead;
+ ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE);
+ ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE);
- return framesToRead;
+ return MA_SUCCESS;
}
-ma_uint32 ma_device__pcm_converter__on_read_from_buffer_playback(ma_pcm_converter* pConverter, void* pFramesOut, ma_uint32 frameCount, void* pUserData)
+static ma_result ma_device__get_available_frames__wasapi(ma_device* pDevice, ma_IAudioClient* pAudioClient, ma_uint32* pFrameCount)
{
- ma_device* pDevice = (ma_device*)pUserData;
- ma_uint32 framesToRead;
- ma_uint32 bytesToRead;
+ ma_uint32 paddingFramesCount;
+ HRESULT hr;
+ ma_share_mode shareMode;
- ma_assert(pDevice != NULL);
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(pFrameCount != NULL);
+
+ *pFrameCount = 0;
- if (pDevice->playback._dspFrameCount == 0) {
- return 0; /* Nothing left. */
+ if ((ma_ptr)pAudioClient != pDevice->wasapi.pAudioClientPlayback && (ma_ptr)pAudioClient != pDevice->wasapi.pAudioClientCapture) {
+ return MA_INVALID_OPERATION;
}
- framesToRead = frameCount;
- if (framesToRead > pDevice->playback._dspFrameCount) {
- framesToRead = pDevice->playback._dspFrameCount;
+ hr = ma_IAudioClient_GetCurrentPadding(pAudioClient, &paddingFramesCount);
+ if (FAILED(hr)) {
+ return ma_result_from_HRESULT(hr);
}
- bytesToRead = framesToRead * ma_get_bytes_per_frame(pConverter->formatConverterIn.config.formatIn, pConverter->channelRouter.config.channelsIn);
- ma_copy_memory(pFramesOut, pDevice->playback._dspFrames, bytesToRead);
- pDevice->playback._dspFrameCount -= framesToRead;
- pDevice->playback._dspFrames += bytesToRead;
-
- return framesToRead;
-}
-
-
-
-/* A helper function for reading sample data from the client. */
-static MA_INLINE void ma_device__read_frames_from_client(ma_device* pDevice, ma_uint32 frameCount, void* pSamples)
-{
- ma_device_callback_proc onData;
-
- ma_assert(pDevice != NULL);
- ma_assert(frameCount > 0);
- ma_assert(pSamples != NULL);
-
- onData = pDevice->onData;
- if (onData) {
- if (pDevice->playback.converter.isPassthrough) {
- ma_zero_pcm_frames(pSamples, frameCount, pDevice->playback.format, pDevice->playback.channels);
- onData(pDevice, pSamples, NULL, frameCount);
+ /* Slightly different rules for exclusive and shared modes. */
+ shareMode = ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) ? pDevice->playback.shareMode : pDevice->capture.shareMode;
+ if (shareMode == ma_share_mode_exclusive) {
+ *pFrameCount = paddingFramesCount;
+ } else {
+ if ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) {
+ *pFrameCount = pDevice->wasapi.actualPeriodSizeInFramesPlayback - paddingFramesCount;
} else {
- ma_pcm_converter_read(&pDevice->playback.converter, pSamples, frameCount);
+ *pFrameCount = paddingFramesCount;
}
}
+
+ return MA_SUCCESS;
}
-/* A helper for sending sample data to the client. */
-static MA_INLINE void ma_device__send_frames_to_client(ma_device* pDevice, ma_uint32 frameCount, const void* pSamples)
+static ma_bool32 ma_device_is_reroute_required__wasapi(ma_device* pDevice, ma_device_type deviceType)
{
- ma_device_callback_proc onData;
-
- ma_assert(pDevice != NULL);
- ma_assert(frameCount > 0);
- ma_assert(pSamples != NULL);
-
- onData = pDevice->onData;
- if (onData) {
- if (pDevice->capture.converter.isPassthrough) {
- onData(pDevice, NULL, pSamples, frameCount);
- } else {
- ma_uint8 chunkBuffer[4096];
- ma_uint32 chunkFrameCount;
-
- pDevice->capture._dspFrameCount = frameCount;
- pDevice->capture._dspFrames = (const ma_uint8*)pSamples;
-
- chunkFrameCount = sizeof(chunkBuffer) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
-
- for (;;) {
- ma_uint32 framesJustRead = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, chunkBuffer, chunkFrameCount);
- if (framesJustRead == 0) {
- break;
- }
+ MA_ASSERT(pDevice != NULL);
- onData(pDevice, NULL, chunkBuffer, framesJustRead);
+ if (deviceType == ma_device_type_playback) {
+ return pDevice->wasapi.hasDefaultPlaybackDeviceChanged;
+ }
- if (framesJustRead < chunkFrameCount) {
- break;
- }
- }
- }
+ if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) {
+ return pDevice->wasapi.hasDefaultCaptureDeviceChanged;
}
+
+ return MA_FALSE;
}
-static MA_INLINE ma_result ma_device__handle_duplex_callback_capture(ma_device* pDevice, ma_uint32 frameCount, const void* pFramesInInternalFormat, ma_pcm_rb* pRB)
+static ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceType)
{
ma_result result;
- ma_assert(pDevice != NULL);
- ma_assert(frameCount > 0);
- ma_assert(pFramesInInternalFormat != NULL);
- ma_assert(pRB != NULL);
+ if (deviceType == ma_device_type_duplex) {
+ return MA_INVALID_ARGS;
+ }
+
+ if (deviceType == ma_device_type_playback) {
+ ma_atomic_exchange_32(&pDevice->wasapi.hasDefaultPlaybackDeviceChanged, MA_FALSE);
+ }
+ if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) {
+ ma_atomic_exchange_32(&pDevice->wasapi.hasDefaultCaptureDeviceChanged, MA_FALSE);
+ }
- pDevice->capture._dspFrameCount = (ma_uint32)frameCount;
- pDevice->capture._dspFrames = (const ma_uint8*)pFramesInInternalFormat;
- /* Write to the ring buffer. The ring buffer is in the external format. */
- for (;;) {
- ma_uint32 framesProcessed;
- ma_uint32 framesToProcess = 256;
- void* pFramesInExternalFormat;
+ #ifdef MA_DEBUG_OUTPUT
+ printf("=== CHANGING DEVICE ===\n");
+ #endif
- result = ma_pcm_rb_acquire_write(pRB, &framesToProcess, &pFramesInExternalFormat);
- if (result != MA_SUCCESS) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to acquire capture PCM frames from ring buffer.", result);
- break;
- }
+ result = ma_device_reinit__wasapi(pDevice, deviceType);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- if (framesToProcess == 0) {
- if (ma_pcm_rb_pointer_disance(pRB) == (ma_int32)ma_pcm_rb_get_subbuffer_size(pRB)) {
- break; /* Overrun. Not enough room in the ring buffer for input frame. Excess frames are dropped. */
- }
- }
+ ma_device__post_init_setup(pDevice, deviceType);
- /* Convert. */
- framesProcessed = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, pFramesInExternalFormat, framesToProcess);
+ return MA_SUCCESS;
+}
- result = ma_pcm_rb_commit_write(pRB, framesProcessed, pFramesInExternalFormat);
- if (result != MA_SUCCESS) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to commit capture PCM frames to ring buffer.", result);
- break;
- }
- if (framesProcessed < framesToProcess) {
- break; /* Done. */
- }
+static ma_result ma_device_stop__wasapi(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ /*
+ We need to explicitly signal the capture event in loopback mode to ensure we return from WaitForSingleObject() when nothing is being played. When nothing
+ is being played, the event is never signalled internally by WASAPI which means we will deadlock when stopping the device.
+ */
+ if (pDevice->type == ma_device_type_loopback) {
+ SetEvent((HANDLE)pDevice->wasapi.hEventCapture);
}
return MA_SUCCESS;
}
-static MA_INLINE ma_result ma_device__handle_duplex_callback_playback(ma_device* pDevice, ma_uint32 frameCount, void* pFramesInInternalFormat, ma_pcm_rb* pRB)
+
+static ma_result ma_device_main_loop__wasapi(ma_device* pDevice)
{
ma_result result;
- ma_uint8 playbackFramesInExternalFormat[4096];
- ma_uint8 silentInputFrames[4096];
- ma_uint32 totalFramesToReadFromClient;
- ma_uint32 totalFramesReadFromClient;
-
- ma_assert(pDevice != NULL);
- ma_assert(frameCount > 0);
- ma_assert(pFramesInInternalFormat != NULL);
- ma_assert(pRB != NULL);
-
- /*
- Sitting in the ring buffer should be captured data from the capture callback in external format. If there's not enough data in there for
- the whole frameCount frames we just use silence instead for the input data.
- */
- ma_zero_memory(silentInputFrames, sizeof(silentInputFrames));
-
- /* We need to calculate how many output frames are required to be read from the client to completely fill frameCount internal frames. */
- totalFramesToReadFromClient = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->playback.internalSampleRate, frameCount); /* ma_pcm_converter_get_required_input_frame_count(&pDevice->playback.converter, (ma_uint32)frameCount); */
- totalFramesReadFromClient = 0;
- while (totalFramesReadFromClient < totalFramesToReadFromClient && ma_device_is_started(pDevice)) {
- ma_uint32 framesRemainingFromClient;
- ma_uint32 framesToProcessFromClient;
- ma_uint32 inputFrameCount;
- void* pInputFrames;
+ HRESULT hr;
+ ma_bool32 exitLoop = MA_FALSE;
+ ma_uint32 framesWrittenToPlaybackDevice = 0;
+ ma_uint32 mappedDeviceBufferSizeInFramesCapture = 0;
+ ma_uint32 mappedDeviceBufferSizeInFramesPlayback = 0;
+ ma_uint32 mappedDeviceBufferFramesRemainingCapture = 0;
+ ma_uint32 mappedDeviceBufferFramesRemainingPlayback = 0;
+ BYTE* pMappedDeviceBufferCapture = NULL;
+ BYTE* pMappedDeviceBufferPlayback = NULL;
+ ma_uint32 bpfCaptureDevice = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 bpfPlaybackDevice = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 bpfCaptureClient = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+ ma_uint32 bpfPlaybackClient = ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+ ma_uint8 inputDataInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 inputDataInClientFormatCap = sizeof(inputDataInClientFormat) / bpfCaptureClient;
+ ma_uint8 outputDataInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 outputDataInClientFormatCap = sizeof(outputDataInClientFormat) / bpfPlaybackClient;
+ ma_uint32 outputDataInClientFormatCount = 0;
+ ma_uint32 outputDataInClientFormatConsumed = 0;
+ ma_uint32 periodSizeInFramesCapture = 0;
+
+ MA_ASSERT(pDevice != NULL);
+
+ /* The capture device needs to be started immediately. */
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) {
+ periodSizeInFramesCapture = pDevice->capture.internalPeriodSizeInFrames;
- framesRemainingFromClient = (totalFramesToReadFromClient - totalFramesReadFromClient);
- framesToProcessFromClient = sizeof(playbackFramesInExternalFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
- if (framesToProcessFromClient > framesRemainingFromClient) {
- framesToProcessFromClient = framesRemainingFromClient;
+ hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal capture device.", ma_result_from_HRESULT(hr));
}
+ ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_TRUE);
+ }
- /* We need to grab captured samples before firing the callback. If there's not enough input samples we just pass silence. */
- inputFrameCount = framesToProcessFromClient;
- result = ma_pcm_rb_acquire_read(pRB, &inputFrameCount, &pInputFrames);
- if (result == MA_SUCCESS) {
- if (inputFrameCount > 0) {
- /* Use actual input frames. */
- pDevice->onData(pDevice, playbackFramesInExternalFormat, pInputFrames, inputFrameCount);
- } else {
- if (ma_pcm_rb_pointer_disance(pRB) == 0) {
- break; /* Underrun. */
- }
+ while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) {
+ /* We may need to reroute the device. */
+ if (ma_device_is_reroute_required__wasapi(pDevice, ma_device_type_playback)) {
+ result = ma_device_reroute__wasapi(pDevice, ma_device_type_playback);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
}
-
- /* We're done with the captured samples. */
- result = ma_pcm_rb_commit_read(pRB, inputFrameCount, pInputFrames);
+ }
+ if (ma_device_is_reroute_required__wasapi(pDevice, ma_device_type_capture)) {
+ result = ma_device_reroute__wasapi(pDevice, (pDevice->type == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture);
if (result != MA_SUCCESS) {
- break; /* Don't know what to do here... Just abandon ship. */
+ exitLoop = MA_TRUE;
+ break;
}
- } else {
- /* Use silent input frames. */
- inputFrameCount = ma_min(
- sizeof(playbackFramesInExternalFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels),
- sizeof(silentInputFrames) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)
- );
-
- pDevice->onData(pDevice, playbackFramesInExternalFormat, silentInputFrames, inputFrameCount);
}
- /* We have samples in external format so now we need to convert to internal format and output to the device. */
- pDevice->playback._dspFrameCount = inputFrameCount;
- pDevice->playback._dspFrames = (const ma_uint8*)playbackFramesInExternalFormat;
- ma_pcm_converter_read(&pDevice->playback.converter, pFramesInInternalFormat, inputFrameCount);
-
- totalFramesReadFromClient += inputFrameCount;
- pFramesInInternalFormat = ma_offset_ptr(pFramesInInternalFormat, inputFrameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
- }
-
- return MA_SUCCESS;
-}
-
-/* A helper for changing the state of the device. */
-static MA_INLINE void ma_device__set_state(ma_device* pDevice, ma_uint32 newState)
-{
- ma_atomic_exchange_32(&pDevice->state, newState);
-}
+ switch (pDevice->type)
+ {
+ case ma_device_type_duplex:
+ {
+ ma_uint32 framesAvailableCapture;
+ ma_uint32 framesAvailablePlayback;
+ DWORD flagsCapture; /* Passed to IAudioCaptureClient_GetBuffer(). */
-/* A helper for getting the state of the device. */
-static MA_INLINE ma_uint32 ma_device__get_state(ma_device* pDevice)
-{
- return pDevice->state;
-}
+ /* The process is to map the playback buffer and fill it as quickly as possible from input data. */
+ if (pMappedDeviceBufferPlayback == NULL) {
+ /* WASAPI is weird with exclusive mode. You need to wait on the event _before_ querying the available frames. */
+ if (pDevice->playback.shareMode == ma_share_mode_exclusive) {
+ if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE) == WAIT_FAILED) {
+ return MA_ERROR; /* Wait failed. */
+ }
+ }
-/* A helper for determining whether or not the device is running in async mode. */
-static MA_INLINE ma_bool32 ma_device__is_async(ma_device* pDevice)
-{
- return pDevice->onData != NULL;
-}
+ result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ /*printf("TRACE 1: framesAvailablePlayback=%d\n", framesAvailablePlayback);*/
-#ifdef MA_WIN32
- GUID MA_GUID_KSDATAFORMAT_SUBTYPE_PCM = {0x00000001, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
- GUID MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT = {0x00000003, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
- /*GUID MA_GUID_KSDATAFORMAT_SUBTYPE_ALAW = {0x00000006, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/
- /*GUID MA_GUID_KSDATAFORMAT_SUBTYPE_MULAW = {0x00000007, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/
-#endif
+ /* In exclusive mode, the frame count needs to exactly match the value returned by GetCurrentPadding(). */
+ if (pDevice->playback.shareMode != ma_share_mode_exclusive) {
+ if (framesAvailablePlayback > pDevice->wasapi.periodSizeInFramesPlayback) {
+ framesAvailablePlayback = pDevice->wasapi.periodSizeInFramesPlayback;
+ }
+ }
-ma_bool32 ma_context__device_id_equal(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
-{
- ma_assert(pContext != NULL);
+ /* If there's no frames available in the playback device we need to wait for more. */
+ if (framesAvailablePlayback == 0) {
+ /* In exclusive mode we waited at the top. */
+ if (pDevice->playback.shareMode != ma_share_mode_exclusive) {
+ if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE) == WAIT_FAILED) {
+ return MA_ERROR; /* Wait failed. */
+ }
+ }
- if (pID0 == pID1) return MA_TRUE;
+ continue;
+ }
- if ((pID0 == NULL && pID1 != NULL) ||
- (pID0 != NULL && pID1 == NULL)) {
- return MA_FALSE;
- }
+ /* We're ready to map the playback device's buffer. We don't release this until it's been entirely filled. */
+ hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, &pMappedDeviceBufferPlayback);
+ if (FAILED(hr)) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from playback device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
+ exitLoop = MA_TRUE;
+ break;
+ }
- if (pContext->onDeviceIDEqual) {
- return pContext->onDeviceIDEqual(pContext, pID0, pID1);
- }
+ mappedDeviceBufferSizeInFramesPlayback = framesAvailablePlayback;
+ mappedDeviceBufferFramesRemainingPlayback = framesAvailablePlayback;
+ }
- return MA_FALSE;
-}
+ /* At this point we should have a buffer available for output. We need to keep writing input samples to it. */
+ for (;;) {
+ /* Try grabbing some captured data if we haven't already got a mapped buffer. */
+ if (pMappedDeviceBufferCapture == NULL) {
+ if (pDevice->capture.shareMode == ma_share_mode_shared) {
+ if (WaitForSingleObject(pDevice->wasapi.hEventCapture, INFINITE) == WAIT_FAILED) {
+ return MA_ERROR; /* Wait failed. */
+ }
+ }
+ result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &framesAvailableCapture);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
-typedef struct
-{
- ma_device_type deviceType;
- const ma_device_id* pDeviceID;
- char* pName;
- size_t nameBufferSize;
- ma_bool32 foundDevice;
-} ma_context__try_get_device_name_by_id__enum_callback_data;
+ /*printf("TRACE 2: framesAvailableCapture=%d\n", framesAvailableCapture);*/
-ma_bool32 ma_context__try_get_device_name_by_id__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData)
-{
- ma_context__try_get_device_name_by_id__enum_callback_data* pData = (ma_context__try_get_device_name_by_id__enum_callback_data*)pUserData;
- ma_assert(pData != NULL);
+ /* Wait for more if nothing is available. */
+ if (framesAvailableCapture == 0) {
+ /* In exclusive mode we waited at the top. */
+ if (pDevice->capture.shareMode != ma_share_mode_shared) {
+ if (WaitForSingleObject(pDevice->wasapi.hEventCapture, INFINITE) == WAIT_FAILED) {
+ return MA_ERROR; /* Wait failed. */
+ }
+ }
- if (pData->deviceType == deviceType) {
- if (pContext->onDeviceIDEqual(pContext, pData->pDeviceID, &pDeviceInfo->id)) {
- ma_strncpy_s(pData->pName, pData->nameBufferSize, pDeviceInfo->name, (size_t)-1);
- pData->foundDevice = MA_TRUE;
- }
- }
+ continue;
+ }
- return !pData->foundDevice;
-}
+ /* Getting here means there's data available for writing to the output device. */
+ mappedDeviceBufferSizeInFramesCapture = ma_min(framesAvailableCapture, periodSizeInFramesCapture);
+ hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedDeviceBufferCapture, &mappedDeviceBufferSizeInFramesCapture, &flagsCapture, NULL, NULL);
+ if (FAILED(hr)) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
+ exitLoop = MA_TRUE;
+ break;
+ }
-/*
-Generic function for retrieving the name of a device by it's ID.
-This function simply enumerates every device and then retrieves the name of the first device that has the same ID.
-*/
-ma_result ma_context__try_get_device_name_by_id(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, char* pName, size_t nameBufferSize)
-{
- ma_result result;
- ma_context__try_get_device_name_by_id__enum_callback_data data;
+ /* Overrun detection. */
+ if ((flagsCapture & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) {
+ /* Glitched. Probably due to an overrun. */
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[WASAPI] Data discontinuity (possible overrun). framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture);
+ #endif
- ma_assert(pContext != NULL);
- ma_assert(pName != NULL);
+ /*
+                        Experiment: If we get an overrun it probably means we're straddling the end of the buffer. In order to prevent a never-ending sequence of glitches let's experiment
+ by dropping every frame until we're left with only a single period. To do this we just keep retrieving and immediately releasing buffers until we're down to the
+ last period.
+ */
+ if (framesAvailableCapture >= pDevice->wasapi.actualPeriodSizeInFramesCapture) {
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[WASAPI] Synchronizing capture stream. ");
+ #endif
+ do
+ {
+ hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture);
+ if (FAILED(hr)) {
+ break;
+ }
- if (pDeviceID == NULL) {
- return MA_NO_DEVICE;
- }
+ framesAvailableCapture -= mappedDeviceBufferSizeInFramesCapture;
+
+ if (framesAvailableCapture > 0) {
+ mappedDeviceBufferSizeInFramesCapture = ma_min(framesAvailableCapture, periodSizeInFramesCapture);
+ hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedDeviceBufferCapture, &mappedDeviceBufferSizeInFramesCapture, &flagsCapture, NULL, NULL);
+ if (FAILED(hr)) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
+ exitLoop = MA_TRUE;
+ break;
+ }
+ } else {
+ pMappedDeviceBufferCapture = NULL;
+ mappedDeviceBufferSizeInFramesCapture = 0;
+ }
+ } while (framesAvailableCapture > periodSizeInFramesCapture);
+ #ifdef MA_DEBUG_OUTPUT
+ printf("framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture);
+ #endif
+ }
+ } else {
+ #ifdef MA_DEBUG_OUTPUT
+ if (flagsCapture != 0) {
+ printf("[WASAPI] Capture Flags: %d\n", flagsCapture);
+ }
+ #endif
+ }
- data.deviceType = deviceType;
- data.pDeviceID = pDeviceID;
- data.pName = pName;
- data.nameBufferSize = nameBufferSize;
- data.foundDevice = MA_FALSE;
- result = ma_context_enumerate_devices(pContext, ma_context__try_get_device_name_by_id__enum_callback, &data);
- if (result != MA_SUCCESS) {
- return result;
- }
+ mappedDeviceBufferFramesRemainingCapture = mappedDeviceBufferSizeInFramesCapture;
+ }
- if (!data.foundDevice) {
- return MA_NO_DEVICE;
- } else {
- return MA_SUCCESS;
- }
-}
+ /* At this point we should have both input and output data available. We now need to convert the data and post it to the client. */
+ for (;;) {
+ BYTE* pRunningDeviceBufferCapture;
+ BYTE* pRunningDeviceBufferPlayback;
+ ma_uint32 framesToProcess;
+ ma_uint32 framesProcessed;
-ma_uint32 ma_get_format_priority_index(ma_format format) /* Lower = better. */
-{
- ma_uint32 i;
- for (i = 0; i < ma_countof(g_maFormatPriorities); ++i) {
- if (g_maFormatPriorities[i] == format) {
- return i;
- }
- }
+ pRunningDeviceBufferCapture = pMappedDeviceBufferCapture + ((mappedDeviceBufferSizeInFramesCapture - mappedDeviceBufferFramesRemainingCapture ) * bpfCaptureDevice);
+ pRunningDeviceBufferPlayback = pMappedDeviceBufferPlayback + ((mappedDeviceBufferSizeInFramesPlayback - mappedDeviceBufferFramesRemainingPlayback) * bpfPlaybackDevice);
+
+                        /* There may be some data sitting in the converter that needs to be processed first. Once this is exhausted, run the data callback again. */
+ if (!pDevice->playback.converter.isPassthrough && outputDataInClientFormatConsumed < outputDataInClientFormatCount) {
+ ma_uint64 convertedFrameCountClient = (outputDataInClientFormatCount - outputDataInClientFormatConsumed);
+ ma_uint64 convertedFrameCountDevice = mappedDeviceBufferFramesRemainingPlayback;
+ void* pConvertedFramesClient = outputDataInClientFormat + (outputDataInClientFormatConsumed * bpfPlaybackClient);
+ void* pConvertedFramesDevice = pRunningDeviceBufferPlayback;
+ result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pConvertedFramesClient, &convertedFrameCountClient, pConvertedFramesDevice, &convertedFrameCountDevice);
+ if (result != MA_SUCCESS) {
+ break;
+ }
- /* Getting here means the format could not be found or is equal to ma_format_unknown. */
- return (ma_uint32)-1;
-}
+ outputDataInClientFormatConsumed += (ma_uint32)convertedFrameCountClient; /* Safe cast. */
+ mappedDeviceBufferFramesRemainingPlayback -= (ma_uint32)convertedFrameCountDevice; /* Safe cast. */
-void ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType);
+ if (mappedDeviceBufferFramesRemainingPlayback == 0) {
+ break;
+ }
+ }
+ /*
+ Getting here means we need to fire the callback. If format conversion is unnecessary, we can optimize this by passing the pointers to the internal
+ buffers directly to the callback.
+ */
+ if (pDevice->capture.converter.isPassthrough && pDevice->playback.converter.isPassthrough) {
+ /* Optimal path. We can pass mapped pointers directly to the callback. */
+ framesToProcess = ma_min(mappedDeviceBufferFramesRemainingCapture, mappedDeviceBufferFramesRemainingPlayback);
+ framesProcessed = framesToProcess;
-/*******************************************************************************
+ ma_device__on_data(pDevice, pRunningDeviceBufferPlayback, pRunningDeviceBufferCapture, framesToProcess);
-Null Backend
+ mappedDeviceBufferFramesRemainingCapture -= framesProcessed;
+ mappedDeviceBufferFramesRemainingPlayback -= framesProcessed;
-*******************************************************************************/
-#ifdef MA_HAS_NULL
+ if (mappedDeviceBufferFramesRemainingCapture == 0) {
+ break; /* Exhausted input data. */
+ }
+ if (mappedDeviceBufferFramesRemainingPlayback == 0) {
+ break; /* Exhausted output data. */
+ }
+ } else if (pDevice->capture.converter.isPassthrough) {
+ /* The input buffer is a passthrough, but the playback buffer requires a conversion. */
+ framesToProcess = ma_min(mappedDeviceBufferFramesRemainingCapture, outputDataInClientFormatCap);
+ framesProcessed = framesToProcess;
-#define MA_DEVICE_OP_NONE__NULL 0
-#define MA_DEVICE_OP_START__NULL 1
-#define MA_DEVICE_OP_SUSPEND__NULL 2
-#define MA_DEVICE_OP_KILL__NULL 3
+ ma_device__on_data(pDevice, outputDataInClientFormat, pRunningDeviceBufferCapture, framesToProcess);
+ outputDataInClientFormatCount = framesProcessed;
+ outputDataInClientFormatConsumed = 0;
-ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData)
-{
- ma_device* pDevice = (ma_device*)pData;
- ma_assert(pDevice != NULL);
+ mappedDeviceBufferFramesRemainingCapture -= framesProcessed;
+ if (mappedDeviceBufferFramesRemainingCapture == 0) {
+ break; /* Exhausted input data. */
+ }
+ } else if (pDevice->playback.converter.isPassthrough) {
+ /* The input buffer requires conversion, the playback buffer is passthrough. */
+ ma_uint64 capturedDeviceFramesToProcess = mappedDeviceBufferFramesRemainingCapture;
+ ma_uint64 capturedClientFramesToProcess = ma_min(inputDataInClientFormatCap, mappedDeviceBufferFramesRemainingPlayback);
- for (;;) { /* Keep the thread alive until the device is uninitialized. */
- /* Wait for an operation to be requested. */
- ma_event_wait(&pDevice->null_device.operationEvent);
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningDeviceBufferCapture, &capturedDeviceFramesToProcess, inputDataInClientFormat, &capturedClientFramesToProcess);
+ if (result != MA_SUCCESS) {
+ break;
+ }
- /* At this point an event should have been triggered. */
+ if (capturedClientFramesToProcess == 0) {
+ break;
+ }
- /* Starting the device needs to put the thread into a loop. */
- if (pDevice->null_device.operation == MA_DEVICE_OP_START__NULL) {
- ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);
+ ma_device__on_data(pDevice, pRunningDeviceBufferPlayback, inputDataInClientFormat, (ma_uint32)capturedClientFramesToProcess); /* Safe cast. */
- /* Reset the timer just in case. */
- ma_timer_init(&pDevice->null_device.timer);
+ mappedDeviceBufferFramesRemainingCapture -= (ma_uint32)capturedDeviceFramesToProcess;
+ mappedDeviceBufferFramesRemainingPlayback -= (ma_uint32)capturedClientFramesToProcess;
+ } else {
+ ma_uint64 capturedDeviceFramesToProcess = mappedDeviceBufferFramesRemainingCapture;
+ ma_uint64 capturedClientFramesToProcess = ma_min(inputDataInClientFormatCap, outputDataInClientFormatCap);
- /* Keep looping until an operation has been requested. */
- while (pDevice->null_device.operation != MA_DEVICE_OP_NONE__NULL && pDevice->null_device.operation != MA_DEVICE_OP_START__NULL) {
- ma_sleep(10); /* Don't hog the CPU. */
- }
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningDeviceBufferCapture, &capturedDeviceFramesToProcess, inputDataInClientFormat, &capturedClientFramesToProcess);
+ if (result != MA_SUCCESS) {
+ break;
+ }
- /* Getting here means a suspend or kill operation has been requested. */
- ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
- ma_event_signal(&pDevice->null_device.operationCompletionEvent);
- continue;
- }
+ if (capturedClientFramesToProcess == 0) {
+ break;
+ }
- /* Suspending the device means we need to stop the timer and just continue the loop. */
- if (pDevice->null_device.operation == MA_DEVICE_OP_SUSPEND__NULL) {
- ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);
+ ma_device__on_data(pDevice, outputDataInClientFormat, inputDataInClientFormat, (ma_uint32)capturedClientFramesToProcess);
+
+ mappedDeviceBufferFramesRemainingCapture -= (ma_uint32)capturedDeviceFramesToProcess;
+ outputDataInClientFormatCount = (ma_uint32)capturedClientFramesToProcess;
+ outputDataInClientFormatConsumed = 0;
+ }
+ }
- /* We need to add the current run time to the prior run time, then reset the timer. */
- pDevice->null_device.priorRunTime += ma_timer_get_time_in_seconds(&pDevice->null_device.timer);
- ma_timer_init(&pDevice->null_device.timer);
- /* We're done. */
- ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
- ma_event_signal(&pDevice->null_device.operationCompletionEvent);
- continue;
- }
+ /* If at this point we've run out of capture data we need to release the buffer. */
+ if (mappedDeviceBufferFramesRemainingCapture == 0 && pMappedDeviceBufferCapture != NULL) {
+ hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture);
+ if (FAILED(hr)) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from capture device after reading from the device.", ma_result_from_HRESULT(hr));
+ exitLoop = MA_TRUE;
+ break;
+ }
- /* Killing the device means we need to get out of this loop so that this thread can terminate. */
- if (pDevice->null_device.operation == MA_DEVICE_OP_KILL__NULL) {
- ma_atomic_exchange_32(&pDevice->null_device.operation, MA_DEVICE_OP_NONE__NULL);
- ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_SUCCESS);
- ma_event_signal(&pDevice->null_device.operationCompletionEvent);
- break;
- }
+ /*printf("TRACE: Released capture buffer\n");*/
- /* Getting a signal on a "none" operation probably means an error. Return invalid operation. */
- if (pDevice->null_device.operation == MA_DEVICE_OP_NONE__NULL) {
- ma_assert(MA_FALSE); /* <-- Trigger this in debug mode to ensure developers are aware they're doing something wrong (or there's a bug in a miniaudio). */
- ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_INVALID_OPERATION);
- ma_event_signal(&pDevice->null_device.operationCompletionEvent);
- continue; /* Continue the loop. Don't terminate. */
- }
- }
+ pMappedDeviceBufferCapture = NULL;
+ mappedDeviceBufferFramesRemainingCapture = 0;
+ mappedDeviceBufferSizeInFramesCapture = 0;
+ }
- return (ma_thread_result)0;
-}
+ /* Get out of this loop if we're run out of room in the playback buffer. */
+ if (mappedDeviceBufferFramesRemainingPlayback == 0) {
+ break;
+ }
+ }
-ma_result ma_device_do_operation__null(ma_device* pDevice, ma_uint32 operation)
-{
- ma_atomic_exchange_32(&pDevice->null_device.operation, operation);
- if (!ma_event_signal(&pDevice->null_device.operationEvent)) {
- return MA_ERROR;
- }
- if (!ma_event_wait(&pDevice->null_device.operationCompletionEvent)) {
- return MA_ERROR;
- }
+ /* If at this point we've run out of data we need to release the buffer. */
+ if (mappedDeviceBufferFramesRemainingPlayback == 0 && pMappedDeviceBufferPlayback != NULL) {
+ hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, mappedDeviceBufferSizeInFramesPlayback, 0);
+ if (FAILED(hr)) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from playback device after writing to the device.", ma_result_from_HRESULT(hr));
+ exitLoop = MA_TRUE;
+ break;
+ }
- return pDevice->null_device.operationResult;
-}
+ /*printf("TRACE: Released playback buffer\n");*/
+ framesWrittenToPlaybackDevice += mappedDeviceBufferSizeInFramesPlayback;
-ma_uint64 ma_device_get_total_run_time_in_frames__null(ma_device* pDevice)
-{
- ma_uint32 internalSampleRate;
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- internalSampleRate = pDevice->capture.internalSampleRate;
- } else {
- internalSampleRate = pDevice->playback.internalSampleRate;
- }
+ pMappedDeviceBufferPlayback = NULL;
+ mappedDeviceBufferFramesRemainingPlayback = 0;
+ mappedDeviceBufferSizeInFramesPlayback = 0;
+ }
+ if (!pDevice->wasapi.isStartedPlayback) {
+ ma_uint32 startThreshold = pDevice->playback.internalPeriodSizeInFrames * 1;
- return (ma_uint64)((pDevice->null_device.priorRunTime + ma_timer_get_time_in_seconds(&pDevice->null_device.timer)) * internalSampleRate);
-}
+ /* Prevent a deadlock. If we don't clamp against the actual buffer size we'll never end up starting the playback device which will result in a deadlock. */
+ if (startThreshold > pDevice->wasapi.actualPeriodSizeInFramesPlayback) {
+ startThreshold = pDevice->wasapi.actualPeriodSizeInFramesPlayback;
+ }
-ma_bool32 ma_context_is_device_id_equal__null(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
-{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
+ if (pDevice->playback.shareMode == ma_share_mode_exclusive || framesWrittenToPlaybackDevice >= startThreshold) {
+ hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+ if (FAILED(hr)) {
+ ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+ ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device.", ma_result_from_HRESULT(hr));
+ }
+ ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_TRUE);
+ }
+ }
+ } break;
- return pID0->nullbackend == pID1->nullbackend;
-}
-ma_result ma_context_enumerate_devices__null(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
-{
- ma_bool32 cbResult = MA_TRUE;
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
+ case ma_device_type_capture:
+ case ma_device_type_loopback:
+ {
+ ma_uint32 framesAvailableCapture;
+ DWORD flagsCapture; /* Passed to IAudioCaptureClient_GetBuffer(). */
- /* Playback. */
- if (cbResult) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Playback Device", (size_t)-1);
- cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
- }
+ /* Wait for data to become available first. */
+ if (WaitForSingleObject(pDevice->wasapi.hEventCapture, INFINITE) == WAIT_FAILED) {
+ exitLoop = MA_TRUE;
+ break; /* Wait failed. */
+ }
- /* Capture. */
- if (cbResult) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Capture Device", (size_t)-1);
- cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
- }
+ /* See how many frames are available. Since we waited at the top, I don't think this should ever return 0. I'm checking for this anyway. */
+ result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &framesAvailableCapture);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
- return MA_SUCCESS;
-}
+ if (framesAvailableCapture < pDevice->wasapi.periodSizeInFramesCapture) {
+ continue; /* Nothing available. Keep waiting. */
+ }
-ma_result ma_context_get_device_info__null(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
-{
- ma_uint32 iFormat;
+ /* Map the data buffer in preparation for sending to the client. */
+ mappedDeviceBufferSizeInFramesCapture = framesAvailableCapture;
+ hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedDeviceBufferCapture, &mappedDeviceBufferSizeInFramesCapture, &flagsCapture, NULL, NULL);
+ if (FAILED(hr)) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
+ exitLoop = MA_TRUE;
+ break;
+ }
- ma_assert(pContext != NULL);
+ /* We should have a buffer at this point. */
+ ma_device__send_frames_to_client(pDevice, mappedDeviceBufferSizeInFramesCapture, pMappedDeviceBufferCapture);
- if (pDeviceID != NULL && pDeviceID->nullbackend != 0) {
- return MA_NO_DEVICE; /* Don't know the device. */
- }
+ /* At this point we're done with the buffer. */
+ hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture);
+ pMappedDeviceBufferCapture = NULL; /* <-- Important. Not doing this can result in an error once we leave this loop because it will use this to know whether or not a final ReleaseBuffer() needs to be called. */
+ mappedDeviceBufferSizeInFramesCapture = 0;
+ if (FAILED(hr)) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from capture device after reading from the device.", ma_result_from_HRESULT(hr));
+ exitLoop = MA_TRUE;
+ break;
+ }
+ } break;
- /* Name / Description */
- if (deviceType == ma_device_type_playback) {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Playback Device", (size_t)-1);
- } else {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Capture Device", (size_t)-1);
- }
- /* Support everything on the null backend. */
- pDeviceInfo->formatCount = ma_format_count - 1; /* Minus one because we don't want to include ma_format_unknown. */
- for (iFormat = 0; iFormat < pDeviceInfo->formatCount; ++iFormat) {
- pDeviceInfo->formats[iFormat] = (ma_format)(iFormat + 1); /* +1 to skip over ma_format_unknown. */
- }
- pDeviceInfo->minChannels = 1;
- pDeviceInfo->maxChannels = MA_MAX_CHANNELS;
- pDeviceInfo->minSampleRate = MA_SAMPLE_RATE_8000;
- pDeviceInfo->maxSampleRate = MA_SAMPLE_RATE_384000;
+ case ma_device_type_playback:
+ {
+ ma_uint32 framesAvailablePlayback;
- (void)pContext;
- (void)shareMode;
- return MA_SUCCESS;
-}
+ /* Wait for space to become available first. */
+ if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE) == WAIT_FAILED) {
+ exitLoop = MA_TRUE;
+ break; /* Wait failed. */
+ }
+ /* Check how much space is available. If this returns 0 we just keep waiting. */
+ result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
-void ma_device_uninit__null(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
+ if (framesAvailablePlayback < pDevice->wasapi.periodSizeInFramesPlayback) {
+ continue; /* No space available. */
+ }
- /* Keep it clean and wait for the device thread to finish before returning. */
- ma_device_do_operation__null(pDevice, MA_DEVICE_OP_KILL__NULL);
+ /* Map a the data buffer in preparation for the callback. */
+ hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, &pMappedDeviceBufferPlayback);
+ if (FAILED(hr)) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from playback device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
+ exitLoop = MA_TRUE;
+ break;
+ }
- /* At this point the loop in the device thread is as good as terminated so we can uninitialize our events. */
- ma_event_uninit(&pDevice->null_device.operationCompletionEvent);
- ma_event_uninit(&pDevice->null_device.operationEvent);
-}
+ /* We should have a buffer at this point. */
+ ma_device__read_frames_from_client(pDevice, framesAvailablePlayback, pMappedDeviceBufferPlayback);
-ma_result ma_device_init__null(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
-{
- ma_result result;
- ma_uint32 bufferSizeInFrames;
+ /* At this point we're done writing to the device and we just need to release the buffer. */
+ hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, 0);
+ pMappedDeviceBufferPlayback = NULL; /* <-- Important. Not doing this can result in an error once we leave this loop because it will use this to know whether or not a final ReleaseBuffer() needs to be called. */
+ mappedDeviceBufferSizeInFramesPlayback = 0;
- ma_assert(pDevice != NULL);
+ if (FAILED(hr)) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from playback device after writing to the device.", ma_result_from_HRESULT(hr));
+ exitLoop = MA_TRUE;
+ break;
+ }
- ma_zero_object(&pDevice->null_device);
+ framesWrittenToPlaybackDevice += framesAvailablePlayback;
+ if (!pDevice->wasapi.isStartedPlayback) {
+ if (pDevice->playback.shareMode == ma_share_mode_exclusive || framesWrittenToPlaybackDevice >= pDevice->playback.internalPeriodSizeInFrames*1) {
+ hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+ if (FAILED(hr)) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device.", ma_result_from_HRESULT(hr));
+ exitLoop = MA_TRUE;
+ break;
+ }
+ ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_TRUE);
+ }
+ }
+ } break;
- bufferSizeInFrames = pConfig->bufferSizeInFrames;
- if (bufferSizeInFrames == 0) {
- bufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, pConfig->sampleRate);
+ default: return MA_INVALID_ARGS;
+ }
}
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), "NULL Capture Device", (size_t)-1);
- pDevice->capture.internalFormat = pConfig->capture.format;
- pDevice->capture.internalChannels = pConfig->capture.channels;
- ma_channel_map_copy(pDevice->capture.internalChannelMap, pConfig->capture.channelMap, pConfig->capture.channels);
- pDevice->capture.internalBufferSizeInFrames = bufferSizeInFrames;
- pDevice->capture.internalPeriods = pConfig->periods;
- }
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), "NULL Playback Device", (size_t)-1);
- pDevice->playback.internalFormat = pConfig->playback.format;
- pDevice->playback.internalChannels = pConfig->playback.channels;
- ma_channel_map_copy(pDevice->playback.internalChannelMap, pConfig->playback.channelMap, pConfig->playback.channels);
- pDevice->playback.internalBufferSizeInFrames = bufferSizeInFrames;
- pDevice->playback.internalPeriods = pConfig->periods;
- }
+ /* Here is where the device needs to be stopped. */
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) {
+ /* Any mapped buffers need to be released. */
+ if (pMappedDeviceBufferCapture != NULL) {
+ hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture);
+ }
- /*
- In order to get timing right, we need to create a thread that does nothing but keeps track of the timer. This timer is started when the
- first period is "written" to it, and then stopped in ma_device_stop__null().
- */
- result = ma_event_init(pContext, &pDevice->null_device.operationEvent);
- if (result != MA_SUCCESS) {
- return result;
- }
+ hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal capture device.", ma_result_from_HRESULT(hr));
+ }
- result = ma_event_init(pContext, &pDevice->null_device.operationCompletionEvent);
- if (result != MA_SUCCESS) {
- return result;
- }
+ /* The audio client needs to be reset otherwise restarting will fail. */
+ hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal capture device.", ma_result_from_HRESULT(hr));
+ }
- result = ma_thread_create(pContext, &pDevice->thread, ma_device_thread__null, pDevice);
- if (result != MA_SUCCESS) {
- return result;
+ ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE);
}
- return MA_SUCCESS;
-}
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ /* Any mapped buffers need to be released. */
+ if (pMappedDeviceBufferPlayback != NULL) {
+ hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, mappedDeviceBufferSizeInFramesPlayback, 0);
+ }
-ma_result ma_device_start__null(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
+ /*
+ The buffer needs to be drained before stopping the device. Not doing this will result in the last few frames not getting output to
+ the speakers. This is a problem for very short sounds because it'll result in a significant portion of it not getting played.
+ */
+ if (pDevice->wasapi.isStartedPlayback) {
+ if (pDevice->playback.shareMode == ma_share_mode_exclusive) {
+ WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE);
+ } else {
+ ma_uint32 prevFramesAvaialablePlayback = (ma_uint32)-1;
+ ma_uint32 framesAvailablePlayback;
+ for (;;) {
+ result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback);
+ if (result != MA_SUCCESS) {
+ break;
+ }
- ma_device_do_operation__null(pDevice, MA_DEVICE_OP_START__NULL);
+ if (framesAvailablePlayback >= pDevice->wasapi.actualPeriodSizeInFramesPlayback) {
+ break;
+ }
+
+ /*
+ Just a safety check to avoid an infinite loop. If this iteration results in a situation where the number of available frames
+ has not changed, get out of the loop. I don't think this should ever happen, but I think it's nice to have just in case.
+ */
+ if (framesAvailablePlayback == prevFramesAvaialablePlayback) {
+ break;
+ }
+ prevFramesAvaialablePlayback = framesAvailablePlayback;
+
+ WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE);
+ ResetEvent(pDevice->wasapi.hEventPlayback); /* Manual reset. */
+ }
+ }
+ }
+
+ hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal playback device.", ma_result_from_HRESULT(hr));
+ }
+
+ /* The audio client needs to be reset otherwise restarting will fail. */
+ hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal playback device.", ma_result_from_HRESULT(hr));
+ }
+
+ ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE);
+ }
- ma_atomic_exchange_32(&pDevice->null_device.isStarted, MA_TRUE);
return MA_SUCCESS;
}
-ma_result ma_device_stop__null(ma_device* pDevice)
+static ma_result ma_context_uninit__wasapi(ma_context* pContext)
{
- ma_assert(pDevice != NULL);
-
- ma_device_do_operation__null(pDevice, MA_DEVICE_OP_SUSPEND__NULL);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_wasapi);
+ (void)pContext;
- ma_atomic_exchange_32(&pDevice->null_device.isStarted, MA_FALSE);
return MA_SUCCESS;
}
-ma_result ma_device_write__null(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount)
+static ma_result ma_context_init__wasapi(const ma_context_config* pConfig, ma_context* pContext)
{
ma_result result = MA_SUCCESS;
- ma_uint32 totalPCMFramesProcessed;
- ma_bool32 wasStartedOnEntry;
-
- wasStartedOnEntry = pDevice->null_device.isStarted;
- /* Keep going until everything has been read. */
- totalPCMFramesProcessed = 0;
- while (totalPCMFramesProcessed < frameCount) {
- ma_uint64 targetFrame;
-
- /* If there are any frames remaining in the current period, consume those first. */
- if (pDevice->null_device.currentPeriodFramesRemainingPlayback > 0) {
- ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed);
- ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingPlayback;
- if (framesToProcess > framesRemaining) {
- framesToProcess = framesRemaining;
- }
+ MA_ASSERT(pContext != NULL);
- /* We don't actually do anything with pPCMFrames, so just mark it as unused to prevent a warning. */
- (void)pPCMFrames;
+ (void)pConfig;
- pDevice->null_device.currentPeriodFramesRemainingPlayback -= framesToProcess;
- totalPCMFramesProcessed += framesToProcess;
- }
+#ifdef MA_WIN32_DESKTOP
+ /*
+ WASAPI is only supported in Vista SP1 and newer. The reason for SP1 and not the base version of Vista is that event-driven
+ exclusive mode does not work until SP1.
- /* If we've consumed the current period we'll need to mark it as such an ensure the device is started if it's not already. */
- if (pDevice->null_device.currentPeriodFramesRemainingPlayback == 0) {
- pDevice->null_device.currentPeriodFramesRemainingPlayback = 0;
+ Unfortunately older compilers don't define these functions so we need to dynamically load them in order to avoid a lin error.
+ */
+ {
+ ma_OSVERSIONINFOEXW osvi;
+ ma_handle kernel32DLL;
+ ma_PFNVerifyVersionInfoW _VerifyVersionInfoW;
+ ma_PFNVerSetConditionMask _VerSetConditionMask;
- if (!pDevice->null_device.isStarted && !wasStartedOnEntry) {
- result = ma_device_start__null(pDevice);
- if (result != MA_SUCCESS) {
- break;
- }
- }
+ kernel32DLL = ma_dlopen(pContext, "kernel32.dll");
+ if (kernel32DLL == NULL) {
+ return MA_NO_BACKEND;
}
- /* If we've consumed the whole buffer we can return now. */
- ma_assert(totalPCMFramesProcessed <= frameCount);
- if (totalPCMFramesProcessed == frameCount) {
- break;
+ _VerifyVersionInfoW = (ma_PFNVerifyVersionInfoW)ma_dlsym(pContext, kernel32DLL, "VerifyVersionInfoW");
+ _VerSetConditionMask = (ma_PFNVerSetConditionMask)ma_dlsym(pContext, kernel32DLL, "VerSetConditionMask");
+ if (_VerifyVersionInfoW == NULL || _VerSetConditionMask == NULL) {
+ ma_dlclose(pContext, kernel32DLL);
+ return MA_NO_BACKEND;
}
- /* Getting here means we've still got more frames to consume, we but need to wait for it to become available. */
- targetFrame = pDevice->null_device.lastProcessedFramePlayback;
- for (;;) {
- ma_uint64 currentFrame;
-
- /* Stop waiting if the device has been stopped. */
- if (!pDevice->null_device.isStarted) {
- break;
- }
-
- currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice);
- if (currentFrame >= targetFrame) {
- break;
- }
-
- /* Getting here means we haven't yet reached the target sample, so continue waiting. */
- ma_sleep(10);
+ MA_ZERO_OBJECT(&osvi);
+ osvi.dwOSVersionInfoSize = sizeof(osvi);
+ osvi.dwMajorVersion = HIBYTE(MA_WIN32_WINNT_VISTA);
+ osvi.dwMinorVersion = LOBYTE(MA_WIN32_WINNT_VISTA);
+ osvi.wServicePackMajor = 1;
+ if (_VerifyVersionInfoW(&osvi, MA_VER_MAJORVERSION | MA_VER_MINORVERSION | MA_VER_SERVICEPACKMAJOR, _VerSetConditionMask(_VerSetConditionMask(_VerSetConditionMask(0, MA_VER_MAJORVERSION, MA_VER_GREATER_EQUAL), MA_VER_MINORVERSION, MA_VER_GREATER_EQUAL), MA_VER_SERVICEPACKMAJOR, MA_VER_GREATER_EQUAL))) {
+ result = MA_SUCCESS;
+ } else {
+ result = MA_NO_BACKEND;
}
- pDevice->null_device.lastProcessedFramePlayback += pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods;
- pDevice->null_device.currentPeriodFramesRemainingPlayback = pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods;
- }
-
- return result;
-}
-
-ma_result ma_device_read__null(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount)
-{
- ma_result result = MA_SUCCESS;
- ma_uint32 totalPCMFramesProcessed;
-
- /* The device needs to be started immediately. */
- if (!pDevice->null_device.isStarted) {
- result = ma_device_start__null(pDevice);
- if (result != MA_SUCCESS) {
- return result;
- }
+ ma_dlclose(pContext, kernel32DLL);
}
+#endif
- /* Keep going until everything has been read. */
- totalPCMFramesProcessed = 0;
- while (totalPCMFramesProcessed < frameCount) {
- ma_uint64 targetFrame;
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- /* If there are any frames remaining in the current period, consume those first. */
- if (pDevice->null_device.currentPeriodFramesRemainingCapture > 0) {
- ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed);
- ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingCapture;
- if (framesToProcess > framesRemaining) {
- framesToProcess = framesRemaining;
- }
+ pContext->onUninit = ma_context_uninit__wasapi;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__wasapi;
+ pContext->onEnumDevices = ma_context_enumerate_devices__wasapi;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__wasapi;
+ pContext->onDeviceInit = ma_device_init__wasapi;
+ pContext->onDeviceUninit = ma_device_uninit__wasapi;
+ pContext->onDeviceStart = NULL; /* Not used. Started in onDeviceMainLoop. */
+ pContext->onDeviceStop = ma_device_stop__wasapi; /* Required to ensure the capture event is signalled when stopping a loopback device while nothing is playing. */
+ pContext->onDeviceMainLoop = ma_device_main_loop__wasapi;
- /* We need to ensured the output buffer is zeroed. */
- ma_zero_memory(ma_offset_ptr(pPCMFrames, totalPCMFramesProcessed*bpf), framesToProcess*bpf);
+ return result;
+}
+#endif
- pDevice->null_device.currentPeriodFramesRemainingCapture -= framesToProcess;
- totalPCMFramesProcessed += framesToProcess;
- }
+/******************************************************************************
- /* If we've consumed the current period we'll need to mark it as such an ensure the device is started if it's not already. */
- if (pDevice->null_device.currentPeriodFramesRemainingCapture == 0) {
- pDevice->null_device.currentPeriodFramesRemainingCapture = 0;
- }
+DirectSound Backend
- /* If we've consumed the whole buffer we can return now. */
- ma_assert(totalPCMFramesProcessed <= frameCount);
- if (totalPCMFramesProcessed == frameCount) {
- break;
- }
+******************************************************************************/
+#ifdef MA_HAS_DSOUND
+/*#include */
- /* Getting here means we've still got more frames to consume, we but need to wait for it to become available. */
- targetFrame = pDevice->null_device.lastProcessedFrameCapture + (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods);
- for (;;) {
- ma_uint64 currentFrame;
+static const GUID MA_GUID_IID_DirectSoundNotify = {0xb0210783, 0x89cd, 0x11d0, {0xaf, 0x08, 0x00, 0xa0, 0xc9, 0x25, 0xcd, 0x16}};
- /* Stop waiting if the device has been stopped. */
- if (!pDevice->null_device.isStarted) {
- break;
- }
+/* miniaudio only uses priority or exclusive modes. */
+#define MA_DSSCL_NORMAL 1
+#define MA_DSSCL_PRIORITY 2
+#define MA_DSSCL_EXCLUSIVE 3
+#define MA_DSSCL_WRITEPRIMARY 4
- currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice);
- if (currentFrame >= targetFrame) {
- break;
- }
+#define MA_DSCAPS_PRIMARYMONO 0x00000001
+#define MA_DSCAPS_PRIMARYSTEREO 0x00000002
+#define MA_DSCAPS_PRIMARY8BIT 0x00000004
+#define MA_DSCAPS_PRIMARY16BIT 0x00000008
+#define MA_DSCAPS_CONTINUOUSRATE 0x00000010
+#define MA_DSCAPS_EMULDRIVER 0x00000020
+#define MA_DSCAPS_CERTIFIED 0x00000040
+#define MA_DSCAPS_SECONDARYMONO 0x00000100
+#define MA_DSCAPS_SECONDARYSTEREO 0x00000200
+#define MA_DSCAPS_SECONDARY8BIT 0x00000400
+#define MA_DSCAPS_SECONDARY16BIT 0x00000800
- /* Getting here means we haven't yet reached the target sample, so continue waiting. */
- ma_sleep(10);
- }
+#define MA_DSBCAPS_PRIMARYBUFFER 0x00000001
+#define MA_DSBCAPS_STATIC 0x00000002
+#define MA_DSBCAPS_LOCHARDWARE 0x00000004
+#define MA_DSBCAPS_LOCSOFTWARE 0x00000008
+#define MA_DSBCAPS_CTRL3D 0x00000010
+#define MA_DSBCAPS_CTRLFREQUENCY 0x00000020
+#define MA_DSBCAPS_CTRLPAN 0x00000040
+#define MA_DSBCAPS_CTRLVOLUME 0x00000080
+#define MA_DSBCAPS_CTRLPOSITIONNOTIFY 0x00000100
+#define MA_DSBCAPS_CTRLFX 0x00000200
+#define MA_DSBCAPS_STICKYFOCUS 0x00004000
+#define MA_DSBCAPS_GLOBALFOCUS 0x00008000
+#define MA_DSBCAPS_GETCURRENTPOSITION2 0x00010000
+#define MA_DSBCAPS_MUTE3DATMAXDISTANCE 0x00020000
+#define MA_DSBCAPS_LOCDEFER 0x00040000
+#define MA_DSBCAPS_TRUEPLAYPOSITION 0x00080000
- pDevice->null_device.lastProcessedFrameCapture += pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods;
- pDevice->null_device.currentPeriodFramesRemainingCapture = pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods;
- }
+#define MA_DSBPLAY_LOOPING 0x00000001
+#define MA_DSBPLAY_LOCHARDWARE 0x00000002
+#define MA_DSBPLAY_LOCSOFTWARE 0x00000004
+#define MA_DSBPLAY_TERMINATEBY_TIME 0x00000008
+#define MA_DSBPLAY_TERMINATEBY_DISTANCE 0x00000010
+#define MA_DSBPLAY_TERMINATEBY_PRIORITY 0x00000020
- return result;
-}
+#define MA_DSCBSTART_LOOPING 0x00000001
-ma_result ma_context_uninit__null(ma_context* pContext)
+typedef struct
{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_null);
-
- (void)pContext;
- return MA_SUCCESS;
-}
+ DWORD dwSize;
+ DWORD dwFlags;
+ DWORD dwBufferBytes;
+ DWORD dwReserved;
+ WAVEFORMATEX* lpwfxFormat;
+ GUID guid3DAlgorithm;
+} MA_DSBUFFERDESC;
-ma_result ma_context_init__null(const ma_context_config* pConfig, ma_context* pContext)
+typedef struct
{
- ma_assert(pContext != NULL);
+ DWORD dwSize;
+ DWORD dwFlags;
+ DWORD dwBufferBytes;
+ DWORD dwReserved;
+ WAVEFORMATEX* lpwfxFormat;
+ DWORD dwFXCount;
+ void* lpDSCFXDesc; /* <-- miniaudio doesn't use this, so set to void*. */
+} MA_DSCBUFFERDESC;
- (void)pConfig;
+typedef struct
+{
+ DWORD dwSize;
+ DWORD dwFlags;
+ DWORD dwMinSecondarySampleRate;
+ DWORD dwMaxSecondarySampleRate;
+ DWORD dwPrimaryBuffers;
+ DWORD dwMaxHwMixingAllBuffers;
+ DWORD dwMaxHwMixingStaticBuffers;
+ DWORD dwMaxHwMixingStreamingBuffers;
+ DWORD dwFreeHwMixingAllBuffers;
+ DWORD dwFreeHwMixingStaticBuffers;
+ DWORD dwFreeHwMixingStreamingBuffers;
+ DWORD dwMaxHw3DAllBuffers;
+ DWORD dwMaxHw3DStaticBuffers;
+ DWORD dwMaxHw3DStreamingBuffers;
+ DWORD dwFreeHw3DAllBuffers;
+ DWORD dwFreeHw3DStaticBuffers;
+ DWORD dwFreeHw3DStreamingBuffers;
+ DWORD dwTotalHwMemBytes;
+ DWORD dwFreeHwMemBytes;
+ DWORD dwMaxContigFreeHwMemBytes;
+ DWORD dwUnlockTransferRateHwBuffers;
+ DWORD dwPlayCpuOverheadSwBuffers;
+ DWORD dwReserved1;
+ DWORD dwReserved2;
+} MA_DSCAPS;
- pContext->onUninit = ma_context_uninit__null;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__null;
- pContext->onEnumDevices = ma_context_enumerate_devices__null;
- pContext->onGetDeviceInfo = ma_context_get_device_info__null;
- pContext->onDeviceInit = ma_device_init__null;
- pContext->onDeviceUninit = ma_device_uninit__null;
- pContext->onDeviceStart = ma_device_start__null;
- pContext->onDeviceStop = ma_device_stop__null;
- pContext->onDeviceWrite = ma_device_write__null;
- pContext->onDeviceRead = ma_device_read__null;
+typedef struct
+{
+ DWORD dwSize;
+ DWORD dwFlags;
+ DWORD dwBufferBytes;
+ DWORD dwUnlockTransferRate;
+ DWORD dwPlayCpuOverhead;
+} MA_DSBCAPS;
- /* The null backend always works. */
- return MA_SUCCESS;
-}
-#endif
+typedef struct
+{
+ DWORD dwSize;
+ DWORD dwFlags;
+ DWORD dwFormats;
+ DWORD dwChannels;
+} MA_DSCCAPS;
+typedef struct
+{
+ DWORD dwSize;
+ DWORD dwFlags;
+ DWORD dwBufferBytes;
+ DWORD dwReserved;
+} MA_DSCBCAPS;
-/*******************************************************************************
+typedef struct
+{
+ DWORD dwOffset;
+ HANDLE hEventNotify;
+} MA_DSBPOSITIONNOTIFY;
-WIN32 COMMON
+typedef struct ma_IDirectSound ma_IDirectSound;
+typedef struct ma_IDirectSoundBuffer ma_IDirectSoundBuffer;
+typedef struct ma_IDirectSoundCapture ma_IDirectSoundCapture;
+typedef struct ma_IDirectSoundCaptureBuffer ma_IDirectSoundCaptureBuffer;
+typedef struct ma_IDirectSoundNotify ma_IDirectSoundNotify;
-*******************************************************************************/
-#if defined(MA_WIN32)
-#if defined(MA_WIN32_DESKTOP)
- #define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) ((MA_PFN_CoInitializeEx)pContext->win32.CoInitializeEx)(pvReserved, dwCoInit)
- #define ma_CoUninitialize(pContext) ((MA_PFN_CoUninitialize)pContext->win32.CoUninitialize)()
- #define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) ((MA_PFN_CoCreateInstance)pContext->win32.CoCreateInstance)(rclsid, pUnkOuter, dwClsContext, riid, ppv)
- #define ma_CoTaskMemFree(pContext, pv) ((MA_PFN_CoTaskMemFree)pContext->win32.CoTaskMemFree)(pv)
- #define ma_PropVariantClear(pContext, pvar) ((MA_PFN_PropVariantClear)pContext->win32.PropVariantClear)(pvar)
-#else
- #define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) CoInitializeEx(pvReserved, dwCoInit)
- #define ma_CoUninitialize(pContext) CoUninitialize()
- #define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) CoCreateInstance(rclsid, pUnkOuter, dwClsContext, riid, ppv)
- #define ma_CoTaskMemFree(pContext, pv) CoTaskMemFree(pv)
- #define ma_PropVariantClear(pContext, pvar) PropVariantClear(pvar)
-#endif
-#if !defined(MAXULONG_PTR)
-typedef size_t DWORD_PTR;
-#endif
+/*
+COM objects. The way these work is that you have a vtable (a list of function pointers, kind of
+like how C++ works internally), and then you have a structure with a single member, which is a
+pointer to the vtable. The vtable is where the methods of the object are defined. Methods need
+to be in a specific order, and parent classes need to have their methods declared first.
+*/
-#if !defined(WAVE_FORMAT_44M08)
-#define WAVE_FORMAT_44M08 0x00000100
-#define WAVE_FORMAT_44S08 0x00000200
-#define WAVE_FORMAT_44M16 0x00000400
-#define WAVE_FORMAT_44S16 0x00000800
-#define WAVE_FORMAT_48M08 0x00001000
-#define WAVE_FORMAT_48S08 0x00002000
-#define WAVE_FORMAT_48M16 0x00004000
-#define WAVE_FORMAT_48S16 0x00008000
-#define WAVE_FORMAT_96M08 0x00010000
-#define WAVE_FORMAT_96S08 0x00020000
-#define WAVE_FORMAT_96M16 0x00040000
-#define WAVE_FORMAT_96S16 0x00080000
-#endif
+/* IDirectSound */
+typedef struct
+{
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSound* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSound* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSound* pThis);
-#ifndef SPEAKER_FRONT_LEFT
-#define SPEAKER_FRONT_LEFT 0x1
-#define SPEAKER_FRONT_RIGHT 0x2
-#define SPEAKER_FRONT_CENTER 0x4
-#define SPEAKER_LOW_FREQUENCY 0x8
-#define SPEAKER_BACK_LEFT 0x10
-#define SPEAKER_BACK_RIGHT 0x20
-#define SPEAKER_FRONT_LEFT_OF_CENTER 0x40
-#define SPEAKER_FRONT_RIGHT_OF_CENTER 0x80
-#define SPEAKER_BACK_CENTER 0x100
-#define SPEAKER_SIDE_LEFT 0x200
-#define SPEAKER_SIDE_RIGHT 0x400
-#define SPEAKER_TOP_CENTER 0x800
-#define SPEAKER_TOP_FRONT_LEFT 0x1000
-#define SPEAKER_TOP_FRONT_CENTER 0x2000
-#define SPEAKER_TOP_FRONT_RIGHT 0x4000
-#define SPEAKER_TOP_BACK_LEFT 0x8000
-#define SPEAKER_TOP_BACK_CENTER 0x10000
-#define SPEAKER_TOP_BACK_RIGHT 0x20000
-#endif
+ /* IDirectSound */
+ HRESULT (STDMETHODCALLTYPE * CreateSoundBuffer) (ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter);
+ HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps);
+ HRESULT (STDMETHODCALLTYPE * DuplicateSoundBuffer)(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate);
+ HRESULT (STDMETHODCALLTYPE * SetCooperativeLevel) (ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel);
+ HRESULT (STDMETHODCALLTYPE * Compact) (ma_IDirectSound* pThis);
+ HRESULT (STDMETHODCALLTYPE * GetSpeakerConfig) (ma_IDirectSound* pThis, DWORD* pSpeakerConfig);
+ HRESULT (STDMETHODCALLTYPE * SetSpeakerConfig) (ma_IDirectSound* pThis, DWORD dwSpeakerConfig);
+ HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSound* pThis, const GUID* pGuidDevice);
+} ma_IDirectSoundVtbl;
+struct ma_IDirectSound
+{
+ ma_IDirectSoundVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IDirectSound_QueryInterface(ma_IDirectSound* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IDirectSound_AddRef(ma_IDirectSound* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IDirectSound_Release(ma_IDirectSound* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IDirectSound_CreateSoundBuffer(ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateSoundBuffer(pThis, pDSBufferDesc, ppDSBuffer, pUnkOuter); }
+static MA_INLINE HRESULT ma_IDirectSound_GetCaps(ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCaps); }
+static MA_INLINE HRESULT ma_IDirectSound_DuplicateSoundBuffer(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate) { return pThis->lpVtbl->DuplicateSoundBuffer(pThis, pDSBufferOriginal, ppDSBufferDuplicate); }
+static MA_INLINE HRESULT ma_IDirectSound_SetCooperativeLevel(ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel) { return pThis->lpVtbl->SetCooperativeLevel(pThis, hwnd, dwLevel); }
+static MA_INLINE HRESULT ma_IDirectSound_Compact(ma_IDirectSound* pThis) { return pThis->lpVtbl->Compact(pThis); }
+static MA_INLINE HRESULT ma_IDirectSound_GetSpeakerConfig(ma_IDirectSound* pThis, DWORD* pSpeakerConfig) { return pThis->lpVtbl->GetSpeakerConfig(pThis, pSpeakerConfig); }
+static MA_INLINE HRESULT ma_IDirectSound_SetSpeakerConfig(ma_IDirectSound* pThis, DWORD dwSpeakerConfig) { return pThis->lpVtbl->SetSpeakerConfig(pThis, dwSpeakerConfig); }
+static MA_INLINE HRESULT ma_IDirectSound_Initialize(ma_IDirectSound* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); }
-/*
-The SDK that comes with old versions of MSVC (VC6, for example) does not appear to define WAVEFORMATEXTENSIBLE. We
-define our own implementation in this case.
-*/
-#if (defined(_MSC_VER) && !defined(_WAVEFORMATEXTENSIBLE_)) || defined(__DMC__)
+
+/* IDirectSoundBuffer */
typedef struct
{
- WAVEFORMATEX Format;
- union
- {
- WORD wValidBitsPerSample;
- WORD wSamplesPerBlock;
- WORD wReserved;
- } Samples;
- DWORD dwChannelMask;
- GUID SubFormat;
-} WAVEFORMATEXTENSIBLE;
-#endif
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundBuffer* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundBuffer* pThis);
-#ifndef WAVE_FORMAT_EXTENSIBLE
-#define WAVE_FORMAT_EXTENSIBLE 0xFFFE
-#endif
+ /* IDirectSoundBuffer */
+ HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps);
+ HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor);
+ HRESULT (STDMETHODCALLTYPE * GetFormat) (ma_IDirectSoundBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten);
+ HRESULT (STDMETHODCALLTYPE * GetVolume) (ma_IDirectSoundBuffer* pThis, LONG* pVolume);
+ HRESULT (STDMETHODCALLTYPE * GetPan) (ma_IDirectSoundBuffer* pThis, LONG* pPan);
+ HRESULT (STDMETHODCALLTYPE * GetFrequency) (ma_IDirectSoundBuffer* pThis, DWORD* pFrequency);
+ HRESULT (STDMETHODCALLTYPE * GetStatus) (ma_IDirectSoundBuffer* pThis, DWORD* pStatus);
+ HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc);
+ HRESULT (STDMETHODCALLTYPE * Lock) (ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags);
+ HRESULT (STDMETHODCALLTYPE * Play) (ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags);
+ HRESULT (STDMETHODCALLTYPE * SetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition);
+ HRESULT (STDMETHODCALLTYPE * SetFormat) (ma_IDirectSoundBuffer* pThis, const WAVEFORMATEX* pFormat);
+ HRESULT (STDMETHODCALLTYPE * SetVolume) (ma_IDirectSoundBuffer* pThis, LONG volume);
+ HRESULT (STDMETHODCALLTYPE * SetPan) (ma_IDirectSoundBuffer* pThis, LONG pan);
+ HRESULT (STDMETHODCALLTYPE * SetFrequency) (ma_IDirectSoundBuffer* pThis, DWORD dwFrequency);
+ HRESULT (STDMETHODCALLTYPE * Stop) (ma_IDirectSoundBuffer* pThis);
+ HRESULT (STDMETHODCALLTYPE * Unlock) (ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2);
+ HRESULT (STDMETHODCALLTYPE * Restore) (ma_IDirectSoundBuffer* pThis);
+} ma_IDirectSoundBufferVtbl;
+struct ma_IDirectSoundBuffer
+{
+ ma_IDirectSoundBufferVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_QueryInterface(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IDirectSoundBuffer_AddRef(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IDirectSoundBuffer_Release(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetCaps(ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSBufferCaps); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCurrentPlayCursor, pCurrentWriteCursor); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetFormat(ma_IDirectSoundBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetVolume(ma_IDirectSoundBuffer* pThis, LONG* pVolume) { return pThis->lpVtbl->GetVolume(pThis, pVolume); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetPan(ma_IDirectSoundBuffer* pThis, LONG* pPan) { return pThis->lpVtbl->GetPan(pThis, pPan); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetFrequency(ma_IDirectSoundBuffer* pThis, DWORD* pFrequency) { return pThis->lpVtbl->GetFrequency(pThis, pFrequency); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetStatus(ma_IDirectSoundBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_Initialize(ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSound, pDSBufferDesc); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_Lock(ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_Play(ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags) { return pThis->lpVtbl->Play(pThis, dwReserved1, dwPriority, dwFlags); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition) { return pThis->lpVtbl->SetCurrentPosition(pThis, dwNewPosition); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetFormat(ma_IDirectSoundBuffer* pThis, const WAVEFORMATEX* pFormat) { return pThis->lpVtbl->SetFormat(pThis, pFormat); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetVolume(ma_IDirectSoundBuffer* pThis, LONG volume) { return pThis->lpVtbl->SetVolume(pThis, volume); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetPan(ma_IDirectSoundBuffer* pThis, LONG pan) { return pThis->lpVtbl->SetPan(pThis, pan); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetFrequency(ma_IDirectSoundBuffer* pThis, DWORD dwFrequency) { return pThis->lpVtbl->SetFrequency(pThis, dwFrequency); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_Stop(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_Unlock(ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); }
+static MA_INLINE HRESULT ma_IDirectSoundBuffer_Restore(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Restore(pThis); }
-#ifndef WAVE_FORMAT_IEEE_FLOAT
-#define WAVE_FORMAT_IEEE_FLOAT 0x0003
-#endif
-GUID MA_GUID_NULL = {0x00000000, 0x0000, 0x0000, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}};
+/* IDirectSoundCapture */
+typedef struct
+{
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundCapture* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundCapture* pThis);
-/* Converts an individual Win32-style channel identifier (SPEAKER_FRONT_LEFT, etc.) to miniaudio. */
-ma_uint8 ma_channel_id_to_ma__win32(DWORD id)
+ /* IDirectSoundCapture */
+ HRESULT (STDMETHODCALLTYPE * CreateCaptureBuffer)(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter);
+ HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps);
+ HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice);
+} ma_IDirectSoundCaptureVtbl;
+struct ma_IDirectSoundCapture
{
- switch (id)
- {
- case SPEAKER_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT;
- case SPEAKER_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT;
- case SPEAKER_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER;
- case SPEAKER_LOW_FREQUENCY: return MA_CHANNEL_LFE;
- case SPEAKER_BACK_LEFT: return MA_CHANNEL_BACK_LEFT;
- case SPEAKER_BACK_RIGHT: return MA_CHANNEL_BACK_RIGHT;
- case SPEAKER_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER;
- case SPEAKER_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER;
- case SPEAKER_BACK_CENTER: return MA_CHANNEL_BACK_CENTER;
- case SPEAKER_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT;
- case SPEAKER_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT;
- case SPEAKER_TOP_CENTER: return MA_CHANNEL_TOP_CENTER;
- case SPEAKER_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT;
- case SPEAKER_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER;
- case SPEAKER_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT;
- case SPEAKER_TOP_BACK_LEFT: return MA_CHANNEL_TOP_BACK_LEFT;
- case SPEAKER_TOP_BACK_CENTER: return MA_CHANNEL_TOP_BACK_CENTER;
- case SPEAKER_TOP_BACK_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT;
- default: return 0;
- }
-}
+ ma_IDirectSoundCaptureVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IDirectSoundCapture_QueryInterface(ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IDirectSoundCapture_AddRef(ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IDirectSoundCapture_Release(ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IDirectSoundCapture_CreateCaptureBuffer(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateCaptureBuffer(pThis, pDSCBufferDesc, ppDSCBuffer, pUnkOuter); }
+static MA_INLINE HRESULT ma_IDirectSoundCapture_GetCaps (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCCaps); }
+static MA_INLINE HRESULT ma_IDirectSoundCapture_Initialize (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); }
-/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to Win32-style. */
-DWORD ma_channel_id_to_win32(DWORD id)
+
+/* IDirectSoundCaptureBuffer */
+typedef struct
{
- switch (id)
- {
- case MA_CHANNEL_MONO: return SPEAKER_FRONT_CENTER;
- case MA_CHANNEL_FRONT_LEFT: return SPEAKER_FRONT_LEFT;
- case MA_CHANNEL_FRONT_RIGHT: return SPEAKER_FRONT_RIGHT;
- case MA_CHANNEL_FRONT_CENTER: return SPEAKER_FRONT_CENTER;
- case MA_CHANNEL_LFE: return SPEAKER_LOW_FREQUENCY;
- case MA_CHANNEL_BACK_LEFT: return SPEAKER_BACK_LEFT;
- case MA_CHANNEL_BACK_RIGHT: return SPEAKER_BACK_RIGHT;
- case MA_CHANNEL_FRONT_LEFT_CENTER: return SPEAKER_FRONT_LEFT_OF_CENTER;
- case MA_CHANNEL_FRONT_RIGHT_CENTER: return SPEAKER_FRONT_RIGHT_OF_CENTER;
- case MA_CHANNEL_BACK_CENTER: return SPEAKER_BACK_CENTER;
- case MA_CHANNEL_SIDE_LEFT: return SPEAKER_SIDE_LEFT;
- case MA_CHANNEL_SIDE_RIGHT: return SPEAKER_SIDE_RIGHT;
- case MA_CHANNEL_TOP_CENTER: return SPEAKER_TOP_CENTER;
- case MA_CHANNEL_TOP_FRONT_LEFT: return SPEAKER_TOP_FRONT_LEFT;
- case MA_CHANNEL_TOP_FRONT_CENTER: return SPEAKER_TOP_FRONT_CENTER;
- case MA_CHANNEL_TOP_FRONT_RIGHT: return SPEAKER_TOP_FRONT_RIGHT;
- case MA_CHANNEL_TOP_BACK_LEFT: return SPEAKER_TOP_BACK_LEFT;
- case MA_CHANNEL_TOP_BACK_CENTER: return SPEAKER_TOP_BACK_CENTER;
- case MA_CHANNEL_TOP_BACK_RIGHT: return SPEAKER_TOP_BACK_RIGHT;
- default: return 0;
- }
-}
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundCaptureBuffer* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundCaptureBuffer* pThis);
-/* Converts a channel mapping to a Win32-style channel mask. */
-DWORD ma_channel_map_to_channel_mask__win32(const ma_channel channelMap[MA_MAX_CHANNELS], ma_uint32 channels)
+ /* IDirectSoundCaptureBuffer */
+ HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps);
+ HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition);
+ HRESULT (STDMETHODCALLTYPE * GetFormat) (ma_IDirectSoundCaptureBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten);
+ HRESULT (STDMETHODCALLTYPE * GetStatus) (ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus);
+ HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc);
+ HRESULT (STDMETHODCALLTYPE * Lock) (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags);
+ HRESULT (STDMETHODCALLTYPE * Start) (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags);
+ HRESULT (STDMETHODCALLTYPE * Stop) (ma_IDirectSoundCaptureBuffer* pThis);
+ HRESULT (STDMETHODCALLTYPE * Unlock) (ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2);
+} ma_IDirectSoundCaptureBufferVtbl;
+struct ma_IDirectSoundCaptureBuffer
{
- DWORD dwChannelMask = 0;
- ma_uint32 iChannel;
+ ma_IDirectSoundCaptureBufferVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_QueryInterface(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IDirectSoundCaptureBuffer_AddRef(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IDirectSoundCaptureBuffer_Release(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetCaps(ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCBCaps); }
+static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetCurrentPosition(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCapturePosition, pReadPosition); }
+static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetFormat(ma_IDirectSoundCaptureBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); }
+static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetStatus(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); }
+static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Initialize(ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSoundCapture, pDSCBufferDesc); }
+static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Lock(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); }
+static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Start(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags) { return pThis->lpVtbl->Start(pThis, dwFlags); }
+static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Stop(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); }
+static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Unlock(ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); }
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- dwChannelMask |= ma_channel_id_to_win32(channelMap[iChannel]);
- }
- return dwChannelMask;
-}
+/* IDirectSoundNotify */
+typedef struct
+{
+ /* IUnknown */
+ HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject);
+ ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundNotify* pThis);
+ ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundNotify* pThis);
-/* Converts a Win32-style channel mask to a miniaudio channel map. */
-void ma_channel_mask_to_channel_map__win32(DWORD dwChannelMask, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+ /* IDirectSoundNotify */
+ HRESULT (STDMETHODCALLTYPE * SetNotificationPositions)(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies);
+} ma_IDirectSoundNotifyVtbl;
+struct ma_IDirectSoundNotify
{
- if (channels == 1 && dwChannelMask == 0) {
- channelMap[0] = MA_CHANNEL_MONO;
- } else if (channels == 2 && dwChannelMask == 0) {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- } else {
- if (channels == 1 && (dwChannelMask & SPEAKER_FRONT_CENTER) != 0) {
- channelMap[0] = MA_CHANNEL_MONO;
- } else {
- /* Just iterate over each bit. */
- ma_uint32 iChannel = 0;
- ma_uint32 iBit;
+ ma_IDirectSoundNotifyVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IDirectSoundNotify_QueryInterface(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG ma_IDirectSoundNotify_AddRef(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG ma_IDirectSoundNotify_Release(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IDirectSoundNotify_SetNotificationPositions(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies) { return pThis->lpVtbl->SetNotificationPositions(pThis, dwPositionNotifies, pPositionNotifies); }
- for (iBit = 0; iBit < 32; ++iBit) {
- DWORD bitValue = (dwChannelMask & (1UL << iBit));
- if (bitValue != 0) {
- /* The bit is set. */
- channelMap[iChannel] = ma_channel_id_to_ma__win32(bitValue);
- iChannel += 1;
- }
- }
- }
- }
-}
-#ifdef __cplusplus
-ma_bool32 ma_is_guid_equal(const void* a, const void* b)
-{
- return IsEqualGUID(*(const GUID*)a, *(const GUID*)b);
-}
-#else
-#define ma_is_guid_equal(a, b) IsEqualGUID((const GUID*)a, (const GUID*)b)
-#endif
+typedef BOOL (CALLBACK * ma_DSEnumCallbackAProc) (LPGUID pDeviceGUID, LPCSTR pDeviceDescription, LPCSTR pModule, LPVOID pContext);
+typedef HRESULT (WINAPI * ma_DirectSoundCreateProc) (const GUID* pcGuidDevice, ma_IDirectSound** ppDS8, LPUNKNOWN pUnkOuter);
+typedef HRESULT (WINAPI * ma_DirectSoundEnumerateAProc) (ma_DSEnumCallbackAProc pDSEnumCallback, LPVOID pContext);
+typedef HRESULT (WINAPI * ma_DirectSoundCaptureCreateProc) (const GUID* pcGuidDevice, ma_IDirectSoundCapture** ppDSC8, LPUNKNOWN pUnkOuter);
+typedef HRESULT (WINAPI * ma_DirectSoundCaptureEnumerateAProc)(ma_DSEnumCallbackAProc pDSEnumCallback, LPVOID pContext);
-ma_format ma_format_from_WAVEFORMATEX(const WAVEFORMATEX* pWF)
+static ma_uint32 ma_get_best_sample_rate_within_range(ma_uint32 sampleRateMin, ma_uint32 sampleRateMax)
{
- ma_assert(pWF != NULL);
-
- if (pWF->wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
- const WAVEFORMATEXTENSIBLE* pWFEX = (const WAVEFORMATEXTENSIBLE*)pWF;
- if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_PCM)) {
- if (pWFEX->Samples.wValidBitsPerSample == 32) {
- return ma_format_s32;
- }
- if (pWFEX->Samples.wValidBitsPerSample == 24) {
- if (pWFEX->Format.wBitsPerSample == 32) {
- /*return ma_format_s24_32;*/
- }
- if (pWFEX->Format.wBitsPerSample == 24) {
- return ma_format_s24;
- }
- }
- if (pWFEX->Samples.wValidBitsPerSample == 16) {
- return ma_format_s16;
- }
- if (pWFEX->Samples.wValidBitsPerSample == 8) {
- return ma_format_u8;
- }
- }
- if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)) {
- if (pWFEX->Samples.wValidBitsPerSample == 32) {
- return ma_format_f32;
- }
- /*
- if (pWFEX->Samples.wValidBitsPerSample == 64) {
- return ma_format_f64;
- }
- */
- }
- } else {
- if (pWF->wFormatTag == WAVE_FORMAT_PCM) {
- if (pWF->wBitsPerSample == 32) {
- return ma_format_s32;
- }
- if (pWF->wBitsPerSample == 24) {
- return ma_format_s24;
- }
- if (pWF->wBitsPerSample == 16) {
- return ma_format_s16;
- }
- if (pWF->wBitsPerSample == 8) {
- return ma_format_u8;
- }
- }
- if (pWF->wFormatTag == WAVE_FORMAT_IEEE_FLOAT) {
- if (pWF->wBitsPerSample == 32) {
- return ma_format_f32;
- }
- if (pWF->wBitsPerSample == 64) {
- /*return ma_format_f64;*/
+ /* Normalize the range in case we were given something stupid. */
+ if (sampleRateMin < MA_MIN_SAMPLE_RATE) {
+ sampleRateMin = MA_MIN_SAMPLE_RATE;
+ }
+ if (sampleRateMax > MA_MAX_SAMPLE_RATE) {
+ sampleRateMax = MA_MAX_SAMPLE_RATE;
+ }
+ if (sampleRateMin > sampleRateMax) {
+ sampleRateMin = sampleRateMax;
+ }
+
+ if (sampleRateMin == sampleRateMax) {
+ return sampleRateMax;
+ } else {
+ size_t iStandardRate;
+ for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) {
+ ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate];
+ if (standardRate >= sampleRateMin && standardRate <= sampleRateMax) {
+ return standardRate;
}
}
}
- return ma_format_unknown;
+ /* Should never get here. */
+ MA_ASSERT(MA_FALSE);
+ return 0;
}
-#endif
+/*
+Retrieves the channel count and channel map for the given speaker configuration. If the speaker configuration is unknown,
+the channel count and channel map will be left unmodified.
+*/
+static void ma_get_channels_from_speaker_config__dsound(DWORD speakerConfig, WORD* pChannelsOut, DWORD* pChannelMapOut)
+{
+ WORD channels;
+ DWORD channelMap;
-/*******************************************************************************
+ channels = 0;
+ if (pChannelsOut != NULL) {
+ channels = *pChannelsOut;
+ }
-WASAPI Backend
+ channelMap = 0;
+ if (pChannelMapOut != NULL) {
+ channelMap = *pChannelMapOut;
+ }
-*******************************************************************************/
-#ifdef MA_HAS_WASAPI
-#if 0
-#if defined(_MSC_VER)
- #pragma warning(push)
- #pragma warning(disable:4091) /* 'typedef ': ignored on left of '' when no variable is declared */
-#endif
-#include
-#include
-#if defined(_MSC_VER)
- #pragma warning(pop)
-#endif
-#endif /* 0 */
+ /*
+ The speaker configuration is a combination of speaker config and speaker geometry. The lower 8 bits is what we care about. The upper
+ 16 bits is for the geometry.
+ */
+ switch ((BYTE)(speakerConfig)) {
+ case 1 /*DSSPEAKER_HEADPHONE*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break;
+ case 2 /*DSSPEAKER_MONO*/: channels = 1; channelMap = SPEAKER_FRONT_CENTER; break;
+ case 3 /*DSSPEAKER_QUAD*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break;
+ case 4 /*DSSPEAKER_STEREO*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break;
+ case 5 /*DSSPEAKER_SURROUND*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_BACK_CENTER; break;
+ case 6 /*DSSPEAKER_5POINT1_BACK*/ /*DSSPEAKER_5POINT1*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break;
+ case 7 /*DSSPEAKER_7POINT1_WIDE*/ /*DSSPEAKER_7POINT1*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_FRONT_LEFT_OF_CENTER | SPEAKER_FRONT_RIGHT_OF_CENTER; break;
+ case 8 /*DSSPEAKER_7POINT1_SURROUND*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break;
+ case 9 /*DSSPEAKER_5POINT1_SURROUND*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break;
+ default: break;
+ }
+ if (pChannelsOut != NULL) {
+ *pChannelsOut = channels;
+ }
-/* Some compilers don't define VerifyVersionInfoW. Need to write this ourselves. */
-#define MA_WIN32_WINNT_VISTA 0x0600
-#define MA_VER_MINORVERSION 0x01
-#define MA_VER_MAJORVERSION 0x02
-#define MA_VER_SERVICEPACKMAJOR 0x20
-#define MA_VER_GREATER_EQUAL 0x03
+ if (pChannelMapOut != NULL) {
+ *pChannelMapOut = channelMap;
+ }
+}
-typedef struct {
- DWORD dwOSVersionInfoSize;
- DWORD dwMajorVersion;
- DWORD dwMinorVersion;
- DWORD dwBuildNumber;
- DWORD dwPlatformId;
- WCHAR szCSDVersion[128];
- WORD wServicePackMajor;
- WORD wServicePackMinor;
- WORD wSuiteMask;
- BYTE wProductType;
- BYTE wReserved;
-} ma_OSVERSIONINFOEXW;
-typedef BOOL (WINAPI * ma_PFNVerifyVersionInfoW) (ma_OSVERSIONINFOEXW* lpVersionInfo, DWORD dwTypeMask, DWORDLONG dwlConditionMask);
-typedef ULONGLONG (WINAPI * ma_PFNVerSetConditionMask)(ULONGLONG dwlConditionMask, DWORD dwTypeBitMask, BYTE dwConditionMask);
+static ma_result ma_context_create_IDirectSound__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSound** ppDirectSound)
+{
+ ma_IDirectSound* pDirectSound;
+ HWND hWnd;
+ HRESULT hr;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(ppDirectSound != NULL);
-#ifndef PROPERTYKEY_DEFINED
-#define PROPERTYKEY_DEFINED
-typedef struct
-{
- GUID fmtid;
- DWORD pid;
-} PROPERTYKEY;
-#endif
+ *ppDirectSound = NULL;
+ pDirectSound = NULL;
-/* Some compilers don't define PropVariantInit(). We just do this ourselves since it's just a memset(). */
-static MA_INLINE void ma_PropVariantInit(PROPVARIANT* pProp)
-{
- ma_zero_object(pProp);
+ if (FAILED(((ma_DirectSoundCreateProc)pContext->dsound.DirectSoundCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSound, NULL))) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCreate() failed for playback device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ }
+
+ /* The cooperative level must be set before doing anything else. */
+ hWnd = ((MA_PFN_GetForegroundWindow)pContext->win32.GetForegroundWindow)();
+ if (hWnd == NULL) {
+ hWnd = ((MA_PFN_GetDesktopWindow)pContext->win32.GetDesktopWindow)();
+ }
+
+ hr = ma_IDirectSound_SetCooperativeLevel(pDirectSound, hWnd, (shareMode == ma_share_mode_exclusive) ? MA_DSSCL_EXCLUSIVE : MA_DSSCL_PRIORITY);
+ if (FAILED(hr)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_SetCooperateiveLevel() failed for playback device.", ma_result_from_HRESULT(hr));
+ }
+
+ *ppDirectSound = pDirectSound;
+ return MA_SUCCESS;
}
+static ma_result ma_context_create_IDirectSoundCapture__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSoundCapture** ppDirectSoundCapture)
+{
+ ma_IDirectSoundCapture* pDirectSoundCapture;
+ HRESULT hr;
-const PROPERTYKEY MA_PKEY_Device_FriendlyName = {{0xA45C254E, 0xDF1C, 0x4EFD, {0x80, 0x20, 0x67, 0xD1, 0x46, 0xA8, 0x50, 0xE0}}, 14};
-const PROPERTYKEY MA_PKEY_AudioEngine_DeviceFormat = {{0xF19F064D, 0x82C, 0x4E27, {0xBC, 0x73, 0x68, 0x82, 0xA1, 0xBB, 0x8E, 0x4C}}, 0};
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(ppDirectSoundCapture != NULL);
-const IID MA_IID_IUnknown = {0x00000000, 0x0000, 0x0000, {0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}; /* 00000000-0000-0000-C000-000000000046 */
-const IID MA_IID_IAgileObject = {0x94EA2B94, 0xE9CC, 0x49E0, {0xC0, 0xFF, 0xEE, 0x64, 0xCA, 0x8F, 0x5B, 0x90}}; /* 94EA2B94-E9CC-49E0-C0FF-EE64CA8F5B90 */
+ /* DirectSound does not support exclusive mode for capture. */
+ if (shareMode == ma_share_mode_exclusive) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
+ }
-const IID MA_IID_IAudioClient = {0x1CB9AD4C, 0xDBFA, 0x4C32, {0xB1, 0x78, 0xC2, 0xF5, 0x68, 0xA7, 0x03, 0xB2}}; /* 1CB9AD4C-DBFA-4C32-B178-C2F568A703B2 = __uuidof(IAudioClient) */
-const IID MA_IID_IAudioClient2 = {0x726778CD, 0xF60A, 0x4EDA, {0x82, 0xDE, 0xE4, 0x76, 0x10, 0xCD, 0x78, 0xAA}}; /* 726778CD-F60A-4EDA-82DE-E47610CD78AA = __uuidof(IAudioClient2) */
-const IID MA_IID_IAudioClient3 = {0x7ED4EE07, 0x8E67, 0x4CD4, {0x8C, 0x1A, 0x2B, 0x7A, 0x59, 0x87, 0xAD, 0x42}}; /* 7ED4EE07-8E67-4CD4-8C1A-2B7A5987AD42 = __uuidof(IAudioClient3) */
-const IID MA_IID_IAudioRenderClient = {0xF294ACFC, 0x3146, 0x4483, {0xA7, 0xBF, 0xAD, 0xDC, 0xA7, 0xC2, 0x60, 0xE2}}; /* F294ACFC-3146-4483-A7BF-ADDCA7C260E2 = __uuidof(IAudioRenderClient) */
-const IID MA_IID_IAudioCaptureClient = {0xC8ADBD64, 0xE71E, 0x48A0, {0xA4, 0xDE, 0x18, 0x5C, 0x39, 0x5C, 0xD3, 0x17}}; /* C8ADBD64-E71E-48A0-A4DE-185C395CD317 = __uuidof(IAudioCaptureClient) */
-const IID MA_IID_IMMNotificationClient = {0x7991EEC9, 0x7E89, 0x4D85, {0x83, 0x90, 0x6C, 0x70, 0x3C, 0xEC, 0x60, 0xC0}}; /* 7991EEC9-7E89-4D85-8390-6C703CEC60C0 = __uuidof(IMMNotificationClient) */
-#ifndef MA_WIN32_DESKTOP
-const IID MA_IID_DEVINTERFACE_AUDIO_RENDER = {0xE6327CAD, 0xDCEC, 0x4949, {0xAE, 0x8A, 0x99, 0x1E, 0x97, 0x6A, 0x79, 0xD2}}; /* E6327CAD-DCEC-4949-AE8A-991E976A79D2 */
-const IID MA_IID_DEVINTERFACE_AUDIO_CAPTURE = {0x2EEF81BE, 0x33FA, 0x4800, {0x96, 0x70, 0x1C, 0xD4, 0x74, 0x97, 0x2C, 0x3F}}; /* 2EEF81BE-33FA-4800-9670-1CD474972C3F */
-const IID MA_IID_IActivateAudioInterfaceCompletionHandler = {0x41D949AB, 0x9862, 0x444A, {0x80, 0xF6, 0xC2, 0x61, 0x33, 0x4D, 0xA5, 0xEB}}; /* 41D949AB-9862-444A-80F6-C261334DA5EB */
-#endif
+ *ppDirectSoundCapture = NULL;
+ pDirectSoundCapture = NULL;
-const IID MA_CLSID_MMDeviceEnumerator_Instance = {0xBCDE0395, 0xE52F, 0x467C, {0x8E, 0x3D, 0xC4, 0x57, 0x92, 0x91, 0x69, 0x2E}}; /* BCDE0395-E52F-467C-8E3D-C4579291692E = __uuidof(MMDeviceEnumerator) */
-const IID MA_IID_IMMDeviceEnumerator_Instance = {0xA95664D2, 0x9614, 0x4F35, {0xA7, 0x46, 0xDE, 0x8D, 0xB6, 0x36, 0x17, 0xE6}}; /* A95664D2-9614-4F35-A746-DE8DB63617E6 = __uuidof(IMMDeviceEnumerator) */
-#ifdef __cplusplus
-#define MA_CLSID_MMDeviceEnumerator MA_CLSID_MMDeviceEnumerator_Instance
-#define MA_IID_IMMDeviceEnumerator MA_IID_IMMDeviceEnumerator_Instance
-#else
-#define MA_CLSID_MMDeviceEnumerator &MA_CLSID_MMDeviceEnumerator_Instance
-#define MA_IID_IMMDeviceEnumerator &MA_IID_IMMDeviceEnumerator_Instance
-#endif
+ hr = ((ma_DirectSoundCaptureCreateProc)pContext->dsound.DirectSoundCaptureCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSoundCapture, NULL);
+ if (FAILED(hr)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCaptureCreate() failed for capture device.", ma_result_from_HRESULT(hr));
+ }
-typedef struct ma_IUnknown ma_IUnknown;
-#ifdef MA_WIN32_DESKTOP
-#define MA_MM_DEVICE_STATE_ACTIVE 1
-#define MA_MM_DEVICE_STATE_DISABLED 2
-#define MA_MM_DEVICE_STATE_NOTPRESENT 4
-#define MA_MM_DEVICE_STATE_UNPLUGGED 8
+ *ppDirectSoundCapture = pDirectSoundCapture;
+ return MA_SUCCESS;
+}
-typedef struct ma_IMMDeviceEnumerator ma_IMMDeviceEnumerator;
-typedef struct ma_IMMDeviceCollection ma_IMMDeviceCollection;
-typedef struct ma_IMMDevice ma_IMMDevice;
-#else
-typedef struct ma_IActivateAudioInterfaceCompletionHandler ma_IActivateAudioInterfaceCompletionHandler;
-typedef struct ma_IActivateAudioInterfaceAsyncOperation ma_IActivateAudioInterfaceAsyncOperation;
-#endif
-typedef struct ma_IPropertyStore ma_IPropertyStore;
-typedef struct ma_IAudioClient ma_IAudioClient;
-typedef struct ma_IAudioClient2 ma_IAudioClient2;
-typedef struct ma_IAudioClient3 ma_IAudioClient3;
-typedef struct ma_IAudioRenderClient ma_IAudioRenderClient;
-typedef struct ma_IAudioCaptureClient ma_IAudioCaptureClient;
+static ma_result ma_context_get_format_info_for_IDirectSoundCapture__dsound(ma_context* pContext, ma_IDirectSoundCapture* pDirectSoundCapture, WORD* pChannels, WORD* pBitsPerSample, DWORD* pSampleRate)
+{
+ HRESULT hr;
+ MA_DSCCAPS caps;
+ WORD bitsPerSample;
+ DWORD sampleRate;
-typedef ma_int64 MA_REFERENCE_TIME;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pDirectSoundCapture != NULL);
-#define MA_AUDCLNT_STREAMFLAGS_CROSSPROCESS 0x00010000
-#define MA_AUDCLNT_STREAMFLAGS_LOOPBACK 0x00020000
-#define MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK 0x00040000
-#define MA_AUDCLNT_STREAMFLAGS_NOPERSIST 0x00080000
-#define MA_AUDCLNT_STREAMFLAGS_RATEADJUST 0x00100000
-#define MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000
-#define MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000
-#define MA_AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED 0x10000000
-#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDE 0x20000000
-#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED 0x40000000
+ if (pChannels) {
+ *pChannels = 0;
+ }
+ if (pBitsPerSample) {
+ *pBitsPerSample = 0;
+ }
+ if (pSampleRate) {
+ *pSampleRate = 0;
+ }
+
+ MA_ZERO_OBJECT(&caps);
+ caps.dwSize = sizeof(caps);
+ hr = ma_IDirectSoundCapture_GetCaps(pDirectSoundCapture, &caps);
+ if (FAILED(hr)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_GetCaps() failed for capture device.", ma_result_from_HRESULT(hr));
+ }
+
+ if (pChannels) {
+ *pChannels = (WORD)caps.dwChannels;
+ }
+
+ /* The device can support multiple formats. We just go through the different formats in order of priority and pick the first one. This the same type of system as the WinMM backend. */
+ bitsPerSample = 16;
+ sampleRate = 48000;
+
+ if (caps.dwChannels == 1) {
+ if ((caps.dwFormats & WAVE_FORMAT_48M16) != 0) {
+ sampleRate = 48000;
+ } else if ((caps.dwFormats & WAVE_FORMAT_44M16) != 0) {
+ sampleRate = 44100;
+ } else if ((caps.dwFormats & WAVE_FORMAT_2M16) != 0) {
+ sampleRate = 22050;
+ } else if ((caps.dwFormats & WAVE_FORMAT_1M16) != 0) {
+ sampleRate = 11025;
+ } else if ((caps.dwFormats & WAVE_FORMAT_96M16) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 8;
+ if ((caps.dwFormats & WAVE_FORMAT_48M08) != 0) {
+ sampleRate = 48000;
+ } else if ((caps.dwFormats & WAVE_FORMAT_44M08) != 0) {
+ sampleRate = 44100;
+ } else if ((caps.dwFormats & WAVE_FORMAT_2M08) != 0) {
+ sampleRate = 22050;
+ } else if ((caps.dwFormats & WAVE_FORMAT_1M08) != 0) {
+ sampleRate = 11025;
+ } else if ((caps.dwFormats & WAVE_FORMAT_96M08) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */
+ }
+ }
+ } else if (caps.dwChannels == 2) {
+ if ((caps.dwFormats & WAVE_FORMAT_48S16) != 0) {
+ sampleRate = 48000;
+ } else if ((caps.dwFormats & WAVE_FORMAT_44S16) != 0) {
+ sampleRate = 44100;
+ } else if ((caps.dwFormats & WAVE_FORMAT_2S16) != 0) {
+ sampleRate = 22050;
+ } else if ((caps.dwFormats & WAVE_FORMAT_1S16) != 0) {
+ sampleRate = 11025;
+ } else if ((caps.dwFormats & WAVE_FORMAT_96S16) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 8;
+ if ((caps.dwFormats & WAVE_FORMAT_48S08) != 0) {
+ sampleRate = 48000;
+ } else if ((caps.dwFormats & WAVE_FORMAT_44S08) != 0) {
+ sampleRate = 44100;
+ } else if ((caps.dwFormats & WAVE_FORMAT_2S08) != 0) {
+ sampleRate = 22050;
+ } else if ((caps.dwFormats & WAVE_FORMAT_1S08) != 0) {
+ sampleRate = 11025;
+ } else if ((caps.dwFormats & WAVE_FORMAT_96S08) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */
+ }
+ }
+ }
-/* We only care about a few error codes. */
-#define MA_AUDCLNT_E_INVALID_DEVICE_PERIOD (-2004287456)
-#define MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED (-2004287463)
-#define MA_AUDCLNT_S_BUFFER_EMPTY (143196161)
-#define MA_AUDCLNT_E_DEVICE_IN_USE (-2004287478)
+ if (pBitsPerSample) {
+ *pBitsPerSample = bitsPerSample;
+ }
+ if (pSampleRate) {
+ *pSampleRate = sampleRate;
+ }
-typedef enum
-{
- ma_eRender = 0,
- ma_eCapture = 1,
- ma_eAll = 2
-} ma_EDataFlow;
+ return MA_SUCCESS;
+}
-typedef enum
+static ma_bool32 ma_context_is_device_id_equal__dsound(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
{
- ma_eConsole = 0,
- ma_eMultimedia = 1,
- ma_eCommunications = 2
-} ma_ERole;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext;
-typedef enum
-{
- MA_AUDCLNT_SHAREMODE_SHARED,
- MA_AUDCLNT_SHAREMODE_EXCLUSIVE
-} MA_AUDCLNT_SHAREMODE;
+ return memcmp(pID0->dsound, pID1->dsound, sizeof(pID0->dsound)) == 0;
+}
-typedef enum
-{
- MA_AudioCategory_Other = 0 /* <-- miniaudio is only caring about Other. */
-} MA_AUDIO_STREAM_CATEGORY;
typedef struct
{
- UINT32 cbSize;
- BOOL bIsOffload;
- MA_AUDIO_STREAM_CATEGORY eCategory;
-} ma_AudioClientProperties;
+ ma_context* pContext;
+ ma_device_type deviceType;
+ ma_enum_devices_callback_proc callback;
+ void* pUserData;
+ ma_bool32 terminated;
+} ma_context_enumerate_devices_callback_data__dsound;
-/* IUnknown */
-typedef struct
-{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IUnknown* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IUnknown* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IUnknown* pThis);
-} ma_IUnknownVtbl;
-struct ma_IUnknown
+static BOOL CALLBACK ma_context_enumerate_devices_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext)
{
- ma_IUnknownVtbl* lpVtbl;
-};
-HRESULT ma_IUnknown_QueryInterface(ma_IUnknown* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IUnknown_AddRef(ma_IUnknown* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IUnknown_Release(ma_IUnknown* pThis) { return pThis->lpVtbl->Release(pThis); }
-
-#ifdef MA_WIN32_DESKTOP
- /* IMMNotificationClient */
- typedef struct
- {
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMNotificationClient* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IMMNotificationClient* pThis);
+ ma_context_enumerate_devices_callback_data__dsound* pData = (ma_context_enumerate_devices_callback_data__dsound*)lpContext;
+ ma_device_info deviceInfo;
- /* IMMNotificationClient */
- HRESULT (STDMETHODCALLTYPE * OnDeviceStateChanged) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState);
- HRESULT (STDMETHODCALLTYPE * OnDeviceAdded) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID);
- HRESULT (STDMETHODCALLTYPE * OnDeviceRemoved) (ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID);
- HRESULT (STDMETHODCALLTYPE * OnDefaultDeviceChanged)(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID);
- HRESULT (STDMETHODCALLTYPE * OnPropertyValueChanged)(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key);
- } ma_IMMNotificationClientVtbl;
+ MA_ZERO_OBJECT(&deviceInfo);
- /* IMMDeviceEnumerator */
- typedef struct
- {
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceEnumerator* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceEnumerator* pThis);
+ /* ID. */
+ if (lpGuid != NULL) {
+ MA_COPY_MEMORY(deviceInfo.id.dsound, lpGuid, 16);
+ } else {
+ MA_ZERO_MEMORY(deviceInfo.id.dsound, 16);
+ }
- /* IMMDeviceEnumerator */
- HRESULT (STDMETHODCALLTYPE * EnumAudioEndpoints) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices);
- HRESULT (STDMETHODCALLTYPE * GetDefaultAudioEndpoint) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint);
- HRESULT (STDMETHODCALLTYPE * GetDevice) (ma_IMMDeviceEnumerator* pThis, LPCWSTR pID, ma_IMMDevice** ppDevice);
- HRESULT (STDMETHODCALLTYPE * RegisterEndpointNotificationCallback) (ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient);
- HRESULT (STDMETHODCALLTYPE * UnregisterEndpointNotificationCallback)(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient);
- } ma_IMMDeviceEnumeratorVtbl;
- struct ma_IMMDeviceEnumerator
- {
- ma_IMMDeviceEnumeratorVtbl* lpVtbl;
- };
- HRESULT ma_IMMDeviceEnumerator_QueryInterface(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
- ULONG ma_IMMDeviceEnumerator_AddRef(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->AddRef(pThis); }
- ULONG ma_IMMDeviceEnumerator_Release(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->Release(pThis); }
- HRESULT ma_IMMDeviceEnumerator_EnumAudioEndpoints(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices) { return pThis->lpVtbl->EnumAudioEndpoints(pThis, dataFlow, dwStateMask, ppDevices); }
- HRESULT ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint) { return pThis->lpVtbl->GetDefaultAudioEndpoint(pThis, dataFlow, role, ppEndpoint); }
- HRESULT ma_IMMDeviceEnumerator_GetDevice(ma_IMMDeviceEnumerator* pThis, LPCWSTR pID, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->GetDevice(pThis, pID, ppDevice); }
- HRESULT ma_IMMDeviceEnumerator_RegisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->RegisterEndpointNotificationCallback(pThis, pClient); }
- HRESULT ma_IMMDeviceEnumerator_UnregisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->UnregisterEndpointNotificationCallback(pThis, pClient); }
+ /* Name / Description */
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), lpcstrDescription, (size_t)-1);
- /* IMMDeviceCollection */
- typedef struct
- {
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceCollection* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceCollection* pThis);
+ /* Call the callback function, but make sure we stop enumerating if the callee requested so. */
+ MA_ASSERT(pData != NULL);
+ pData->terminated = !pData->callback(pData->pContext, pData->deviceType, &deviceInfo, pData->pUserData);
+ if (pData->terminated) {
+ return FALSE; /* Stop enumeration. */
+ } else {
+ return TRUE; /* Continue enumeration. */
+ }
- /* IMMDeviceCollection */
- HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IMMDeviceCollection* pThis, UINT* pDevices);
- HRESULT (STDMETHODCALLTYPE * Item) (ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice);
- } ma_IMMDeviceCollectionVtbl;
- struct ma_IMMDeviceCollection
- {
- ma_IMMDeviceCollectionVtbl* lpVtbl;
- };
- HRESULT ma_IMMDeviceCollection_QueryInterface(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
- ULONG ma_IMMDeviceCollection_AddRef(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->AddRef(pThis); }
- ULONG ma_IMMDeviceCollection_Release(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->Release(pThis); }
- HRESULT ma_IMMDeviceCollection_GetCount(ma_IMMDeviceCollection* pThis, UINT* pDevices) { return pThis->lpVtbl->GetCount(pThis, pDevices); }
- HRESULT ma_IMMDeviceCollection_Item(ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->Item(pThis, nDevice, ppDevice); }
+ (void)lpcstrModule;
+}
+static ma_result ma_context_enumerate_devices__dsound(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ ma_context_enumerate_devices_callback_data__dsound data;
- /* IMMDevice */
- typedef struct
- {
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDevice* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDevice* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDevice* pThis);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
- /* IMMDevice */
- HRESULT (STDMETHODCALLTYPE * Activate) (ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, PROPVARIANT* pActivationParams, void** ppInterface);
- HRESULT (STDMETHODCALLTYPE * OpenPropertyStore)(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties);
- HRESULT (STDMETHODCALLTYPE * GetId) (ma_IMMDevice* pThis, LPWSTR *pID);
- HRESULT (STDMETHODCALLTYPE * GetState) (ma_IMMDevice* pThis, DWORD *pState);
- } ma_IMMDeviceVtbl;
- struct ma_IMMDevice
- {
- ma_IMMDeviceVtbl* lpVtbl;
- };
- HRESULT ma_IMMDevice_QueryInterface(ma_IMMDevice* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
- ULONG ma_IMMDevice_AddRef(ma_IMMDevice* pThis) { return pThis->lpVtbl->AddRef(pThis); }
- ULONG ma_IMMDevice_Release(ma_IMMDevice* pThis) { return pThis->lpVtbl->Release(pThis); }
- HRESULT ma_IMMDevice_Activate(ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, PROPVARIANT* pActivationParams, void** ppInterface) { return pThis->lpVtbl->Activate(pThis, iid, dwClsCtx, pActivationParams, ppInterface); }
- HRESULT ma_IMMDevice_OpenPropertyStore(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties) { return pThis->lpVtbl->OpenPropertyStore(pThis, stgmAccess, ppProperties); }
- HRESULT ma_IMMDevice_GetId(ma_IMMDevice* pThis, LPWSTR *pID) { return pThis->lpVtbl->GetId(pThis, pID); }
- HRESULT ma_IMMDevice_GetState(ma_IMMDevice* pThis, DWORD *pState) { return pThis->lpVtbl->GetState(pThis, pState); }
-#else
- /* IActivateAudioInterfaceAsyncOperation */
- typedef struct
- {
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IActivateAudioInterfaceAsyncOperation* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IActivateAudioInterfaceAsyncOperation* pThis);
+ data.pContext = pContext;
+ data.callback = callback;
+ data.pUserData = pUserData;
+ data.terminated = MA_FALSE;
- /* IActivateAudioInterfaceAsyncOperation */
- HRESULT (STDMETHODCALLTYPE * GetActivateResult)(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface);
- } ma_IActivateAudioInterfaceAsyncOperationVtbl;
- struct ma_IActivateAudioInterfaceAsyncOperation
- {
- ma_IActivateAudioInterfaceAsyncOperationVtbl* lpVtbl;
- };
- HRESULT ma_IActivateAudioInterfaceAsyncOperation_QueryInterface(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
- ULONG ma_IActivateAudioInterfaceAsyncOperation_AddRef(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->AddRef(pThis); }
- ULONG ma_IActivateAudioInterfaceAsyncOperation_Release(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->Release(pThis); }
- HRESULT ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface) { return pThis->lpVtbl->GetActivateResult(pThis, pActivateResult, ppActivatedInterface); }
-#endif
+ /* Playback. */
+ if (!data.terminated) {
+ data.deviceType = ma_device_type_playback;
+ ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data);
+ }
-/* IPropertyStore */
-typedef struct
-{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IPropertyStore* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IPropertyStore* pThis);
+ /* Capture. */
+ if (!data.terminated) {
+ data.deviceType = ma_device_type_capture;
+ ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data);
+ }
- /* IPropertyStore */
- HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IPropertyStore* pThis, DWORD* pPropCount);
- HRESULT (STDMETHODCALLTYPE * GetAt) (ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey);
- HRESULT (STDMETHODCALLTYPE * GetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, PROPVARIANT* pPropVar);
- HRESULT (STDMETHODCALLTYPE * SetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const PROPVARIANT* const pPropVar);
- HRESULT (STDMETHODCALLTYPE * Commit) (ma_IPropertyStore* pThis);
-} ma_IPropertyStoreVtbl;
-struct ma_IPropertyStore
-{
- ma_IPropertyStoreVtbl* lpVtbl;
-};
-HRESULT ma_IPropertyStore_QueryInterface(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IPropertyStore_AddRef(ma_IPropertyStore* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IPropertyStore_Release(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Release(pThis); }
-HRESULT ma_IPropertyStore_GetCount(ma_IPropertyStore* pThis, DWORD* pPropCount) { return pThis->lpVtbl->GetCount(pThis, pPropCount); }
-HRESULT ma_IPropertyStore_GetAt(ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey) { return pThis->lpVtbl->GetAt(pThis, propIndex, pPropKey); }
-HRESULT ma_IPropertyStore_GetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, PROPVARIANT* pPropVar) { return pThis->lpVtbl->GetValue(pThis, pKey, pPropVar); }
-HRESULT ma_IPropertyStore_SetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const PROPVARIANT* const pPropVar) { return pThis->lpVtbl->SetValue(pThis, pKey, pPropVar); }
-HRESULT ma_IPropertyStore_Commit(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Commit(pThis); }
+ return MA_SUCCESS;
+}
-/* IAudioClient */
typedef struct
{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient* pThis);
+ const ma_device_id* pDeviceID;
+ ma_device_info* pDeviceInfo;
+ ma_bool32 found;
+} ma_context_get_device_info_callback_data__dsound;
- /* IAudioClient */
- HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
- HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames);
- HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency);
- HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames);
- HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch);
- HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient* pThis, WAVEFORMATEX** ppDeviceFormat);
- HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod);
- HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient* pThis);
- HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient* pThis);
- HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient* pThis);
- HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient* pThis, HANDLE eventHandle);
- HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient* pThis, const IID* const riid, void** pp);
-} ma_IAudioClientVtbl;
-struct ma_IAudioClient
+static BOOL CALLBACK ma_context_get_device_info_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext)
{
- ma_IAudioClientVtbl* lpVtbl;
-};
-HRESULT ma_IAudioClient_QueryInterface(ma_IAudioClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IAudioClient_AddRef(ma_IAudioClient* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IAudioClient_Release(ma_IAudioClient* pThis) { return pThis->lpVtbl->Release(pThis); }
-HRESULT ma_IAudioClient_Initialize(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); }
-HRESULT ma_IAudioClient_GetBufferSize(ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); }
-HRESULT ma_IAudioClient_GetStreamLatency(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); }
-HRESULT ma_IAudioClient_GetCurrentPadding(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); }
-HRESULT ma_IAudioClient_IsFormatSupported(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); }
-HRESULT ma_IAudioClient_GetMixFormat(ma_IAudioClient* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); }
-HRESULT ma_IAudioClient_GetDevicePeriod(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); }
-HRESULT ma_IAudioClient_Start(ma_IAudioClient* pThis) { return pThis->lpVtbl->Start(pThis); }
-HRESULT ma_IAudioClient_Stop(ma_IAudioClient* pThis) { return pThis->lpVtbl->Stop(pThis); }
-HRESULT ma_IAudioClient_Reset(ma_IAudioClient* pThis) { return pThis->lpVtbl->Reset(pThis); }
-HRESULT ma_IAudioClient_SetEventHandle(ma_IAudioClient* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); }
-HRESULT ma_IAudioClient_GetService(ma_IAudioClient* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); }
+ ma_context_get_device_info_callback_data__dsound* pData = (ma_context_get_device_info_callback_data__dsound*)lpContext;
+ MA_ASSERT(pData != NULL);
-/* IAudioClient2 */
-typedef struct
-{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient2* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient2* pThis);
+ if ((pData->pDeviceID == NULL || ma_is_guid_equal(pData->pDeviceID->dsound, &MA_GUID_NULL)) && (lpGuid == NULL || ma_is_guid_equal(lpGuid, &MA_GUID_NULL))) {
+ /* Default device. */
+ ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1);
+ pData->found = MA_TRUE;
+ return FALSE; /* Stop enumeration. */
+ } else {
+ /* Not the default device. */
+ if (lpGuid != NULL && pData->pDeviceID != NULL) {
+ if (memcmp(pData->pDeviceID->dsound, lpGuid, sizeof(pData->pDeviceID->dsound)) == 0) {
+ ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1);
+ pData->found = MA_TRUE;
+ return FALSE; /* Stop enumeration. */
+ }
+ }
+ }
- /* IAudioClient */
- HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
- HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames);
- HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency);
- HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames);
- HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch);
- HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient2* pThis, WAVEFORMATEX** ppDeviceFormat);
- HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod);
- HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient2* pThis);
- HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient2* pThis);
- HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient2* pThis);
- HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient2* pThis, HANDLE eventHandle);
- HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient2* pThis, const IID* const riid, void** pp);
+ (void)lpcstrModule;
+ return TRUE;
+}
- /* IAudioClient2 */
- HRESULT (STDMETHODCALLTYPE * IsOffloadCapable) (ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable);
- HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties);
- HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient2* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration);
-} ma_IAudioClient2Vtbl;
-struct ma_IAudioClient2
+static ma_result ma_context_get_device_info__dsound(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
{
- ma_IAudioClient2Vtbl* lpVtbl;
-};
-HRESULT ma_IAudioClient2_QueryInterface(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IAudioClient2_AddRef(ma_IAudioClient2* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IAudioClient2_Release(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Release(pThis); }
-HRESULT ma_IAudioClient2_Initialize(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); }
-HRESULT ma_IAudioClient2_GetBufferSize(ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); }
-HRESULT ma_IAudioClient2_GetStreamLatency(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); }
-HRESULT ma_IAudioClient2_GetCurrentPadding(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); }
-HRESULT ma_IAudioClient2_IsFormatSupported(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); }
-HRESULT ma_IAudioClient2_GetMixFormat(ma_IAudioClient2* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); }
-HRESULT ma_IAudioClient2_GetDevicePeriod(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); }
-HRESULT ma_IAudioClient2_Start(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Start(pThis); }
-HRESULT ma_IAudioClient2_Stop(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Stop(pThis); }
-HRESULT ma_IAudioClient2_Reset(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Reset(pThis); }
-HRESULT ma_IAudioClient2_SetEventHandle(ma_IAudioClient2* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); }
-HRESULT ma_IAudioClient2_GetService(ma_IAudioClient2* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); }
-HRESULT ma_IAudioClient2_IsOffloadCapable(ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); }
-HRESULT ma_IAudioClient2_SetClientProperties(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); }
-HRESULT ma_IAudioClient2_GetBufferSizeLimits(ma_IAudioClient2* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); }
+ ma_result result;
+ HRESULT hr;
+ /* Exclusive mode and capture not supported with DirectSound. */
+ if (deviceType == ma_device_type_capture && shareMode == ma_share_mode_exclusive) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
+ }
-/* IAudioClient3 */
-typedef struct
-{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient3* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient3* pThis);
+ if (pDeviceID != NULL) {
+ ma_context_get_device_info_callback_data__dsound data;
- /* IAudioClient */
- HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
- HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames);
- HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency);
- HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames);
- HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch);
- HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient3* pThis, WAVEFORMATEX** ppDeviceFormat);
- HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod);
- HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient3* pThis);
- HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient3* pThis);
- HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient3* pThis);
- HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient3* pThis, HANDLE eventHandle);
- HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient3* pThis, const IID* const riid, void** pp);
+ /* ID. */
+ MA_COPY_MEMORY(pDeviceInfo->id.dsound, pDeviceID->dsound, 16);
- /* IAudioClient2 */
- HRESULT (STDMETHODCALLTYPE * IsOffloadCapable) (ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable);
- HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties);
- HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration);
+ /* Name / Description. This is retrieved by enumerating over each device until we find the one that matches the input ID. */
+ data.pDeviceID = pDeviceID;
+ data.pDeviceInfo = pDeviceInfo;
+ data.found = MA_FALSE;
+ if (deviceType == ma_device_type_playback) {
+ ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_get_device_info_callback__dsound, &data);
+ } else {
+ ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_get_device_info_callback__dsound, &data);
+ }
- /* IAudioClient3 */
- HRESULT (STDMETHODCALLTYPE * GetSharedModeEnginePeriod) (ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, UINT32* pDefaultPeriodInFrames, UINT32* pFundamentalPeriodInFrames, UINT32* pMinPeriodInFrames, UINT32* pMaxPeriodInFrames);
- HRESULT (STDMETHODCALLTYPE * GetCurrentSharedModeEnginePeriod)(ma_IAudioClient3* pThis, WAVEFORMATEX** ppFormat, UINT32* pCurrentPeriodInFrames);
- HRESULT (STDMETHODCALLTYPE * InitializeSharedAudioStream) (ma_IAudioClient3* pThis, DWORD streamFlags, UINT32 periodInFrames, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid);
-} ma_IAudioClient3Vtbl;
-struct ma_IAudioClient3
-{
- ma_IAudioClient3Vtbl* lpVtbl;
-};
-HRESULT ma_IAudioClient3_QueryInterface(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IAudioClient3_AddRef(ma_IAudioClient3* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IAudioClient3_Release(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Release(pThis); }
-HRESULT ma_IAudioClient3_Initialize(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); }
-HRESULT ma_IAudioClient3_GetBufferSize(ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); }
-HRESULT ma_IAudioClient3_GetStreamLatency(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); }
-HRESULT ma_IAudioClient3_GetCurrentPadding(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); }
-HRESULT ma_IAudioClient3_IsFormatSupported(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); }
-HRESULT ma_IAudioClient3_GetMixFormat(ma_IAudioClient3* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); }
-HRESULT ma_IAudioClient3_GetDevicePeriod(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); }
-HRESULT ma_IAudioClient3_Start(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Start(pThis); }
-HRESULT ma_IAudioClient3_Stop(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Stop(pThis); }
-HRESULT ma_IAudioClient3_Reset(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Reset(pThis); }
-HRESULT ma_IAudioClient3_SetEventHandle(ma_IAudioClient3* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); }
-HRESULT ma_IAudioClient3_GetService(ma_IAudioClient3* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); }
-HRESULT ma_IAudioClient3_IsOffloadCapable(ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); }
-HRESULT ma_IAudioClient3_SetClientProperties(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); }
-HRESULT ma_IAudioClient3_GetBufferSizeLimits(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); }
-HRESULT ma_IAudioClient3_GetSharedModeEnginePeriod(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, UINT32* pDefaultPeriodInFrames, UINT32* pFundamentalPeriodInFrames, UINT32* pMinPeriodInFrames, UINT32* pMaxPeriodInFrames) { return pThis->lpVtbl->GetSharedModeEnginePeriod(pThis, pFormat, pDefaultPeriodInFrames, pFundamentalPeriodInFrames, pMinPeriodInFrames, pMaxPeriodInFrames); }
-HRESULT ma_IAudioClient3_GetCurrentSharedModeEnginePeriod(ma_IAudioClient3* pThis, WAVEFORMATEX** ppFormat, UINT32* pCurrentPeriodInFrames) { return pThis->lpVtbl->GetCurrentSharedModeEnginePeriod(pThis, ppFormat, pCurrentPeriodInFrames); }
-HRESULT ma_IAudioClient3_InitializeSharedAudioStream(ma_IAudioClient3* pThis, DWORD streamFlags, UINT32 periodInFrames, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGUID) { return pThis->lpVtbl->InitializeSharedAudioStream(pThis, streamFlags, periodInFrames, pFormat, pAudioSessionGUID); }
+ if (!data.found) {
+ return MA_NO_DEVICE;
+ }
+ } else {
+ /* I don't think there's a way to get the name of the default device with DirectSound. In this case we just need to use defaults. */
+ /* ID */
+ MA_ZERO_MEMORY(pDeviceInfo->id.dsound, 16);
-/* IAudioRenderClient */
-typedef struct
-{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioRenderClient* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioRenderClient* pThis);
+ /* Name / Description */
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ }
+ }
- /* IAudioRenderClient */
- HRESULT (STDMETHODCALLTYPE * GetBuffer) (ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData);
- HRESULT (STDMETHODCALLTYPE * ReleaseBuffer)(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags);
-} ma_IAudioRenderClientVtbl;
-struct ma_IAudioRenderClient
-{
- ma_IAudioRenderClientVtbl* lpVtbl;
-};
-HRESULT ma_IAudioRenderClient_QueryInterface(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IAudioRenderClient_AddRef(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IAudioRenderClient_Release(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->Release(pThis); }
-HRESULT ma_IAudioRenderClient_GetBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData) { return pThis->lpVtbl->GetBuffer(pThis, numFramesRequested, ppData); }
-HRESULT ma_IAudioRenderClient_ReleaseBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesWritten, dwFlags); }
+ /* Retrieving detailed information is slightly different depending on the device type. */
+ if (deviceType == ma_device_type_playback) {
+ /* Playback. */
+ ma_IDirectSound* pDirectSound;
+ MA_DSCAPS caps;
+ ma_uint32 iFormat;
+ result = ma_context_create_IDirectSound__dsound(pContext, shareMode, pDeviceID, &pDirectSound);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-/* IAudioCaptureClient */
-typedef struct
-{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioCaptureClient* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioCaptureClient* pThis);
+ MA_ZERO_OBJECT(&caps);
+ caps.dwSize = sizeof(caps);
+ hr = ma_IDirectSound_GetCaps(pDirectSound, &caps);
+ if (FAILED(hr)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device.", ma_result_from_HRESULT(hr));
+ }
- /* IAudioRenderClient */
- HRESULT (STDMETHODCALLTYPE * GetBuffer) (ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition);
- HRESULT (STDMETHODCALLTYPE * ReleaseBuffer) (ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead);
- HRESULT (STDMETHODCALLTYPE * GetNextPacketSize)(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket);
-} ma_IAudioCaptureClientVtbl;
-struct ma_IAudioCaptureClient
-{
- ma_IAudioCaptureClientVtbl* lpVtbl;
-};
-HRESULT ma_IAudioCaptureClient_QueryInterface(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IAudioCaptureClient_AddRef(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IAudioCaptureClient_Release(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->Release(pThis); }
-HRESULT ma_IAudioCaptureClient_GetBuffer(ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition) { return pThis->lpVtbl->GetBuffer(pThis, ppData, pNumFramesToRead, pFlags, pDevicePosition, pQPCPosition); }
-HRESULT ma_IAudioCaptureClient_ReleaseBuffer(ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesRead); }
-HRESULT ma_IAudioCaptureClient_GetNextPacketSize(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket) { return pThis->lpVtbl->GetNextPacketSize(pThis, pNumFramesInNextPacket); }
+ if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) {
+ /* It supports at least stereo, but could support more. */
+ WORD channels = 2;
-#ifndef MA_WIN32_DESKTOP
-#include
-typedef struct ma_completion_handler_uwp ma_completion_handler_uwp;
+ /* Look at the speaker configuration to get a better idea on the channel count. */
+ DWORD speakerConfig;
+ hr = ma_IDirectSound_GetSpeakerConfig(pDirectSound, &speakerConfig);
+ if (SUCCEEDED(hr)) {
+ ma_get_channels_from_speaker_config__dsound(speakerConfig, &channels, NULL);
+ }
-typedef struct
-{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_completion_handler_uwp* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_completion_handler_uwp* pThis);
+ pDeviceInfo->minChannels = channels;
+ pDeviceInfo->maxChannels = channels;
+ } else {
+ /* It does not support stereo, which means we are stuck with mono. */
+ pDeviceInfo->minChannels = 1;
+ pDeviceInfo->maxChannels = 1;
+ }
- /* IActivateAudioInterfaceCompletionHandler */
- HRESULT (STDMETHODCALLTYPE * ActivateCompleted)(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation);
-} ma_completion_handler_uwp_vtbl;
-struct ma_completion_handler_uwp
-{
- ma_completion_handler_uwp_vtbl* lpVtbl;
- ma_uint32 counter;
- HANDLE hEvent;
-};
+ /* Sample rate. */
+ if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) {
+ pDeviceInfo->minSampleRate = caps.dwMinSecondarySampleRate;
+ pDeviceInfo->maxSampleRate = caps.dwMaxSecondarySampleRate;
-HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_QueryInterface(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject)
-{
- /*
- We need to "implement" IAgileObject which is just an indicator that's used internally by WASAPI for some multithreading management. To
- "implement" this, we just make sure we return pThis when the IAgileObject is requested.
- */
- if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IActivateAudioInterfaceCompletionHandler) && !ma_is_guid_equal(riid, &MA_IID_IAgileObject)) {
- *ppObject = NULL;
- return E_NOINTERFACE;
+ /*
+ On my machine the min and max sample rates can return 100 and 200000 respectively. I'd rather these be within
+ the range of our standard sample rates so I'm clamping.
+ */
+ if (caps.dwMinSecondarySampleRate < MA_MIN_SAMPLE_RATE && caps.dwMaxSecondarySampleRate >= MA_MIN_SAMPLE_RATE) {
+ pDeviceInfo->minSampleRate = MA_MIN_SAMPLE_RATE;
+ }
+ if (caps.dwMaxSecondarySampleRate > MA_MAX_SAMPLE_RATE && caps.dwMinSecondarySampleRate <= MA_MAX_SAMPLE_RATE) {
+ pDeviceInfo->maxSampleRate = MA_MAX_SAMPLE_RATE;
+ }
+ } else {
+ /* Only supports a single sample rate. Set both min and max to the same thing. Do not clamp within the standard rates. */
+ pDeviceInfo->minSampleRate = caps.dwMaxSecondarySampleRate;
+ pDeviceInfo->maxSampleRate = caps.dwMaxSecondarySampleRate;
+ }
+
+ /* DirectSound can support all formats. */
+ pDeviceInfo->formatCount = ma_format_count - 1; /* Minus one because we don't want to include ma_format_unknown. */
+ for (iFormat = 0; iFormat < pDeviceInfo->formatCount; ++iFormat) {
+ pDeviceInfo->formats[iFormat] = (ma_format)(iFormat + 1); /* +1 to skip over ma_format_unknown. */
+ }
+
+ ma_IDirectSound_Release(pDirectSound);
+ } else {
+ /*
+ Capture. This is a little different to playback due to the way the supported formats are reported. Technically capture
+ devices can support a number of different formats, but for simplicity and consistency with ma_device_init() I'm just
+ reporting the best format.
+ */
+ ma_IDirectSoundCapture* pDirectSoundCapture;
+ WORD channels;
+ WORD bitsPerSample;
+ DWORD sampleRate;
+
+ result = ma_context_create_IDirectSoundCapture__dsound(pContext, shareMode, pDeviceID, &pDirectSoundCapture);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pContext, pDirectSoundCapture, &channels, &bitsPerSample, &sampleRate);
+ if (result != MA_SUCCESS) {
+ ma_IDirectSoundCapture_Release(pDirectSoundCapture);
+ return result;
+ }
+
+ pDeviceInfo->minChannels = channels;
+ pDeviceInfo->maxChannels = channels;
+ pDeviceInfo->minSampleRate = sampleRate;
+ pDeviceInfo->maxSampleRate = sampleRate;
+ pDeviceInfo->formatCount = 1;
+ if (bitsPerSample == 8) {
+ pDeviceInfo->formats[0] = ma_format_u8;
+ } else if (bitsPerSample == 16) {
+ pDeviceInfo->formats[0] = ma_format_s16;
+ } else if (bitsPerSample == 24) {
+ pDeviceInfo->formats[0] = ma_format_s24;
+ } else if (bitsPerSample == 32) {
+ pDeviceInfo->formats[0] = ma_format_s32;
+ } else {
+ ma_IDirectSoundCapture_Release(pDirectSoundCapture);
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+
+ ma_IDirectSoundCapture_Release(pDirectSoundCapture);
}
- /* Getting here means the IID is IUnknown or IMMNotificationClient. */
- *ppObject = (void*)pThis;
- ((ma_completion_handler_uwp_vtbl*)pThis->lpVtbl)->AddRef(pThis);
- return S_OK;
+ return MA_SUCCESS;
}
-ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_AddRef(ma_completion_handler_uwp* pThis)
-{
- return (ULONG)ma_atomic_increment_32(&pThis->counter);
-}
-ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_Release(ma_completion_handler_uwp* pThis)
+
+static void ma_device_uninit__dsound(ma_device* pDevice)
{
- ma_uint32 newRefCount = ma_atomic_decrement_32(&pThis->counter);
- if (newRefCount == 0) {
- return 0; /* We don't free anything here because we never allocate the object on the heap. */
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->dsound.pCaptureBuffer != NULL) {
+ ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
+ }
+ if (pDevice->dsound.pCapture != NULL) {
+ ma_IDirectSoundCapture_Release((ma_IDirectSoundCapture*)pDevice->dsound.pCapture);
}
- return (ULONG)newRefCount;
+ if (pDevice->dsound.pPlaybackBuffer != NULL) {
+ ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer);
+ }
+ if (pDevice->dsound.pPlaybackPrimaryBuffer != NULL) {
+ ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer);
+ }
+ if (pDevice->dsound.pPlayback != NULL) {
+ ma_IDirectSound_Release((ma_IDirectSound*)pDevice->dsound.pPlayback);
+ }
}
-HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_ActivateCompleted(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation)
+static ma_result ma_config_to_WAVEFORMATEXTENSIBLE(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* pChannelMap, WAVEFORMATEXTENSIBLE* pWF)
{
- (void)pActivateOperation;
- SetEvent(pThis->hEvent);
- return S_OK;
-}
-
+ GUID subformat;
-static ma_completion_handler_uwp_vtbl g_maCompletionHandlerVtblInstance = {
- ma_completion_handler_uwp_QueryInterface,
- ma_completion_handler_uwp_AddRef,
- ma_completion_handler_uwp_Release,
- ma_completion_handler_uwp_ActivateCompleted
-};
+ switch (format)
+ {
+ case ma_format_u8:
+ case ma_format_s16:
+ case ma_format_s24:
+ /*case ma_format_s24_32:*/
+ case ma_format_s32:
+ {
+ subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;
+ } break;
-ma_result ma_completion_handler_uwp_init(ma_completion_handler_uwp* pHandler)
-{
- ma_assert(pHandler != NULL);
- ma_zero_object(pHandler);
+ case ma_format_f32:
+ {
+ subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ } break;
- pHandler->lpVtbl = &g_maCompletionHandlerVtblInstance;
- pHandler->counter = 1;
- pHandler->hEvent = CreateEventA(NULL, FALSE, FALSE, NULL);
- if (pHandler->hEvent == NULL) {
- return MA_ERROR;
+ default:
+ return MA_FORMAT_NOT_SUPPORTED;
}
+ MA_ZERO_OBJECT(pWF);
+ pWF->Format.cbSize = sizeof(*pWF);
+ pWF->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+ pWF->Format.nChannels = (WORD)channels;
+ pWF->Format.nSamplesPerSec = (DWORD)sampleRate;
+ pWF->Format.wBitsPerSample = (WORD)ma_get_bytes_per_sample(format)*8;
+ pWF->Format.nBlockAlign = (pWF->Format.nChannels * pWF->Format.wBitsPerSample) / 8;
+ pWF->Format.nAvgBytesPerSec = pWF->Format.nBlockAlign * pWF->Format.nSamplesPerSec;
+ pWF->Samples.wValidBitsPerSample = pWF->Format.wBitsPerSample;
+ pWF->dwChannelMask = ma_channel_map_to_channel_mask__win32(pChannelMap, channels);
+ pWF->SubFormat = subformat;
+
return MA_SUCCESS;
}
-void ma_completion_handler_uwp_uninit(ma_completion_handler_uwp* pHandler)
+static ma_result ma_device_init__dsound(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
{
- if (pHandler->hEvent != NULL) {
- CloseHandle(pHandler->hEvent);
+ ma_result result;
+ HRESULT hr;
+ ma_uint32 periodSizeInMilliseconds;
+
+ MA_ASSERT(pDevice != NULL);
+ MA_ZERO_OBJECT(&pDevice->dsound);
+
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
}
-}
-void ma_completion_handler_uwp_wait(ma_completion_handler_uwp* pHandler)
-{
- WaitForSingleObject(pHandler->hEvent, INFINITE);
-}
-#endif /* !MA_WIN32_DESKTOP */
+ periodSizeInMilliseconds = pConfig->periodSizeInMilliseconds;
+ if (periodSizeInMilliseconds == 0) {
+ periodSizeInMilliseconds = ma_calculate_buffer_size_in_milliseconds_from_frames(pConfig->periodSizeInFrames, pConfig->sampleRate);
+ }
+
+ /* DirectSound should use a latency of about 20ms per period for low latency mode. */
+ if (pDevice->usingDefaultBufferSize) {
+ if (pConfig->performanceProfile == ma_performance_profile_low_latency) {
+ periodSizeInMilliseconds = 20;
+ } else {
+ periodSizeInMilliseconds = 200;
+ }
+ }
+
+ /* DirectSound breaks down with tiny buffer sizes (bad glitching and silent output). I am therefore restricting the size of the buffer to a minimum of 20 milliseconds. */
+ if (periodSizeInMilliseconds < 20) {
+ periodSizeInMilliseconds = 20;
+ }
-/* We need a virtual table for our notification client object that's used for detecting changes to the default device. */
-#ifdef MA_WIN32_DESKTOP
-HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_QueryInterface(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject)
-{
/*
- We care about two interfaces - IUnknown and IMMNotificationClient. If the requested IID is something else
- we just return E_NOINTERFACE. Otherwise we need to increment the reference counter and return S_OK.
+ Unfortunately DirectSound uses different APIs and data structures for playback and capture devices. We need to initialize
+ the capture device first because we'll want to match its buffer size and period count on the playback side if we're using
+ full-duplex mode.
*/
- if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IMMNotificationClient)) {
- *ppObject = NULL;
- return E_NOINTERFACE;
- }
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ WAVEFORMATEXTENSIBLE wf;
+ MA_DSCBUFFERDESC descDS;
+ ma_uint32 periodSizeInFrames;
+ char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. */
+ WAVEFORMATEXTENSIBLE* pActualFormat;
- /* Getting here means the IID is IUnknown or IMMNotificationClient. */
- *ppObject = (void*)pThis;
- ((ma_IMMNotificationClientVtbl*)pThis->lpVtbl)->AddRef(pThis);
- return S_OK;
-}
+ result = ma_config_to_WAVEFORMATEXTENSIBLE(pConfig->capture.format, pConfig->capture.channels, pConfig->sampleRate, pConfig->capture.channelMap, &wf);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_AddRef(ma_IMMNotificationClient* pThis)
-{
- return (ULONG)ma_atomic_increment_32(&pThis->counter);
-}
+ result = ma_context_create_IDirectSoundCapture__dsound(pContext, pConfig->capture.shareMode, pConfig->capture.pDeviceID, (ma_IDirectSoundCapture**)&pDevice->dsound.pCapture);
+ if (result != MA_SUCCESS) {
+ ma_device_uninit__dsound(pDevice);
+ return result;
+ }
-ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_Release(ma_IMMNotificationClient* pThis)
-{
- ma_uint32 newRefCount = ma_atomic_decrement_32(&pThis->counter);
- if (newRefCount == 0) {
- return 0; /* We don't free anything here because we never allocate the object on the heap. */
+ result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pContext, (ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &wf.Format.nChannels, &wf.Format.wBitsPerSample, &wf.Format.nSamplesPerSec);
+ if (result != MA_SUCCESS) {
+ ma_device_uninit__dsound(pDevice);
+ return result;
+ }
+
+ wf.Format.nBlockAlign = (wf.Format.nChannels * wf.Format.wBitsPerSample) / 8;
+ wf.Format.nAvgBytesPerSec = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec;
+ wf.Samples.wValidBitsPerSample = wf.Format.wBitsPerSample;
+ wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;
+
+ /* The size of the buffer must be a clean multiple of the period count. */
+ periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, wf.Format.nSamplesPerSec);
+
+ MA_ZERO_OBJECT(&descDS);
+ descDS.dwSize = sizeof(descDS);
+ descDS.dwFlags = 0;
+ descDS.dwBufferBytes = periodSizeInFrames * pConfig->periods * ma_get_bytes_per_frame(pDevice->capture.internalFormat, wf.Format.nChannels);
+ descDS.lpwfxFormat = (WAVEFORMATEX*)&wf;
+ hr = ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL);
+ if (FAILED(hr)) {
+ ma_device_uninit__dsound(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_CreateCaptureBuffer() failed for capture device.", ma_result_from_HRESULT(hr));
+ }
+
+ /* Get the _actual_ properties of the buffer. */
+ pActualFormat = (WAVEFORMATEXTENSIBLE*)rawdata;
+ hr = ma_IDirectSoundCaptureBuffer_GetFormat((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, (WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL);
+ if (FAILED(hr)) {
+ ma_device_uninit__dsound(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the capture device's buffer.", ma_result_from_HRESULT(hr));
+ }
+
+ pDevice->capture.internalFormat = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)pActualFormat);
+ pDevice->capture.internalChannels = pActualFormat->Format.nChannels;
+ pDevice->capture.internalSampleRate = pActualFormat->Format.nSamplesPerSec;
+
+ /* Get the internal channel map based on the channel mask. */
+ if (pActualFormat->Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
+ ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
+ } else {
+ ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
+ }
+
+ /*
+ After getting the actual format the size of the buffer in frames may have actually changed. However, we want this to be as close to what the
+ user has asked for as possible, so let's go ahead and release the old capture buffer and create a new one in this case.
+ */
+ if (periodSizeInFrames != (descDS.dwBufferBytes / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels) / pConfig->periods)) {
+ descDS.dwBufferBytes = periodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, wf.Format.nChannels) * pConfig->periods;
+ ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
+
+ hr = ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL);
+ if (FAILED(hr)) {
+ ma_device_uninit__dsound(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Second attempt at IDirectSoundCapture_CreateCaptureBuffer() failed for capture device.", ma_result_from_HRESULT(hr));
+ }
+ }
+
+ /* DirectSound should give us a buffer exactly the size we asked for. */
+ pDevice->capture.internalPeriodSizeInFrames = periodSizeInFrames;
+ pDevice->capture.internalPeriods = pConfig->periods;
}
- return (ULONG)newRefCount;
-}
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ WAVEFORMATEXTENSIBLE wf;
+ MA_DSBUFFERDESC descDSPrimary;
+ MA_DSCAPS caps;
+ char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. */
+ WAVEFORMATEXTENSIBLE* pActualFormat;
+ ma_uint32 periodSizeInFrames;
+ MA_DSBUFFERDESC descDS;
+ result = ma_config_to_WAVEFORMATEXTENSIBLE(pConfig->playback.format, pConfig->playback.channels, pConfig->sampleRate, pConfig->playback.channelMap, &wf);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceStateChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState)
-{
-#ifdef MA_DEBUG_OUTPUT
- printf("IMMNotificationClient_OnDeviceStateChanged(pDeviceID=%S, dwNewState=%u)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)", (unsigned int)dwNewState);
-#endif
+ result = ma_context_create_IDirectSound__dsound(pContext, pConfig->playback.shareMode, pConfig->playback.pDeviceID, (ma_IDirectSound**)&pDevice->dsound.pPlayback);
+ if (result != MA_SUCCESS) {
+ ma_device_uninit__dsound(pDevice);
+ return result;
+ }
- (void)pThis;
- (void)pDeviceID;
- (void)dwNewState;
- return S_OK;
-}
+ MA_ZERO_OBJECT(&descDSPrimary);
+ descDSPrimary.dwSize = sizeof(MA_DSBUFFERDESC);
+ descDSPrimary.dwFlags = MA_DSBCAPS_PRIMARYBUFFER | MA_DSBCAPS_CTRLVOLUME;
+ hr = ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDSPrimary, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackPrimaryBuffer, NULL);
+ if (FAILED(hr)) {
+ ma_device_uninit__dsound(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's primary buffer.", ma_result_from_HRESULT(hr));
+ }
-HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceAdded(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID)
-{
-#ifdef MA_DEBUG_OUTPUT
- printf("IMMNotificationClient_OnDeviceAdded(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");
-#endif
- /* We don't need to worry about this event for our purposes. */
- (void)pThis;
- (void)pDeviceID;
- return S_OK;
-}
+ /* We may want to make some adjustments to the format if we are using defaults. */
+ MA_ZERO_OBJECT(&caps);
+ caps.dwSize = sizeof(caps);
+ hr = ma_IDirectSound_GetCaps((ma_IDirectSound*)pDevice->dsound.pPlayback, &caps);
+ if (FAILED(hr)) {
+ ma_device_uninit__dsound(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device.", ma_result_from_HRESULT(hr));
+ }
-HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceRemoved(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID)
-{
-#ifdef MA_DEBUG_OUTPUT
- printf("IMMNotificationClient_OnDeviceRemoved(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");
-#endif
+ if (pDevice->playback.usingDefaultChannels) {
+ if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) {
+ DWORD speakerConfig;
- /* We don't need to worry about this event for our purposes. */
- (void)pThis;
- (void)pDeviceID;
- return S_OK;
-}
+ /* It supports at least stereo, but could support more. */
+ wf.Format.nChannels = 2;
-HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID)
-{
-#ifdef MA_DEBUG_OUTPUT
- printf("IMMNotificationClient_OnDefaultDeviceChanged(dataFlow=%d, role=%d, pDefaultDeviceID=%S)\n", dataFlow, role, (pDefaultDeviceID != NULL) ? pDefaultDeviceID : L"(NULL)");
-#endif
+ /* Look at the speaker configuration to get a better idea on the channel count. */
+ if (SUCCEEDED(ma_IDirectSound_GetSpeakerConfig((ma_IDirectSound*)pDevice->dsound.pPlayback, &speakerConfig))) {
+ ma_get_channels_from_speaker_config__dsound(speakerConfig, &wf.Format.nChannels, &wf.dwChannelMask);
+ }
+ } else {
+ /* It does not support stereo, which means we are stuck with mono. */
+ wf.Format.nChannels = 1;
+ }
+ }
+
+ if (pDevice->usingDefaultSampleRate) {
+ /* We base the sample rate on the values returned by GetCaps(). */
+ if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) {
+ wf.Format.nSamplesPerSec = ma_get_best_sample_rate_within_range(caps.dwMinSecondarySampleRate, caps.dwMaxSecondarySampleRate);
+ } else {
+ wf.Format.nSamplesPerSec = caps.dwMaxSecondarySampleRate;
+ }
+ }
+
+ wf.Format.nBlockAlign = (wf.Format.nChannels * wf.Format.wBitsPerSample) / 8;
+ wf.Format.nAvgBytesPerSec = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec;
+
+ /*
+ From MSDN:
+
+ The method succeeds even if the hardware does not support the requested format; DirectSound sets the buffer to the closest
+ supported format. To determine whether this has happened, an application can call the GetFormat method for the primary buffer
+ and compare the result with the format that was requested with the SetFormat method.
+ */
+ hr = ma_IDirectSoundBuffer_SetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (WAVEFORMATEX*)&wf);
+ if (FAILED(hr)) {
+ ma_device_uninit__dsound(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to set format of playback device's primary buffer.", ma_result_from_HRESULT(hr));
+ }
+
+ /* Get the _actual_ properties of the buffer. */
+ pActualFormat = (WAVEFORMATEXTENSIBLE*)rawdata;
+ hr = ma_IDirectSoundBuffer_GetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL);
+ if (FAILED(hr)) {
+ ma_device_uninit__dsound(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the playback device's primary buffer.", ma_result_from_HRESULT(hr));
+ }
- /* We only ever use the eConsole role in miniaudio. */
- if (role != ma_eConsole) {
- return S_OK;
- }
+ pDevice->playback.internalFormat = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)pActualFormat);
+ pDevice->playback.internalChannels = pActualFormat->Format.nChannels;
+ pDevice->playback.internalSampleRate = pActualFormat->Format.nSamplesPerSec;
- /* We only care about devices with the same data flow and role as the current device. */
- if ((pThis->pDevice->type == ma_device_type_playback && dataFlow != ma_eRender) ||
- (pThis->pDevice->type == ma_device_type_capture && dataFlow != ma_eCapture)) {
- return S_OK;
- }
+ /* Get the internal channel map based on the channel mask. */
+ if (pActualFormat->Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
+ ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
+ } else {
+ ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
+ }
- /*
- Not currently supporting automatic stream routing in exclusive mode. This is not working correctly on my machine due to
- AUDCLNT_E_DEVICE_IN_USE errors when reinitializing the device. If this is a bug in miniaudio, we can try re-enabling this once
- it's fixed.
- */
- if ((dataFlow == ma_eRender && pThis->pDevice->playback.shareMode == ma_share_mode_exclusive) ||
- (dataFlow == ma_eCapture && pThis->pDevice->capture.shareMode == ma_share_mode_exclusive)) {
- return S_OK;
- }
+ /* The size of the buffer must be a clean multiple of the period count. */
+ periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, pDevice->playback.internalSampleRate);
- /*
- We don't change the device here - we change it in the worker thread to keep synchronization simple. To do this I'm just setting a flag to
- indicate that the default device has changed.
- */
- if (dataFlow == ma_eRender) {
- ma_atomic_exchange_32(&pThis->pDevice->wasapi.hasDefaultPlaybackDeviceChanged, MA_TRUE);
- }
- if (dataFlow == ma_eCapture) {
- ma_atomic_exchange_32(&pThis->pDevice->wasapi.hasDefaultCaptureDeviceChanged, MA_TRUE);
+ /*
+ Meaning of dwFlags (from MSDN):
+
+ DSBCAPS_CTRLPOSITIONNOTIFY
+ The buffer has position notification capability.
+
+ DSBCAPS_GLOBALFOCUS
+ With this flag set, an application using DirectSound can continue to play its buffers if the user switches focus to
+ another application, even if the new application uses DirectSound.
+
+ DSBCAPS_GETCURRENTPOSITION2
+ In the first version of DirectSound, the play cursor was significantly ahead of the actual playing sound on emulated
+ sound cards; it was directly behind the write cursor. Now, if the DSBCAPS_GETCURRENTPOSITION2 flag is specified, the
+ application can get a more accurate play cursor.
+ */
+ MA_ZERO_OBJECT(&descDS);
+ descDS.dwSize = sizeof(descDS);
+ descDS.dwFlags = MA_DSBCAPS_CTRLPOSITIONNOTIFY | MA_DSBCAPS_GLOBALFOCUS | MA_DSBCAPS_GETCURRENTPOSITION2;
+ descDS.dwBufferBytes = periodSizeInFrames * pConfig->periods * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ descDS.lpwfxFormat = (WAVEFORMATEX*)&wf;
+ hr = ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDS, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackBuffer, NULL);
+ if (FAILED(hr)) {
+ ma_device_uninit__dsound(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's secondary buffer.", ma_result_from_HRESULT(hr));
+ }
+
+ /* DirectSound should give us a buffer exactly the size we asked for. */
+ pDevice->playback.internalPeriodSizeInFrames = periodSizeInFrames;
+ pDevice->playback.internalPeriods = pConfig->periods;
}
- (void)pDefaultDeviceID;
- return S_OK;
+ (void)pContext;
+ return MA_SUCCESS;
}
-HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnPropertyValueChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key)
+
+static ma_result ma_device_main_loop__dsound(ma_device* pDevice)
{
-#ifdef MA_DEBUG_OUTPUT
- printf("IMMNotificationClient_OnPropertyValueChanged(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");
-#endif
+ ma_result result = MA_SUCCESS;
+ ma_uint32 bpfDeviceCapture = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 bpfDevicePlayback = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ HRESULT hr;
+ DWORD lockOffsetInBytesCapture;
+ DWORD lockSizeInBytesCapture;
+ DWORD mappedSizeInBytesCapture;
+ DWORD mappedDeviceFramesProcessedCapture;
+ void* pMappedDeviceBufferCapture;
+ DWORD lockOffsetInBytesPlayback;
+ DWORD lockSizeInBytesPlayback;
+ DWORD mappedSizeInBytesPlayback;
+ void* pMappedDeviceBufferPlayback;
+ DWORD prevReadCursorInBytesCapture = 0;
+ DWORD prevPlayCursorInBytesPlayback = 0;
+ ma_bool32 physicalPlayCursorLoopFlagPlayback = 0;
+ DWORD virtualWriteCursorInBytesPlayback = 0;
+ ma_bool32 virtualWriteCursorLoopFlagPlayback = 0;
+ ma_bool32 isPlaybackDeviceStarted = MA_FALSE;
+ ma_uint32 framesWrittenToPlaybackDevice = 0; /* For knowing whether or not the playback device needs to be started. */
+ ma_uint32 waitTimeInMilliseconds = 1;
- (void)pThis;
- (void)pDeviceID;
- (void)key;
- return S_OK;
-}
+ MA_ASSERT(pDevice != NULL);
-static ma_IMMNotificationClientVtbl g_maNotificationCientVtbl = {
- ma_IMMNotificationClient_QueryInterface,
- ma_IMMNotificationClient_AddRef,
- ma_IMMNotificationClient_Release,
- ma_IMMNotificationClient_OnDeviceStateChanged,
- ma_IMMNotificationClient_OnDeviceAdded,
- ma_IMMNotificationClient_OnDeviceRemoved,
- ma_IMMNotificationClient_OnDefaultDeviceChanged,
- ma_IMMNotificationClient_OnPropertyValueChanged
-};
-#endif /* MA_WIN32_DESKTOP */
+ /* The first thing to do is start the capture device. The playback device is only started after the first period is written. */
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ if (FAILED(ma_IDirectSoundCaptureBuffer_Start((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, MA_DSCBSTART_LOOPING))) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Start() failed.", MA_FAILED_TO_START_BACKEND_DEVICE);
+ }
+ }
+
+ while (ma_device__get_state(pDevice) == MA_STATE_STARTED) {
+ switch (pDevice->type)
+ {
+ case ma_device_type_duplex:
+ {
+ DWORD physicalCaptureCursorInBytes;
+ DWORD physicalReadCursorInBytes;
+ hr = ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes);
+ if (FAILED(hr)) {
+ return ma_result_from_HRESULT(hr);
+ }
-#ifdef MA_WIN32_DESKTOP
-typedef ma_IMMDevice ma_WASAPIDeviceInterface;
-#else
-typedef ma_IUnknown ma_WASAPIDeviceInterface;
-#endif
+ /* If nothing is available we just sleep for a bit and return from this iteration. */
+ if (physicalReadCursorInBytes == prevReadCursorInBytesCapture) {
+ ma_sleep(waitTimeInMilliseconds);
+ continue; /* Nothing is available in the capture buffer. */
+ }
+
+ /*
+ The current position has moved. We need to map all of the captured samples and write them to the playback device, making sure
+ we don't return until every frame has been copied over.
+ */
+ if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) {
+ /* The capture position has not looped. This is the simple case. */
+ lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
+ lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture);
+ } else {
+ /*
+ The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything,
+ do it again from the start.
+ */
+ if (prevReadCursorInBytesCapture < pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) {
+ /* Lock up to the end of the buffer. */
+ lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
+ lockSizeInBytesCapture = (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) - prevReadCursorInBytesCapture;
+ } else {
+ /* Lock starting from the start of the buffer. */
+ lockOffsetInBytesCapture = 0;
+ lockSizeInBytesCapture = physicalReadCursorInBytes;
+ }
+ }
+ if (lockSizeInBytesCapture == 0) {
+ ma_sleep(waitTimeInMilliseconds);
+ continue; /* Nothing is available in the capture buffer. */
+ }
+ hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
+ }
-ma_bool32 ma_context_is_device_id_equal__wasapi(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
-{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
- return memcmp(pID0->wasapi, pID1->wasapi, sizeof(pID0->wasapi)) == 0;
-}
+ /* At this point we have some input data that we need to output. We do not return until every mapped frame of the input data is written to the playback device. */
+ mappedDeviceFramesProcessedCapture = 0;
-void ma_set_device_info_from_WAVEFORMATEX(const WAVEFORMATEX* pWF, ma_device_info* pInfo)
-{
- ma_assert(pWF != NULL);
- ma_assert(pInfo != NULL);
+ for (;;) { /* Keep writing to the playback device. */
+ ma_uint8 inputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 inputFramesInClientFormatCap = sizeof(inputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+ ma_uint8 outputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 outputFramesInClientFormatCap = sizeof(outputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+ ma_uint32 outputFramesInClientFormatCount;
+ ma_uint32 outputFramesInClientFormatConsumed = 0;
+ ma_uint64 clientCapturedFramesToProcess = ma_min(inputFramesInClientFormatCap, outputFramesInClientFormatCap);
+ ma_uint64 deviceCapturedFramesToProcess = (mappedSizeInBytesCapture / bpfDeviceCapture) - mappedDeviceFramesProcessedCapture;
+ void* pRunningMappedDeviceBufferCapture = ma_offset_ptr(pMappedDeviceBufferCapture, mappedDeviceFramesProcessedCapture * bpfDeviceCapture);
+
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningMappedDeviceBufferCapture, &deviceCapturedFramesToProcess, inputFramesInClientFormat, &clientCapturedFramesToProcess);
+ if (result != MA_SUCCESS) {
+ break;
+ }
- pInfo->formatCount = 1;
- pInfo->formats[0] = ma_format_from_WAVEFORMATEX(pWF);
- pInfo->minChannels = pWF->nChannels;
- pInfo->maxChannels = pWF->nChannels;
- pInfo->minSampleRate = pWF->nSamplesPerSec;
- pInfo->maxSampleRate = pWF->nSamplesPerSec;
-}
+ outputFramesInClientFormatCount = (ma_uint32)clientCapturedFramesToProcess;
+ mappedDeviceFramesProcessedCapture += (ma_uint32)deviceCapturedFramesToProcess;
-ma_result ma_context_get_device_info_from_IAudioClient__wasapi(ma_context* pContext, /*ma_IMMDevice**/void* pMMDevice, ma_IAudioClient* pAudioClient, ma_share_mode shareMode, ma_device_info* pInfo)
-{
- ma_assert(pAudioClient != NULL);
- ma_assert(pInfo != NULL);
+ ma_device__on_data(pDevice, outputFramesInClientFormat, inputFramesInClientFormat, (ma_uint32)clientCapturedFramesToProcess);
- /* We use a different technique to retrieve the device information depending on whether or not we are using shared or exclusive mode. */
- if (shareMode == ma_share_mode_shared) {
- /* Shared Mode. We use GetMixFormat() here. */
- WAVEFORMATEX* pWF = NULL;
- HRESULT hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pAudioClient, (WAVEFORMATEX**)&pWF);
- if (SUCCEEDED(hr)) {
- ma_set_device_info_from_WAVEFORMATEX(pWF, pInfo);
- return MA_SUCCESS;
- } else {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve mix format for device info retrieval.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
- } else {
- /* Exlcusive Mode. We repeatedly call IsFormatSupported() here. This is not currently support on UWP. */
-#ifdef MA_WIN32_DESKTOP
- /*
- The first thing to do is get the format from PKEY_AudioEngine_DeviceFormat. This should give us a channel count we assume is
- correct which will simplify our searching.
- */
- ma_IPropertyStore *pProperties;
- HRESULT hr = ma_IMMDevice_OpenPropertyStore((ma_IMMDevice*)pMMDevice, STGM_READ, &pProperties);
- if (SUCCEEDED(hr)) {
- PROPVARIANT var;
- ma_PropVariantInit(&var);
+ /* At this point we have input and output data in client format. All we need to do now is convert it to the output device format. This may take a few passes. */
+ for (;;) {
+ ma_uint32 framesWrittenThisIteration;
+ DWORD physicalPlayCursorInBytes;
+ DWORD physicalWriteCursorInBytes;
+ DWORD availableBytesPlayback;
+ DWORD silentPaddingInBytes = 0; /* <-- Must be initialized to 0. */
- hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_AudioEngine_DeviceFormat, &var);
- if (SUCCEEDED(hr)) {
- WAVEFORMATEX* pWF = (WAVEFORMATEX*)var.blob.pBlobData;
- ma_set_device_info_from_WAVEFORMATEX(pWF, pInfo);
+ /* We need the physical play and write cursors. */
+ if (FAILED(ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes))) {
+ break;
+ }
- /*
- In my testing, the format returned by PKEY_AudioEngine_DeviceFormat is suitable for exclusive mode so we check this format
- first. If this fails, fall back to a search.
- */
- hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pWF, NULL);
- ma_PropVariantClear(pContext, &var);
+ if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) {
+ physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback;
+ }
+ prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes;
- if (FAILED(hr)) {
- /*
- The format returned by PKEY_AudioEngine_DeviceFormat is not supported, so fall back to a search. We assume the channel
- count returned by MA_PKEY_AudioEngine_DeviceFormat is valid and correct. For simplicity we're only returning one format.
- */
- ma_uint32 channels = pInfo->minChannels;
- ma_format formatsToSearch[] = {
- ma_format_s16,
- ma_format_s24,
- /*ma_format_s24_32,*/
- ma_format_f32,
- ma_format_s32,
- ma_format_u8
- };
- ma_channel defaultChannelMap[MA_MAX_CHANNELS];
- WAVEFORMATEXTENSIBLE wf;
- ma_bool32 found;
- ma_uint32 iFormat;
+ /* If there's any bytes available for writing we can do that now. The space between the virtual cursor position and play cursor. */
+ if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+ /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+ availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */
+ } else {
+ /* This is an error. */
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[DirectSound] (Duplex/Playback) WARNING: Play cursor has moved in front of the write cursor (same loop iterations). physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
+ #endif
+ availableBytesPlayback = 0;
+ }
+ } else {
+ /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+ } else {
+ /* This is an error. */
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[DirectSound] (Duplex/Playback) WARNING: Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
+ #endif
+ availableBytesPlayback = 0;
+ }
+ }
- ma_get_standard_channel_map(ma_standard_channel_map_microsoft, channels, defaultChannelMap);
+ #ifdef MA_DEBUG_OUTPUT
+ /*printf("[DirectSound] (Duplex/Playback) physicalPlayCursorInBytes=%d, availableBytesPlayback=%d\n", physicalPlayCursorInBytes, availableBytesPlayback);*/
+ #endif
- ma_zero_object(&wf);
- wf.Format.cbSize = sizeof(wf);
- wf.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
- wf.Format.nChannels = (WORD)channels;
- wf.dwChannelMask = ma_channel_map_to_channel_mask__win32(defaultChannelMap, channels);
+ /* If there's no room available for writing we need to wait for more. */
+ if (availableBytesPlayback == 0) {
+ /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */
+ if (!isPlaybackDeviceStarted) {
+ hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
+ if (FAILED(hr)) {
+ ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", ma_result_from_HRESULT(hr));
+ }
+ isPlaybackDeviceStarted = MA_TRUE;
+ } else {
+ ma_sleep(waitTimeInMilliseconds);
+ continue;
+ }
+ }
- found = MA_FALSE;
- for (iFormat = 0; iFormat < ma_countof(formatsToSearch); ++iFormat) {
- ma_format format = formatsToSearch[iFormat];
- ma_uint32 iSampleRate;
- wf.Format.wBitsPerSample = (WORD)ma_get_bytes_per_sample(format)*8;
- wf.Format.nBlockAlign = (wf.Format.nChannels * wf.Format.wBitsPerSample) / 8;
- wf.Format.nAvgBytesPerSec = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec;
- wf.Samples.wValidBitsPerSample = /*(format == ma_format_s24_32) ? 24 :*/ wf.Format.wBitsPerSample;
- if (format == ma_format_f32) {
- wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+ /* Getting here means there room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */
+ lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback;
+ if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+ /* Same loop iteration. Go up to the end of the buffer. */
+ lockSizeInBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
} else {
- wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;
+ /* Different loop iterations. Go up to the physical play cursor. */
+ lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
}
- for (iSampleRate = 0; iSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iSampleRate) {
- wf.Format.nSamplesPerSec = g_maStandardSampleRatePriorities[iSampleRate];
+ hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0);
+ if (FAILED(hr)) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
+ break;
+ }
- hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, (WAVEFORMATEX*)&wf, NULL);
- if (SUCCEEDED(hr)) {
- ma_set_device_info_from_WAVEFORMATEX((WAVEFORMATEX*)&wf, pInfo);
- found = MA_TRUE;
+ /*
+ Experiment: If the playback buffer is being starved, pad it with some silence to get it back in sync. This will cause a glitch, but it may prevent
+ endless glitching due to it constantly running out of data.
+ */
+ if (isPlaybackDeviceStarted) {
+ DWORD bytesQueuedForPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - availableBytesPlayback;
+ if (bytesQueuedForPlayback < (pDevice->playback.internalPeriodSizeInFrames*bpfDevicePlayback)) {
+ silentPaddingInBytes = (pDevice->playback.internalPeriodSizeInFrames*2*bpfDevicePlayback) - bytesQueuedForPlayback;
+ if (silentPaddingInBytes > lockSizeInBytesPlayback) {
+ silentPaddingInBytes = lockSizeInBytesPlayback;
+ }
+
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[DirectSound] (Duplex/Playback) Playback buffer starved. availableBytesPlayback=%d, silentPaddingInBytes=%d\n", availableBytesPlayback, silentPaddingInBytes);
+ #endif
+ }
+ }
+
+ /* At this point we have a buffer for output. */
+ if (silentPaddingInBytes > 0) {
+ MA_ZERO_MEMORY(pMappedDeviceBufferPlayback, silentPaddingInBytes);
+ framesWrittenThisIteration = silentPaddingInBytes/bpfDevicePlayback;
+ } else {
+ ma_uint64 convertedFrameCountIn = (outputFramesInClientFormatCount - outputFramesInClientFormatConsumed);
+ ma_uint64 convertedFrameCountOut = mappedSizeInBytesPlayback/bpfDevicePlayback;
+ void* pConvertedFramesIn = ma_offset_ptr(outputFramesInClientFormat, outputFramesInClientFormatConsumed * bpfDevicePlayback);
+ void* pConvertedFramesOut = pMappedDeviceBufferPlayback;
+
+ result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pConvertedFramesIn, &convertedFrameCountIn, pConvertedFramesOut, &convertedFrameCountOut);
+ if (result != MA_SUCCESS) {
break;
}
+
+ outputFramesInClientFormatConsumed += (ma_uint32)convertedFrameCountOut;
+ framesWrittenThisIteration = (ma_uint32)convertedFrameCountOut;
}
+
- if (found) {
+ hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, framesWrittenThisIteration*bpfDevicePlayback, NULL, 0);
+ if (FAILED(hr)) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device.", ma_result_from_HRESULT(hr));
break;
}
+
+ virtualWriteCursorInBytesPlayback += framesWrittenThisIteration*bpfDevicePlayback;
+ if ((virtualWriteCursorInBytesPlayback/bpfDevicePlayback) == pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods) {
+ virtualWriteCursorInBytesPlayback = 0;
+ virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback;
+ }
+
+ /*
+ We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds
+ a bit of a buffer to prevent the playback buffer from getting starved.
+ */
+ framesWrittenToPlaybackDevice += framesWrittenThisIteration;
+ if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= (pDevice->playback.internalPeriodSizeInFrames*2)) {
+ hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
+ if (FAILED(hr)) {
+ ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", ma_result_from_HRESULT(hr));
+ }
+ isPlaybackDeviceStarted = MA_TRUE;
+ }
+
+ if (framesWrittenThisIteration < mappedSizeInBytesPlayback/bpfDevicePlayback) {
+ break; /* We're finished with the output data.*/
+ }
}
- if (!found) {
- ma_IPropertyStore_Release(pProperties);
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to find suitable device format for device info retrieval.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ if (clientCapturedFramesToProcess == 0) {
+ break; /* We just consumed every input sample. */
}
}
- } else {
- ma_IPropertyStore_Release(pProperties);
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve device format for device info retrieval.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
- } else {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to open property store for device info retrieval.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
- return MA_SUCCESS;
-#else
- /* Exclusive mode not fully supported in UWP right now. */
- return MA_ERROR;
-#endif
- }
-}
-#ifdef MA_WIN32_DESKTOP
-ma_result ma_context_get_MMDevice__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IMMDevice** ppMMDevice)
-{
- ma_IMMDeviceEnumerator* pDeviceEnumerator;
- HRESULT hr;
+ /* At this point we're done with the mapped portion of the capture buffer. */
+ hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.", ma_result_from_HRESULT(hr));
+ }
+ prevReadCursorInBytesCapture = (lockOffsetInBytesCapture + mappedSizeInBytesCapture);
+ } break;
- ma_assert(pContext != NULL);
- ma_assert(ppMMDevice != NULL);
- hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator);
- if (FAILED(hr)) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create IMMDeviceEnumerator.", MA_FAILED_TO_INIT_BACKEND);
- }
- if (pDeviceID == NULL) {
- hr = ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(pDeviceEnumerator, (deviceType == ma_device_type_playback) ? ma_eRender : ma_eCapture, ma_eConsole, ppMMDevice);
- } else {
- hr = ma_IMMDeviceEnumerator_GetDevice(pDeviceEnumerator, pDeviceID->wasapi, ppMMDevice);
- }
+ case ma_device_type_capture:
+ {
+ DWORD physicalCaptureCursorInBytes;
+ DWORD physicalReadCursorInBytes;
+ hr = ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes);
+ if (FAILED(hr)) {
+ return MA_ERROR;
+ }
- ma_IMMDeviceEnumerator_Release(pDeviceEnumerator);
- if (FAILED(hr)) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve IMMDevice.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ /* If the previous capture position is the same as the current position we need to wait a bit longer. */
+ if (prevReadCursorInBytesCapture == physicalReadCursorInBytes) {
+ ma_sleep(waitTimeInMilliseconds);
+ continue;
+ }
- return MA_SUCCESS;
-}
+ /* Getting here means we have capture data available. */
+ if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) {
+ /* The capture position has not looped. This is the simple case. */
+ lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
+ lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture);
+ } else {
+ /*
+ The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything,
+ do it again from the start.
+ */
+ if (prevReadCursorInBytesCapture < pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) {
+ /* Lock up to the end of the buffer. */
+ lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
+ lockSizeInBytesCapture = (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) - prevReadCursorInBytesCapture;
+ } else {
+ /* Lock starting from the start of the buffer. */
+ lockOffsetInBytesCapture = 0;
+ lockSizeInBytesCapture = physicalReadCursorInBytes;
+ }
+ }
-ma_result ma_context_get_device_info_from_MMDevice__wasapi(ma_context* pContext, ma_IMMDevice* pMMDevice, ma_share_mode shareMode, ma_bool32 onlySimpleInfo, ma_device_info* pInfo)
-{
- LPWSTR id;
- HRESULT hr;
+ #ifdef MA_DEBUG_OUTPUT
+ /*printf("[DirectSound] (Capture) physicalCaptureCursorInBytes=%d, physicalReadCursorInBytes=%d\n", physicalCaptureCursorInBytes, physicalReadCursorInBytes);*/
+ /*printf("[DirectSound] (Capture) lockOffsetInBytesCapture=%d, lockSizeInBytesCapture=%d\n", lockOffsetInBytesCapture, lockSizeInBytesCapture);*/
+ #endif
- ma_assert(pContext != NULL);
- ma_assert(pMMDevice != NULL);
- ma_assert(pInfo != NULL);
+ if (lockSizeInBytesCapture < pDevice->capture.internalPeriodSizeInFrames) {
+ ma_sleep(waitTimeInMilliseconds);
+ continue; /* Nothing is available in the capture buffer. */
+ }
- /* ID. */
- hr = ma_IMMDevice_GetId(pMMDevice, &id);
- if (SUCCEEDED(hr)) {
- size_t idlen = wcslen(id);
- if (idlen+1 > ma_countof(pInfo->id.wasapi)) {
- ma_CoTaskMemFree(pContext, id);
- ma_assert(MA_FALSE); /* NOTE: If this is triggered, please report it. It means the format of the ID must haved change and is too long to fit in our fixed sized buffer. */
- return MA_ERROR;
- }
+ hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
+ }
- ma_copy_memory(pInfo->id.wasapi, id, idlen * sizeof(wchar_t));
- pInfo->id.wasapi[idlen] = '\0';
+ #ifdef MA_DEBUG_OUTPUT
+ if (lockSizeInBytesCapture != mappedSizeInBytesCapture) {
+ printf("[DirectSound] (Capture) lockSizeInBytesCapture=%d != mappedSizeInBytesCapture=%d\n", lockSizeInBytesCapture, mappedSizeInBytesCapture);
+ }
+ #endif
+
+ ma_device__send_frames_to_client(pDevice, mappedSizeInBytesCapture/bpfDeviceCapture, pMappedDeviceBufferCapture);
+
+ hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.", ma_result_from_HRESULT(hr));
+ }
+ prevReadCursorInBytesCapture = lockOffsetInBytesCapture + mappedSizeInBytesCapture;
+
+ if (prevReadCursorInBytesCapture == (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture)) {
+ prevReadCursorInBytesCapture = 0;
+ }
+ } break;
+
+
+
+ case ma_device_type_playback:
+ {
+ DWORD availableBytesPlayback;
+ DWORD physicalPlayCursorInBytes;
+ DWORD physicalWriteCursorInBytes;
+ hr = ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes);
+ if (FAILED(hr)) {
+ break;
+ }
+
+ if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) {
+ physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback;
+ }
+ prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes;
+
+ /* If there's any bytes available for writing we can do that now. The space between the virtual cursor position and play cursor. */
+ if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+ /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+ availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */
+ } else {
+ /* This is an error. */
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[DirectSound] (Playback) WARNING: Play cursor has moved in front of the write cursor (same loop iterations). physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
+ #endif
+ availableBytesPlayback = 0;
+ }
+ } else {
+ /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+ } else {
+ /* This is an error. */
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[DirectSound] (Playback) WARNING: Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
+ #endif
+ availableBytesPlayback = 0;
+ }
+ }
+
+ #ifdef MA_DEBUG_OUTPUT
+ /*printf("[DirectSound] (Playback) physicalPlayCursorInBytes=%d, availableBytesPlayback=%d\n", physicalPlayCursorInBytes, availableBytesPlayback);*/
+ #endif
+
+ /* If there's no room available for writing we need to wait for more. */
+ if (availableBytesPlayback < pDevice->playback.internalPeriodSizeInFrames) {
+ /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */
+ if (availableBytesPlayback == 0 && !isPlaybackDeviceStarted) {
+ hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", ma_result_from_HRESULT(hr));
+ }
+ isPlaybackDeviceStarted = MA_TRUE;
+ } else {
+ ma_sleep(waitTimeInMilliseconds);
+ continue;
+ }
+ }
+
+ /* Getting here means there is room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */
+ lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback;
+ if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+ /* Same loop iteration. Go up to the end of the buffer. */
+ lockSizeInBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+ } else {
+ /* Different loop iterations. Go up to the physical play cursor. */
+ lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+ }
+
+ hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0);
+ if (FAILED(hr)) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
+ break;
+ }
+
+ /* At this point we have a buffer for output. */
+ ma_device__read_frames_from_client(pDevice, (mappedSizeInBytesPlayback/bpfDevicePlayback), pMappedDeviceBufferPlayback);
- ma_CoTaskMemFree(pContext, id);
- }
+ hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, mappedSizeInBytesPlayback, NULL, 0);
+ if (FAILED(hr)) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device.", ma_result_from_HRESULT(hr));
+ break;
+ }
- {
- ma_IPropertyStore *pProperties;
- hr = ma_IMMDevice_OpenPropertyStore(pMMDevice, STGM_READ, &pProperties);
- if (SUCCEEDED(hr)) {
- PROPVARIANT var;
+ virtualWriteCursorInBytesPlayback += mappedSizeInBytesPlayback;
+ if (virtualWriteCursorInBytesPlayback == pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) {
+ virtualWriteCursorInBytesPlayback = 0;
+ virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback;
+ }
+
+ /*
+ We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds
+ a bit of a buffer to prevent the playback buffer from getting starved.
+ */
+ framesWrittenToPlaybackDevice += mappedSizeInBytesPlayback/bpfDevicePlayback;
+ if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= pDevice->playback.internalPeriodSizeInFrames) {
+ hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", ma_result_from_HRESULT(hr));
+ }
+ isPlaybackDeviceStarted = MA_TRUE;
+ }
+ } break;
- /* Description / Friendly Name */
- ma_PropVariantInit(&var);
- hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &var);
- if (SUCCEEDED(hr)) {
- WideCharToMultiByte(CP_UTF8, 0, var.pwszVal, -1, pInfo->name, sizeof(pInfo->name), 0, FALSE);
- ma_PropVariantClear(pContext, &var);
- }
- ma_IPropertyStore_Release(pProperties);
+ default: return MA_INVALID_ARGS; /* Invalid device type. */
}
- }
- /* Format */
- if (!onlySimpleInfo) {
- ma_IAudioClient* pAudioClient;
- hr = ma_IMMDevice_Activate(pMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient);
- if (SUCCEEDED(hr)) {
- ma_result result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, pMMDevice, pAudioClient, shareMode, pInfo);
-
- ma_IAudioClient_Release(pAudioClient);
+ if (result != MA_SUCCESS) {
return result;
- } else {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate audio client for device info retrieval.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
}
}
- return MA_SUCCESS;
-}
-
-ma_result ma_context_enumerate_device_collection__wasapi(ma_context* pContext, ma_IMMDeviceCollection* pDeviceCollection, ma_device_type deviceType, ma_enum_devices_callback_proc callback, void* pUserData)
-{
- UINT deviceCount;
- HRESULT hr;
- ma_uint32 iDevice;
-
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
-
- hr = ma_IMMDeviceCollection_GetCount(pDeviceCollection, &deviceCount);
- if (FAILED(hr)) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to get playback device count.", MA_NO_DEVICE);
+ /* Getting here means the device is being stopped. */
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ hr = ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Stop() failed.", ma_result_from_HRESULT(hr));
+ }
}
- for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
- ma_device_info deviceInfo;
- ma_IMMDevice* pMMDevice;
-
- ma_zero_object(&deviceInfo);
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ /* The playback device should be drained before stopping. All we do is wait until the number of available bytes equals the size of the buffer. */
+ if (isPlaybackDeviceStarted) {
+ for (;;) {
+ DWORD availableBytesPlayback = 0;
+ DWORD physicalPlayCursorInBytes;
+ DWORD physicalWriteCursorInBytes;
+ hr = ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes);
+ if (FAILED(hr)) {
+ break;
+ }
- hr = ma_IMMDeviceCollection_Item(pDeviceCollection, iDevice, &pMMDevice);
- if (SUCCEEDED(hr)) {
- ma_result result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, ma_share_mode_shared, MA_TRUE, &deviceInfo); /* MA_TRUE = onlySimpleInfo. */
+ if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) {
+ physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback;
+ }
+ prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes;
- ma_IMMDevice_Release(pMMDevice);
- if (result == MA_SUCCESS) {
- ma_bool32 cbResult = callback(pContext, deviceType, &deviceInfo, pUserData);
- if (cbResult == MA_FALSE) {
+ if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+ /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+ availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */
+ } else {
+ break;
+ }
+ } else {
+ /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */
+ if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
+ availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+ } else {
+ break;
+ }
+ }
+
+ if (availableBytesPlayback >= (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback)) {
break;
}
+
+ ma_sleep(waitTimeInMilliseconds);
}
}
+
+ hr = ma_IDirectSoundBuffer_Stop((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer);
+ if (FAILED(hr)) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Stop() failed.", ma_result_from_HRESULT(hr));
+ }
+
+ ma_IDirectSoundBuffer_SetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0);
}
return MA_SUCCESS;
}
-#endif
-#ifdef MA_WIN32_DESKTOP
-ma_result ma_context_get_IAudioClient_Desktop__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IMMDevice** ppMMDevice)
+static ma_result ma_context_uninit__dsound(ma_context* pContext)
{
- ma_result result;
- HRESULT hr;
-
- ma_assert(pContext != NULL);
- ma_assert(ppAudioClient != NULL);
- ma_assert(ppMMDevice != NULL);
-
- result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, ppMMDevice);
- if (result != MA_SUCCESS) {
- return result;
- }
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_dsound);
- hr = ma_IMMDevice_Activate(*ppMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)ppAudioClient);
- if (FAILED(hr)) {
- return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
- }
+ ma_dlclose(pContext, pContext->dsound.hDSoundDLL);
return MA_SUCCESS;
}
-#else
-ma_result ma_context_get_IAudioClient_UWP__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IUnknown** ppActivatedInterface)
+
+static ma_result ma_context_init__dsound(const ma_context_config* pConfig, ma_context* pContext)
{
- ma_IActivateAudioInterfaceAsyncOperation *pAsyncOp = NULL;
- ma_completion_handler_uwp completionHandler;
- IID iid;
- LPOLESTR iidStr;
- HRESULT hr;
- ma_result result;
- HRESULT activateResult;
- ma_IUnknown* pActivatedInterface;
+ MA_ASSERT(pContext != NULL);
- ma_assert(pContext != NULL);
- ma_assert(ppAudioClient != NULL);
+ (void)pConfig;
- if (pDeviceID != NULL) {
- ma_copy_memory(&iid, pDeviceID->wasapi, sizeof(iid));
- } else {
- if (deviceType == ma_device_type_playback) {
- iid = MA_IID_DEVINTERFACE_AUDIO_RENDER;
- } else {
- iid = MA_IID_DEVINTERFACE_AUDIO_CAPTURE;
- }
+ pContext->dsound.hDSoundDLL = ma_dlopen(pContext, "dsound.dll");
+ if (pContext->dsound.hDSoundDLL == NULL) {
+ return MA_API_NOT_FOUND;
}
-#if defined(__cplusplus)
- hr = StringFromIID(iid, &iidStr);
-#else
- hr = StringFromIID(&iid, &iidStr);
-#endif
- if (FAILED(hr)) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to convert device IID to string for ActivateAudioInterfaceAsync(). Out of memory.", MA_OUT_OF_MEMORY);
- }
+ pContext->dsound.DirectSoundCreate = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCreate");
+ pContext->dsound.DirectSoundEnumerateA = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundEnumerateA");
+ pContext->dsound.DirectSoundCaptureCreate = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCaptureCreate");
+ pContext->dsound.DirectSoundCaptureEnumerateA = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCaptureEnumerateA");
- result = ma_completion_handler_uwp_init(&completionHandler);
- if (result != MA_SUCCESS) {
- ma_CoTaskMemFree(pContext, iidStr);
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for waiting for ActivateAudioInterfaceAsync().", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ pContext->onUninit = ma_context_uninit__dsound;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__dsound;
+ pContext->onEnumDevices = ma_context_enumerate_devices__dsound;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__dsound;
+ pContext->onDeviceInit = ma_device_init__dsound;
+ pContext->onDeviceUninit = ma_device_uninit__dsound;
+ pContext->onDeviceStart = NULL; /* Not used. Started in onDeviceMainLoop. */
+ pContext->onDeviceStop = NULL; /* Not used. Stopped in onDeviceMainLoop. */
+ pContext->onDeviceMainLoop = ma_device_main_loop__dsound;
-#if defined(__cplusplus)
- hr = ActivateAudioInterfaceAsync(iidStr, MA_IID_IAudioClient, NULL, (IActivateAudioInterfaceCompletionHandler*)&completionHandler, (IActivateAudioInterfaceAsyncOperation**)&pAsyncOp);
-#else
- hr = ActivateAudioInterfaceAsync(iidStr, &MA_IID_IAudioClient, NULL, (IActivateAudioInterfaceCompletionHandler*)&completionHandler, (IActivateAudioInterfaceAsyncOperation**)&pAsyncOp);
+ return MA_SUCCESS;
+}
#endif
- if (FAILED(hr)) {
- ma_completion_handler_uwp_uninit(&completionHandler);
- ma_CoTaskMemFree(pContext, iidStr);
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] ActivateAudioInterfaceAsync() failed.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
-
- ma_CoTaskMemFree(pContext, iidStr);
-
- /* Wait for the async operation for finish. */
- ma_completion_handler_uwp_wait(&completionHandler);
- ma_completion_handler_uwp_uninit(&completionHandler);
- hr = ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(pAsyncOp, &activateResult, &pActivatedInterface);
- ma_IActivateAudioInterfaceAsyncOperation_Release(pAsyncOp);
- if (FAILED(hr) || FAILED(activateResult)) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
- /* Here is where we grab the IAudioClient interface. */
- hr = ma_IUnknown_QueryInterface(pActivatedInterface, &MA_IID_IAudioClient, (void**)ppAudioClient);
- if (FAILED(hr)) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to query IAudioClient interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+/******************************************************************************
- if (ppActivatedInterface) {
- *ppActivatedInterface = pActivatedInterface;
- } else {
- ma_IUnknown_Release(pActivatedInterface);
- }
+WinMM Backend
- return MA_SUCCESS;
-}
-#endif
+******************************************************************************/
+#ifdef MA_HAS_WINMM
-ma_result ma_context_get_IAudioClient__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_WASAPIDeviceInterface** ppDeviceInterface)
+/*
+Some older compilers don't have WAVEOUTCAPS2A and WAVEINCAPS2A, so we'll need to write this ourselves. These structures
+are exactly the same as the older ones but they have a few GUIDs for manufacturer/product/name identification. I'm keeping
+the names the same as the Win32 library for consistency, but namespaced to avoid naming conflicts with the Win32 version.
+*/
+typedef struct
{
-#ifdef MA_WIN32_DESKTOP
- return ma_context_get_IAudioClient_Desktop__wasapi(pContext, deviceType, pDeviceID, ppAudioClient, ppDeviceInterface);
-#else
- return ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, ppAudioClient, ppDeviceInterface);
-#endif
-}
-
-
-ma_result ma_context_enumerate_devices__wasapi(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+ WORD wMid;
+ WORD wPid;
+ MMVERSION vDriverVersion;
+ CHAR szPname[MAXPNAMELEN];
+ DWORD dwFormats;
+ WORD wChannels;
+ WORD wReserved1;
+ DWORD dwSupport;
+ GUID ManufacturerGuid;
+ GUID ProductGuid;
+ GUID NameGuid;
+} MA_WAVEOUTCAPS2A;
+typedef struct
{
- /* Different enumeration for desktop and UWP. */
-#ifdef MA_WIN32_DESKTOP
- /* Desktop */
- HRESULT hr;
- ma_IMMDeviceEnumerator* pDeviceEnumerator;
- ma_IMMDeviceCollection* pDeviceCollection;
+ WORD wMid;
+ WORD wPid;
+ MMVERSION vDriverVersion;
+ CHAR szPname[MAXPNAMELEN];
+ DWORD dwFormats;
+ WORD wChannels;
+ WORD wReserved1;
+ GUID ManufacturerGuid;
+ GUID ProductGuid;
+ GUID NameGuid;
+} MA_WAVEINCAPS2A;
+
+/* winmm entry points. These are resolved at runtime (see the context init that ma_dlsym()s them) rather than linked directly. */
+/* Playback (waveOut*) API. */
+typedef UINT (WINAPI * MA_PFN_waveOutGetNumDevs)(void);
+typedef MMRESULT (WINAPI * MA_PFN_waveOutGetDevCapsA)(ma_uintptr uDeviceID, LPWAVEOUTCAPSA pwoc, UINT cbwoc);
+typedef MMRESULT (WINAPI * MA_PFN_waveOutOpen)(LPHWAVEOUT phwo, UINT uDeviceID, LPCWAVEFORMATEX pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen);
+typedef MMRESULT (WINAPI * MA_PFN_waveOutClose)(HWAVEOUT hwo);
+typedef MMRESULT (WINAPI * MA_PFN_waveOutPrepareHeader)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh);
+typedef MMRESULT (WINAPI * MA_PFN_waveOutUnprepareHeader)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh);
+typedef MMRESULT (WINAPI * MA_PFN_waveOutWrite)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh);
+typedef MMRESULT (WINAPI * MA_PFN_waveOutReset)(HWAVEOUT hwo);
+/* Capture (waveIn*) API. */
+typedef UINT (WINAPI * MA_PFN_waveInGetNumDevs)(void);
+typedef MMRESULT (WINAPI * MA_PFN_waveInGetDevCapsA)(ma_uintptr uDeviceID, LPWAVEINCAPSA pwic, UINT cbwic);
+typedef MMRESULT (WINAPI * MA_PFN_waveInOpen)(LPHWAVEIN phwi, UINT uDeviceID, LPCWAVEFORMATEX pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen);
+typedef MMRESULT (WINAPI * MA_PFN_waveInClose)(HWAVEIN hwi);
+typedef MMRESULT (WINAPI * MA_PFN_waveInPrepareHeader)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh);
+typedef MMRESULT (WINAPI * MA_PFN_waveInUnprepareHeader)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh);
+typedef MMRESULT (WINAPI * MA_PFN_waveInAddBuffer)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh);
+typedef MMRESULT (WINAPI * MA_PFN_waveInStart)(HWAVEIN hwi);
+typedef MMRESULT (WINAPI * MA_PFN_waveInReset)(HWAVEIN hwi);
- hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator);
- if (FAILED(hr)) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+static ma_result ma_result_from_MMRESULT(MMRESULT resultMM)
+{
+ switch (resultMM) {
+ case MMSYSERR_NOERROR: return MA_SUCCESS;
+ case MMSYSERR_BADDEVICEID: return MA_INVALID_ARGS;
+ case MMSYSERR_INVALHANDLE: return MA_INVALID_ARGS;
+ case MMSYSERR_NOMEM: return MA_OUT_OF_MEMORY;
+ case MMSYSERR_INVALFLAG: return MA_INVALID_ARGS;
+ case MMSYSERR_INVALPARAM: return MA_INVALID_ARGS;
+ case MMSYSERR_HANDLEBUSY: return MA_BUSY;
+ case MMSYSERR_ERROR: return MA_ERROR;
+ default: return MA_ERROR;
}
+}
- /* Playback. */
- hr = ma_IMMDeviceEnumerator_EnumAudioEndpoints(pDeviceEnumerator, ma_eRender, MA_MM_DEVICE_STATE_ACTIVE, &pDeviceCollection);
- if (SUCCEEDED(hr)) {
- ma_context_enumerate_device_collection__wasapi(pContext, pDeviceCollection, ma_device_type_playback, callback, pUserData);
- ma_IMMDeviceCollection_Release(pDeviceCollection);
- }
+static char* ma_find_last_character(char* str, char ch)
+{
+ char* last;
- /* Capture. */
- hr = ma_IMMDeviceEnumerator_EnumAudioEndpoints(pDeviceEnumerator, ma_eCapture, MA_MM_DEVICE_STATE_ACTIVE, &pDeviceCollection);
- if (SUCCEEDED(hr)) {
- ma_context_enumerate_device_collection__wasapi(pContext, pDeviceCollection, ma_device_type_capture, callback, pUserData);
- ma_IMMDeviceCollection_Release(pDeviceCollection);
+ if (str == NULL) {
+ return NULL;
}
- ma_IMMDeviceEnumerator_Release(pDeviceEnumerator);
-#else
- /*
- UWP
-
- The MMDevice API is only supported on desktop applications. For now, while I'm still figuring out how to properly enumerate
- over devices without using MMDevice, I'm restricting devices to defaults.
-
- Hint: DeviceInformation::FindAllAsync() with DeviceClass.AudioCapture/AudioRender. https://blogs.windows.com/buildingapps/2014/05/15/real-time-audio-in-windows-store-and-windows-phone-apps/
- */
- if (callback) {
- ma_bool32 cbResult = MA_TRUE;
-
- /* Playback. */
- if (cbResult) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ last = NULL;
+ while (*str != '\0') {
+ if (*str == ch) {
+ last = str;
}
- /* Capture. */
- if (cbResult) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
- cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
- }
+ str += 1;
}
-#endif
- return MA_SUCCESS;
+ return last;
}
-ma_result ma_context_get_device_info__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+static ma_uint32 ma_get_period_size_in_bytes(ma_uint32 periodSizeInFrames, ma_format format, ma_uint32 channels)
{
-#ifdef MA_WIN32_DESKTOP
- ma_IMMDevice* pMMDevice = NULL;
- ma_result result;
-
- result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, &pMMDevice);
- if (result != MA_SUCCESS) {
- return result;
- }
+ return periodSizeInFrames * ma_get_bytes_per_frame(format, channels);
+}
- result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, shareMode, MA_FALSE, pDeviceInfo); /* MA_FALSE = !onlySimpleInfo. */
- ma_IMMDevice_Release(pMMDevice);
- return result;
-#else
- ma_IAudioClient* pAudioClient;
- ma_result result;
+/*
+Our own "WAVECAPS" structure that contains generic information shared between WAVEOUTCAPS2 and WAVEINCAPS2 so
+we can do things generically and typesafely. Names are being kept the same for consistency.
+*/
+typedef struct
+{
+ CHAR szPname[MAXPNAMELEN]; /* Human-readable device name. */
+ DWORD dwFormats; /* WAVE_FORMAT_* capability flags; decoded by ma_get_best_info_from_formats_flags__winmm(). */
+ WORD wChannels; /* Channel count reported by the driver. */
+ GUID NameGuid; /* NOTE(review): presumably used for an extended device-name lookup — confirm against the enumeration code. */
+} MA_WAVECAPSA;
- /* UWP currently only uses default devices. */
- if (deviceType == ma_device_type_playback) {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- } else {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
- }
+static ma_result ma_get_best_info_from_formats_flags__winmm(DWORD dwFormats, WORD channels, WORD* pBitsPerSample, DWORD* pSampleRate)
+{
+ WORD bitsPerSample = 0;
+ DWORD sampleRate = 0;
- /* Not currently supporting exclusive mode on UWP. */
- if (shareMode == ma_share_mode_exclusive) {
- return MA_ERROR;
+ if (pBitsPerSample) {
+ *pBitsPerSample = 0;
+ }
+ if (pSampleRate) {
+ *pSampleRate = 0;
}
- result = ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, &pAudioClient, NULL);
- if (result != MA_SUCCESS) {
- return result;
+ if (channels == 1) {
+ bitsPerSample = 16;
+ if ((dwFormats & WAVE_FORMAT_48M16) != 0) {
+ sampleRate = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44M16) != 0) {
+ sampleRate = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2M16) != 0) {
+ sampleRate = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1M16) != 0) {
+ sampleRate = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96M16) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 8;
+ if ((dwFormats & WAVE_FORMAT_48M08) != 0) {
+ sampleRate = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44M08) != 0) {
+ sampleRate = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2M08) != 0) {
+ sampleRate = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1M08) != 0) {
+ sampleRate = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96M08) != 0) {
+ sampleRate = 96000;
+ } else {
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ bitsPerSample = 16;
+ if ((dwFormats & WAVE_FORMAT_48S16) != 0) {
+ sampleRate = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44S16) != 0) {
+ sampleRate = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2S16) != 0) {
+ sampleRate = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1S16) != 0) {
+ sampleRate = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96S16) != 0) {
+ sampleRate = 96000;
+ } else {
+ bitsPerSample = 8;
+ if ((dwFormats & WAVE_FORMAT_48S08) != 0) {
+ sampleRate = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44S08) != 0) {
+ sampleRate = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2S08) != 0) {
+ sampleRate = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1S08) != 0) {
+ sampleRate = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96S08) != 0) {
+ sampleRate = 96000;
+ } else {
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+ }
}
- result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, NULL, pAudioClient, shareMode, pDeviceInfo);
+ if (pBitsPerSample) {
+ *pBitsPerSample = bitsPerSample;
+ }
+ if (pSampleRate) {
+ *pSampleRate = sampleRate;
+ }
- ma_IAudioClient_Release(pAudioClient);
- return result;
-#endif
+ return MA_SUCCESS;
}
-void ma_device_uninit__wasapi(ma_device* pDevice)
+static ma_result ma_formats_flags_to_WAVEFORMATEX__winmm(DWORD dwFormats, WORD channels, WAVEFORMATEX* pWF)
{
- ma_assert(pDevice != NULL);
+ MA_ASSERT(pWF != NULL);
-#ifdef MA_WIN32_DESKTOP
- if (pDevice->wasapi.pDeviceEnumerator) {
- ((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator)->lpVtbl->UnregisterEndpointNotificationCallback((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator, &pDevice->wasapi.notificationClient);
- ma_IMMDeviceEnumerator_Release((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator);
+ MA_ZERO_OBJECT(pWF);
+ pWF->cbSize = sizeof(*pWF);
+ pWF->wFormatTag = WAVE_FORMAT_PCM;
+ pWF->nChannels = (WORD)channels;
+ if (pWF->nChannels > 2) {
+ pWF->nChannels = 2;
}
-#endif
- if (pDevice->wasapi.pRenderClient) {
- ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient);
- }
- if (pDevice->wasapi.pCaptureClient) {
- ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+ if (channels == 1) {
+ pWF->wBitsPerSample = 16;
+ if ((dwFormats & WAVE_FORMAT_48M16) != 0) {
+ pWF->nSamplesPerSec = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44M16) != 0) {
+ pWF->nSamplesPerSec = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2M16) != 0) {
+ pWF->nSamplesPerSec = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1M16) != 0) {
+ pWF->nSamplesPerSec = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96M16) != 0) {
+ pWF->nSamplesPerSec = 96000;
+ } else {
+ pWF->wBitsPerSample = 8;
+ if ((dwFormats & WAVE_FORMAT_48M08) != 0) {
+ pWF->nSamplesPerSec = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44M08) != 0) {
+ pWF->nSamplesPerSec = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2M08) != 0) {
+ pWF->nSamplesPerSec = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1M08) != 0) {
+ pWF->nSamplesPerSec = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96M08) != 0) {
+ pWF->nSamplesPerSec = 96000;
+ } else {
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ pWF->wBitsPerSample = 16;
+ if ((dwFormats & WAVE_FORMAT_48S16) != 0) {
+ pWF->nSamplesPerSec = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44S16) != 0) {
+ pWF->nSamplesPerSec = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2S16) != 0) {
+ pWF->nSamplesPerSec = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1S16) != 0) {
+ pWF->nSamplesPerSec = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96S16) != 0) {
+ pWF->nSamplesPerSec = 96000;
+ } else {
+ pWF->wBitsPerSample = 8;
+ if ((dwFormats & WAVE_FORMAT_48S08) != 0) {
+ pWF->nSamplesPerSec = 48000;
+ } else if ((dwFormats & WAVE_FORMAT_44S08) != 0) {
+ pWF->nSamplesPerSec = 44100;
+ } else if ((dwFormats & WAVE_FORMAT_2S08) != 0) {
+ pWF->nSamplesPerSec = 22050;
+ } else if ((dwFormats & WAVE_FORMAT_1S08) != 0) {
+ pWF->nSamplesPerSec = 11025;
+ } else if ((dwFormats & WAVE_FORMAT_96S08) != 0) {
+ pWF->nSamplesPerSec = 96000;
+ } else {
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+ }
}
- if (pDevice->wasapi.pAudioClientPlayback) {
- ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
- }
- if (pDevice->wasapi.pAudioClientCapture) {
- ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
- }
+ pWF->nBlockAlign = (pWF->nChannels * pWF->wBitsPerSample) / 8;
+ pWF->nAvgBytesPerSec = pWF->nBlockAlign * pWF->nSamplesPerSec;
- if (pDevice->wasapi.hEventPlayback) {
- CloseHandle(pDevice->wasapi.hEventPlayback);
- }
- if (pDevice->wasapi.hEventCapture) {
- CloseHandle(pDevice->wasapi.hEventCapture);
- }
+ return MA_SUCCESS;
}
-
-typedef struct
+static ma_result ma_context_get_device_info_from_WAVECAPS(ma_context* pContext, MA_WAVECAPSA* pCaps, ma_device_info* pDeviceInfo)
{
- /* Input. */
- ma_format formatIn;
- ma_uint32 channelsIn;
- ma_uint32 sampleRateIn;
- ma_channel channelMapIn[MA_MAX_CHANNELS];
- ma_uint32 bufferSizeInFramesIn;
- ma_uint32 bufferSizeInMillisecondsIn;
- ma_uint32 periodsIn;
- ma_bool32 usingDefaultFormat;
- ma_bool32 usingDefaultChannels;
- ma_bool32 usingDefaultSampleRate;
- ma_bool32 usingDefaultChannelMap;
- ma_share_mode shareMode;
+ WORD bitsPerSample;
+ DWORD sampleRate;
+ ma_result result;
- /* Output. */
- ma_IAudioClient* pAudioClient;
- ma_IAudioRenderClient* pRenderClient;
- ma_IAudioCaptureClient* pCaptureClient;
- ma_format formatOut;
- ma_uint32 channelsOut;
- ma_uint32 sampleRateOut;
- ma_channel channelMapOut[MA_MAX_CHANNELS];
- ma_uint32 bufferSizeInFramesOut;
- ma_uint32 periodSizeInFramesOut;
- ma_uint32 periodsOut;
- char deviceName[256];
-} ma_device_init_internal_data__wasapi;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pCaps != NULL);
+ MA_ASSERT(pDeviceInfo != NULL);
-ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__wasapi* pData)
-{
- HRESULT hr;
- ma_result result = MA_SUCCESS;
- const char* errorMsg = "";
- MA_AUDCLNT_SHAREMODE shareMode = MA_AUDCLNT_SHAREMODE_SHARED;
- MA_REFERENCE_TIME bufferDurationInMicroseconds;
- ma_bool32 wasInitializedUsingIAudioClient3 = MA_FALSE;
- WAVEFORMATEXTENSIBLE wf;
- ma_WASAPIDeviceInterface* pDeviceInterface = NULL;
- ma_IAudioClient2* pAudioClient2;
+ /*
+ Name / Description
+
+ Unfortunately the name specified in WAVE(OUT/IN)CAPS2 is limited to 31 characters. This results in an unprofessional looking
+ situation where the names of the devices are truncated. To help work around this, we need to look at the name GUID and try
+ looking in the registry for the full name. If we can't find it there, we need to just fall back to the default name.
+ */
- ma_assert(pContext != NULL);
- ma_assert(pData != NULL);
+ /* Set the default to begin with. */
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), pCaps->szPname, (size_t)-1);
- /* This function is only used to initialize one device type: either playback or capture. Never full-duplex. */
- if (deviceType == ma_device_type_duplex) {
- return MA_INVALID_ARGS;
- }
+ /*
+ Now try the registry. There's a few things to consider here:
+ - The name GUID can be null, in which case we just need to stick to the original 31 characters.
+ - If the name GUID is not present in the registry we'll also need to stick to the original 31 characters.
+ - I like consistency, so I want the returned device names to be consistent with those returned by WASAPI and DirectSound. The
+ problem, however, is that WASAPI and DirectSound use the "<component> (<description>)" format (such as "Speakers (High Definition Audio)"),
+ but WinMM does not specify the component name. From my admittedly limited testing, I've noticed the component name seems to
+ usually fit within the 31 characters of the fixed sized buffer, so what I'm going to do is parse that string for the component
+ name, and then concatenate the name from the registry.
+ */
+ if (!ma_is_guid_equal(&pCaps->NameGuid, &MA_GUID_NULL)) {
+ wchar_t guidStrW[256];
+ if (((MA_PFN_StringFromGUID2)pContext->win32.StringFromGUID2)(&pCaps->NameGuid, guidStrW, ma_countof(guidStrW)) > 0) {
+ char guidStr[256];
+ char keyStr[1024];
+ HKEY hKey;
- pData->pAudioClient = NULL;
- pData->pRenderClient = NULL;
- pData->pCaptureClient = NULL;
+ WideCharToMultiByte(CP_UTF8, 0, guidStrW, -1, guidStr, sizeof(guidStr), 0, FALSE);
- result = ma_context_get_IAudioClient__wasapi(pContext, deviceType, pDeviceID, &pData->pAudioClient, &pDeviceInterface);
- if (result != MA_SUCCESS) {
- goto done;
- }
+ ma_strcpy_s(keyStr, sizeof(keyStr), "SYSTEM\\CurrentControlSet\\Control\\MediaCategories\\");
+ ma_strcat_s(keyStr, sizeof(keyStr), guidStr);
+ if (((MA_PFN_RegOpenKeyExA)pContext->win32.RegOpenKeyExA)(HKEY_LOCAL_MACHINE, keyStr, 0, KEY_READ, &hKey) == ERROR_SUCCESS) {
+ BYTE nameFromReg[512];
+ DWORD nameFromRegSize = sizeof(nameFromReg);
+ result = ((MA_PFN_RegQueryValueExA)pContext->win32.RegQueryValueExA)(hKey, "Name", 0, NULL, (LPBYTE)nameFromReg, (LPDWORD)&nameFromRegSize);
+ ((MA_PFN_RegCloseKey)pContext->win32.RegCloseKey)(hKey);
- /* Try enabling hardware offloading. */
- hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient2, (void**)&pAudioClient2);
- if (SUCCEEDED(hr)) {
- BOOL isHardwareOffloadingSupported = 0;
- hr = ma_IAudioClient2_IsOffloadCapable(pAudioClient2, MA_AudioCategory_Other, &isHardwareOffloadingSupported);
- if (SUCCEEDED(hr) && isHardwareOffloadingSupported) {
- ma_AudioClientProperties clientProperties;
- ma_zero_object(&clientProperties);
- clientProperties.cbSize = sizeof(clientProperties);
- clientProperties.bIsOffload = 1;
- clientProperties.eCategory = MA_AudioCategory_Other;
- ma_IAudioClient2_SetClientProperties(pAudioClient2, &clientProperties);
- }
- }
+ if (result == ERROR_SUCCESS) {
+ /* We have the value from the registry, so now we need to construct the name string. */
+ char name[1024];
+ if (ma_strcpy_s(name, sizeof(name), pDeviceInfo->name) == 0) {
+ char* nameBeg = ma_find_last_character(name, '(');
+ if (nameBeg != NULL) {
+ size_t leadingLen = (nameBeg - name);
+ ma_strncpy_s(nameBeg + 1, sizeof(name) - leadingLen, (const char*)nameFromReg, (size_t)-1);
+ /* The closing ")", if it can fit. */
+ if (leadingLen + nameFromRegSize < sizeof(name)-1) {
+ ma_strcat_s(name, sizeof(name), ")");
+ }
- /* Here is where we try to determine the best format to use with the device. If the client if wanting exclusive mode, first try finding the best format for that. If this fails, fall back to shared mode. */
- result = MA_FORMAT_NOT_SUPPORTED;
- if (pData->shareMode == ma_share_mode_exclusive) {
- #ifdef MA_WIN32_DESKTOP
- /* In exclusive mode on desktop we always use the backend's native format. */
- ma_IPropertyStore* pStore = NULL;
- hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pStore);
- if (SUCCEEDED(hr)) {
- PROPVARIANT prop;
- ma_PropVariantInit(&prop);
- hr = ma_IPropertyStore_GetValue(pStore, &MA_PKEY_AudioEngine_DeviceFormat, &prop);
- if (SUCCEEDED(hr)) {
- WAVEFORMATEX* pActualFormat = (WAVEFORMATEX*)prop.blob.pBlobData;
- hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pData->pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pActualFormat, NULL);
- if (SUCCEEDED(hr)) {
- ma_copy_memory(&wf, pActualFormat, sizeof(WAVEFORMATEXTENSIBLE));
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), name, (size_t)-1);
+ }
+ }
}
-
- ma_PropVariantClear(pContext, &prop);
}
-
- ma_IPropertyStore_Release(pStore);
- }
- #else
- /*
- I do not know how to query the device's native format on UWP so for now I'm just disabling support for
- exclusive mode. The alternative is to enumerate over different formats and check IsFormatSupported()
- until you find one that works.
-
- TODO: Add support for exclusive mode to UWP.
- */
- hr = S_FALSE;
- #endif
-
- if (hr == S_OK) {
- shareMode = MA_AUDCLNT_SHAREMODE_EXCLUSIVE;
- result = MA_SUCCESS;
- } else {
- result = MA_SHARE_MODE_NOT_SUPPORTED;
- }
- } else {
- /* In shared mode we are always using the format reported by the operating system. */
- WAVEFORMATEXTENSIBLE* pNativeFormat = NULL;
- hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pData->pAudioClient, (WAVEFORMATEX**)&pNativeFormat);
- if (hr != S_OK) {
- result = MA_FORMAT_NOT_SUPPORTED;
- } else {
- ma_copy_memory(&wf, pNativeFormat, sizeof(wf));
- result = MA_SUCCESS;
}
+ }
- ma_CoTaskMemFree(pContext, pNativeFormat);
- shareMode = MA_AUDCLNT_SHAREMODE_SHARED;
+ result = ma_get_best_info_from_formats_flags__winmm(pCaps->dwFormats, pCaps->wChannels, &bitsPerSample, &sampleRate);
+ if (result != MA_SUCCESS) {
+ return result;
}
- /* Return an error if we still haven't found a format. */
- if (result != MA_SUCCESS) {
- errorMsg = "[WASAPI] Failed to find best device mix format.";
- goto done;
+ pDeviceInfo->minChannels = pCaps->wChannels;
+ pDeviceInfo->maxChannels = pCaps->wChannels;
+ pDeviceInfo->minSampleRate = sampleRate;
+ pDeviceInfo->maxSampleRate = sampleRate;
+ pDeviceInfo->formatCount = 1;
+ if (bitsPerSample == 8) {
+ pDeviceInfo->formats[0] = ma_format_u8;
+ } else if (bitsPerSample == 16) {
+ pDeviceInfo->formats[0] = ma_format_s16;
+ } else if (bitsPerSample == 24) {
+ pDeviceInfo->formats[0] = ma_format_s24;
+ } else if (bitsPerSample == 32) {
+ pDeviceInfo->formats[0] = ma_format_s32;
+ } else {
+ return MA_FORMAT_NOT_SUPPORTED;
}
- pData->formatOut = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)&wf);
- pData->channelsOut = wf.Format.nChannels;
- pData->sampleRateOut = wf.Format.nSamplesPerSec;
+ return MA_SUCCESS;
+}
- /* Get the internal channel map based on the channel mask. */
- ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pData->channelsOut, pData->channelMapOut);
+static ma_result ma_context_get_device_info_from_WAVEOUTCAPS2(ma_context* pContext, MA_WAVEOUTCAPS2A* pCaps, ma_device_info* pDeviceInfo)
+{
+ MA_WAVECAPSA caps;
- /* If we're using a default buffer size we need to calculate it based on the efficiency of the system. */
- pData->periodsOut = pData->periodsIn;
- pData->bufferSizeInFramesOut = pData->bufferSizeInFramesIn;
- if (pData->bufferSizeInFramesOut == 0) {
- pData->bufferSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->bufferSizeInMillisecondsIn, pData->sampleRateOut);
- }
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pCaps != NULL);
+ MA_ASSERT(pDeviceInfo != NULL);
- bufferDurationInMicroseconds = ((ma_uint64)pData->bufferSizeInFramesOut * 1000 * 1000) / pData->sampleRateOut;
+ MA_COPY_MEMORY(caps.szPname, pCaps->szPname, sizeof(caps.szPname));
+ caps.dwFormats = pCaps->dwFormats;
+ caps.wChannels = pCaps->wChannels;
+ caps.NameGuid = pCaps->NameGuid;
+ return ma_context_get_device_info_from_WAVECAPS(pContext, &caps, pDeviceInfo);
+}
+static ma_result ma_context_get_device_info_from_WAVEINCAPS2(ma_context* pContext, MA_WAVEINCAPS2A* pCaps, ma_device_info* pDeviceInfo)
+{
+ MA_WAVECAPSA caps;
- /* Slightly different initialization for shared and exclusive modes. We try exclusive mode first, and if it fails, fall back to shared mode. */
- if (shareMode == MA_AUDCLNT_SHAREMODE_EXCLUSIVE) {
- MA_REFERENCE_TIME bufferDuration = (bufferDurationInMicroseconds / pData->periodsOut) * 10;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pCaps != NULL);
+ MA_ASSERT(pDeviceInfo != NULL);
- /*
- If the periodicy is too small, Initialize() will fail with AUDCLNT_E_INVALID_DEVICE_PERIOD. In this case we should just keep increasing
- it and trying it again.
- */
- hr = E_FAIL;
- for (;;) {
- hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK, bufferDuration, bufferDuration, (WAVEFORMATEX*)&wf, NULL);
- if (hr == MA_AUDCLNT_E_INVALID_DEVICE_PERIOD) {
- if (bufferDuration > 500*10000) {
- break;
- } else {
- if (bufferDuration == 0) { /* <-- Just a sanity check to prevent an infinit loop. Should never happen, but it makes me feel better. */
- break;
- }
+ MA_COPY_MEMORY(caps.szPname, pCaps->szPname, sizeof(caps.szPname));
+ caps.dwFormats = pCaps->dwFormats;
+ caps.wChannels = pCaps->wChannels;
+ caps.NameGuid = pCaps->NameGuid;
+ return ma_context_get_device_info_from_WAVECAPS(pContext, &caps, pDeviceInfo);
+}
- bufferDuration = bufferDuration * 2;
- continue;
- }
- } else {
- break;
- }
- }
-
- if (hr == MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) {
- UINT bufferSizeInFrames;
- hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &bufferSizeInFrames);
- if (SUCCEEDED(hr)) {
- bufferDuration = (MA_REFERENCE_TIME)((10000.0 * 1000 / wf.Format.nSamplesPerSec * bufferSizeInFrames) + 0.5);
- /* Unfortunately we need to release and re-acquire the audio client according to MSDN. Seems silly - why not just call IAudioClient_Initialize() again?! */
- ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient);
+static ma_bool32 ma_context_is_device_id_equal__winmm(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext;
- #ifdef MA_WIN32_DESKTOP
- hr = ma_IMMDevice_Activate(pDeviceInterface, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pData->pAudioClient);
- #else
- hr = ma_IUnknown_QueryInterface(pDeviceInterface, &MA_IID_IAudioClient, (void**)&pData->pAudioClient);
- #endif
+ return pID0->winmm == pID1->winmm;
+}
- if (SUCCEEDED(hr)) {
- hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK, bufferDuration, bufferDuration, (WAVEFORMATEX*)&wf, NULL);
- }
- }
- }
+static ma_result ma_context_enumerate_devices__winmm(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ UINT playbackDeviceCount;
+ UINT captureDeviceCount;
+ UINT iPlaybackDevice;
+ UINT iCaptureDevice;
- if (FAILED(hr)) {
- /* Failed to initialize in exclusive mode. Don't fall back to shared mode - instead tell the client about it. They can reinitialize in shared mode if they want. */
- if (hr == E_ACCESSDENIED) {
- errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Access denied.", result = MA_ACCESS_DENIED;
- } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) {
- errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Device in use.", result = MA_DEVICE_BUSY;
- } else {
- errorMsg = "[WASAPI] Failed to initialize device in exclusive mode."; result = MA_SHARE_MODE_NOT_SUPPORTED;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
+
+ /* Playback. */
+ playbackDeviceCount = ((MA_PFN_waveOutGetNumDevs)pContext->winmm.waveOutGetNumDevs)();
+ for (iPlaybackDevice = 0; iPlaybackDevice < playbackDeviceCount; ++iPlaybackDevice) {
+ MMRESULT result;
+ MA_WAVEOUTCAPS2A caps;
+
+ MA_ZERO_OBJECT(&caps);
+
+ result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(iPlaybackDevice, (WAVEOUTCAPSA*)&caps, sizeof(caps));
+ if (result == MMSYSERR_NOERROR) {
+ ma_device_info deviceInfo;
+
+ MA_ZERO_OBJECT(&deviceInfo);
+ deviceInfo.id.winmm = iPlaybackDevice;
+
+ if (ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) {
+ ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ if (cbResult == MA_FALSE) {
+ return MA_SUCCESS; /* Enumeration was stopped. */
+ }
}
- goto done;
}
}
- if (shareMode == MA_AUDCLNT_SHAREMODE_SHARED) {
- /* Low latency shared mode via IAudioClient3. */
-#ifndef MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE
- ma_IAudioClient3* pAudioClient3 = NULL;
- hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient3, (void**)&pAudioClient3);
- if (SUCCEEDED(hr)) {
- UINT32 defaultPeriodInFrames;
- UINT32 fundamentalPeriodInFrames;
- UINT32 minPeriodInFrames;
- UINT32 maxPeriodInFrames;
- hr = ma_IAudioClient3_GetSharedModeEnginePeriod(pAudioClient3, (WAVEFORMATEX*)&wf, &defaultPeriodInFrames, &fundamentalPeriodInFrames, &minPeriodInFrames, &maxPeriodInFrames);
- if (SUCCEEDED(hr)) {
- UINT32 desiredPeriodInFrames = pData->bufferSizeInFramesOut / pData->periodsOut;
- UINT32 actualPeriodInFrames = desiredPeriodInFrames;
-
- /* Make sure the period size is a multiple of fundamentalPeriodInFrames. */
- actualPeriodInFrames = actualPeriodInFrames / fundamentalPeriodInFrames;
- actualPeriodInFrames = actualPeriodInFrames * fundamentalPeriodInFrames;
-
- /* The period needs to be clamped between minPeriodInFrames and maxPeriodInFrames. */
- actualPeriodInFrames = ma_clamp(actualPeriodInFrames, minPeriodInFrames, maxPeriodInFrames);
-
- /* If the client requested a largish buffer than we don't actually want to use low latency shared mode because it forces small buffers. */
- if (actualPeriodInFrames >= desiredPeriodInFrames) {
- hr = ma_IAudioClient3_InitializeSharedAudioStream(pAudioClient3, MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK, actualPeriodInFrames, (WAVEFORMATEX*)&wf, NULL);
- if (SUCCEEDED(hr)) {
- wasInitializedUsingIAudioClient3 = MA_TRUE;
- pData->periodSizeInFramesOut = actualPeriodInFrames;
- pData->bufferSizeInFramesOut = actualPeriodInFrames * pData->periodsOut;
- }
- }
- }
+ /* Capture. */
+ captureDeviceCount = ((MA_PFN_waveInGetNumDevs)pContext->winmm.waveInGetNumDevs)();
+ for (iCaptureDevice = 0; iCaptureDevice < captureDeviceCount; ++iCaptureDevice) {
+ MMRESULT result;
+ MA_WAVEINCAPS2A caps;
- ma_IAudioClient3_Release(pAudioClient3);
- pAudioClient3 = NULL;
- }
-#endif
+ MA_ZERO_OBJECT(&caps);
- /* If we don't have an IAudioClient3 then we need to use the normal initialization routine. */
- if (!wasInitializedUsingIAudioClient3) {
- MA_REFERENCE_TIME bufferDuration = bufferDurationInMicroseconds*10;
- hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK, bufferDuration, 0, (WAVEFORMATEX*)&wf, NULL);
- if (FAILED(hr)) {
- if (hr == E_ACCESSDENIED) {
- errorMsg = "[WASAPI] Failed to initialize device. Access denied.", result = MA_ACCESS_DENIED;
- } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) {
- errorMsg = "[WASAPI] Failed to initialize device. Device in use.", result = MA_DEVICE_BUSY;
- } else {
- errorMsg = "[WASAPI] Failed to initialize device.", result = MA_FAILED_TO_OPEN_BACKEND_DEVICE;
- }
+ result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(iCaptureDevice, (WAVEINCAPSA*)&caps, sizeof(caps));
+ if (result == MMSYSERR_NOERROR) {
+ ma_device_info deviceInfo;
- goto done;
+ MA_ZERO_OBJECT(&deviceInfo);
+ deviceInfo.id.winmm = iCaptureDevice;
+
+ if (ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) {
+ ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+ if (cbResult == MA_FALSE) {
+ return MA_SUCCESS; /* Enumeration was stopped. */
+ }
}
}
}
- if (!wasInitializedUsingIAudioClient3) {
- hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &pData->bufferSizeInFramesOut);
- if (FAILED(hr)) {
- errorMsg = "[WASAPI] Failed to get audio client's actual buffer size.", result = MA_FAILED_TO_OPEN_BACKEND_DEVICE;
- goto done;
- }
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_get_device_info__winmm(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+{
+ UINT winMMDeviceID;
+
+ MA_ASSERT(pContext != NULL);
+
+ if (shareMode == ma_share_mode_exclusive) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
+ }
- pData->periodSizeInFramesOut = pData->bufferSizeInFramesOut / pData->periodsOut;
+ winMMDeviceID = 0;
+ if (pDeviceID != NULL) {
+ winMMDeviceID = (UINT)pDeviceID->winmm;
}
+ pDeviceInfo->id.winmm = winMMDeviceID;
+
if (deviceType == ma_device_type_playback) {
- hr = ma_IAudioClient_GetService((ma_IAudioClient*)pData->pAudioClient, &MA_IID_IAudioRenderClient, (void**)&pData->pRenderClient);
+ MMRESULT result;
+ MA_WAVEOUTCAPS2A caps;
+
+ MA_ZERO_OBJECT(&caps);
+
+ result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(winMMDeviceID, (WAVEOUTCAPSA*)&caps, sizeof(caps));
+ if (result == MMSYSERR_NOERROR) {
+ return ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, pDeviceInfo);
+ }
} else {
- hr = ma_IAudioClient_GetService((ma_IAudioClient*)pData->pAudioClient, &MA_IID_IAudioCaptureClient, (void**)&pData->pCaptureClient);
+ MMRESULT result;
+ MA_WAVEINCAPS2A caps;
+
+ MA_ZERO_OBJECT(&caps);
+
+ result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(winMMDeviceID, (WAVEINCAPSA*)&caps, sizeof(caps));
+ if (result == MMSYSERR_NOERROR) {
+ return ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, pDeviceInfo);
+ }
}
- if (FAILED(hr)) {
- errorMsg = "[WASAPI] Failed to get audio client service.", result = MA_API_NOT_FOUND;
- goto done;
+ return MA_NO_DEVICE;
+}
+
+
+static void ma_device_uninit__winmm(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ((MA_PFN_waveInClose)pDevice->pContext->winmm.waveInClose)((HWAVEIN)pDevice->winmm.hDeviceCapture);
+ CloseHandle((HANDLE)pDevice->winmm.hEventCapture);
}
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ((MA_PFN_waveOutReset)pDevice->pContext->winmm.waveOutReset)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
+ ((MA_PFN_waveOutClose)pDevice->pContext->winmm.waveOutClose)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
+ CloseHandle((HANDLE)pDevice->winmm.hEventPlayback);
+ }
- /* Grab the name of the device. */
-#ifdef MA_WIN32_DESKTOP
- {
- ma_IPropertyStore *pProperties;
- hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pProperties);
- if (SUCCEEDED(hr)) {
- PROPVARIANT varName;
- ma_PropVariantInit(&varName);
- hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &varName);
- if (SUCCEEDED(hr)) {
- WideCharToMultiByte(CP_UTF8, 0, varName.pwszVal, -1, pData->deviceName, sizeof(pData->deviceName), 0, FALSE);
- ma_PropVariantClear(pContext, &varName);
- }
+ ma__free_from_callbacks(pDevice->winmm._pHeapData, &pDevice->pContext->allocationCallbacks);
- ma_IPropertyStore_Release(pProperties);
+ MA_ZERO_OBJECT(&pDevice->winmm); /* Safety. */
+}
+
+static ma_result ma_device_init__winmm(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+{
+ const char* errorMsg = "";
+ ma_result errorCode = MA_ERROR;
+ ma_result result = MA_SUCCESS;
+ ma_uint32 heapSize;
+ UINT winMMDeviceIDPlayback = 0;
+ UINT winMMDeviceIDCapture = 0;
+ ma_uint32 periodSizeInMilliseconds;
+
+ MA_ASSERT(pDevice != NULL);
+ MA_ZERO_OBJECT(&pDevice->winmm);
+
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
+ }
+
+ /* No exclusive mode with WinMM. */
+ if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
+ ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
+ }
+
+ periodSizeInMilliseconds = pConfig->periodSizeInMilliseconds;
+ if (periodSizeInMilliseconds == 0) {
+ periodSizeInMilliseconds = ma_calculate_buffer_size_in_milliseconds_from_frames(pConfig->periodSizeInFrames, pConfig->sampleRate);
+ }
+
+ /* WinMM has horrible latency. */
+ if (pDevice->usingDefaultBufferSize) {
+ if (pConfig->performanceProfile == ma_performance_profile_low_latency) {
+ periodSizeInMilliseconds = 40;
+ } else {
+ periodSizeInMilliseconds = 400;
}
}
-#endif
-done:
- /* Clean up. */
-#ifdef MA_WIN32_DESKTOP
- if (pDeviceInterface != NULL) {
- ma_IMMDevice_Release(pDeviceInterface);
+
+ if (pConfig->playback.pDeviceID != NULL) {
+ winMMDeviceIDPlayback = (UINT)pConfig->playback.pDeviceID->winmm;
}
-#else
- if (pDeviceInterface != NULL) {
- ma_IUnknown_Release(pDeviceInterface);
+ if (pConfig->capture.pDeviceID != NULL) {
+ winMMDeviceIDCapture = (UINT)pConfig->capture.pDeviceID->winmm;
}
-#endif
- if (result != MA_SUCCESS) {
- if (pData->pRenderClient) {
- ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pData->pRenderClient);
- pData->pRenderClient = NULL;
+ /* The capture device needs to be initialized first. */
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ WAVEINCAPSA caps;
+ WAVEFORMATEX wf;
+ MMRESULT resultMM;
+
+ /* We use an event to know when a new fragment needs to be enqueued. */
+ pDevice->winmm.hEventCapture = (ma_handle)CreateEventW(NULL, TRUE, TRUE, NULL);
+ if (pDevice->winmm.hEventCapture == NULL) {
+ errorMsg = "[WinMM] Failed to create event for fragment enqueing for the capture device.", errorCode = ma_result_from_GetLastError(GetLastError());
+ goto on_error;
}
- if (pData->pCaptureClient) {
- ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pData->pCaptureClient);
- pData->pCaptureClient = NULL;
+
+ /* The format should be based on the device's actual format. */
+ if (((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(winMMDeviceIDCapture, &caps, sizeof(caps)) != MMSYSERR_NOERROR) {
+ errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED;
+ goto on_error;
}
- if (pData->pAudioClient) {
- ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient);
- pData->pAudioClient = NULL;
+
+ result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf);
+ if (result != MA_SUCCESS) {
+ errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result;
+ goto on_error;
}
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, errorMsg, result);
- } else {
- return MA_SUCCESS;
+ resultMM = ((MA_PFN_waveInOpen)pDevice->pContext->winmm.waveInOpen)((LPHWAVEIN)&pDevice->winmm.hDeviceCapture, winMMDeviceIDCapture, &wf, (DWORD_PTR)pDevice->winmm.hEventCapture, (DWORD_PTR)pDevice, CALLBACK_EVENT | WAVE_ALLOWSYNC);
+ if (resultMM != MMSYSERR_NOERROR) {
+ errorMsg = "[WinMM] Failed to open capture device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+ goto on_error;
+ }
+
+ pDevice->capture.internalFormat = ma_format_from_WAVEFORMATEX(&wf);
+ pDevice->capture.internalChannels = wf.nChannels;
+ pDevice->capture.internalSampleRate = wf.nSamplesPerSec;
+ ma_get_standard_channel_map(ma_standard_channel_map_microsoft, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
+ pDevice->capture.internalPeriods = pConfig->periods;
+ pDevice->capture.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, pDevice->capture.internalSampleRate);
}
-}
-ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType)
-{
- ma_device_init_internal_data__wasapi data;
- ma_result result;
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ WAVEOUTCAPSA caps;
+ WAVEFORMATEX wf;
+ MMRESULT resultMM;
- ma_assert(pDevice != NULL);
+ /* We use an event to know when a new fragment needs to be enqueued. */
+ pDevice->winmm.hEventPlayback = (ma_handle)CreateEvent(NULL, TRUE, TRUE, NULL);
+ if (pDevice->winmm.hEventPlayback == NULL) {
+ errorMsg = "[WinMM] Failed to create event for fragment enqueing for the playback device.", errorCode = ma_result_from_GetLastError(GetLastError());
+ goto on_error;
+ }
- /* We only re-initialize the playback or capture device. Never a full-duplex device. */
- if (deviceType == ma_device_type_duplex) {
- return MA_INVALID_ARGS;
- }
+ /* The format should be based on the device's actual format. */
+ if (((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(winMMDeviceIDPlayback, &caps, sizeof(caps)) != MMSYSERR_NOERROR) {
+ errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED;
+ goto on_error;
+ }
- if (deviceType == ma_device_type_capture) {
- data.formatIn = pDevice->capture.format;
- data.channelsIn = pDevice->capture.channels;
- ma_copy_memory(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap));
- data.shareMode = pDevice->capture.shareMode;
- data.usingDefaultFormat = pDevice->capture.usingDefaultFormat;
- data.usingDefaultChannels = pDevice->capture.usingDefaultChannels;
- data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap;
- } else {
- data.formatIn = pDevice->playback.format;
- data.channelsIn = pDevice->playback.channels;
- ma_copy_memory(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap));
- data.shareMode = pDevice->playback.shareMode;
- data.usingDefaultFormat = pDevice->playback.usingDefaultFormat;
- data.usingDefaultChannels = pDevice->playback.usingDefaultChannels;
- data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap;
+ result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf);
+ if (result != MA_SUCCESS) {
+ errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result;
+ goto on_error;
+ }
+
+ resultMM = ((MA_PFN_waveOutOpen)pContext->winmm.waveOutOpen)((LPHWAVEOUT)&pDevice->winmm.hDevicePlayback, winMMDeviceIDPlayback, &wf, (DWORD_PTR)pDevice->winmm.hEventPlayback, (DWORD_PTR)pDevice, CALLBACK_EVENT | WAVE_ALLOWSYNC);
+ if (resultMM != MMSYSERR_NOERROR) {
+ errorMsg = "[WinMM] Failed to open playback device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+ goto on_error;
+ }
+
+ pDevice->playback.internalFormat = ma_format_from_WAVEFORMATEX(&wf);
+ pDevice->playback.internalChannels = wf.nChannels;
+ pDevice->playback.internalSampleRate = wf.nSamplesPerSec;
+ ma_get_standard_channel_map(ma_standard_channel_map_microsoft, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
+ pDevice->playback.internalPeriods = pConfig->periods;
+ pDevice->playback.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, pDevice->playback.internalSampleRate);
}
+
+ /*
+ The heap allocated data is allocated like so:
- data.sampleRateIn = pDevice->sampleRate;
- data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
- data.bufferSizeInFramesIn = pDevice->wasapi.originalBufferSizeInFrames;
- data.bufferSizeInMillisecondsIn = pDevice->wasapi.originalBufferSizeInMilliseconds;
- data.periodsIn = pDevice->wasapi.originalPeriods;
- result = ma_device_init_internal__wasapi(pDevice->pContext, deviceType, NULL, &data);
- if (result != MA_SUCCESS) {
- return result;
+ [Capture WAVEHDRs][Playback WAVEHDRs][Capture Intermediary Buffer][Playback Intermediary Buffer]
+ */
+ heapSize = 0;
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ heapSize += sizeof(WAVEHDR)*pDevice->capture.internalPeriods + (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+ }
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ heapSize += sizeof(WAVEHDR)*pDevice->playback.internalPeriods + (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
+ }
+
+ pDevice->winmm._pHeapData = (ma_uint8*)ma__calloc_from_callbacks(heapSize, &pContext->allocationCallbacks);
+ if (pDevice->winmm._pHeapData == NULL) {
+ errorMsg = "[WinMM] Failed to allocate memory for the intermediary buffer.", errorCode = MA_OUT_OF_MEMORY;
+ goto on_error;
}
- /* At this point we have some new objects ready to go. We need to uninitialize the previous ones and then set the new ones. */
- if (deviceType == ma_device_type_capture) {
- if (pDevice->wasapi.pCaptureClient) {
- ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
- pDevice->wasapi.pCaptureClient = NULL;
- }
-
- if (pDevice->wasapi.pAudioClientCapture) {
- ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
- pDevice->wasapi.pAudioClientCapture = NULL;
- }
+ MA_ZERO_MEMORY(pDevice->winmm._pHeapData, heapSize);
- pDevice->wasapi.pAudioClientCapture = data.pAudioClient;
- pDevice->wasapi.pCaptureClient = data.pCaptureClient;
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ ma_uint32 iPeriod;
- pDevice->capture.internalFormat = data.formatOut;
- pDevice->capture.internalChannels = data.channelsOut;
- pDevice->capture.internalSampleRate = data.sampleRateOut;
- ma_copy_memory(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
- pDevice->capture.internalBufferSizeInFrames = data.bufferSizeInFramesOut;
- pDevice->capture.internalPeriods = data.periodsOut;
- ma_strcpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), data.deviceName);
+ if (pConfig->deviceType == ma_device_type_capture) {
+ pDevice->winmm.pWAVEHDRCapture = pDevice->winmm._pHeapData;
+ pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods));
+ } else {
+ pDevice->winmm.pWAVEHDRCapture = pDevice->winmm._pHeapData;
+ pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods + pDevice->playback.internalPeriods));
+ }
- ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, pDevice->wasapi.hEventCapture);
+ /* Prepare headers. */
+ for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) {
+ ma_uint32 periodSizeInBytes = ma_get_period_size_in_bytes(pDevice->capture.internalPeriodSizeInFrames, pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut;
- ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualBufferSizeInFramesCapture);
+ ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].lpData = (LPSTR)(pDevice->winmm.pIntermediaryBufferCapture + (periodSizeInBytes*iPeriod));
+ ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwBufferLength = periodSizeInBytes;
+ ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwFlags = 0L;
+ ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwLoops = 0L;
+ ((MA_PFN_waveInPrepareHeader)pContext->winmm.waveInPrepareHeader)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(WAVEHDR));
- /* The device may be in a started state. If so we need to immediately restart it. */
- if (pDevice->wasapi.isStartedCapture) {
- HRESULT hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
- if (FAILED(hr)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal capture device after reinitialization.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
+ /*
+            The user data of the WAVEHDR structure is a single flag that controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means
+ it's unlocked and available for writing. A value of 1 means it's locked.
+ */
+ ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwUser = 0;
}
}
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ma_uint32 iPeriod;
- if (deviceType == ma_device_type_playback) {
- if (pDevice->wasapi.pRenderClient) {
- ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient);
- pDevice->wasapi.pRenderClient = NULL;
+ if (pConfig->deviceType == ma_device_type_playback) {
+ pDevice->winmm.pWAVEHDRPlayback = pDevice->winmm._pHeapData;
+ pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*pDevice->playback.internalPeriods);
+ } else {
+ pDevice->winmm.pWAVEHDRPlayback = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods));
+ pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods + pDevice->playback.internalPeriods)) + (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
}
- if (pDevice->wasapi.pAudioClientPlayback) {
- ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
- pDevice->wasapi.pAudioClientPlayback = NULL;
- }
+ /* Prepare headers. */
+ for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) {
+ ma_uint32 periodSizeInBytes = ma_get_period_size_in_bytes(pDevice->playback.internalPeriodSizeInFrames, pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- pDevice->wasapi.pAudioClientPlayback = data.pAudioClient;
- pDevice->wasapi.pRenderClient = data.pRenderClient;
+ ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].lpData = (LPSTR)(pDevice->winmm.pIntermediaryBufferPlayback + (periodSizeInBytes*iPeriod));
+ ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwBufferLength = periodSizeInBytes;
+ ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwFlags = 0L;
+ ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwLoops = 0L;
+ ((MA_PFN_waveOutPrepareHeader)pContext->winmm.waveOutPrepareHeader)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(WAVEHDR));
- pDevice->playback.internalFormat = data.formatOut;
- pDevice->playback.internalChannels = data.channelsOut;
- pDevice->playback.internalSampleRate = data.sampleRateOut;
- ma_copy_memory(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
- pDevice->playback.internalBufferSizeInFrames = data.bufferSizeInFramesOut;
- pDevice->playback.internalPeriods = data.periodsOut;
- ma_strcpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), data.deviceName);
+ /*
+            The user data of the WAVEHDR structure is a single flag that controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means
+ it's unlocked and available for writing. A value of 1 means it's locked.
+ */
+ ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwUser = 0;
+ }
+ }
- ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, pDevice->wasapi.hEventPlayback);
+ return MA_SUCCESS;
- pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut;
- ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualBufferSizeInFramesPlayback);
+on_error:
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ if (pDevice->winmm.pWAVEHDRCapture != NULL) {
+ ma_uint32 iPeriod;
+ for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) {
+ ((MA_PFN_waveInUnprepareHeader)pContext->winmm.waveInUnprepareHeader)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(WAVEHDR));
+ }
+ }
- /* The device may be in a started state. If so we need to immediately restart it. */
- if (pDevice->wasapi.isStartedPlayback) {
- HRESULT hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
- if (FAILED(hr)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device after reinitialization.", MA_FAILED_TO_START_BACKEND_DEVICE);
+ ((MA_PFN_waveInClose)pContext->winmm.waveInClose)((HWAVEIN)pDevice->winmm.hDeviceCapture);
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        if (pDevice->winmm.pWAVEHDRPlayback != NULL) {
+ ma_uint32 iPeriod;
+ for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) {
+ ((MA_PFN_waveOutUnprepareHeader)pContext->winmm.waveOutUnprepareHeader)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(WAVEHDR));
}
}
+
+ ((MA_PFN_waveOutClose)pContext->winmm.waveOutClose)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
}
- return MA_SUCCESS;
+ ma__free_from_callbacks(pDevice->winmm._pHeapData, &pContext->allocationCallbacks);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, errorMsg, errorCode);
}
-ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+static ma_result ma_device_stop__winmm(ma_device* pDevice)
{
- ma_result result = MA_SUCCESS;
+ MMRESULT resultMM;
- (void)pContext;
+ MA_ASSERT(pDevice != NULL);
- ma_assert(pContext != NULL);
- ma_assert(pDevice != NULL);
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ if (pDevice->winmm.hDeviceCapture == NULL) {
+ return MA_INVALID_ARGS;
+ }
- ma_zero_object(&pDevice->wasapi);
- pDevice->wasapi.originalBufferSizeInFrames = pConfig->bufferSizeInFrames;
- pDevice->wasapi.originalBufferSizeInMilliseconds = pConfig->bufferSizeInMilliseconds;
- pDevice->wasapi.originalPeriods = pConfig->periods;
+ resultMM = ((MA_PFN_waveInReset)pDevice->pContext->winmm.waveInReset)((HWAVEIN)pDevice->winmm.hDeviceCapture);
+ if (resultMM != MMSYSERR_NOERROR) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] WARNING: Failed to reset capture device.", ma_result_from_MMRESULT(resultMM));
+ }
+ }
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- ma_device_init_internal_data__wasapi data;
- data.formatIn = pConfig->capture.format;
- data.channelsIn = pConfig->capture.channels;
- data.sampleRateIn = pConfig->sampleRate;
- ma_copy_memory(data.channelMapIn, pConfig->capture.channelMap, sizeof(pConfig->capture.channelMap));
- data.usingDefaultFormat = pDevice->capture.usingDefaultFormat;
- data.usingDefaultChannels = pDevice->capture.usingDefaultChannels;
- data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
- data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap;
- data.shareMode = pConfig->capture.shareMode;
- data.bufferSizeInFramesIn = pConfig->bufferSizeInFrames;
- data.bufferSizeInMillisecondsIn = pConfig->bufferSizeInMilliseconds;
- data.periodsIn = pConfig->periods;
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_uint32 iPeriod;
+ WAVEHDR* pWAVEHDR;
- result = ma_device_init_internal__wasapi(pDevice->pContext, ma_device_type_capture, pConfig->capture.pDeviceID, &data);
- if (result != MA_SUCCESS) {
- return result;
+ if (pDevice->winmm.hDevicePlayback == NULL) {
+ return MA_INVALID_ARGS;
}
- pDevice->wasapi.pAudioClientCapture = data.pAudioClient;
- pDevice->wasapi.pCaptureClient = data.pCaptureClient;
-
- pDevice->capture.internalFormat = data.formatOut;
- pDevice->capture.internalChannels = data.channelsOut;
- pDevice->capture.internalSampleRate = data.sampleRateOut;
- ma_copy_memory(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
- pDevice->capture.internalBufferSizeInFrames = data.bufferSizeInFramesOut;
- pDevice->capture.internalPeriods = data.periodsOut;
- ma_strcpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), data.deviceName);
+ /* We need to drain the device. To do this we just loop over each header and if it's locked just wait for the event. */
+ pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback;
+ for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; iPeriod += 1) {
+ if (pWAVEHDR[iPeriod].dwUser == 1) { /* 1 = locked. */
+ if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventPlayback, INFINITE) != WAIT_OBJECT_0) {
+ break; /* An error occurred so just abandon ship and stop the device without draining. */
+ }
- /*
- The event for capture needs to be manual reset for the same reason as playback. We keep the initial state set to unsignaled,
- however, because we want to block until we actually have something for the first call to ma_device_read().
- */
- pDevice->wasapi.hEventCapture = CreateEventA(NULL, FALSE, FALSE, NULL); /* Auto reset, unsignaled by default. */
- if (pDevice->wasapi.hEventCapture == NULL) {
- if (pDevice->wasapi.pCaptureClient != NULL) {
- ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
- pDevice->wasapi.pCaptureClient = NULL;
- }
- if (pDevice->wasapi.pAudioClientCapture != NULL) {
- ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
- pDevice->wasapi.pAudioClientCapture = NULL;
+ pWAVEHDR[iPeriod].dwUser = 0;
}
+ }
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for capture.", MA_FAILED_TO_CREATE_EVENT);
+ resultMM = ((MA_PFN_waveOutReset)pDevice->pContext->winmm.waveOutReset)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
+ if (resultMM != MMSYSERR_NOERROR) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] WARNING: Failed to reset playback device.", ma_result_from_MMRESULT(resultMM));
}
- ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, pDevice->wasapi.hEventCapture);
+ }
- pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut;
- ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualBufferSizeInFramesCapture);
+ return MA_SUCCESS;
+}
+
+static ma_result ma_device_write__winmm(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
+{
+ ma_result result = MA_SUCCESS;
+ MMRESULT resultMM;
+ ma_uint32 totalFramesWritten;
+ WAVEHDR* pWAVEHDR;
+
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(pPCMFrames != NULL);
+
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = 0;
}
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- ma_device_init_internal_data__wasapi data;
- data.formatIn = pConfig->playback.format;
- data.channelsIn = pConfig->playback.channels;
- data.sampleRateIn = pConfig->sampleRate;
- ma_copy_memory(data.channelMapIn, pConfig->playback.channelMap, sizeof(pConfig->playback.channelMap));
- data.usingDefaultFormat = pDevice->playback.usingDefaultFormat;
- data.usingDefaultChannels = pDevice->playback.usingDefaultChannels;
- data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
- data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap;
- data.shareMode = pConfig->playback.shareMode;
- data.bufferSizeInFramesIn = pConfig->bufferSizeInFrames;
- data.bufferSizeInMillisecondsIn = pConfig->bufferSizeInMilliseconds;
- data.periodsIn = pConfig->periods;
+ pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback;
- result = ma_device_init_internal__wasapi(pDevice->pContext, ma_device_type_playback, pConfig->playback.pDeviceID, &data);
- if (result != MA_SUCCESS) {
- if (pConfig->deviceType == ma_device_type_duplex) {
- if (pDevice->wasapi.pCaptureClient != NULL) {
- ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
- pDevice->wasapi.pCaptureClient = NULL;
- }
- if (pDevice->wasapi.pAudioClientCapture != NULL) {
- ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
- pDevice->wasapi.pAudioClientCapture = NULL;
- }
+ /* Keep processing as much data as possible. */
+ totalFramesWritten = 0;
+ while (totalFramesWritten < frameCount) {
+ /* If the current header has some space available we need to write part of it. */
+ if (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser == 0) { /* 0 = unlocked. */
+ /*
+ This header has room in it. We copy as much of it as we can. If we end up fully consuming the buffer we need to
+ write it out and move on to the next iteration.
+ */
+ ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedPlayback;
- CloseHandle(pDevice->wasapi.hEventCapture);
- pDevice->wasapi.hEventCapture = NULL;
- }
- return result;
- }
+ ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesWritten));
+ const void* pSrc = ma_offset_ptr(pPCMFrames, totalFramesWritten*bpf);
+ void* pDst = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].lpData, pDevice->winmm.headerFramesConsumedPlayback*bpf);
+ MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf);
- pDevice->wasapi.pAudioClientPlayback = data.pAudioClient;
- pDevice->wasapi.pRenderClient = data.pRenderClient;
+ pDevice->winmm.headerFramesConsumedPlayback += framesToCopy;
+ totalFramesWritten += framesToCopy;
- pDevice->playback.internalFormat = data.formatOut;
- pDevice->playback.internalChannels = data.channelsOut;
- pDevice->playback.internalSampleRate = data.sampleRateOut;
- ma_copy_memory(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
- pDevice->playback.internalBufferSizeInFrames = data.bufferSizeInFramesOut;
- pDevice->playback.internalPeriods = data.periodsOut;
- ma_strcpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), data.deviceName);
+ /* If we've consumed the buffer entirely we need to write it out to the device. */
+ if (pDevice->winmm.headerFramesConsumedPlayback == (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf)) {
+ pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 1; /* 1 = locked. */
+ pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags &= ~WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */
- /*
- The event for playback is needs to be manual reset because we want to explicitly control the fact that it becomes signalled
- only after the whole available space has been filled, never before.
+ /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */
+ ResetEvent((HANDLE)pDevice->winmm.hEventPlayback);
- The playback event also needs to be initially set to a signaled state so that the first call to ma_device_write() is able
- to get passed WaitForMultipleObjects().
- */
- pDevice->wasapi.hEventPlayback = CreateEventA(NULL, FALSE, TRUE, NULL); /* Auto reset, signaled by default. */
- if (pDevice->wasapi.hEventPlayback == NULL) {
- if (pConfig->deviceType == ma_device_type_duplex) {
- if (pDevice->wasapi.pCaptureClient != NULL) {
- ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
- pDevice->wasapi.pCaptureClient = NULL;
- }
- if (pDevice->wasapi.pAudioClientCapture != NULL) {
- ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
- pDevice->wasapi.pAudioClientCapture = NULL;
+ /* The device will be started here. */
+ resultMM = ((MA_PFN_waveOutWrite)pDevice->pContext->winmm.waveOutWrite)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &pWAVEHDR[pDevice->winmm.iNextHeaderPlayback], sizeof(WAVEHDR));
+ if (resultMM != MMSYSERR_NOERROR) {
+ result = ma_result_from_MMRESULT(resultMM);
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] waveOutWrite() failed.", result);
+ break;
}
- CloseHandle(pDevice->wasapi.hEventCapture);
- pDevice->wasapi.hEventCapture = NULL;
+ /* Make sure we move to the next header. */
+ pDevice->winmm.iNextHeaderPlayback = (pDevice->winmm.iNextHeaderPlayback + 1) % pDevice->playback.internalPeriods;
+ pDevice->winmm.headerFramesConsumedPlayback = 0;
}
- if (pDevice->wasapi.pRenderClient != NULL) {
- ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient);
- pDevice->wasapi.pRenderClient = NULL;
- }
- if (pDevice->wasapi.pAudioClientPlayback != NULL) {
- ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
- pDevice->wasapi.pAudioClientPlayback = NULL;
+ /* If at this point we have consumed the entire input buffer we can return. */
+ MA_ASSERT(totalFramesWritten <= frameCount);
+ if (totalFramesWritten == frameCount) {
+ break;
}
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for playback.", MA_FAILED_TO_CREATE_EVENT);
+ /* Getting here means there's more to process. */
+ continue;
}
- ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, pDevice->wasapi.hEventPlayback);
-
- pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut;
- ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualBufferSizeInFramesPlayback);
- }
- /*
- We need to get notifications of when the default device changes. We do this through a device enumerator by
- registering a IMMNotificationClient with it. We only care about this if it's the default device.
- */
-#ifdef MA_WIN32_DESKTOP
- {
- ma_IMMDeviceEnumerator* pDeviceEnumerator;
- HRESULT hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator);
- if (FAILED(hr)) {
- ma_device_uninit__wasapi(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ /* Getting here means there isn't enough room in the buffer and we need to wait for one to become available. */
+ if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventPlayback, INFINITE) != WAIT_OBJECT_0) {
+ result = MA_ERROR;
+ break;
}
- pDevice->wasapi.notificationClient.lpVtbl = (void*)&g_maNotificationCientVtbl;
- pDevice->wasapi.notificationClient.counter = 1;
- pDevice->wasapi.notificationClient.pDevice = pDevice;
+ /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. */
+ if ((pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags & WHDR_DONE) != 0) {
+ pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 0; /* 0 = unlocked (make it available for writing). */
+ pDevice->winmm.headerFramesConsumedPlayback = 0;
+ }
- hr = pDeviceEnumerator->lpVtbl->RegisterEndpointNotificationCallback(pDeviceEnumerator, &pDevice->wasapi.notificationClient);
- if (SUCCEEDED(hr)) {
- pDevice->wasapi.pDeviceEnumerator = (ma_ptr)pDeviceEnumerator;
- } else {
- /* Not the end of the world if we fail to register the notification callback. We just won't support automatic stream routing. */
- ma_IMMDeviceEnumerator_Release(pDeviceEnumerator);
+ /* If the device has been stopped we need to break. */
+ if (ma_device__get_state(pDevice) != MA_STATE_STARTED) {
+ break;
}
}
-#endif
- ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE);
- ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE);
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = totalFramesWritten;
+ }
- return MA_SUCCESS;
+ return result;
}
-ma_result ma_device__get_available_frames__wasapi(ma_device* pDevice, ma_IAudioClient* pAudioClient, ma_uint32* pFrameCount)
+static ma_result ma_device_read__winmm(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead)
{
- ma_uint32 paddingFramesCount;
- HRESULT hr;
- ma_share_mode shareMode;
+ ma_result result = MA_SUCCESS;
+ MMRESULT resultMM;
+ ma_uint32 totalFramesRead;
+ WAVEHDR* pWAVEHDR;
- ma_assert(pDevice != NULL);
- ma_assert(pFrameCount != NULL);
-
- *pFrameCount = 0;
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(pPCMFrames != NULL);
- if ((ma_ptr)pAudioClient != pDevice->wasapi.pAudioClientPlayback && (ma_ptr)pAudioClient != pDevice->wasapi.pAudioClientCapture) {
- return MA_INVALID_OPERATION;
+ if (pFramesRead != NULL) {
+ *pFramesRead = 0;
}
- hr = ma_IAudioClient_GetCurrentPadding(pAudioClient, &paddingFramesCount);
- if (FAILED(hr)) {
- return MA_DEVICE_UNAVAILABLE;
- }
+ pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRCapture;
- /* Slightly different rules for exclusive and shared modes. */
- shareMode = ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) ? pDevice->playback.shareMode : pDevice->capture.shareMode;
- if (shareMode == ma_share_mode_exclusive) {
- *pFrameCount = paddingFramesCount;
- } else {
- if ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) {
- *pFrameCount = pDevice->wasapi.actualBufferSizeInFramesPlayback - paddingFramesCount;
- } else {
- *pFrameCount = paddingFramesCount;
- }
- }
+ /* Keep processing as much data as possible. */
+ totalFramesRead = 0;
+ while (totalFramesRead < frameCount) {
+        /* If the current header has some data available we need to read part of it. */
+ if (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser == 0) { /* 0 = unlocked. */
+ /* The buffer is available for reading. If we fully consume it we need to add it back to the buffer. */
+ ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedCapture;
- return MA_SUCCESS;
-}
+ ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesRead));
+ const void* pSrc = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderCapture].lpData, pDevice->winmm.headerFramesConsumedCapture*bpf);
+ void* pDst = ma_offset_ptr(pPCMFrames, totalFramesRead*bpf);
+ MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf);
-ma_bool32 ma_device_is_reroute_required__wasapi(ma_device* pDevice, ma_device_type deviceType)
-{
- ma_assert(pDevice != NULL);
+ pDevice->winmm.headerFramesConsumedCapture += framesToCopy;
+ totalFramesRead += framesToCopy;
- if (deviceType == ma_device_type_playback) {
- return pDevice->wasapi.hasDefaultPlaybackDeviceChanged;
- }
+ /* If we've consumed the buffer entirely we need to add it back to the device. */
+ if (pDevice->winmm.headerFramesConsumedCapture == (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf)) {
+ pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 1; /* 1 = locked. */
+ pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags &= ~WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */
- if (deviceType == ma_device_type_capture) {
- return pDevice->wasapi.hasDefaultCaptureDeviceChanged;
- }
-
- return MA_FALSE;
-}
+ /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */
+ ResetEvent((HANDLE)pDevice->winmm.hEventCapture);
-ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceType)
-{
- ma_result result;
+ /* The device will be started here. */
+ resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((LPWAVEHDR)pDevice->winmm.pWAVEHDRCapture)[pDevice->winmm.iNextHeaderCapture], sizeof(WAVEHDR));
+ if (resultMM != MMSYSERR_NOERROR) {
+ result = ma_result_from_MMRESULT(resultMM);
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] waveInAddBuffer() failed.", result);
+ break;
+ }
- if (deviceType == ma_device_type_duplex) {
- return MA_INVALID_ARGS;
- }
+ /* Make sure we move to the next header. */
+ pDevice->winmm.iNextHeaderCapture = (pDevice->winmm.iNextHeaderCapture + 1) % pDevice->capture.internalPeriods;
+ pDevice->winmm.headerFramesConsumedCapture = 0;
+ }
- if (deviceType == ma_device_type_playback) {
- ma_atomic_exchange_32(&pDevice->wasapi.hasDefaultPlaybackDeviceChanged, MA_FALSE);
- }
- if (deviceType == ma_device_type_capture) {
- ma_atomic_exchange_32(&pDevice->wasapi.hasDefaultCaptureDeviceChanged, MA_FALSE);
- }
-
+ /* If at this point we have filled the entire input buffer we can return. */
+ MA_ASSERT(totalFramesRead <= frameCount);
+ if (totalFramesRead == frameCount) {
+ break;
+ }
- #ifdef MA_DEBUG_OUTPUT
- printf("=== CHANGING DEVICE ===\n");
- #endif
+ /* Getting here means there's more to process. */
+ continue;
+ }
- result = ma_device_reinit__wasapi(pDevice, deviceType);
- if (result != MA_SUCCESS) {
- return result;
+        /* Getting here means there isn't any data left to send to the client which means we need to wait for more. */
+ if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventCapture, INFINITE) != WAIT_OBJECT_0) {
+ result = MA_ERROR;
+ break;
+ }
+
+ /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. */
+ if ((pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags & WHDR_DONE) != 0) {
+ pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 0; /* 0 = unlocked (make it available for reading). */
+ pDevice->winmm.headerFramesConsumedCapture = 0;
+ }
+
+ /* If the device has been stopped we need to break. */
+ if (ma_device__get_state(pDevice) != MA_STATE_STARTED) {
+ break;
+ }
}
- ma_device__post_init_setup(pDevice, deviceType);
+ if (pFramesRead != NULL) {
+ *pFramesRead = totalFramesRead;
+ }
- return MA_SUCCESS;
+ return result;
}
-
-ma_result ma_device_main_loop__wasapi(ma_device* pDevice)
+static ma_result ma_device_main_loop__winmm(ma_device* pDevice)
{
- ma_result result;
- HRESULT hr;
+ ma_result result = MA_SUCCESS;
ma_bool32 exitLoop = MA_FALSE;
- ma_uint32 framesWrittenToPlaybackDevice = 0;
- ma_uint32 mappedBufferSizeInFramesCapture = 0;
- ma_uint32 mappedBufferSizeInFramesPlayback = 0;
- ma_uint32 mappedBufferFramesRemainingCapture = 0;
- ma_uint32 mappedBufferFramesRemainingPlayback = 0;
- BYTE* pMappedBufferCapture = NULL;
- BYTE* pMappedBufferPlayback = NULL;
- ma_uint32 bpfCapture = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- ma_uint32 bpfPlayback = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- ma_uint8 inputDataInExternalFormat[4096];
- ma_uint32 inputDataInExternalFormatCap = sizeof(inputDataInExternalFormat) / bpfCapture;
- ma_uint8 outputDataInExternalFormat[4096];
- ma_uint32 outputDataInExternalFormatCap = sizeof(outputDataInExternalFormat) / bpfPlayback;
-
- ma_assert(pDevice != NULL);
-
- /* The playback device needs to be started immediately. */
+
+ MA_ASSERT(pDevice != NULL);
+
+ /* The capture device needs to be started immediately. */
if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
- if (FAILED(hr)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal capture device.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
- ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_TRUE);
- }
+ MMRESULT resultMM;
+ WAVEHDR* pWAVEHDR;
+ ma_uint32 iPeriod;
- while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) {
- /* We may need to reroute the device. */
- if (ma_device_is_reroute_required__wasapi(pDevice, ma_device_type_playback)) {
- result = ma_device_reroute__wasapi(pDevice, ma_device_type_playback);
- if (result != MA_SUCCESS) {
- exitLoop = MA_TRUE;
- break;
+ pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRCapture;
+
+ /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */
+ ResetEvent((HANDLE)pDevice->winmm.hEventCapture);
+
+ /* To start the device we attach all of the buffers and then start it. As the buffers are filled with data we will get notifications. */
+ for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) {
+ resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((LPWAVEHDR)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(WAVEHDR));
+ if (resultMM != MMSYSERR_NOERROR) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] Failed to attach input buffers to capture device in preparation for capture.", ma_result_from_MMRESULT(resultMM));
}
+
+ /* Make sure all of the buffers start out locked. We don't want to access them until the backend tells us we can. */
+ pWAVEHDR[iPeriod].dwUser = 1; /* 1 = locked. */
}
- if (ma_device_is_reroute_required__wasapi(pDevice, ma_device_type_capture)) {
- result = ma_device_reroute__wasapi(pDevice, ma_device_type_capture);
- if (result != MA_SUCCESS) {
- exitLoop = MA_TRUE;
- break;
- }
+
+ /* Capture devices need to be explicitly started, unlike playback devices. */
+ resultMM = ((MA_PFN_waveInStart)pDevice->pContext->winmm.waveInStart)((HWAVEIN)pDevice->winmm.hDeviceCapture);
+ if (resultMM != MMSYSERR_NOERROR) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] Failed to start backend device.", ma_result_from_MMRESULT(resultMM));
}
+ }
+
+ while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) {
switch (pDevice->type)
{
case ma_device_type_duplex:
{
- ma_uint32 framesAvailableCapture;
- ma_uint32 framesAvailablePlayback;
- DWORD flagsCapture; /* Passed to IAudioCaptureClient_GetBuffer(). */
-
- /* The process is to map the playback buffer and fill it as quickly as possible from input data. */
- if (pMappedBufferPlayback == NULL) {
- /* WASAPI is weird with exclusive mode. You need to wait on the event _before_ querying the available frames. */
- if (pDevice->playback.shareMode == ma_share_mode_exclusive) {
- if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE) == WAIT_FAILED) {
- return MA_ERROR; /* Wait failed. */
- }
+ /* The process is: device_read -> convert -> callback -> convert -> device_write */
+ ma_uint32 totalCapturedDeviceFramesProcessed = 0;
+ ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames);
+
+ while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) {
+ ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 capturedDeviceFramesRemaining;
+ ma_uint32 capturedDeviceFramesProcessed;
+ ma_uint32 capturedDeviceFramesToProcess;
+ ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed;
+ if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) {
+ capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames;
}
- result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback);
+ result = ma_device_read__winmm(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess);
if (result != MA_SUCCESS) {
- return result;
+ exitLoop = MA_TRUE;
+ break;
}
- /*printf("TRACE 1: framesAvailablePlayback=%d\n", framesAvailablePlayback);*/
+ capturedDeviceFramesRemaining = capturedDeviceFramesToProcess;
+ capturedDeviceFramesProcessed = 0;
+ for (;;) {
+ ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+ ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+ ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames);
+ ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining;
+ ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+
+ /* Convert capture data from device format to client format. */
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration);
+ if (result != MA_SUCCESS) {
+ break;
+ }
- /* In exclusive mode, the frame count needs to exactly match the value returned by GetCurrentPadding(). */
- if (pDevice->playback.shareMode != ma_share_mode_exclusive) {
- if (framesAvailablePlayback >= pDevice->wasapi.periodSizeInFramesPlayback) {
- framesAvailablePlayback = pDevice->wasapi.periodSizeInFramesPlayback;
+ /*
+ If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small
+ which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE.
+ */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
+
+ ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast. */
+
+ capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+ capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+
+ /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */
+ for (;;) {
+ ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration;
+ ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames;
+ result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ result = ma_device_write__winmm(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
+ }
+
+ /* In case an error happened from ma_device_write__winmm()... */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
}
}
- /* If there's no frames available in the playback device we need to wait for more. */
- if (framesAvailablePlayback == 0) {
- /* In exclusive mode we waited at the top. */
- if (pDevice->playback.shareMode != ma_share_mode_exclusive) {
- if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE) == WAIT_FAILED) {
- return MA_ERROR; /* Wait failed. */
- }
- }
+ totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed;
+ }
+ } break;
+
+ case ma_device_type_capture:
+ {
+ /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+ ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames;
+ ma_uint32 framesReadThisPeriod = 0;
+ while (framesReadThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToReadThisIteration = framesRemainingInPeriod;
+ if (framesToReadThisIteration > intermediaryBufferSizeInFrames) {
+ framesToReadThisIteration = intermediaryBufferSizeInFrames;
+ }
+
+ result = ma_device_read__winmm(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer);
- continue;
+ framesReadThisPeriod += framesProcessed;
+ }
+ } break;
+
+ case ma_device_type_playback:
+ {
+ /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+ ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
+ ma_uint32 framesWrittenThisPeriod = 0;
+ while (framesWrittenThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod;
+ if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) {
+ framesToWriteThisIteration = intermediaryBufferSizeInFrames;
}
- /* We're ready to map the playback device's buffer. We don't release this until it's been entirely filled. */
- hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, &pMappedBufferPlayback);
- if (FAILED(hr)) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from playback device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER);
+ ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer);
+
+ result = ma_device_write__winmm(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
exitLoop = MA_TRUE;
break;
}
- mappedBufferSizeInFramesPlayback = framesAvailablePlayback;
- mappedBufferFramesRemainingPlayback = framesAvailablePlayback;
+ framesWrittenThisPeriod += framesProcessed;
}
+ } break;
- /* At this point we should have a buffer available for output. We need to keep writing input samples to it. */
- for (;;) {
- /* Try grabbing some captured data if we haven't already got a mapped buffer. */
- if (pMappedBufferCapture == NULL) {
- if (pDevice->capture.shareMode == ma_share_mode_shared) {
- if (WaitForSingleObject(pDevice->wasapi.hEventCapture, INFINITE) == WAIT_FAILED) {
- return MA_ERROR; /* Wait failed. */
- }
- }
+ /* To silence a warning. Will never hit this. */
+ case ma_device_type_loopback:
+ default: break;
+ }
+ }
- result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &framesAvailableCapture);
- if (result != MA_SUCCESS) {
- exitLoop = MA_TRUE;
- break;
- }
- /*printf("TRACE 2: framesAvailableCapture=%d\n", framesAvailableCapture);*/
+ /* Here is where the device is stopped. */
+ ma_device_stop__winmm(pDevice);
- /* Wait for more if nothing is available. */
- if (framesAvailableCapture == 0) {
- /* In exclusive mode we waited at the top. */
- if (pDevice->capture.shareMode != ma_share_mode_shared) {
- if (WaitForSingleObject(pDevice->wasapi.hEventCapture, INFINITE) == WAIT_FAILED) {
- return MA_ERROR; /* Wait failed. */
- }
- }
+ return result;
+}
- continue;
- }
+static ma_result ma_context_uninit__winmm(ma_context* pContext)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_winmm);
- /* Getting here means there's data available for writing to the output device. */
- mappedBufferSizeInFramesCapture = framesAvailableCapture;
- hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedBufferCapture, &mappedBufferSizeInFramesCapture, &flagsCapture, NULL, NULL);
- if (FAILED(hr)) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER);
- exitLoop = MA_TRUE;
- break;
- }
+ ma_dlclose(pContext, pContext->winmm.hWinMM);
+ return MA_SUCCESS;
+}
- /* TODO: How do we handle the capture flags returned by GetBuffer()? In particular, AUDCLNT_BUFFERFLAGS_SILENT (1). */
- #ifdef MA_DEBUG_OUTPUT
- if (flagsCapture != 0) {
- printf("[WASAPI] Capture Flags: %d\n", flagsCapture);
- }
- #endif
+static ma_result ma_context_init__winmm(const ma_context_config* pConfig, ma_context* pContext)
+{
+ MA_ASSERT(pContext != NULL);
- mappedBufferFramesRemainingCapture = mappedBufferSizeInFramesCapture;
+ (void)pConfig;
- pDevice->capture._dspFrameCount = mappedBufferSizeInFramesCapture;
- pDevice->capture._dspFrames = (const ma_uint8*)pMappedBufferCapture;
- }
+ pContext->winmm.hWinMM = ma_dlopen(pContext, "winmm.dll");
+ if (pContext->winmm.hWinMM == NULL) {
+ return MA_NO_BACKEND;
+ }
+ pContext->winmm.waveOutGetNumDevs = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutGetNumDevs");
+ pContext->winmm.waveOutGetDevCapsA = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutGetDevCapsA");
+ pContext->winmm.waveOutOpen = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutOpen");
+ pContext->winmm.waveOutClose = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutClose");
+ pContext->winmm.waveOutPrepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutPrepareHeader");
+ pContext->winmm.waveOutUnprepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutUnprepareHeader");
+ pContext->winmm.waveOutWrite = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutWrite");
+ pContext->winmm.waveOutReset = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutReset");
+ pContext->winmm.waveInGetNumDevs = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInGetNumDevs");
+ pContext->winmm.waveInGetDevCapsA = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInGetDevCapsA");
+ pContext->winmm.waveInOpen = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInOpen");
+ pContext->winmm.waveInClose = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInClose");
+ pContext->winmm.waveInPrepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInPrepareHeader");
+ pContext->winmm.waveInUnprepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInUnprepareHeader");
+ pContext->winmm.waveInAddBuffer = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInAddBuffer");
+ pContext->winmm.waveInStart = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInStart");
+ pContext->winmm.waveInReset = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInReset");
- /* At this point we should have both input and output data available. We now need to post it to the convert the data and post it to the client. */
- for (;;) {
- BYTE* pRunningBufferCapture;
- BYTE* pRunningBufferPlayback;
- ma_uint32 framesToProcess;
- ma_uint32 framesProcessed;
+ pContext->onUninit = ma_context_uninit__winmm;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__winmm;
+ pContext->onEnumDevices = ma_context_enumerate_devices__winmm;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__winmm;
+ pContext->onDeviceInit = ma_device_init__winmm;
+ pContext->onDeviceUninit = ma_device_uninit__winmm;
+ pContext->onDeviceStart = NULL; /* Not used with synchronous backends. */
+ pContext->onDeviceStop = NULL; /* Not used with synchronous backends. */
+ pContext->onDeviceMainLoop = ma_device_main_loop__winmm;
- pRunningBufferCapture = pMappedBufferCapture + ((mappedBufferSizeInFramesCapture - mappedBufferFramesRemainingCapture ) * bpfPlayback);
- pRunningBufferPlayback = pMappedBufferPlayback + ((mappedBufferSizeInFramesPlayback - mappedBufferFramesRemainingPlayback) * bpfPlayback);
-
- /* There may be some data sitting in the converter that needs to be processed first. Once this is exhaused, run the data callback again. */
- if (!pDevice->playback.converter.isPassthrough) {
- framesProcessed = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, pRunningBufferPlayback, mappedBufferFramesRemainingPlayback);
- if (framesProcessed > 0) {
- mappedBufferFramesRemainingPlayback -= framesProcessed;
- if (mappedBufferFramesRemainingPlayback == 0) {
- break;
- }
- }
- }
+ return MA_SUCCESS;
+}
+#endif
- /*
- Getting here means we need to fire the callback. If format conversion is unnecessary, we can optimize this by passing the pointers to the internal
- buffers directly to the callback.
- */
- if (pDevice->capture.converter.isPassthrough && pDevice->playback.converter.isPassthrough) {
- /* Optimal path. We can pass mapped pointers directly to the callback. */
- framesToProcess = ma_min(mappedBufferFramesRemainingCapture, mappedBufferFramesRemainingPlayback);
- framesProcessed = framesToProcess;
- pDevice->onData(pDevice, pRunningBufferPlayback, pRunningBufferCapture, framesToProcess);
- mappedBufferFramesRemainingCapture -= framesProcessed;
- mappedBufferFramesRemainingPlayback -= framesProcessed;
- if (mappedBufferFramesRemainingCapture == 0) {
- break; /* Exhausted input data. */
- }
- if (mappedBufferFramesRemainingPlayback == 0) {
- break; /* Exhausted output data. */
- }
- } else if (pDevice->capture.converter.isPassthrough) {
- /* The input buffer is a passthrough, but the playback buffer requires a conversion. */
- framesToProcess = ma_min(mappedBufferFramesRemainingCapture, outputDataInExternalFormatCap);
- framesProcessed = framesToProcess;
+/******************************************************************************
+
+ALSA Backend
+
+******************************************************************************/
+#ifdef MA_HAS_ALSA
+
+#ifdef MA_NO_RUNTIME_LINKING
+#include <alsa/asoundlib.h>
+typedef snd_pcm_uframes_t ma_snd_pcm_uframes_t;
+typedef snd_pcm_sframes_t ma_snd_pcm_sframes_t;
+typedef snd_pcm_stream_t ma_snd_pcm_stream_t;
+typedef snd_pcm_format_t ma_snd_pcm_format_t;
+typedef snd_pcm_access_t ma_snd_pcm_access_t;
+typedef snd_pcm_t ma_snd_pcm_t;
+typedef snd_pcm_hw_params_t ma_snd_pcm_hw_params_t;
+typedef snd_pcm_sw_params_t ma_snd_pcm_sw_params_t;
+typedef snd_pcm_format_mask_t ma_snd_pcm_format_mask_t;
+typedef snd_pcm_info_t ma_snd_pcm_info_t;
+typedef snd_pcm_channel_area_t ma_snd_pcm_channel_area_t;
+typedef snd_pcm_chmap_t ma_snd_pcm_chmap_t;
+
+/* snd_pcm_stream_t */
+#define MA_SND_PCM_STREAM_PLAYBACK SND_PCM_STREAM_PLAYBACK
+#define MA_SND_PCM_STREAM_CAPTURE SND_PCM_STREAM_CAPTURE
+
+/* snd_pcm_format_t */
+#define MA_SND_PCM_FORMAT_UNKNOWN SND_PCM_FORMAT_UNKNOWN
+#define MA_SND_PCM_FORMAT_U8 SND_PCM_FORMAT_U8
+#define MA_SND_PCM_FORMAT_S16_LE SND_PCM_FORMAT_S16_LE
+#define MA_SND_PCM_FORMAT_S16_BE SND_PCM_FORMAT_S16_BE
+#define MA_SND_PCM_FORMAT_S24_LE SND_PCM_FORMAT_S24_LE
+#define MA_SND_PCM_FORMAT_S24_BE SND_PCM_FORMAT_S24_BE
+#define MA_SND_PCM_FORMAT_S32_LE SND_PCM_FORMAT_S32_LE
+#define MA_SND_PCM_FORMAT_S32_BE SND_PCM_FORMAT_S32_BE
+#define MA_SND_PCM_FORMAT_FLOAT_LE SND_PCM_FORMAT_FLOAT_LE
+#define MA_SND_PCM_FORMAT_FLOAT_BE SND_PCM_FORMAT_FLOAT_BE
+#define MA_SND_PCM_FORMAT_FLOAT64_LE SND_PCM_FORMAT_FLOAT64_LE
+#define MA_SND_PCM_FORMAT_FLOAT64_BE SND_PCM_FORMAT_FLOAT64_BE
+#define MA_SND_PCM_FORMAT_MU_LAW SND_PCM_FORMAT_MU_LAW
+#define MA_SND_PCM_FORMAT_A_LAW SND_PCM_FORMAT_A_LAW
+#define MA_SND_PCM_FORMAT_S24_3LE SND_PCM_FORMAT_S24_3LE
+#define MA_SND_PCM_FORMAT_S24_3BE SND_PCM_FORMAT_S24_3BE
+
+/* ma_snd_pcm_access_t */
+#define MA_SND_PCM_ACCESS_MMAP_INTERLEAVED SND_PCM_ACCESS_MMAP_INTERLEAVED
+#define MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED SND_PCM_ACCESS_MMAP_NONINTERLEAVED
+#define MA_SND_PCM_ACCESS_MMAP_COMPLEX SND_PCM_ACCESS_MMAP_COMPLEX
+#define MA_SND_PCM_ACCESS_RW_INTERLEAVED SND_PCM_ACCESS_RW_INTERLEAVED
+#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED SND_PCM_ACCESS_RW_NONINTERLEAVED
+
+/* Channel positions. */
+#define MA_SND_CHMAP_UNKNOWN SND_CHMAP_UNKNOWN
+#define MA_SND_CHMAP_NA SND_CHMAP_NA
+#define MA_SND_CHMAP_MONO SND_CHMAP_MONO
+#define MA_SND_CHMAP_FL SND_CHMAP_FL
+#define MA_SND_CHMAP_FR SND_CHMAP_FR
+#define MA_SND_CHMAP_RL SND_CHMAP_RL
+#define MA_SND_CHMAP_RR SND_CHMAP_RR
+#define MA_SND_CHMAP_FC SND_CHMAP_FC
+#define MA_SND_CHMAP_LFE SND_CHMAP_LFE
+#define MA_SND_CHMAP_SL SND_CHMAP_SL
+#define MA_SND_CHMAP_SR SND_CHMAP_SR
+#define MA_SND_CHMAP_RC SND_CHMAP_RC
+#define MA_SND_CHMAP_FLC SND_CHMAP_FLC
+#define MA_SND_CHMAP_FRC SND_CHMAP_FRC
+#define MA_SND_CHMAP_RLC SND_CHMAP_RLC
+#define MA_SND_CHMAP_RRC SND_CHMAP_RRC
+#define MA_SND_CHMAP_FLW SND_CHMAP_FLW
+#define MA_SND_CHMAP_FRW SND_CHMAP_FRW
+#define MA_SND_CHMAP_FLH SND_CHMAP_FLH
+#define MA_SND_CHMAP_FCH SND_CHMAP_FCH
+#define MA_SND_CHMAP_FRH SND_CHMAP_FRH
+#define MA_SND_CHMAP_TC SND_CHMAP_TC
+#define MA_SND_CHMAP_TFL SND_CHMAP_TFL
+#define MA_SND_CHMAP_TFR SND_CHMAP_TFR
+#define MA_SND_CHMAP_TFC SND_CHMAP_TFC
+#define MA_SND_CHMAP_TRL SND_CHMAP_TRL
+#define MA_SND_CHMAP_TRR SND_CHMAP_TRR
+#define MA_SND_CHMAP_TRC SND_CHMAP_TRC
+#define MA_SND_CHMAP_TFLC SND_CHMAP_TFLC
+#define MA_SND_CHMAP_TFRC SND_CHMAP_TFRC
+#define MA_SND_CHMAP_TSL SND_CHMAP_TSL
+#define MA_SND_CHMAP_TSR SND_CHMAP_TSR
+#define MA_SND_CHMAP_LLFE SND_CHMAP_LLFE
+#define MA_SND_CHMAP_RLFE SND_CHMAP_RLFE
+#define MA_SND_CHMAP_BC SND_CHMAP_BC
+#define MA_SND_CHMAP_BLC SND_CHMAP_BLC
+#define MA_SND_CHMAP_BRC SND_CHMAP_BRC
+
+/* Open mode flags. */
+#define MA_SND_PCM_NO_AUTO_RESAMPLE SND_PCM_NO_AUTO_RESAMPLE
+#define MA_SND_PCM_NO_AUTO_CHANNELS SND_PCM_NO_AUTO_CHANNELS
+#define MA_SND_PCM_NO_AUTO_FORMAT SND_PCM_NO_AUTO_FORMAT
+#else
+#include <errno.h> /* For EPIPE, etc. */
+typedef unsigned long ma_snd_pcm_uframes_t;
+typedef long ma_snd_pcm_sframes_t;
+typedef int ma_snd_pcm_stream_t;
+typedef int ma_snd_pcm_format_t;
+typedef int ma_snd_pcm_access_t;
+typedef struct ma_snd_pcm_t ma_snd_pcm_t;
+typedef struct ma_snd_pcm_hw_params_t ma_snd_pcm_hw_params_t;
+typedef struct ma_snd_pcm_sw_params_t ma_snd_pcm_sw_params_t;
+typedef struct ma_snd_pcm_format_mask_t ma_snd_pcm_format_mask_t;
+typedef struct ma_snd_pcm_info_t ma_snd_pcm_info_t;
+typedef struct
+{
+ void* addr;
+ unsigned int first;
+ unsigned int step;
+} ma_snd_pcm_channel_area_t;
+typedef struct
+{
+ unsigned int channels;
+ unsigned int pos[1];
+} ma_snd_pcm_chmap_t;
+
+/* snd_pcm_state_t */
+#define MA_SND_PCM_STATE_OPEN 0
+#define MA_SND_PCM_STATE_SETUP 1
+#define MA_SND_PCM_STATE_PREPARED 2
+#define MA_SND_PCM_STATE_RUNNING 3
+#define MA_SND_PCM_STATE_XRUN 4
+#define MA_SND_PCM_STATE_DRAINING 5
+#define MA_SND_PCM_STATE_PAUSED 6
+#define MA_SND_PCM_STATE_SUSPENDED 7
+#define MA_SND_PCM_STATE_DISCONNECTED 8
+
+/* snd_pcm_stream_t */
+#define MA_SND_PCM_STREAM_PLAYBACK 0
+#define MA_SND_PCM_STREAM_CAPTURE 1
+
+/* snd_pcm_format_t */
+#define MA_SND_PCM_FORMAT_UNKNOWN -1
+#define MA_SND_PCM_FORMAT_U8 1
+#define MA_SND_PCM_FORMAT_S16_LE 2
+#define MA_SND_PCM_FORMAT_S16_BE 3
+#define MA_SND_PCM_FORMAT_S24_LE 6
+#define MA_SND_PCM_FORMAT_S24_BE 7
+#define MA_SND_PCM_FORMAT_S32_LE 10
+#define MA_SND_PCM_FORMAT_S32_BE 11
+#define MA_SND_PCM_FORMAT_FLOAT_LE 14
+#define MA_SND_PCM_FORMAT_FLOAT_BE 15
+#define MA_SND_PCM_FORMAT_FLOAT64_LE 16
+#define MA_SND_PCM_FORMAT_FLOAT64_BE 17
+#define MA_SND_PCM_FORMAT_MU_LAW 20
+#define MA_SND_PCM_FORMAT_A_LAW 21
+#define MA_SND_PCM_FORMAT_S24_3LE 32
+#define MA_SND_PCM_FORMAT_S24_3BE 33
- pDevice->onData(pDevice, outputDataInExternalFormat, pRunningBufferCapture, framesToProcess);
- mappedBufferFramesRemainingCapture -= framesProcessed;
+/* snd_pcm_access_t */
+#define MA_SND_PCM_ACCESS_MMAP_INTERLEAVED 0
+#define MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED 1
+#define MA_SND_PCM_ACCESS_MMAP_COMPLEX 2
+#define MA_SND_PCM_ACCESS_RW_INTERLEAVED 3
+#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED 4
- pDevice->playback._dspFrameCount = framesProcessed;
- pDevice->playback._dspFrames = (const ma_uint8*)outputDataInExternalFormat;
+/* Channel positions. */
+#define MA_SND_CHMAP_UNKNOWN 0
+#define MA_SND_CHMAP_NA 1
+#define MA_SND_CHMAP_MONO 2
+#define MA_SND_CHMAP_FL 3
+#define MA_SND_CHMAP_FR 4
+#define MA_SND_CHMAP_RL 5
+#define MA_SND_CHMAP_RR 6
+#define MA_SND_CHMAP_FC 7
+#define MA_SND_CHMAP_LFE 8
+#define MA_SND_CHMAP_SL 9
+#define MA_SND_CHMAP_SR 10
+#define MA_SND_CHMAP_RC 11
+#define MA_SND_CHMAP_FLC 12
+#define MA_SND_CHMAP_FRC 13
+#define MA_SND_CHMAP_RLC 14
+#define MA_SND_CHMAP_RRC 15
+#define MA_SND_CHMAP_FLW 16
+#define MA_SND_CHMAP_FRW 17
+#define MA_SND_CHMAP_FLH 18
+#define MA_SND_CHMAP_FCH 19
+#define MA_SND_CHMAP_FRH 20
+#define MA_SND_CHMAP_TC 21
+#define MA_SND_CHMAP_TFL 22
+#define MA_SND_CHMAP_TFR 23
+#define MA_SND_CHMAP_TFC 24
+#define MA_SND_CHMAP_TRL 25
+#define MA_SND_CHMAP_TRR 26
+#define MA_SND_CHMAP_TRC 27
+#define MA_SND_CHMAP_TFLC 28
+#define MA_SND_CHMAP_TFRC 29
+#define MA_SND_CHMAP_TSL 30
+#define MA_SND_CHMAP_TSR 31
+#define MA_SND_CHMAP_LLFE 32
+#define MA_SND_CHMAP_RLFE 33
+#define MA_SND_CHMAP_BC 34
+#define MA_SND_CHMAP_BLC 35
+#define MA_SND_CHMAP_BRC 36
- if (mappedBufferFramesRemainingCapture == 0) {
- break; /* Exhausted input data. */
- }
- } else if (pDevice->playback.converter.isPassthrough) {
- /* The input buffer requires conversion, the playback buffer is passthrough. */
- framesToProcess = ma_min(inputDataInExternalFormatCap, mappedBufferFramesRemainingPlayback);
- framesProcessed = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, inputDataInExternalFormat, framesToProcess);
- if (framesProcessed == 0) {
- /* Getting here means we've run out of input data. */
- mappedBufferFramesRemainingCapture = 0;
- break;
- }
+/* Open mode flags. */
+#define MA_SND_PCM_NO_AUTO_RESAMPLE 0x00010000
+#define MA_SND_PCM_NO_AUTO_CHANNELS 0x00020000
+#define MA_SND_PCM_NO_AUTO_FORMAT 0x00040000
+#endif
- pDevice->onData(pDevice, pRunningBufferPlayback, inputDataInExternalFormat, framesProcessed);
- mappedBufferFramesRemainingPlayback -= framesProcessed;
+typedef int (* ma_snd_pcm_open_proc) (ma_snd_pcm_t **pcm, const char *name, ma_snd_pcm_stream_t stream, int mode);
+typedef int (* ma_snd_pcm_close_proc) (ma_snd_pcm_t *pcm);
+typedef size_t (* ma_snd_pcm_hw_params_sizeof_proc) (void);
+typedef int (* ma_snd_pcm_hw_params_any_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params);
+typedef int (* ma_snd_pcm_hw_params_set_format_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t val);
+typedef int (* ma_snd_pcm_hw_params_set_format_first_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t *format);
+typedef void (* ma_snd_pcm_hw_params_get_format_mask_proc) (ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_mask_t *mask);
+typedef int (* ma_snd_pcm_hw_params_set_channels_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val);
+typedef int (* ma_snd_pcm_hw_params_set_rate_resample_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val);
+typedef int (* ma_snd_pcm_hw_params_set_rate_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir);
+typedef int (* ma_snd_pcm_hw_params_set_buffer_size_near_proc)(ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val);
+typedef int (* ma_snd_pcm_hw_params_set_periods_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir);
+typedef int (* ma_snd_pcm_hw_params_set_access_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t _access);
+typedef int (* ma_snd_pcm_hw_params_get_format_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t *format);
+typedef int (* ma_snd_pcm_hw_params_get_channels_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val);
+typedef int (* ma_snd_pcm_hw_params_get_channels_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val);
+typedef int (* ma_snd_pcm_hw_params_get_channels_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val);
+typedef int (* ma_snd_pcm_hw_params_get_rate_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir);
+typedef int (* ma_snd_pcm_hw_params_get_rate_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir);
+typedef int (* ma_snd_pcm_hw_params_get_rate_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir);
+typedef int (* ma_snd_pcm_hw_params_get_buffer_size_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val);
+typedef int (* ma_snd_pcm_hw_params_get_periods_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir);
+typedef int (* ma_snd_pcm_hw_params_get_access_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t *_access);
+typedef int (* ma_snd_pcm_hw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params);
+typedef size_t (* ma_snd_pcm_sw_params_sizeof_proc) (void);
+typedef int (* ma_snd_pcm_sw_params_current_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params);
+typedef int (* ma_snd_pcm_sw_params_get_boundary_proc) (ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t* val);
+typedef int (* ma_snd_pcm_sw_params_set_avail_min_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val);
+typedef int (* ma_snd_pcm_sw_params_set_start_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val);
+typedef int (* ma_snd_pcm_sw_params_set_stop_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val);
+typedef int (* ma_snd_pcm_sw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params);
+typedef size_t (* ma_snd_pcm_format_mask_sizeof_proc) (void);
+typedef int (* ma_snd_pcm_format_mask_test_proc) (const ma_snd_pcm_format_mask_t *mask, ma_snd_pcm_format_t val);
+typedef ma_snd_pcm_chmap_t * (* ma_snd_pcm_get_chmap_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_state_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_prepare_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_start_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_drop_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_drain_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_device_name_hint_proc) (int card, const char *iface, void ***hints);
+typedef char * (* ma_snd_device_name_get_hint_proc) (const void *hint, const char *id);
+typedef int (* ma_snd_card_get_index_proc) (const char *name);
+typedef int (* ma_snd_device_name_free_hint_proc) (void **hints);
+typedef int (* ma_snd_pcm_mmap_begin_proc) (ma_snd_pcm_t *pcm, const ma_snd_pcm_channel_area_t **areas, ma_snd_pcm_uframes_t *offset, ma_snd_pcm_uframes_t *frames);
+typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_mmap_commit_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_uframes_t offset, ma_snd_pcm_uframes_t frames);
+typedef int (* ma_snd_pcm_recover_proc) (ma_snd_pcm_t *pcm, int err, int silent);
+typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_readi_proc) (ma_snd_pcm_t *pcm, void *buffer, ma_snd_pcm_uframes_t size);
+typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_writei_proc) (ma_snd_pcm_t *pcm, const void *buffer, ma_snd_pcm_uframes_t size);
+typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_proc) (ma_snd_pcm_t *pcm);
+typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_update_proc) (ma_snd_pcm_t *pcm);
+typedef int (* ma_snd_pcm_wait_proc) (ma_snd_pcm_t *pcm, int timeout);
+typedef int (* ma_snd_pcm_info_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_info_t* info);
+typedef size_t (* ma_snd_pcm_info_sizeof_proc) ();
+typedef const char* (* ma_snd_pcm_info_get_name_proc) (const ma_snd_pcm_info_t* info);
+typedef int (* ma_snd_config_update_free_global_proc) ();
- if (framesProcessed < framesToProcess) {
- mappedBufferFramesRemainingCapture = 0;
- break; /* Exhausted input data. */
- }
+/* This array specifies each of the common devices that can be used for both playback and capture. */
+static const char* g_maCommonDeviceNamesALSA[] = {
+ "default",
+ "null",
+ "pulse",
+ "jack"
+};
- if (mappedBufferFramesRemainingPlayback == 0) {
- break; /* Exhausted output data. */
- }
- } else {
- framesToProcess = ma_min(inputDataInExternalFormatCap, outputDataInExternalFormatCap);
- framesProcessed = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, inputDataInExternalFormat, framesToProcess);
- if (framesProcessed == 0) {
- /* Getting here means we've run out of input data. */
- mappedBufferFramesRemainingCapture = 0;
- break;
- }
+/* This array allows us to blacklist specific playback devices. */
+static const char* g_maBlacklistedPlaybackDeviceNamesALSA[] = {
+ ""
+};
- pDevice->onData(pDevice, outputDataInExternalFormat, inputDataInExternalFormat, framesProcessed);
+/* This array allows us to blacklist specific capture devices. */
+static const char* g_maBlacklistedCaptureDeviceNamesALSA[] = {
+ ""
+};
- pDevice->playback._dspFrameCount = framesProcessed;
- pDevice->playback._dspFrames = (const ma_uint8*)outputDataInExternalFormat;
- if (framesProcessed < framesToProcess) {
- /* Getting here means we've run out of input data. */
- mappedBufferFramesRemainingCapture = 0;
- break;
- }
- }
- }
+/*
+This array allows miniaudio to control device-specific default buffer sizes. This uses a scaling factor. Order is important. If
+any part of the string is present in the device's name, the associated scale will be used.
+*/
+static struct
+{
+ const char* name;
+ float scale;
+} g_maDefaultBufferSizeScalesALSA[] = {
+ {"bcm2835 IEC958/HDMI", 2.0f},
+ {"bcm2835 ALSA", 2.0f}
+};
+static float ma_find_default_buffer_size_scale__alsa(const char* deviceName)
+{
+ size_t i;
- /* If at this point we've run out of capture data we need to release the buffer. */
- if (mappedBufferFramesRemainingCapture == 0 && pMappedBufferCapture != NULL) {
- hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedBufferSizeInFramesCapture);
- if (FAILED(hr)) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from capture device after reading from the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER);
- exitLoop = MA_TRUE;
- break;
- }
+ if (deviceName == NULL) {
+ return 1;
+ }
- /*printf("TRACE: Released capture buffer\n");*/
+ for (i = 0; i < ma_countof(g_maDefaultBufferSizeScalesALSA); ++i) {
+ if (strstr(g_maDefaultBufferSizeScalesALSA[i].name, deviceName) != NULL) {
+ return g_maDefaultBufferSizeScalesALSA[i].scale;
+ }
+ }
- pMappedBufferCapture = NULL;
- mappedBufferFramesRemainingCapture = 0;
- mappedBufferSizeInFramesCapture = 0;
- }
+ return 1;
+}
- /* Get out of this loop if we're run out of room in the playback buffer. */
- if (mappedBufferFramesRemainingPlayback == 0) {
- break;
- }
- }
+static ma_snd_pcm_format_t ma_convert_ma_format_to_alsa_format(ma_format format)
+{
+ ma_snd_pcm_format_t ALSAFormats[] = {
+ MA_SND_PCM_FORMAT_UNKNOWN, /* ma_format_unknown */
+ MA_SND_PCM_FORMAT_U8, /* ma_format_u8 */
+ MA_SND_PCM_FORMAT_S16_LE, /* ma_format_s16 */
+ MA_SND_PCM_FORMAT_S24_3LE, /* ma_format_s24 */
+ MA_SND_PCM_FORMAT_S32_LE, /* ma_format_s32 */
+ MA_SND_PCM_FORMAT_FLOAT_LE /* ma_format_f32 */
+ };
+ if (ma_is_big_endian()) {
+ ALSAFormats[0] = MA_SND_PCM_FORMAT_UNKNOWN;
+ ALSAFormats[1] = MA_SND_PCM_FORMAT_U8;
+ ALSAFormats[2] = MA_SND_PCM_FORMAT_S16_BE;
+ ALSAFormats[3] = MA_SND_PCM_FORMAT_S24_3BE;
+ ALSAFormats[4] = MA_SND_PCM_FORMAT_S32_BE;
+ ALSAFormats[5] = MA_SND_PCM_FORMAT_FLOAT_BE;
+ }
- /* If at this point we've run out of data we need to release the buffer. */
- if (mappedBufferFramesRemainingPlayback == 0 && pMappedBufferPlayback != NULL) {
- hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, mappedBufferSizeInFramesPlayback, 0);
- if (FAILED(hr)) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from playback device after writing to the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER);
- exitLoop = MA_TRUE;
- break;
- }
+ return ALSAFormats[format];
+}
- /*printf("TRACE: Released playback buffer\n");*/
- framesWrittenToPlaybackDevice += mappedBufferSizeInFramesPlayback;
+static ma_format ma_format_from_alsa(ma_snd_pcm_format_t formatALSA)
+{
+ if (ma_is_little_endian()) {
+ switch (formatALSA) {
+ case MA_SND_PCM_FORMAT_S16_LE: return ma_format_s16;
+ case MA_SND_PCM_FORMAT_S24_3LE: return ma_format_s24;
+ case MA_SND_PCM_FORMAT_S32_LE: return ma_format_s32;
+ case MA_SND_PCM_FORMAT_FLOAT_LE: return ma_format_f32;
+ default: break;
+ }
+ } else {
+ switch (formatALSA) {
+ case MA_SND_PCM_FORMAT_S16_BE: return ma_format_s16;
+ case MA_SND_PCM_FORMAT_S24_3BE: return ma_format_s24;
+ case MA_SND_PCM_FORMAT_S32_BE: return ma_format_s32;
+ case MA_SND_PCM_FORMAT_FLOAT_BE: return ma_format_f32;
+ default: break;
+ }
+ }
- pMappedBufferPlayback = NULL;
- mappedBufferFramesRemainingPlayback = 0;
- mappedBufferSizeInFramesPlayback = 0;
- }
+ /* Endian agnostic. */
+ switch (formatALSA) {
+ case MA_SND_PCM_FORMAT_U8: return ma_format_u8;
+ default: return ma_format_unknown;
+ }
+}
- if (!pDevice->wasapi.isStartedPlayback) {
- if (pDevice->playback.shareMode == ma_share_mode_exclusive || framesWrittenToPlaybackDevice >= (pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)*2) {
- hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
- if (FAILED(hr)) {
- ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
- ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
- ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_TRUE);
- }
- }
- } break;
+static ma_channel ma_convert_alsa_channel_position_to_ma_channel(unsigned int alsaChannelPos)
+{
+ switch (alsaChannelPos)
+ {
+ case MA_SND_CHMAP_MONO: return MA_CHANNEL_MONO;
+ case MA_SND_CHMAP_FL: return MA_CHANNEL_FRONT_LEFT;
+ case MA_SND_CHMAP_FR: return MA_CHANNEL_FRONT_RIGHT;
+ case MA_SND_CHMAP_RL: return MA_CHANNEL_BACK_LEFT;
+ case MA_SND_CHMAP_RR: return MA_CHANNEL_BACK_RIGHT;
+ case MA_SND_CHMAP_FC: return MA_CHANNEL_FRONT_CENTER;
+ case MA_SND_CHMAP_LFE: return MA_CHANNEL_LFE;
+ case MA_SND_CHMAP_SL: return MA_CHANNEL_SIDE_LEFT;
+ case MA_SND_CHMAP_SR: return MA_CHANNEL_SIDE_RIGHT;
+ case MA_SND_CHMAP_RC: return MA_CHANNEL_BACK_CENTER;
+ case MA_SND_CHMAP_FLC: return MA_CHANNEL_FRONT_LEFT_CENTER;
+ case MA_SND_CHMAP_FRC: return MA_CHANNEL_FRONT_RIGHT_CENTER;
+ case MA_SND_CHMAP_RLC: return 0;
+ case MA_SND_CHMAP_RRC: return 0;
+ case MA_SND_CHMAP_FLW: return 0;
+ case MA_SND_CHMAP_FRW: return 0;
+ case MA_SND_CHMAP_FLH: return 0;
+ case MA_SND_CHMAP_FCH: return 0;
+ case MA_SND_CHMAP_FRH: return 0;
+ case MA_SND_CHMAP_TC: return MA_CHANNEL_TOP_CENTER;
+ case MA_SND_CHMAP_TFL: return MA_CHANNEL_TOP_FRONT_LEFT;
+ case MA_SND_CHMAP_TFR: return MA_CHANNEL_TOP_FRONT_RIGHT;
+ case MA_SND_CHMAP_TFC: return MA_CHANNEL_TOP_FRONT_CENTER;
+ case MA_SND_CHMAP_TRL: return MA_CHANNEL_TOP_BACK_LEFT;
+ case MA_SND_CHMAP_TRR: return MA_CHANNEL_TOP_BACK_RIGHT;
+ case MA_SND_CHMAP_TRC: return MA_CHANNEL_TOP_BACK_CENTER;
+ default: break;
+ }
+ return 0;
+}
+static ma_bool32 ma_is_common_device_name__alsa(const char* name)
+{
+ size_t iName;
+ for (iName = 0; iName < ma_countof(g_maCommonDeviceNamesALSA); ++iName) {
+ if (ma_strcmp(name, g_maCommonDeviceNamesALSA[iName]) == 0) {
+ return MA_TRUE;
+ }
+ }
- case ma_device_type_capture:
- {
- ma_uint32 framesAvailableCapture;
- DWORD flagsCapture; /* Passed to IAudioCaptureClient_GetBuffer(). */
+ return MA_FALSE;
+}
- /* Wait for data to become available first. */
- if (WaitForSingleObject(pDevice->wasapi.hEventCapture, INFINITE) == WAIT_FAILED) {
- exitLoop = MA_TRUE;
- break; /* Wait failed. */
- }
- /* See how many frames are available. Since we waited at the top, I don't think this should ever return 0. I'm checking for this anyway. */
- result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &framesAvailableCapture);
- if (result != MA_SUCCESS) {
- exitLoop = MA_TRUE;
- break;
- }
+static ma_bool32 ma_is_playback_device_blacklisted__alsa(const char* name)
+{
+ size_t iName;
+ for (iName = 0; iName < ma_countof(g_maBlacklistedPlaybackDeviceNamesALSA); ++iName) {
+ if (ma_strcmp(name, g_maBlacklistedPlaybackDeviceNamesALSA[iName]) == 0) {
+ return MA_TRUE;
+ }
+ }
- if (framesAvailableCapture < pDevice->wasapi.periodSizeInFramesCapture) {
- continue; /* Nothing available. Keep waiting. */
- }
+ return MA_FALSE;
+}
- /* Map a the data buffer in preparation for sending to the client. */
- mappedBufferSizeInFramesCapture = framesAvailableCapture;
- hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedBufferCapture, &mappedBufferSizeInFramesCapture, &flagsCapture, NULL, NULL);
- if (FAILED(hr)) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER);
- exitLoop = MA_TRUE;
- break;
- }
+static ma_bool32 ma_is_capture_device_blacklisted__alsa(const char* name)
+{
+ size_t iName;
+ for (iName = 0; iName < ma_countof(g_maBlacklistedCaptureDeviceNamesALSA); ++iName) {
+ if (ma_strcmp(name, g_maBlacklistedCaptureDeviceNamesALSA[iName]) == 0) {
+ return MA_TRUE;
+ }
+ }
- /* We should have a buffer at this point. */
- ma_device__send_frames_to_client(pDevice, mappedBufferSizeInFramesCapture, pMappedBufferCapture);
+ return MA_FALSE;
+}
- /* At this point we're done with the buffer. */
- hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedBufferSizeInFramesCapture);
- pMappedBufferCapture = NULL; /* <-- Important. Not doing this can result in an error once we leave this loop because it will use this to know whether or not a final ReleaseBuffer() needs to be called. */
- mappedBufferSizeInFramesCapture = 0;
- if (FAILED(hr)) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from capture device after reading from the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER);
- exitLoop = MA_TRUE;
- break;
- }
- } break;
+static ma_bool32 ma_is_device_blacklisted__alsa(ma_device_type deviceType, const char* name)
+{
+ if (deviceType == ma_device_type_playback) {
+ return ma_is_playback_device_blacklisted__alsa(name);
+ } else {
+ return ma_is_capture_device_blacklisted__alsa(name);
+ }
+}
+static const char* ma_find_char(const char* str, char c, int* index)
+{
+ int i = 0;
+ for (;;) {
+ if (str[i] == '\0') {
+ if (index) *index = -1;
+ return NULL;
+ }
- case ma_device_type_playback:
- {
- ma_uint32 framesAvailablePlayback;
+ if (str[i] == c) {
+ if (index) *index = i;
+ return str + i;
+ }
- /* Wait for space to become available first. */
- if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE) == WAIT_FAILED) {
- exitLoop = MA_TRUE;
- break; /* Wait failed. */
- }
+ i += 1;
+ }
- /* Check how much space is available. If this returns 0 we just keep waiting. */
- result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback);
- if (result != MA_SUCCESS) {
- exitLoop = MA_TRUE;
- break;
- }
+ /* Should never get here, but treat it as though the character was not found to make me feel better inside. */
+ if (index) *index = -1;
+ return NULL;
+}
- if (framesAvailablePlayback < pDevice->wasapi.periodSizeInFramesPlayback) {
- continue; /* No space available. */
- }
+static ma_bool32 ma_is_device_name_in_hw_format__alsa(const char* hwid)
+{
+ /* This function is just checking whether or not hwid is in "hw:%d,%d" format. */
- /* Map a the data buffer in preparation for the callback. */
- hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, &pMappedBufferPlayback);
- if (FAILED(hr)) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from playback device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER);
- exitLoop = MA_TRUE;
- break;
- }
+ int commaPos;
+ const char* dev;
+ int i;
- /* We should have a buffer at this point. */
- ma_device__read_frames_from_client(pDevice, framesAvailablePlayback, pMappedBufferPlayback);
+ if (hwid == NULL) {
+ return MA_FALSE;
+ }
- /* At this point we're done writing to the device and we just need to release the buffer. */
- hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, 0);
- pMappedBufferPlayback = NULL; /* <-- Important. Not doing this can result in an error once we leave this loop because it will use this to know whether or not a final ReleaseBuffer() needs to be called. */
- mappedBufferSizeInFramesPlayback = 0;
+ if (hwid[0] != 'h' || hwid[1] != 'w' || hwid[2] != ':') {
+ return MA_FALSE;
+ }
- if (FAILED(hr)) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from playback device after writing to the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER);
- exitLoop = MA_TRUE;
- break;
- }
+ hwid += 3;
- framesWrittenToPlaybackDevice += framesAvailablePlayback;
- if (!pDevice->wasapi.isStartedPlayback) {
- if (pDevice->playback.shareMode == ma_share_mode_exclusive || framesWrittenToPlaybackDevice >= (pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)*1) {
- hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
- if (FAILED(hr)) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device.", MA_FAILED_TO_START_BACKEND_DEVICE);
- exitLoop = MA_TRUE;
- break;
- }
- ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_TRUE);
- }
- }
- } break;
+ dev = ma_find_char(hwid, ',', &commaPos);
+ if (dev == NULL) {
+ return MA_FALSE;
+ } else {
+ dev += 1; /* Skip past the ",". */
+ }
- default: return MA_INVALID_ARGS;
+ /* Check if the part between the ":" and the "," contains only numbers. If not, return false. */
+ for (i = 0; i < commaPos; ++i) {
+ if (hwid[i] < '0' || hwid[i] > '9') {
+ return MA_FALSE;
}
}
- /* Here is where the device needs to be stopped. */
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- /* Any mapped buffers need to be released. */
- if (pMappedBufferCapture != NULL) {
- hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedBufferSizeInFramesCapture);
+ /* Check if everything after the "," is numeric. If not, return false. */
+ i = 0;
+ while (dev[i] != '\0') {
+ if (dev[i] < '0' || dev[i] > '9') {
+ return MA_FALSE;
}
+ i += 1;
+ }
- hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
- if (FAILED(hr)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal capture device.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
- }
+ return MA_TRUE;
+}
- /* The audio client needs to be reset otherwise restarting will fail. */
- hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
- if (FAILED(hr)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal capture device.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
- }
+static int ma_convert_device_name_to_hw_format__alsa(ma_context* pContext, char* dst, size_t dstSize, const char* src) /* Returns 0 on success, non-0 on error. */
+{
+ /* src should look something like this: "hw:CARD=I82801AAICH,DEV=0" */
- ma_atomic_exchange_32(&pDevice->wasapi.isStartedCapture, MA_FALSE);
+ int colonPos;
+ int commaPos;
+ char card[256];
+ const char* dev;
+ int cardIndex;
+
+ if (dst == NULL) {
+ return -1;
+ }
+ if (dstSize < 7) {
+ return -1; /* Absolute minimum size of the output buffer is 7 bytes. */
}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- /* Any mapped buffers need to be released. */
- if (pMappedBufferPlayback != NULL) {
- hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, mappedBufferSizeInFramesPlayback, 0);
- }
+ *dst = '\0'; /* Safety. */
+ if (src == NULL) {
+ return -1;
+ }
- /*
- The buffer needs to be drained before stopping the device. Not doing this will result in the last few frames not getting output to
- the speakers. This is a problem for very short sounds because it'll result in a significant potion of it not getting played.
- */
- if (pDevice->wasapi.isStartedPlayback) {
- if (pDevice->playback.shareMode == ma_share_mode_exclusive) {
- WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE);
- } else {
- ma_uint32 prevFramesAvaialablePlayback = (size_t)-1;
- ma_uint32 framesAvailablePlayback;
- for (;;) {
- result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback);
- if (result != MA_SUCCESS) {
- break;
- }
+ /* If the input name is already in "hw:%d,%d" format, just return that verbatim. */
+ if (ma_is_device_name_in_hw_format__alsa(src)) {
+ return ma_strcpy_s(dst, dstSize, src);
+ }
- if (framesAvailablePlayback >= pDevice->wasapi.actualBufferSizeInFramesPlayback) {
- break;
- }
+ src = ma_find_char(src, ':', &colonPos);
+ if (src == NULL) {
+ return -1; /* Couldn't find a colon */
+ }
- /*
- Just a safety check to avoid an infinite loop. If this iteration results in a situation where the number of available frames
- has not changed, get out of the loop. I don't think this should ever happen, but I think it's nice to have just in case.
- */
- if (framesAvailablePlayback == prevFramesAvaialablePlayback) {
- break;
- }
- prevFramesAvaialablePlayback = framesAvailablePlayback;
+ dev = ma_find_char(src, ',', &commaPos);
+ if (dev == NULL) {
+ dev = "0";
+ ma_strncpy_s(card, sizeof(card), src+6, (size_t)-1); /* +6 = ":CARD=" */
+ } else {
+ dev = dev + 5; /* +5 = ",DEV=" */
+ ma_strncpy_s(card, sizeof(card), src+6, commaPos-6); /* +6 = ":CARD=" */
+ }
- WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE);
- ResetEvent(pDevice->wasapi.hEventPlayback); /* Manual reset. */
- }
- }
- }
+ cardIndex = ((ma_snd_card_get_index_proc)pContext->alsa.snd_card_get_index)(card);
+ if (cardIndex < 0) {
+ return -2; /* Failed to retrieve the card index. */
+ }
- hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
- if (FAILED(hr)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal playback device.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
- }
+ /*printf("TESTING: CARD=%s,DEV=%s\n", card, dev); */
- /* The audio client needs to be reset otherwise restarting will fail. */
- hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback);
- if (FAILED(hr)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal playback device.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
- }
- ma_atomic_exchange_32(&pDevice->wasapi.isStartedPlayback, MA_FALSE);
+ /* Construction. */
+ dst[0] = 'h'; dst[1] = 'w'; dst[2] = ':';
+ if (ma_itoa_s(cardIndex, dst+3, dstSize-3, 10) != 0) {
+ return -3;
+ }
+ if (ma_strcat_s(dst, dstSize, ",") != 0) {
+ return -3;
+ }
+ if (ma_strcat_s(dst, dstSize, dev) != 0) {
+ return -3;
}
- return MA_SUCCESS;
+ return 0;
}
-ma_result ma_context_uninit__wasapi(ma_context* pContext)
+static ma_bool32 ma_does_id_exist_in_list__alsa(ma_device_id* pUniqueIDs, ma_uint32 count, const char* pHWID)
{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_wasapi);
- (void)pContext;
+ ma_uint32 i;
- return MA_SUCCESS;
+ MA_ASSERT(pHWID != NULL);
+
+ for (i = 0; i < count; ++i) {
+ if (ma_strcmp(pUniqueIDs[i].alsa, pHWID) == 0) {
+ return MA_TRUE;
+ }
+ }
+
+ return MA_FALSE;
}
-ma_result ma_context_init__wasapi(const ma_context_config* pConfig, ma_context* pContext)
+
+static ma_result ma_context_open_pcm__alsa(ma_context* pContext, ma_share_mode shareMode, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_snd_pcm_t** ppPCM)
{
- ma_result result = MA_SUCCESS;
+ ma_snd_pcm_t* pPCM;
+ ma_snd_pcm_stream_t stream;
+ int openMode;
- ma_assert(pContext != NULL);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(ppPCM != NULL);
- (void)pConfig;
+ *ppPCM = NULL;
+ pPCM = NULL;
-#ifdef MA_WIN32_DESKTOP
- /*
- WASAPI is only supported in Vista SP1 and newer. The reason for SP1 and not the base version of Vista is that event-driven
- exclusive mode does not work until SP1.
+ stream = (deviceType == ma_device_type_playback) ? MA_SND_PCM_STREAM_PLAYBACK : MA_SND_PCM_STREAM_CAPTURE;
+ openMode = MA_SND_PCM_NO_AUTO_RESAMPLE | MA_SND_PCM_NO_AUTO_CHANNELS | MA_SND_PCM_NO_AUTO_FORMAT;
- Unfortunately older compilers don't define these functions so we need to dynamically load them in order to avoid a lin error.
- */
- {
- ma_OSVERSIONINFOEXW osvi;
- ma_handle kernel32DLL;
- ma_PFNVerifyVersionInfoW _VerifyVersionInfoW;
- ma_PFNVerSetConditionMask _VerSetConditionMask;
+ if (pDeviceID == NULL) {
+ ma_bool32 isDeviceOpen;
+ size_t i;
- kernel32DLL = ma_dlopen(pContext, "kernel32.dll");
- if (kernel32DLL == NULL) {
- return MA_NO_BACKEND;
+ /*
+ We're opening the default device. I don't know if trying anything other than "default" is necessary, but it makes
+        me feel better to try as hard as we can to get _something_ working.
+ */
+ const char* defaultDeviceNames[] = {
+ "default",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ };
+
+ if (shareMode == ma_share_mode_exclusive) {
+ defaultDeviceNames[1] = "hw";
+ defaultDeviceNames[2] = "hw:0";
+ defaultDeviceNames[3] = "hw:0,0";
+ } else {
+ if (deviceType == ma_device_type_playback) {
+ defaultDeviceNames[1] = "dmix";
+ defaultDeviceNames[2] = "dmix:0";
+ defaultDeviceNames[3] = "dmix:0,0";
+ } else {
+ defaultDeviceNames[1] = "dsnoop";
+ defaultDeviceNames[2] = "dsnoop:0";
+ defaultDeviceNames[3] = "dsnoop:0,0";
+ }
+ defaultDeviceNames[4] = "hw";
+ defaultDeviceNames[5] = "hw:0";
+ defaultDeviceNames[6] = "hw:0,0";
}
- _VerifyVersionInfoW = (ma_PFNVerifyVersionInfoW)ma_dlsym(pContext, kernel32DLL, "VerifyVersionInfoW");
- _VerSetConditionMask = (ma_PFNVerSetConditionMask)ma_dlsym(pContext, kernel32DLL, "VerSetConditionMask");
- if (_VerifyVersionInfoW == NULL || _VerSetConditionMask == NULL) {
- ma_dlclose(pContext, kernel32DLL);
- return MA_NO_BACKEND;
+ isDeviceOpen = MA_FALSE;
+ for (i = 0; i < ma_countof(defaultDeviceNames); ++i) {
+ if (defaultDeviceNames[i] != NULL && defaultDeviceNames[i][0] != '\0') {
+ if (((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, defaultDeviceNames[i], stream, openMode) == 0) {
+ isDeviceOpen = MA_TRUE;
+ break;
+ }
+ }
}
- ma_zero_object(&osvi);
- osvi.dwOSVersionInfoSize = sizeof(osvi);
- osvi.dwMajorVersion = HIBYTE(MA_WIN32_WINNT_VISTA);
- osvi.dwMinorVersion = LOBYTE(MA_WIN32_WINNT_VISTA);
- osvi.wServicePackMajor = 1;
- if (_VerifyVersionInfoW(&osvi, MA_VER_MAJORVERSION | MA_VER_MINORVERSION | MA_VER_SERVICEPACKMAJOR, _VerSetConditionMask(_VerSetConditionMask(_VerSetConditionMask(0, MA_VER_MAJORVERSION, MA_VER_GREATER_EQUAL), MA_VER_MINORVERSION, MA_VER_GREATER_EQUAL), MA_VER_SERVICEPACKMAJOR, MA_VER_GREATER_EQUAL))) {
- result = MA_SUCCESS;
- } else {
- result = MA_NO_BACKEND;
+ if (!isDeviceOpen) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed when trying to open an appropriate default device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
}
+ } else {
+ /*
+ We're trying to open a specific device. There's a few things to consider here:
+
+        miniaudio recognizes a special format of device id that excludes the "hw", "dmix", etc. prefix. It looks like this: ":0,0", ":0,1", etc. When
+ an ID of this format is specified, it indicates to miniaudio that it can try different combinations of plugins ("hw", "dmix", etc.) until it
+ finds an appropriate one that works. This comes in very handy when trying to open a device in shared mode ("dmix"), vs exclusive mode ("hw").
+ */
- ma_dlclose(pContext, kernel32DLL);
- }
-#endif
+ /* May end up needing to make small adjustments to the ID, so make a copy. */
+ ma_device_id deviceID = *pDeviceID;
+ int resultALSA = -ENODEV;
- if (result != MA_SUCCESS) {
- return result;
- }
+ if (deviceID.alsa[0] != ':') {
+ /* The ID is not in ":0,0" format. Use the ID exactly as-is. */
+ resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, deviceID.alsa, stream, openMode);
+ } else {
+ char hwid[256];
- pContext->onUninit = ma_context_uninit__wasapi;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__wasapi;
- pContext->onEnumDevices = ma_context_enumerate_devices__wasapi;
- pContext->onGetDeviceInfo = ma_context_get_device_info__wasapi;
- pContext->onDeviceInit = ma_device_init__wasapi;
- pContext->onDeviceUninit = ma_device_uninit__wasapi;
- pContext->onDeviceStart = NULL; /* Not used. Started in onDeviceMainLoop. */
- pContext->onDeviceStop = NULL; /* Not used. Stopped in onDeviceMainLoop. */
- pContext->onDeviceWrite = NULL;
- pContext->onDeviceRead = NULL;
- pContext->onDeviceMainLoop = ma_device_main_loop__wasapi;
+ /* The ID is in ":0,0" format. Try different plugins depending on the shared mode. */
+ if (deviceID.alsa[1] == '\0') {
+ deviceID.alsa[0] = '\0'; /* An ID of ":" should be converted to "". */
+ }
- return result;
-}
-#endif
+ if (shareMode == ma_share_mode_shared) {
+ if (deviceType == ma_device_type_playback) {
+ ma_strcpy_s(hwid, sizeof(hwid), "dmix");
+ } else {
+ ma_strcpy_s(hwid, sizeof(hwid), "dsnoop");
+ }
-/******************************************************************************
+ if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) {
+ resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode);
+ }
+ }
-DirectSound Backend
+ /* If at this point we still don't have an open device it means we're either preferencing exclusive mode or opening with "dmix"/"dsnoop" failed. */
+ if (resultALSA != 0) {
+ ma_strcpy_s(hwid, sizeof(hwid), "hw");
+ if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) {
+ resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode);
+ }
+ }
+ }
-******************************************************************************/
-#ifdef MA_HAS_DSOUND
-/*#include */
+ if (resultALSA < 0) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed.", ma_result_from_errno(-resultALSA));
+ }
+ }
-GUID MA_GUID_IID_DirectSoundNotify = {0xb0210783, 0x89cd, 0x11d0, {0xaf, 0x08, 0x00, 0xa0, 0xc9, 0x25, 0xcd, 0x16}};
+ *ppPCM = pPCM;
+ return MA_SUCCESS;
+}
-/* miniaudio only uses priority or exclusive modes. */
-#define MA_DSSCL_NORMAL 1
-#define MA_DSSCL_PRIORITY 2
-#define MA_DSSCL_EXCLUSIVE 3
-#define MA_DSSCL_WRITEPRIMARY 4
-#define MA_DSCAPS_PRIMARYMONO 0x00000001
-#define MA_DSCAPS_PRIMARYSTEREO 0x00000002
-#define MA_DSCAPS_PRIMARY8BIT 0x00000004
-#define MA_DSCAPS_PRIMARY16BIT 0x00000008
-#define MA_DSCAPS_CONTINUOUSRATE 0x00000010
-#define MA_DSCAPS_EMULDRIVER 0x00000020
-#define MA_DSCAPS_CERTIFIED 0x00000040
-#define MA_DSCAPS_SECONDARYMONO 0x00000100
-#define MA_DSCAPS_SECONDARYSTEREO 0x00000200
-#define MA_DSCAPS_SECONDARY8BIT 0x00000400
-#define MA_DSCAPS_SECONDARY16BIT 0x00000800
+static ma_bool32 ma_context_is_device_id_equal__alsa(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext;
-#define MA_DSBCAPS_PRIMARYBUFFER 0x00000001
-#define MA_DSBCAPS_STATIC 0x00000002
-#define MA_DSBCAPS_LOCHARDWARE 0x00000004
-#define MA_DSBCAPS_LOCSOFTWARE 0x00000008
-#define MA_DSBCAPS_CTRL3D 0x00000010
-#define MA_DSBCAPS_CTRLFREQUENCY 0x00000020
-#define MA_DSBCAPS_CTRLPAN 0x00000040
-#define MA_DSBCAPS_CTRLVOLUME 0x00000080
-#define MA_DSBCAPS_CTRLPOSITIONNOTIFY 0x00000100
-#define MA_DSBCAPS_CTRLFX 0x00000200
-#define MA_DSBCAPS_STICKYFOCUS 0x00004000
-#define MA_DSBCAPS_GLOBALFOCUS 0x00008000
-#define MA_DSBCAPS_GETCURRENTPOSITION2 0x00010000
-#define MA_DSBCAPS_MUTE3DATMAXDISTANCE 0x00020000
-#define MA_DSBCAPS_LOCDEFER 0x00040000
-#define MA_DSBCAPS_TRUEPLAYPOSITION 0x00080000
+ return ma_strcmp(pID0->alsa, pID1->alsa) == 0;
+}
-#define MA_DSBPLAY_LOOPING 0x00000001
-#define MA_DSBPLAY_LOCHARDWARE 0x00000002
-#define MA_DSBPLAY_LOCSOFTWARE 0x00000004
-#define MA_DSBPLAY_TERMINATEBY_TIME 0x00000008
-#define MA_DSBPLAY_TERMINATEBY_DISTANCE 0x00000010
-#define MA_DSBPLAY_TERMINATEBY_PRIORITY 0x00000020
+static ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ int resultALSA;
+ ma_bool32 cbResult = MA_TRUE;
+ char** ppDeviceHints;
+ ma_device_id* pUniqueIDs = NULL;
+ ma_uint32 uniqueIDCount = 0;
+ char** ppNextDeviceHint;
-#define MA_DSCBSTART_LOOPING 0x00000001
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
-typedef struct
-{
- DWORD dwSize;
- DWORD dwFlags;
- DWORD dwBufferBytes;
- DWORD dwReserved;
- WAVEFORMATEX* lpwfxFormat;
- GUID guid3DAlgorithm;
-} MA_DSBUFFERDESC;
+ ma_mutex_lock(&pContext->alsa.internalDeviceEnumLock);
-typedef struct
-{
- DWORD dwSize;
- DWORD dwFlags;
- DWORD dwBufferBytes;
- DWORD dwReserved;
- WAVEFORMATEX* lpwfxFormat;
- DWORD dwFXCount;
- void* lpDSCFXDesc; /* <-- miniaudio doesn't use this, so set to void*. */
-} MA_DSCBUFFERDESC;
+ resultALSA = ((ma_snd_device_name_hint_proc)pContext->alsa.snd_device_name_hint)(-1, "pcm", (void***)&ppDeviceHints);
+ if (resultALSA < 0) {
+ ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock);
+ return ma_result_from_errno(-resultALSA);
+ }
-typedef struct
-{
- DWORD dwSize;
- DWORD dwFlags;
- DWORD dwMinSecondarySampleRate;
- DWORD dwMaxSecondarySampleRate;
- DWORD dwPrimaryBuffers;
- DWORD dwMaxHwMixingAllBuffers;
- DWORD dwMaxHwMixingStaticBuffers;
- DWORD dwMaxHwMixingStreamingBuffers;
- DWORD dwFreeHwMixingAllBuffers;
- DWORD dwFreeHwMixingStaticBuffers;
- DWORD dwFreeHwMixingStreamingBuffers;
- DWORD dwMaxHw3DAllBuffers;
- DWORD dwMaxHw3DStaticBuffers;
- DWORD dwMaxHw3DStreamingBuffers;
- DWORD dwFreeHw3DAllBuffers;
- DWORD dwFreeHw3DStaticBuffers;
- DWORD dwFreeHw3DStreamingBuffers;
- DWORD dwTotalHwMemBytes;
- DWORD dwFreeHwMemBytes;
- DWORD dwMaxContigFreeHwMemBytes;
- DWORD dwUnlockTransferRateHwBuffers;
- DWORD dwPlayCpuOverheadSwBuffers;
- DWORD dwReserved1;
- DWORD dwReserved2;
-} MA_DSCAPS;
+ ppNextDeviceHint = ppDeviceHints;
+ while (*ppNextDeviceHint != NULL) {
+ char* NAME = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "NAME");
+ char* DESC = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "DESC");
+ char* IOID = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "IOID");
+ ma_device_type deviceType = ma_device_type_playback;
+ ma_bool32 stopEnumeration = MA_FALSE;
+ char hwid[sizeof(pUniqueIDs->alsa)];
+ ma_device_info deviceInfo;
-typedef struct
-{
- DWORD dwSize;
- DWORD dwFlags;
- DWORD dwBufferBytes;
- DWORD dwUnlockTransferRate;
- DWORD dwPlayCpuOverhead;
-} MA_DSBCAPS;
+ if ((IOID == NULL || ma_strcmp(IOID, "Output") == 0)) {
+ deviceType = ma_device_type_playback;
+ }
+ if ((IOID != NULL && ma_strcmp(IOID, "Input" ) == 0)) {
+ deviceType = ma_device_type_capture;
+ }
-typedef struct
-{
- DWORD dwSize;
- DWORD dwFlags;
- DWORD dwFormats;
- DWORD dwChannels;
-} MA_DSCCAPS;
+ if (NAME != NULL) {
+ if (pContext->alsa.useVerboseDeviceEnumeration) {
+ /* Verbose mode. Use the name exactly as-is. */
+ ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1);
+ } else {
+ /* Simplified mode. Use ":%d,%d" format. */
+ if (ma_convert_device_name_to_hw_format__alsa(pContext, hwid, sizeof(hwid), NAME) == 0) {
+ /*
+ At this point, hwid looks like "hw:0,0". In simplified enumeration mode, we actually want to strip off the
+ plugin name so it looks like ":0,0". The reason for this is that this special format is detected at device
+ initialization time and is used as an indicator to try and use the most appropriate plugin depending on the
+ device type and sharing mode.
+ */
+ char* dst = hwid;
+ char* src = hwid+2;
+ while ((*dst++ = *src++));
+ } else {
+ /* Conversion to "hw:%d,%d" failed. Just use the name as-is. */
+ ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1);
+ }
-typedef struct
-{
- DWORD dwSize;
- DWORD dwFlags;
- DWORD dwBufferBytes;
- DWORD dwReserved;
-} MA_DSCBCAPS;
+ if (ma_does_id_exist_in_list__alsa(pUniqueIDs, uniqueIDCount, hwid)) {
+ goto next_device; /* The device has already been enumerated. Move on to the next one. */
+ } else {
+ /* The device has not yet been enumerated. Make sure it's added to our list so that it's not enumerated again. */
+ size_t oldCapacity = sizeof(*pUniqueIDs) * uniqueIDCount;
+ size_t newCapacity = sizeof(*pUniqueIDs) * (uniqueIDCount + 1);
+ ma_device_id* pNewUniqueIDs = (ma_device_id*)ma__realloc_from_callbacks(pUniqueIDs, newCapacity, oldCapacity, &pContext->allocationCallbacks);
+ if (pNewUniqueIDs == NULL) {
+ goto next_device; /* Failed to allocate memory. */
+ }
-typedef struct
-{
- DWORD dwOffset;
- HANDLE hEventNotify;
-} MA_DSBPOSITIONNOTIFY;
+ pUniqueIDs = pNewUniqueIDs;
+ MA_COPY_MEMORY(pUniqueIDs[uniqueIDCount].alsa, hwid, sizeof(hwid));
+ uniqueIDCount += 1;
+ }
+ }
+ } else {
+ MA_ZERO_MEMORY(hwid, sizeof(hwid));
+ }
-typedef struct ma_IDirectSound ma_IDirectSound;
-typedef struct ma_IDirectSoundBuffer ma_IDirectSoundBuffer;
-typedef struct ma_IDirectSoundCapture ma_IDirectSoundCapture;
-typedef struct ma_IDirectSoundCaptureBuffer ma_IDirectSoundCaptureBuffer;
-typedef struct ma_IDirectSoundNotify ma_IDirectSoundNotify;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.id.alsa, sizeof(deviceInfo.id.alsa), hwid, (size_t)-1);
+ /*
+ DESC is the friendly name. We treat this slightly differently depending on whether or not we are using verbose
+ device enumeration. In verbose mode we want to take the entire description so that the end-user can distinguish
+ between the subdevices of each card/dev pair. In simplified mode, however, we only want the first part of the
+ description.
+
+ The value in DESC seems to be split into two lines, with the first line being the name of the device and the
+ second line being a description of the device. I don't like having the description be across two lines because
+ it makes formatting ugly and annoying. I'm therefore deciding to put it all on a single line with the second line
+ being put into parentheses. In simplified mode I'm just stripping the second line entirely.
+ */
+ if (DESC != NULL) {
+ int lfPos;
+ const char* line2 = ma_find_char(DESC, '\n', &lfPos);
+ if (line2 != NULL) {
+ line2 += 1; /* Skip past the new-line character. */
-/*
-COM objects. The way these work is that you have a vtable (a list of function pointers, kind of
-like how C++ works internally), and then you have a structure with a single member, which is a
-pointer to the vtable. The vtable is where the methods of the object are defined. Methods need
-to be in a specific order, and parent classes need to have their methods declared first.
-*/
+ if (pContext->alsa.useVerboseDeviceEnumeration) {
+ /* Verbose mode. Put the second line in brackets. */
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos);
+ ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), " (");
+ ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), line2);
+ ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), ")");
+ } else {
+ /* Simplified mode. Strip the second line entirely. */
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos);
+ }
+ } else {
+ /* There's no second line. Just copy the whole description. */
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, (size_t)-1);
+ }
+ }
-/* IDirectSound */
-typedef struct
-{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSound* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSound* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSound* pThis);
+ if (!ma_is_device_blacklisted__alsa(deviceType, NAME)) {
+ cbResult = callback(pContext, deviceType, &deviceInfo, pUserData);
+ }
- /* IDirectSound */
- HRESULT (STDMETHODCALLTYPE * CreateSoundBuffer) (ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter);
- HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps);
- HRESULT (STDMETHODCALLTYPE * DuplicateSoundBuffer)(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate);
- HRESULT (STDMETHODCALLTYPE * SetCooperativeLevel) (ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel);
- HRESULT (STDMETHODCALLTYPE * Compact) (ma_IDirectSound* pThis);
- HRESULT (STDMETHODCALLTYPE * GetSpeakerConfig) (ma_IDirectSound* pThis, DWORD* pSpeakerConfig);
- HRESULT (STDMETHODCALLTYPE * SetSpeakerConfig) (ma_IDirectSound* pThis, DWORD dwSpeakerConfig);
- HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSound* pThis, const GUID* pGuidDevice);
-} ma_IDirectSoundVtbl;
-struct ma_IDirectSound
-{
- ma_IDirectSoundVtbl* lpVtbl;
-};
-HRESULT ma_IDirectSound_QueryInterface(ma_IDirectSound* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IDirectSound_AddRef(ma_IDirectSound* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IDirectSound_Release(ma_IDirectSound* pThis) { return pThis->lpVtbl->Release(pThis); }
-HRESULT ma_IDirectSound_CreateSoundBuffer(ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateSoundBuffer(pThis, pDSBufferDesc, ppDSBuffer, pUnkOuter); }
-HRESULT ma_IDirectSound_GetCaps(ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCaps); }
-HRESULT ma_IDirectSound_DuplicateSoundBuffer(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate) { return pThis->lpVtbl->DuplicateSoundBuffer(pThis, pDSBufferOriginal, ppDSBufferDuplicate); }
-HRESULT ma_IDirectSound_SetCooperativeLevel(ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel) { return pThis->lpVtbl->SetCooperativeLevel(pThis, hwnd, dwLevel); }
-HRESULT ma_IDirectSound_Compact(ma_IDirectSound* pThis) { return pThis->lpVtbl->Compact(pThis); }
-HRESULT ma_IDirectSound_GetSpeakerConfig(ma_IDirectSound* pThis, DWORD* pSpeakerConfig) { return pThis->lpVtbl->GetSpeakerConfig(pThis, pSpeakerConfig); }
-HRESULT ma_IDirectSound_SetSpeakerConfig(ma_IDirectSound* pThis, DWORD dwSpeakerConfig) { return pThis->lpVtbl->SetSpeakerConfig(pThis, dwSpeakerConfig); }
-HRESULT ma_IDirectSound_Initialize(ma_IDirectSound* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); }
+ /*
+ Some devices are both playback and capture, but they are only enumerated by ALSA once. We need to fire the callback
+ again for the other device type in this case. We do this for known devices.
+ */
+ if (cbResult) {
+ if (ma_is_common_device_name__alsa(NAME)) {
+ if (deviceType == ma_device_type_playback) {
+ if (!ma_is_capture_device_blacklisted__alsa(NAME)) {
+ cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+ }
+ } else {
+ if (!ma_is_playback_device_blacklisted__alsa(NAME)) {
+ cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ }
+ }
+ }
+ }
+ if (cbResult == MA_FALSE) {
+ stopEnumeration = MA_TRUE;
+ }
-/* IDirectSoundBuffer */
-typedef struct
-{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundBuffer* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundBuffer* pThis);
+ next_device:
+ free(NAME);
+ free(DESC);
+ free(IOID);
+ ppNextDeviceHint += 1;
- /* IDirectSoundBuffer */
- HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps);
- HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor);
- HRESULT (STDMETHODCALLTYPE * GetFormat) (ma_IDirectSoundBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten);
- HRESULT (STDMETHODCALLTYPE * GetVolume) (ma_IDirectSoundBuffer* pThis, LONG* pVolume);
- HRESULT (STDMETHODCALLTYPE * GetPan) (ma_IDirectSoundBuffer* pThis, LONG* pPan);
- HRESULT (STDMETHODCALLTYPE * GetFrequency) (ma_IDirectSoundBuffer* pThis, DWORD* pFrequency);
- HRESULT (STDMETHODCALLTYPE * GetStatus) (ma_IDirectSoundBuffer* pThis, DWORD* pStatus);
- HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc);
- HRESULT (STDMETHODCALLTYPE * Lock) (ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags);
- HRESULT (STDMETHODCALLTYPE * Play) (ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags);
- HRESULT (STDMETHODCALLTYPE * SetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition);
- HRESULT (STDMETHODCALLTYPE * SetFormat) (ma_IDirectSoundBuffer* pThis, const WAVEFORMATEX* pFormat);
- HRESULT (STDMETHODCALLTYPE * SetVolume) (ma_IDirectSoundBuffer* pThis, LONG volume);
- HRESULT (STDMETHODCALLTYPE * SetPan) (ma_IDirectSoundBuffer* pThis, LONG pan);
- HRESULT (STDMETHODCALLTYPE * SetFrequency) (ma_IDirectSoundBuffer* pThis, DWORD dwFrequency);
- HRESULT (STDMETHODCALLTYPE * Stop) (ma_IDirectSoundBuffer* pThis);
- HRESULT (STDMETHODCALLTYPE * Unlock) (ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2);
- HRESULT (STDMETHODCALLTYPE * Restore) (ma_IDirectSoundBuffer* pThis);
-} ma_IDirectSoundBufferVtbl;
-struct ma_IDirectSoundBuffer
-{
- ma_IDirectSoundBufferVtbl* lpVtbl;
-};
-HRESULT ma_IDirectSoundBuffer_QueryInterface(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IDirectSoundBuffer_AddRef(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IDirectSoundBuffer_Release(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Release(pThis); }
-HRESULT ma_IDirectSoundBuffer_GetCaps(ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSBufferCaps); }
-HRESULT ma_IDirectSoundBuffer_GetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCurrentPlayCursor, pCurrentWriteCursor); }
-HRESULT ma_IDirectSoundBuffer_GetFormat(ma_IDirectSoundBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); }
-HRESULT ma_IDirectSoundBuffer_GetVolume(ma_IDirectSoundBuffer* pThis, LONG* pVolume) { return pThis->lpVtbl->GetVolume(pThis, pVolume); }
-HRESULT ma_IDirectSoundBuffer_GetPan(ma_IDirectSoundBuffer* pThis, LONG* pPan) { return pThis->lpVtbl->GetPan(pThis, pPan); }
-HRESULT ma_IDirectSoundBuffer_GetFrequency(ma_IDirectSoundBuffer* pThis, DWORD* pFrequency) { return pThis->lpVtbl->GetFrequency(pThis, pFrequency); }
-HRESULT ma_IDirectSoundBuffer_GetStatus(ma_IDirectSoundBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); }
-HRESULT ma_IDirectSoundBuffer_Initialize(ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSound, pDSBufferDesc); }
-HRESULT ma_IDirectSoundBuffer_Lock(ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); }
-HRESULT ma_IDirectSoundBuffer_Play(ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags) { return pThis->lpVtbl->Play(pThis, dwReserved1, dwPriority, dwFlags); }
-HRESULT ma_IDirectSoundBuffer_SetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition) { return pThis->lpVtbl->SetCurrentPosition(pThis, dwNewPosition); }
-HRESULT ma_IDirectSoundBuffer_SetFormat(ma_IDirectSoundBuffer* pThis, const WAVEFORMATEX* pFormat) { return pThis->lpVtbl->SetFormat(pThis, pFormat); }
-HRESULT ma_IDirectSoundBuffer_SetVolume(ma_IDirectSoundBuffer* pThis, LONG volume) { return pThis->lpVtbl->SetVolume(pThis, volume); }
-HRESULT ma_IDirectSoundBuffer_SetPan(ma_IDirectSoundBuffer* pThis, LONG pan) { return pThis->lpVtbl->SetPan(pThis, pan); }
-HRESULT ma_IDirectSoundBuffer_SetFrequency(ma_IDirectSoundBuffer* pThis, DWORD dwFrequency) { return pThis->lpVtbl->SetFrequency(pThis, dwFrequency); }
-HRESULT ma_IDirectSoundBuffer_Stop(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); }
-HRESULT ma_IDirectSoundBuffer_Unlock(ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); }
-HRESULT ma_IDirectSoundBuffer_Restore(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Restore(pThis); }
+ /* We need to stop enumeration if the callback returned false. */
+ if (stopEnumeration) {
+ break;
+ }
+ }
+ ma__free_from_callbacks(pUniqueIDs, &pContext->allocationCallbacks);
+ ((ma_snd_device_name_free_hint_proc)pContext->alsa.snd_device_name_free_hint)((void**)ppDeviceHints);
-/* IDirectSoundCapture */
-typedef struct
-{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundCapture* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundCapture* pThis);
+ ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock);
- /* IDirectSoundCapture */
- HRESULT (STDMETHODCALLTYPE * CreateCaptureBuffer)(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter);
- HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps);
- HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice);
-} ma_IDirectSoundCaptureVtbl;
-struct ma_IDirectSoundCapture
-{
- ma_IDirectSoundCaptureVtbl* lpVtbl;
-};
-HRESULT ma_IDirectSoundCapture_QueryInterface(ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IDirectSoundCapture_AddRef(ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IDirectSoundCapture_Release(ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->Release(pThis); }
-HRESULT ma_IDirectSoundCapture_CreateCaptureBuffer(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateCaptureBuffer(pThis, pDSCBufferDesc, ppDSCBuffer, pUnkOuter); }
-HRESULT ma_IDirectSoundCapture_GetCaps (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCCaps); }
-HRESULT ma_IDirectSoundCapture_Initialize (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); }
+ return MA_SUCCESS;
+}
-/* IDirectSoundCaptureBuffer */
typedef struct
{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundCaptureBuffer* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundCaptureBuffer* pThis);
+ ma_device_type deviceType;
+ const ma_device_id* pDeviceID;
+ ma_share_mode shareMode;
+ ma_device_info* pDeviceInfo;
+ ma_bool32 foundDevice;
+} ma_context_get_device_info_enum_callback_data__alsa;
- /* IDirectSoundCaptureBuffer */
- HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps);
- HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition);
- HRESULT (STDMETHODCALLTYPE * GetFormat) (ma_IDirectSoundCaptureBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten);
- HRESULT (STDMETHODCALLTYPE * GetStatus) (ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus);
- HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc);
- HRESULT (STDMETHODCALLTYPE * Lock) (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags);
- HRESULT (STDMETHODCALLTYPE * Start) (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags);
- HRESULT (STDMETHODCALLTYPE * Stop) (ma_IDirectSoundCaptureBuffer* pThis);
- HRESULT (STDMETHODCALLTYPE * Unlock) (ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2);
-} ma_IDirectSoundCaptureBufferVtbl;
-struct ma_IDirectSoundCaptureBuffer
+static ma_bool32 ma_context_get_device_info_enum_callback__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData)
{
- ma_IDirectSoundCaptureBufferVtbl* lpVtbl;
-};
-HRESULT ma_IDirectSoundCaptureBuffer_QueryInterface(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IDirectSoundCaptureBuffer_AddRef(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IDirectSoundCaptureBuffer_Release(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Release(pThis); }
-HRESULT ma_IDirectSoundCaptureBuffer_GetCaps(ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCBCaps); }
-HRESULT ma_IDirectSoundCaptureBuffer_GetCurrentPosition(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCapturePosition, pReadPosition); }
-HRESULT ma_IDirectSoundCaptureBuffer_GetFormat(ma_IDirectSoundCaptureBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); }
-HRESULT ma_IDirectSoundCaptureBuffer_GetStatus(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); }
-HRESULT ma_IDirectSoundCaptureBuffer_Initialize(ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSoundCapture, pDSCBufferDesc); }
-HRESULT ma_IDirectSoundCaptureBuffer_Lock(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); }
-HRESULT ma_IDirectSoundCaptureBuffer_Start(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags) { return pThis->lpVtbl->Start(pThis, dwFlags); }
-HRESULT ma_IDirectSoundCaptureBuffer_Stop(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); }
-HRESULT ma_IDirectSoundCaptureBuffer_Unlock(ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); }
+ ma_context_get_device_info_enum_callback_data__alsa* pData = (ma_context_get_device_info_enum_callback_data__alsa*)pUserData;
+ MA_ASSERT(pData != NULL);
+ if (pData->pDeviceID == NULL && ma_strcmp(pDeviceInfo->id.alsa, "default") == 0) {
+ ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1);
+ pData->foundDevice = MA_TRUE;
+ } else {
+ if (pData->deviceType == deviceType && ma_context_is_device_id_equal__alsa(pContext, pData->pDeviceID, &pDeviceInfo->id)) {
+ ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1);
+ pData->foundDevice = MA_TRUE;
+ }
+ }
-/* IDirectSoundNotify */
-typedef struct
-{
- /* IUnknown */
- HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject);
- ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundNotify* pThis);
- ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundNotify* pThis);
+ /* Keep enumerating until we have found the device. */
+ return !pData->foundDevice;
+}
- /* IDirectSoundNotify */
- HRESULT (STDMETHODCALLTYPE * SetNotificationPositions)(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies);
-} ma_IDirectSoundNotifyVtbl;
-struct ma_IDirectSoundNotify
+static ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
{
- ma_IDirectSoundNotifyVtbl* lpVtbl;
-};
-HRESULT ma_IDirectSoundNotify_QueryInterface(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
-ULONG ma_IDirectSoundNotify_AddRef(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->AddRef(pThis); }
-ULONG ma_IDirectSoundNotify_Release(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->Release(pThis); }
-HRESULT ma_IDirectSoundNotify_SetNotificationPositions(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies) { return pThis->lpVtbl->SetNotificationPositions(pThis, dwPositionNotifies, pPositionNotifies); }
-
+ ma_context_get_device_info_enum_callback_data__alsa data;
+ ma_result result;
+ int resultALSA;
+ ma_snd_pcm_t* pPCM;
+ ma_snd_pcm_hw_params_t* pHWParams;
+ ma_snd_pcm_format_mask_t* pFormatMask;
+ int sampleRateDir = 0;
-typedef BOOL (CALLBACK * ma_DSEnumCallbackAProc) (LPGUID pDeviceGUID, LPCSTR pDeviceDescription, LPCSTR pModule, LPVOID pContext);
-typedef HRESULT (WINAPI * ma_DirectSoundCreateProc) (const GUID* pcGuidDevice, ma_IDirectSound** ppDS8, LPUNKNOWN pUnkOuter);
-typedef HRESULT (WINAPI * ma_DirectSoundEnumerateAProc) (ma_DSEnumCallbackAProc pDSEnumCallback, LPVOID pContext);
-typedef HRESULT (WINAPI * ma_DirectSoundCaptureCreateProc) (const GUID* pcGuidDevice, ma_IDirectSoundCapture** ppDSC8, LPUNKNOWN pUnkOuter);
-typedef HRESULT (WINAPI * ma_DirectSoundCaptureEnumerateAProc)(ma_DSEnumCallbackAProc pDSEnumCallback, LPVOID pContext);
+ MA_ASSERT(pContext != NULL);
+ /* We just enumerate to find basic information about the device. */
+ data.deviceType = deviceType;
+ data.pDeviceID = pDeviceID;
+ data.shareMode = shareMode;
+ data.pDeviceInfo = pDeviceInfo;
+ data.foundDevice = MA_FALSE;
+ result = ma_context_enumerate_devices__alsa(pContext, ma_context_get_device_info_enum_callback__alsa, &data);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-/*
-Retrieves the channel count and channel map for the given speaker configuration. If the speaker configuration is unknown,
-the channel count and channel map will be left unmodified.
-*/
-void ma_get_channels_from_speaker_config__dsound(DWORD speakerConfig, WORD* pChannelsOut, DWORD* pChannelMapOut)
-{
- WORD channels;
- DWORD channelMap;
+ if (!data.foundDevice) {
+ return MA_NO_DEVICE;
+ }
- channels = 0;
- if (pChannelsOut != NULL) {
- channels = *pChannelsOut;
+ /* For detailed info we need to open the device. */
+ result = ma_context_open_pcm__alsa(pContext, shareMode, deviceType, pDeviceID, &pPCM);
+ if (result != MA_SUCCESS) {
+ return result;
}
- channelMap = 0;
- if (pChannelMapOut != NULL) {
- channelMap = *pChannelMapOut;
+ /* We need to initialize a HW parameters object in order to know what formats are supported. */
+ pHWParams = (ma_snd_pcm_hw_params_t*)ma__calloc_from_callbacks(((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)(), &pContext->allocationCallbacks);
+ if (pHWParams == NULL) {
+ return MA_OUT_OF_MEMORY;
}
- /*
- The speaker configuration is a combination of speaker config and speaker geometry. The lower 8 bits is what we care about. The upper
- 16 bits is for the geometry.
- */
- switch ((BYTE)(speakerConfig)) {
- case 1 /*DSSPEAKER_HEADPHONE*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break;
- case 2 /*DSSPEAKER_MONO*/: channels = 1; channelMap = SPEAKER_FRONT_CENTER; break;
- case 3 /*DSSPEAKER_QUAD*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break;
- case 4 /*DSSPEAKER_STEREO*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break;
- case 5 /*DSSPEAKER_SURROUND*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_BACK_CENTER; break;
- case 6 /*DSSPEAKER_5POINT1_BACK*/ /*DSSPEAKER_5POINT1*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break;
- case 7 /*DSSPEAKER_7POINT1_WIDE*/ /*DSSPEAKER_7POINT1*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_FRONT_LEFT_OF_CENTER | SPEAKER_FRONT_RIGHT_OF_CENTER; break;
- case 8 /*DSSPEAKER_7POINT1_SURROUND*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break;
- case 9 /*DSSPEAKER_5POINT1_SURROUND*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break;
- default: break;
+ resultALSA = ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. snd_pcm_hw_params_any() failed.", ma_result_from_errno(-resultALSA));
}
- if (pChannelsOut != NULL) {
- *pChannelsOut = channels;
+ ((ma_snd_pcm_hw_params_get_channels_min_proc)pContext->alsa.snd_pcm_hw_params_get_channels_min)(pHWParams, &pDeviceInfo->minChannels);
+ ((ma_snd_pcm_hw_params_get_channels_max_proc)pContext->alsa.snd_pcm_hw_params_get_channels_max)(pHWParams, &pDeviceInfo->maxChannels);
+ ((ma_snd_pcm_hw_params_get_rate_min_proc)pContext->alsa.snd_pcm_hw_params_get_rate_min)(pHWParams, &pDeviceInfo->minSampleRate, &sampleRateDir);
+ ((ma_snd_pcm_hw_params_get_rate_max_proc)pContext->alsa.snd_pcm_hw_params_get_rate_max)(pHWParams, &pDeviceInfo->maxSampleRate, &sampleRateDir);
+
+ /* Formats. */
+ pFormatMask = (ma_snd_pcm_format_mask_t*)ma__calloc_from_callbacks(((ma_snd_pcm_format_mask_sizeof_proc)pContext->alsa.snd_pcm_format_mask_sizeof)(), &pContext->allocationCallbacks);
+ if (pFormatMask == NULL) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ return MA_OUT_OF_MEMORY;
}
- if (pChannelMapOut != NULL) {
- *pChannelMapOut = channelMap;
+ ((ma_snd_pcm_hw_params_get_format_mask_proc)pContext->alsa.snd_pcm_hw_params_get_format_mask)(pHWParams, pFormatMask);
+
+ pDeviceInfo->formatCount = 0;
+ if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_U8)) {
+ pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_u8;
+ }
+ if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_S16_LE)) {
+ pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s16;
+ }
+ if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_S24_3LE)) {
+ pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s24;
+ }
+ if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_S32_LE)) {
+ pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s32;
+ }
+ if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_FLOAT_LE)) {
+ pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_f32;
}
+
+ ma__free_from_callbacks(pFormatMask, &pContext->allocationCallbacks);
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+
+ ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM);
+ return MA_SUCCESS;
}
-ma_result ma_context_create_IDirectSound__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSound** ppDirectSound)
+#if 0
+/*
+Waits for a number of frames to become available for either capture or playback. The return
+value is the number of frames available.
+
+This will return early if the main loop is broken with ma_device__break_main_loop().
+*/
+static ma_uint32 ma_device__wait_for_frames__alsa(ma_device* pDevice, ma_bool32* pRequiresRestart)
{
- ma_IDirectSound* pDirectSound;
- HWND hWnd;
+ MA_ASSERT(pDevice != NULL);
- ma_assert(pContext != NULL);
- ma_assert(ppDirectSound != NULL);
+ if (pRequiresRestart) *pRequiresRestart = MA_FALSE;
- *ppDirectSound = NULL;
- pDirectSound = NULL;
+ /* I want it so that this function returns the period size in frames. We just wait until that number of frames are available and then return. */
+ ma_uint32 periodSizeInFrames = pDevice->bufferSizeInFrames / pDevice->periods;
+ while (!pDevice->alsa.breakFromMainLoop) {
+ ma_snd_pcm_sframes_t framesAvailable = ((ma_snd_pcm_avail_update_proc)pDevice->pContext->alsa.snd_pcm_avail_update)((ma_snd_pcm_t*)pDevice->alsa.pPCM);
+ if (framesAvailable < 0) {
+ if (framesAvailable == -EPIPE) {
+ if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, framesAvailable, MA_TRUE) < 0) {
+ return 0;
+ }
- if (FAILED(((ma_DirectSoundCreateProc)pContext->dsound.DirectSoundCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSound, NULL))) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCreate() failed for playback device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ /* A device recovery means a restart for mmap mode. */
+ if (pRequiresRestart) {
+ *pRequiresRestart = MA_TRUE;
+ }
- /* The cooperative level must be set before doing anything else. */
- hWnd = ((MA_PFN_GetForegroundWindow)pContext->win32.GetForegroundWindow)();
- if (hWnd == NULL) {
- hWnd = ((MA_PFN_GetDesktopWindow)pContext->win32.GetDesktopWindow)();
+ /* Try again, but if it fails this time just return an error. */
+ framesAvailable = ((ma_snd_pcm_avail_update_proc)pDevice->pContext->alsa.snd_pcm_avail_update)((ma_snd_pcm_t*)pDevice->alsa.pPCM);
+ if (framesAvailable < 0) {
+ return 0;
+ }
+ }
+ }
+
+ if (framesAvailable >= periodSizeInFrames) {
+ return periodSizeInFrames;
+ }
+
+ if (framesAvailable < periodSizeInFrames) {
+ /* Less than a whole period is available so keep waiting. */
+ int waitResult = ((ma_snd_pcm_wait_proc)pDevice->pContext->alsa.snd_pcm_wait)((ma_snd_pcm_t*)pDevice->alsa.pPCM, -1);
+ if (waitResult < 0) {
+ if (waitResult == -EPIPE) {
+ if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, waitResult, MA_TRUE) < 0) {
+ return 0;
+ }
+
+ /* A device recovery means a restart for mmap mode. */
+ if (pRequiresRestart) {
+ *pRequiresRestart = MA_TRUE;
+ }
+ }
+ }
+ }
}
- if (FAILED(ma_IDirectSound_SetCooperativeLevel(pDirectSound, hWnd, (shareMode == ma_share_mode_exclusive) ? MA_DSSCL_EXCLUSIVE : MA_DSSCL_PRIORITY))) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_SetCooperateiveLevel() failed for playback device.", MA_SHARE_MODE_NOT_SUPPORTED);
+
+ /* We'll get here if the loop was terminated. Just return whatever's available. */
+ ma_snd_pcm_sframes_t framesAvailable = ((ma_snd_pcm_avail_update_proc)pDevice->pContext->alsa.snd_pcm_avail_update)((ma_snd_pcm_t*)pDevice->alsa.pPCM);
+ if (framesAvailable < 0) {
+ return 0;
}
- *ppDirectSound = pDirectSound;
- return MA_SUCCESS;
+ return framesAvailable;
}
-ma_result ma_context_create_IDirectSoundCapture__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSoundCapture** ppDirectSoundCapture)
+static ma_bool32 ma_device_read_from_client_and_write__alsa(ma_device* pDevice)
{
- ma_IDirectSoundCapture* pDirectSoundCapture;
+ MA_ASSERT(pDevice != NULL);
+ if (!ma_device_is_started(pDevice) && ma_device__get_state(pDevice) != MA_STATE_STARTING) {
+ return MA_FALSE;
+ }
+ if (pDevice->alsa.breakFromMainLoop) {
+ return MA_FALSE;
+ }
+
+ if (pDevice->alsa.isUsingMMap) {
+ /* mmap. */
+ ma_bool32 requiresRestart;
+ ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, &requiresRestart);
+ if (framesAvailable == 0) {
+ return MA_FALSE;
+ }
- ma_assert(pContext != NULL);
- ma_assert(ppDirectSoundCapture != NULL);
+ /* Don't bother asking the client for more audio data if we're just stopping the device anyway. */
+ if (pDevice->alsa.breakFromMainLoop) {
+ return MA_FALSE;
+ }
- /* DirectSound does not support exclusive mode for capture. */
- if (shareMode == ma_share_mode_exclusive) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
- }
+ const ma_snd_pcm_channel_area_t* pAreas;
+ ma_snd_pcm_uframes_t mappedOffset;
+ ma_snd_pcm_uframes_t mappedFrames = framesAvailable;
+ while (framesAvailable > 0) {
+ int result = ((ma_snd_pcm_mmap_begin_proc)pDevice->pContext->alsa.snd_pcm_mmap_begin)((ma_snd_pcm_t*)pDevice->alsa.pPCM, &pAreas, &mappedOffset, &mappedFrames);
+ if (result < 0) {
+ return MA_FALSE;
+ }
+
+ if (mappedFrames > 0) {
+ void* pBuffer = (ma_uint8*)pAreas[0].addr + ((pAreas[0].first + (mappedOffset * pAreas[0].step)) / 8);
+ ma_device__read_frames_from_client(pDevice, mappedFrames, pBuffer);
+ }
+
+ result = ((ma_snd_pcm_mmap_commit_proc)pDevice->pContext->alsa.snd_pcm_mmap_commit)((ma_snd_pcm_t*)pDevice->alsa.pPCM, mappedOffset, mappedFrames);
+ if (result < 0 || (ma_snd_pcm_uframes_t)result != mappedFrames) {
+ ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, result, MA_TRUE);
+ return MA_FALSE;
+ }
+
+ if (requiresRestart) {
+ if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) {
+ return MA_FALSE;
+ }
+ }
+
+ if (framesAvailable >= mappedFrames) {
+ framesAvailable -= mappedFrames;
+ } else {
+ framesAvailable = 0;
+ }
+ }
+ } else {
+ /* readi/writei. */
+ while (!pDevice->alsa.breakFromMainLoop) {
+ ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, NULL);
+ if (framesAvailable == 0) {
+ continue;
+ }
+
+ /* Don't bother asking the client for more audio data if we're just stopping the device anyway. */
+ if (pDevice->alsa.breakFromMainLoop) {
+ return MA_FALSE;
+ }
+
+ ma_device__read_frames_from_client(pDevice, framesAvailable, pDevice->alsa.pIntermediaryBuffer);
+
+ ma_snd_pcm_sframes_t framesWritten = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable);
+ if (framesWritten < 0) {
+ if (framesWritten == -EAGAIN) {
+ continue; /* Just keep trying... */
+ } else if (framesWritten == -EPIPE) {
+ /* Underrun. Just recover and try writing again. */
+ if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, framesWritten, MA_TRUE) < 0) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after underrun.", MA_FAILED_TO_START_BACKEND_DEVICE);
+ return MA_FALSE;
+ }
- *ppDirectSoundCapture = NULL;
- pDirectSoundCapture = NULL;
+ framesWritten = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable);
+ if (framesWritten < 0) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to write data to the internal device.", ma_result_from_errno((int)-framesWritten));
+ return MA_FALSE;
+ }
- if (FAILED(((ma_DirectSoundCaptureCreateProc)pContext->dsound.DirectSoundCaptureCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSoundCapture, NULL))) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCaptureCreate() failed for capture device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ break; /* Success. */
+ } else {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_writei() failed when writing initial data.", ma_result_from_errno((int)-framesWritten));
+ return MA_FALSE;
+ }
+ } else {
+ break; /* Success. */
+ }
+ }
}
- *ppDirectSoundCapture = pDirectSoundCapture;
- return MA_SUCCESS;
+ return MA_TRUE;
}
-ma_result ma_context_get_format_info_for_IDirectSoundCapture__dsound(ma_context* pContext, ma_IDirectSoundCapture* pDirectSoundCapture, WORD* pChannels, WORD* pBitsPerSample, DWORD* pSampleRate)
+static ma_bool32 ma_device_read_and_send_to_client__alsa(ma_device* pDevice)
{
- MA_DSCCAPS caps;
- WORD bitsPerSample;
- DWORD sampleRate;
-
- ma_assert(pContext != NULL);
- ma_assert(pDirectSoundCapture != NULL);
-
- if (pChannels) {
- *pChannels = 0;
- }
- if (pBitsPerSample) {
- *pBitsPerSample = 0;
+ MA_ASSERT(pDevice != NULL);
+ if (!ma_device_is_started(pDevice)) {
+ return MA_FALSE;
}
- if (pSampleRate) {
- *pSampleRate = 0;
+ if (pDevice->alsa.breakFromMainLoop) {
+ return MA_FALSE;
}
- ma_zero_object(&caps);
- caps.dwSize = sizeof(caps);
- if (FAILED(ma_IDirectSoundCapture_GetCaps(pDirectSoundCapture, &caps))) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_GetCaps() failed for capture device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ ma_uint32 framesToSend = 0;
+ void* pBuffer = NULL;
+ if (pDevice->alsa.pIntermediaryBuffer == NULL) {
+ /* mmap. */
+ ma_bool32 requiresRestart;
+ ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, &requiresRestart);
+ if (framesAvailable == 0) {
+ return MA_FALSE;
+ }
- if (pChannels) {
- *pChannels = (WORD)caps.dwChannels;
- }
+ const ma_snd_pcm_channel_area_t* pAreas;
+ ma_snd_pcm_uframes_t mappedOffset;
+ ma_snd_pcm_uframes_t mappedFrames = framesAvailable;
+ while (framesAvailable > 0) {
+ int result = ((ma_snd_pcm_mmap_begin_proc)pDevice->pContext->alsa.snd_pcm_mmap_begin)((ma_snd_pcm_t*)pDevice->alsa.pPCM, &pAreas, &mappedOffset, &mappedFrames);
+ if (result < 0) {
+ return MA_FALSE;
+ }
- /* The device can support multiple formats. We just go through the different formats in order of priority and pick the first one. This the same type of system as the WinMM backend. */
- bitsPerSample = 16;
- sampleRate = 48000;
+ if (mappedFrames > 0) {
+ void* pBuffer = (ma_uint8*)pAreas[0].addr + ((pAreas[0].first + (mappedOffset * pAreas[0].step)) / 8);
+ ma_device__send_frames_to_client(pDevice, mappedFrames, pBuffer);
+ }
- if (caps.dwChannels == 1) {
- if ((caps.dwFormats & WAVE_FORMAT_48M16) != 0) {
- sampleRate = 48000;
- } else if ((caps.dwFormats & WAVE_FORMAT_44M16) != 0) {
- sampleRate = 44100;
- } else if ((caps.dwFormats & WAVE_FORMAT_2M16) != 0) {
- sampleRate = 22050;
- } else if ((caps.dwFormats & WAVE_FORMAT_1M16) != 0) {
- sampleRate = 11025;
- } else if ((caps.dwFormats & WAVE_FORMAT_96M16) != 0) {
- sampleRate = 96000;
- } else {
- bitsPerSample = 8;
- if ((caps.dwFormats & WAVE_FORMAT_48M08) != 0) {
- sampleRate = 48000;
- } else if ((caps.dwFormats & WAVE_FORMAT_44M08) != 0) {
- sampleRate = 44100;
- } else if ((caps.dwFormats & WAVE_FORMAT_2M08) != 0) {
- sampleRate = 22050;
- } else if ((caps.dwFormats & WAVE_FORMAT_1M08) != 0) {
- sampleRate = 11025;
- } else if ((caps.dwFormats & WAVE_FORMAT_96M08) != 0) {
- sampleRate = 96000;
+ result = ((ma_snd_pcm_mmap_commit_proc)pDevice->pContext->alsa.snd_pcm_mmap_commit)((ma_snd_pcm_t*)pDevice->alsa.pPCM, mappedOffset, mappedFrames);
+ if (result < 0 || (ma_snd_pcm_uframes_t)result != mappedFrames) {
+ ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, result, MA_TRUE);
+ return MA_FALSE;
+ }
+
+ if (requiresRestart) {
+ if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) {
+ return MA_FALSE;
+ }
+ }
+
+ if (framesAvailable >= mappedFrames) {
+ framesAvailable -= mappedFrames;
} else {
- bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */
+ framesAvailable = 0;
}
}
- } else if (caps.dwChannels == 2) {
- if ((caps.dwFormats & WAVE_FORMAT_48S16) != 0) {
- sampleRate = 48000;
- } else if ((caps.dwFormats & WAVE_FORMAT_44S16) != 0) {
- sampleRate = 44100;
- } else if ((caps.dwFormats & WAVE_FORMAT_2S16) != 0) {
- sampleRate = 22050;
- } else if ((caps.dwFormats & WAVE_FORMAT_1S16) != 0) {
- sampleRate = 11025;
- } else if ((caps.dwFormats & WAVE_FORMAT_96S16) != 0) {
- sampleRate = 96000;
- } else {
- bitsPerSample = 8;
- if ((caps.dwFormats & WAVE_FORMAT_48S08) != 0) {
- sampleRate = 48000;
- } else if ((caps.dwFormats & WAVE_FORMAT_44S08) != 0) {
- sampleRate = 44100;
- } else if ((caps.dwFormats & WAVE_FORMAT_2S08) != 0) {
- sampleRate = 22050;
- } else if ((caps.dwFormats & WAVE_FORMAT_1S08) != 0) {
- sampleRate = 11025;
- } else if ((caps.dwFormats & WAVE_FORMAT_96S08) != 0) {
- sampleRate = 96000;
+ } else {
+ /* readi/writei. */
+ ma_snd_pcm_sframes_t framesRead = 0;
+ while (!pDevice->alsa.breakFromMainLoop) {
+ ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, NULL);
+ if (framesAvailable == 0) {
+ continue;
+ }
+
+ framesRead = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable);
+ if (framesRead < 0) {
+ if (framesRead == -EAGAIN) {
+ continue; /* Just keep trying... */
+ } else if (framesRead == -EPIPE) {
+ /* Overrun. Just recover and try reading again. */
+ if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, framesRead, MA_TRUE) < 0) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after overrun.", MA_FAILED_TO_START_BACKEND_DEVICE);
+ return MA_FALSE;
+ }
+
+ framesRead = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable);
+ if (framesRead < 0) {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to read data from the internal device.", ma_result_from_errno((int)-framesRead));
+ return MA_FALSE;
+ }
+
+ break; /* Success. */
+ } else {
+ return MA_FALSE;
+ }
} else {
- bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */
+ break; /* Success. */
}
}
- }
- if (pBitsPerSample) {
- *pBitsPerSample = bitsPerSample;
- }
- if (pSampleRate) {
- *pSampleRate = sampleRate;
+ framesToSend = framesRead;
+ pBuffer = pDevice->alsa.pIntermediaryBuffer;
}
- return MA_SUCCESS;
-}
-
-ma_bool32 ma_context_is_device_id_equal__dsound(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
-{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
+ if (framesToSend > 0) {
+ ma_device__send_frames_to_client(pDevice, framesToSend, pBuffer);
+ }
- return memcmp(pID0->dsound, pID1->dsound, sizeof(pID0->dsound)) == 0;
+ return MA_TRUE;
}
+#endif /* 0 */
-
-typedef struct
-{
- ma_context* pContext;
- ma_device_type deviceType;
- ma_enum_devices_callback_proc callback;
- void* pUserData;
- ma_bool32 terminated;
-} ma_context_enumerate_devices_callback_data__dsound;
-
-BOOL CALLBACK ma_context_enumerate_devices_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext)
+static void ma_device_uninit__alsa(ma_device* pDevice)
{
- ma_context_enumerate_devices_callback_data__dsound* pData = (ma_context_enumerate_devices_callback_data__dsound*)lpContext;
- ma_device_info deviceInfo;
+ MA_ASSERT(pDevice != NULL);
- ma_zero_object(&deviceInfo);
-
- /* ID. */
- if (lpGuid != NULL) {
- ma_copy_memory(deviceInfo.id.dsound, lpGuid, 16);
- } else {
- ma_zero_memory(deviceInfo.id.dsound, 16);
+ if ((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) {
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
}
- /* Name / Description */
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), lpcstrDescription, (size_t)-1);
-
-
- /* Call the callback function, but make sure we stop enumerating if the callee requested so. */
- ma_assert(pData != NULL);
- pData->terminated = !pData->callback(pData->pContext, pData->deviceType, &deviceInfo, pData->pUserData);
- if (pData->terminated) {
- return FALSE; /* Stop enumeration. */
- } else {
- return TRUE; /* Continue enumeration. */
+ if ((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) {
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
}
-
- (void)lpcstrModule;
}
-ma_result ma_context_enumerate_devices__dsound(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+static ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice)
{
- ma_context_enumerate_devices_callback_data__dsound data;
+ ma_result result;
+ int resultALSA;
+ ma_snd_pcm_t* pPCM;
+ ma_bool32 isUsingMMap;
+ ma_snd_pcm_format_t formatALSA;
+ ma_share_mode shareMode;
+ ma_device_id* pDeviceID;
+ ma_format internalFormat;
+ ma_uint32 internalChannels;
+ ma_uint32 internalSampleRate;
+ ma_channel internalChannelMap[MA_MAX_CHANNELS];
+ ma_uint32 internalPeriodSizeInFrames;
+ ma_uint32 internalPeriods;
+ ma_snd_pcm_hw_params_t* pHWParams;
+ ma_snd_pcm_sw_params_t* pSWParams;
+ ma_snd_pcm_uframes_t bufferBoundary;
+ float bufferSizeScaleFactor;
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should only be called for playback _or_ capture, never duplex. */
+ MA_ASSERT(pDevice != NULL);
- data.pContext = pContext;
- data.callback = callback;
- data.pUserData = pUserData;
- data.terminated = MA_FALSE;
+ formatALSA = ma_convert_ma_format_to_alsa_format((deviceType == ma_device_type_capture) ? pConfig->capture.format : pConfig->playback.format);
+ shareMode = (deviceType == ma_device_type_capture) ? pConfig->capture.shareMode : pConfig->playback.shareMode;
+ pDeviceID = (deviceType == ma_device_type_capture) ? pConfig->capture.pDeviceID : pConfig->playback.pDeviceID;
- /* Playback. */
- if (!data.terminated) {
- data.deviceType = ma_device_type_playback;
- ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data);
+ result = ma_context_open_pcm__alsa(pContext, shareMode, deviceType, pDeviceID, &pPCM);
+ if (result != MA_SUCCESS) {
+ return result;
}
- /* Capture. */
- if (!data.terminated) {
- data.deviceType = ma_device_type_capture;
- ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data);
- }
+ /* If using the default buffer size we may want to apply some device-specific scaling for known devices that have peculiar latency characteristics */
+ bufferSizeScaleFactor = 1;
+ if (pDevice->usingDefaultBufferSize) {
+ ma_snd_pcm_info_t* pInfo = (ma_snd_pcm_info_t*)ma__calloc_from_callbacks(((ma_snd_pcm_info_sizeof_proc)pContext->alsa.snd_pcm_info_sizeof)(), &pContext->allocationCallbacks);
+ if (pInfo == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
+
+ /* We may need to scale the size of the buffer depending on the device. */
+ if (((ma_snd_pcm_info_proc)pContext->alsa.snd_pcm_info)(pPCM, pInfo) == 0) {
+ const char* deviceName = ((ma_snd_pcm_info_get_name_proc)pContext->alsa.snd_pcm_info_get_name)(pInfo);
+ if (deviceName != NULL) {
+ if (ma_strcmp(deviceName, "default") == 0) {
+ char** ppDeviceHints;
+ char** ppNextDeviceHint;
- return MA_SUCCESS;
-}
+ /* It's the default device. We need to use DESC from snd_device_name_hint(). */
+ if (((ma_snd_device_name_hint_proc)pContext->alsa.snd_device_name_hint)(-1, "pcm", (void***)&ppDeviceHints) < 0) {
+ ma__free_from_callbacks(pInfo, &pContext->allocationCallbacks);
+ return MA_NO_BACKEND;
+ }
+ ppNextDeviceHint = ppDeviceHints;
+ while (*ppNextDeviceHint != NULL) {
+ char* NAME = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "NAME");
+ char* DESC = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "DESC");
+ char* IOID = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "IOID");
-typedef struct
-{
- const ma_device_id* pDeviceID;
- ma_device_info* pDeviceInfo;
- ma_bool32 found;
-} ma_context_get_device_info_callback_data__dsound;
+ ma_bool32 foundDevice = MA_FALSE;
+ if ((deviceType == ma_device_type_playback && (IOID == NULL || ma_strcmp(IOID, "Output") == 0)) ||
+ (deviceType == ma_device_type_capture && (IOID != NULL && ma_strcmp(IOID, "Input" ) == 0))) {
+ if (ma_strcmp(NAME, deviceName) == 0) {
+ bufferSizeScaleFactor = ma_find_default_buffer_size_scale__alsa(DESC);
+ foundDevice = MA_TRUE;
+ }
+ }
-BOOL CALLBACK ma_context_get_device_info_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext)
-{
- ma_context_get_device_info_callback_data__dsound* pData = (ma_context_get_device_info_callback_data__dsound*)lpContext;
- ma_assert(pData != NULL);
+ free(NAME);
+ free(DESC);
+ free(IOID);
+ ppNextDeviceHint += 1;
- if ((pData->pDeviceID == NULL || ma_is_guid_equal(pData->pDeviceID->dsound, &MA_GUID_NULL)) && (lpGuid == NULL || ma_is_guid_equal(lpGuid, &MA_GUID_NULL))) {
- /* Default device. */
- ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1);
- pData->found = MA_TRUE;
- return FALSE; /* Stop enumeration. */
- } else {
- /* Not the default device. */
- if (lpGuid != NULL) {
- if (memcmp(pData->pDeviceID->dsound, lpGuid, sizeof(pData->pDeviceID->dsound)) == 0) {
- ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1);
- pData->found = MA_TRUE;
- return FALSE; /* Stop enumeration. */
+ if (foundDevice) {
+ break;
+ }
+ }
+
+ ((ma_snd_device_name_free_hint_proc)pContext->alsa.snd_device_name_free_hint)((void**)ppDeviceHints);
+ } else {
+ bufferSizeScaleFactor = ma_find_default_buffer_size_scale__alsa(deviceName);
+ }
}
}
- }
-
- (void)lpcstrModule;
- return TRUE;
-}
-ma_result ma_context_get_device_info__dsound(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
-{
- /* Exclusive mode and capture not supported with DirectSound. */
- if (deviceType == ma_device_type_capture && shareMode == ma_share_mode_exclusive) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
+ ma__free_from_callbacks(pInfo, &pContext->allocationCallbacks);
}
- if (pDeviceID != NULL) {
- ma_context_get_device_info_callback_data__dsound data;
- /* ID. */
- ma_copy_memory(pDeviceInfo->id.dsound, pDeviceID->dsound, 16);
+ /* Hardware parameters. */
+ pHWParams = (ma_snd_pcm_hw_params_t*)ma__calloc_from_callbacks(((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)(), &pContext->allocationCallbacks);
+ if (pHWParams == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
- /* Name / Description. This is retrieved by enumerating over each device until we find that one that matches the input ID. */
- data.pDeviceID = pDeviceID;
- data.pDeviceInfo = pDeviceInfo;
- data.found = MA_FALSE;
- if (deviceType == ma_device_type_playback) {
- ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_get_device_info_callback__dsound, &data);
- } else {
- ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_get_device_info_callback__dsound, &data);
- }
+ resultALSA = ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. snd_pcm_hw_params_any() failed.", ma_result_from_errno(-resultALSA));
+ }
- if (!data.found) {
- return MA_NO_DEVICE;
+ /* MMAP Mode. Try using interleaved MMAP access. If this fails, fall back to standard readi/writei. */
+ isUsingMMap = MA_FALSE;
+#if 0 /* NOTE: MMAP mode temporarily disabled. */
+ if (deviceType != ma_device_type_capture) { /* <-- Disabling MMAP mode for capture devices because I apparently do not have a device that supports it which means I can't test it... Contributions welcome. */
+ if (!pConfig->alsa.noMMap && ma_device__is_async(pDevice)) {
+ if (((ma_snd_pcm_hw_params_set_access_proc)pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_MMAP_INTERLEAVED) == 0) {
+ pDevice->alsa.isUsingMMap = MA_TRUE;
+ }
}
- } else {
- /* I don't think there's a way to get the name of the default device with DirectSound. In this case we just need to use defaults. */
-
- /* ID */
- ma_zero_memory(pDeviceInfo->id.dsound, 16);
+ }
+#endif
- /* Name / Description */
- if (deviceType == ma_device_type_playback) {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- } else {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ if (!isUsingMMap) {
+ resultALSA = ((ma_snd_pcm_hw_params_set_access_proc)pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_RW_INTERLEAVED);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set access mode to neither SND_PCM_ACCESS_MMAP_INTERLEAVED nor SND_PCM_ACCESS_RW_INTERLEAVED. snd_pcm_hw_params_set_access() failed.", ma_result_from_errno(-resultALSA));
}
}
- /* Retrieving detailed information is slightly different depending on the device type. */
- if (deviceType == ma_device_type_playback) {
- /* Playback. */
- ma_IDirectSound* pDirectSound;
- ma_result result;
- MA_DSCAPS caps;
- ma_uint32 iFormat;
+ /*
+ Most important properties first. The documentation for OSS (yes, I know this is ALSA!) recommends format, channels, then sample rate. I can't
+ find any documentation for ALSA specifically, so I'm going to copy the recommendation for OSS.
+ */
- result = ma_context_create_IDirectSound__dsound(pContext, shareMode, pDeviceID, &pDirectSound);
- if (result != MA_SUCCESS) {
- return result;
- }
+ /* Format. */
+ {
+ ma_snd_pcm_format_mask_t* pFormatMask;
- ma_zero_object(&caps);
- caps.dwSize = sizeof(caps);
- if (FAILED(ma_IDirectSound_GetCaps(pDirectSound, &caps))) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ /* Try getting every supported format first. */
+ pFormatMask = (ma_snd_pcm_format_mask_t*)ma__calloc_from_callbacks(((ma_snd_pcm_format_mask_sizeof_proc)pContext->alsa.snd_pcm_format_mask_sizeof)(), &pContext->allocationCallbacks);
+ if (pFormatMask == NULL) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return MA_OUT_OF_MEMORY;
}
- if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) {
- /* It supports at least stereo, but could support more. */
- WORD channels = 2;
-
- /* Look at the speaker configuration to get a better idea on the channel count. */
- DWORD speakerConfig;
- if (SUCCEEDED(ma_IDirectSound_GetSpeakerConfig(pDirectSound, &speakerConfig))) {
- ma_get_channels_from_speaker_config__dsound(speakerConfig, &channels, NULL);
- }
+ ((ma_snd_pcm_hw_params_get_format_mask_proc)pContext->alsa.snd_pcm_hw_params_get_format_mask)(pHWParams, pFormatMask);
- pDeviceInfo->minChannels = channels;
- pDeviceInfo->maxChannels = channels;
- } else {
- /* It does not support stereo, which means we are stuck with mono. */
- pDeviceInfo->minChannels = 1;
- pDeviceInfo->maxChannels = 1;
- }
+ /*
+ At this point we should have a list of supported formats, so now we need to find the best one. We first check if the requested format is
+ supported, and if so, use that one. If it's not supported, we just run though a list of formats and try to find the best one.
+ */
+ if (!((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, formatALSA)) {
+ size_t i;
- /* Sample rate. */
- if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) {
- pDeviceInfo->minSampleRate = caps.dwMinSecondarySampleRate;
- pDeviceInfo->maxSampleRate = caps.dwMaxSecondarySampleRate;
+ /* The requested format is not supported so now try running through the list of formats and return the best one. */
+ ma_snd_pcm_format_t preferredFormatsALSA[] = {
+ MA_SND_PCM_FORMAT_S16_LE, /* ma_format_s16 */
+ MA_SND_PCM_FORMAT_FLOAT_LE, /* ma_format_f32 */
+ MA_SND_PCM_FORMAT_S32_LE, /* ma_format_s32 */
+ MA_SND_PCM_FORMAT_S24_3LE, /* ma_format_s24 */
+ MA_SND_PCM_FORMAT_U8 /* ma_format_u8 */
+ };
- /*
- On my machine the min and max sample rates can return 100 and 200000 respectively. I'd rather these be within
- the range of our standard sample rates so I'm clamping.
- */
- if (caps.dwMinSecondarySampleRate < MA_MIN_SAMPLE_RATE && caps.dwMaxSecondarySampleRate >= MA_MIN_SAMPLE_RATE) {
- pDeviceInfo->minSampleRate = MA_MIN_SAMPLE_RATE;
+ if (ma_is_big_endian()) {
+ preferredFormatsALSA[0] = MA_SND_PCM_FORMAT_S16_BE;
+ preferredFormatsALSA[1] = MA_SND_PCM_FORMAT_FLOAT_BE;
+ preferredFormatsALSA[2] = MA_SND_PCM_FORMAT_S32_BE;
+ preferredFormatsALSA[3] = MA_SND_PCM_FORMAT_S24_3BE;
+ preferredFormatsALSA[4] = MA_SND_PCM_FORMAT_U8;
}
- if (caps.dwMaxSecondarySampleRate > MA_MAX_SAMPLE_RATE && caps.dwMinSecondarySampleRate <= MA_MAX_SAMPLE_RATE) {
- pDeviceInfo->maxSampleRate = MA_MAX_SAMPLE_RATE;
+
+ formatALSA = MA_SND_PCM_FORMAT_UNKNOWN;
+ for (i = 0; i < (sizeof(preferredFormatsALSA) / sizeof(preferredFormatsALSA[0])); ++i) {
+ if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, preferredFormatsALSA[i])) {
+ formatALSA = preferredFormatsALSA[i];
+ break;
+ }
}
- } else {
- /* Only supports a single sample rate. Set both min an max to the same thing. Do not clamp within the standard rates. */
- pDeviceInfo->minSampleRate = caps.dwMaxSecondarySampleRate;
- pDeviceInfo->maxSampleRate = caps.dwMaxSecondarySampleRate;
- }
- /* DirectSound can support all formats. */
- pDeviceInfo->formatCount = ma_format_count - 1; /* Minus one because we don't want to include ma_format_unknown. */
- for (iFormat = 0; iFormat < pDeviceInfo->formatCount; ++iFormat) {
- pDeviceInfo->formats[iFormat] = (ma_format)(iFormat + 1); /* +1 to skip over ma_format_unknown. */
+ if (formatALSA == MA_SND_PCM_FORMAT_UNKNOWN) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. The device does not support any miniaudio formats.", MA_FORMAT_NOT_SUPPORTED);
+ }
}
- ma_IDirectSound_Release(pDirectSound);
- } else {
- /*
- Capture. This is a little different to playback due to the say the supported formats are reported. Technically capture
- devices can support a number of different formats, but for simplicity and consistency with ma_device_init() I'm just
- reporting the best format.
- */
- ma_IDirectSoundCapture* pDirectSoundCapture;
- ma_result result;
- WORD channels;
- WORD bitsPerSample;
- DWORD sampleRate;
-
- result = ma_context_create_IDirectSoundCapture__dsound(pContext, shareMode, pDeviceID, &pDirectSoundCapture);
- if (result != MA_SUCCESS) {
- return result;
- }
+ ma__free_from_callbacks(pFormatMask, &pContext->allocationCallbacks);
+ pFormatMask = NULL;
- result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pContext, pDirectSoundCapture, &channels, &bitsPerSample, &sampleRate);
- if (result != MA_SUCCESS) {
- ma_IDirectSoundCapture_Release(pDirectSoundCapture);
- return result;
+ resultALSA = ((ma_snd_pcm_hw_params_set_format_proc)pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, formatALSA);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. snd_pcm_hw_params_set_format() failed.", ma_result_from_errno(-resultALSA));
}
-
- pDeviceInfo->minChannels = channels;
- pDeviceInfo->maxChannels = channels;
- pDeviceInfo->minSampleRate = sampleRate;
- pDeviceInfo->maxSampleRate = sampleRate;
- pDeviceInfo->formatCount = 1;
- if (bitsPerSample == 8) {
- pDeviceInfo->formats[0] = ma_format_u8;
- } else if (bitsPerSample == 16) {
- pDeviceInfo->formats[0] = ma_format_s16;
- } else if (bitsPerSample == 24) {
- pDeviceInfo->formats[0] = ma_format_s24;
- } else if (bitsPerSample == 32) {
- pDeviceInfo->formats[0] = ma_format_s32;
- } else {
- ma_IDirectSoundCapture_Release(pDirectSoundCapture);
- return MA_FORMAT_NOT_SUPPORTED;
+
+ internalFormat = ma_format_from_alsa(formatALSA);
+ if (internalFormat == ma_format_unknown) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] The chosen format is not supported by miniaudio.", MA_FORMAT_NOT_SUPPORTED);
}
-
- ma_IDirectSoundCapture_Release(pDirectSoundCapture);
}
- return MA_SUCCESS;
-}
-
-
-typedef struct
-{
- ma_uint32 deviceCount;
- ma_uint32 infoCount;
- ma_device_info* pInfo;
-} ma_device_enum_data__dsound;
-
-BOOL CALLBACK ma_enum_devices_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext)
-{
- ma_device_enum_data__dsound* pData = (ma_device_enum_data__dsound*)lpContext;
- ma_assert(pData != NULL);
+ /* Channels. */
+ {
+ unsigned int channels = (deviceType == ma_device_type_capture) ? pConfig->capture.channels : pConfig->playback.channels;
+ resultALSA = ((ma_snd_pcm_hw_params_set_channels_near_proc)pContext->alsa.snd_pcm_hw_params_set_channels_near)(pPCM, pHWParams, &channels);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set channel count. snd_pcm_hw_params_set_channels_near() failed.", ma_result_from_errno(-resultALSA));
+ }
+ internalChannels = (ma_uint32)channels;
+ }
- if (pData->pInfo != NULL) {
- if (pData->infoCount > 0) {
- ma_zero_object(pData->pInfo);
- ma_strncpy_s(pData->pInfo->name, sizeof(pData->pInfo->name), lpcstrDescription, (size_t)-1);
+ /* Sample Rate */
+ {
+ unsigned int sampleRate;
- if (lpGuid != NULL) {
- ma_copy_memory(pData->pInfo->id.dsound, lpGuid, 16);
- } else {
- ma_zero_memory(pData->pInfo->id.dsound, 16);
- }
+ /*
+ It appears there's either a bug in ALSA, a bug in some drivers, or I'm doing something silly; but having resampling enabled causes
+ problems with some device configurations when used in conjunction with MMAP access mode. To fix this problem we need to disable
+ resampling.
+
+ To reproduce this problem, open the "plug:dmix" device, and set the sample rate to 44100. Internally, it looks like dmix uses a
+ sample rate of 48000. The hardware parameters will get set correctly with no errors, but it looks like the 44100 -> 48000 resampling
+ doesn't work properly - but only with MMAP access mode. You will notice skipping/crackling in the audio, and it'll run at a slightly
+ faster rate.
+
+ miniaudio has built-in support for sample rate conversion (albeit low quality at the moment), so disabling resampling should be fine
+ for us. The only problem is that it won't be taking advantage of any kind of hardware-accelerated resampling and it won't be very
+ good quality until I get a chance to improve the quality of miniaudio's software sample rate conversion.
+
+ I don't currently know if the dmix plugin is the only one with this error. Indeed, this is the only one I've been able to reproduce
+ this error with. In the future, we may want to restrict the disabling of resampling to only known bad plugins.
+ */
+ ((ma_snd_pcm_hw_params_set_rate_resample_proc)pContext->alsa.snd_pcm_hw_params_set_rate_resample)(pPCM, pHWParams, 0);
- pData->pInfo += 1;
- pData->infoCount -= 1;
- pData->deviceCount += 1;
+ sampleRate = pConfig->sampleRate;
+ resultALSA = ((ma_snd_pcm_hw_params_set_rate_near_proc)pContext->alsa.snd_pcm_hw_params_set_rate_near)(pPCM, pHWParams, &sampleRate, 0);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Sample rate not supported. snd_pcm_hw_params_set_rate_near() failed.", ma_result_from_errno(-resultALSA));
}
- } else {
- pData->deviceCount += 1;
+ internalSampleRate = (ma_uint32)sampleRate;
}
- (void)lpcstrModule;
- return TRUE;
-}
+ /* Periods. */
+ {
+ ma_uint32 periods = pConfig->periods;
+ resultALSA = ((ma_snd_pcm_hw_params_set_periods_near_proc)pContext->alsa.snd_pcm_hw_params_set_periods_near)(pPCM, pHWParams, &periods, NULL);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set period count. snd_pcm_hw_params_set_periods_near() failed.", ma_result_from_errno(-resultALSA));
+ }
+ internalPeriods = periods;
+ }
-void ma_device_uninit__dsound(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
+ /* Buffer Size */
+ {
+ ma_snd_pcm_uframes_t actualBufferSizeInFrames = pConfig->periodSizeInFrames * internalPeriods;
+ if (actualBufferSizeInFrames == 0) {
+ actualBufferSizeInFrames = ma_scale_buffer_size(ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, internalSampleRate), bufferSizeScaleFactor) * internalPeriods;
+ }
- if (pDevice->dsound.pCaptureBuffer != NULL) {
- ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
- }
- if (pDevice->dsound.pCapture != NULL) {
- ma_IDirectSoundCapture_Release((ma_IDirectSoundCapture*)pDevice->dsound.pCapture);
+ resultALSA = ((ma_snd_pcm_hw_params_set_buffer_size_near_proc)pContext->alsa.snd_pcm_hw_params_set_buffer_size_near)(pPCM, pHWParams, &actualBufferSizeInFrames);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set buffer size for device. snd_pcm_hw_params_set_buffer_size() failed.", ma_result_from_errno(-resultALSA));
+ }
+ internalPeriodSizeInFrames = actualBufferSizeInFrames / internalPeriods;
}
- if (pDevice->dsound.pPlaybackBuffer != NULL) {
- ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer);
- }
- if (pDevice->dsound.pPlaybackPrimaryBuffer != NULL) {
- ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer);
- }
- if (pDevice->dsound.pPlayback != NULL) {
- ma_IDirectSound_Release((ma_IDirectSound*)pDevice->dsound.pPlayback);
+ /* Apply hardware parameters. */
+ resultALSA = ((ma_snd_pcm_hw_params_proc)pContext->alsa.snd_pcm_hw_params)(pPCM, pHWParams);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set hardware parameters. snd_pcm_hw_params() failed.", ma_result_from_errno(-resultALSA));
}
-}
-ma_result ma_config_to_WAVEFORMATEXTENSIBLE(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* pChannelMap, WAVEFORMATEXTENSIBLE* pWF)
-{
- GUID subformat;
+ ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks);
+ pHWParams = NULL;
- switch (format)
- {
- case ma_format_u8:
- case ma_format_s16:
- case ma_format_s24:
- /*case ma_format_s24_32:*/
- case ma_format_s32:
- {
- subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;
- } break;
- case ma_format_f32:
- {
- subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
- } break;
+ /* Software parameters. */
+ pSWParams = (ma_snd_pcm_sw_params_t*)ma__calloc_from_callbacks(((ma_snd_pcm_sw_params_sizeof_proc)pContext->alsa.snd_pcm_sw_params_sizeof)(), &pContext->allocationCallbacks);
+ if (pSWParams == NULL) {
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return MA_OUT_OF_MEMORY;
+ }
- default:
- return MA_FORMAT_NOT_SUPPORTED;
+ resultALSA = ((ma_snd_pcm_sw_params_current_proc)pContext->alsa.snd_pcm_sw_params_current)(pPCM, pSWParams);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize software parameters. snd_pcm_sw_params_current() failed.", ma_result_from_errno(-resultALSA));
}
- ma_zero_object(pWF);
- pWF->Format.cbSize = sizeof(*pWF);
- pWF->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
- pWF->Format.nChannels = (WORD)channels;
- pWF->Format.nSamplesPerSec = (DWORD)sampleRate;
- pWF->Format.wBitsPerSample = (WORD)ma_get_bytes_per_sample(format)*8;
- pWF->Format.nBlockAlign = (pWF->Format.nChannels * pWF->Format.wBitsPerSample) / 8;
- pWF->Format.nAvgBytesPerSec = pWF->Format.nBlockAlign * pWF->Format.nSamplesPerSec;
- pWF->Samples.wValidBitsPerSample = pWF->Format.wBitsPerSample;
- pWF->dwChannelMask = ma_channel_map_to_channel_mask__win32(pChannelMap, channels);
- pWF->SubFormat = subformat;
+ resultALSA = ((ma_snd_pcm_sw_params_set_avail_min_proc)pContext->alsa.snd_pcm_sw_params_set_avail_min)(pPCM, pSWParams, ma_prev_power_of_2(internalPeriodSizeInFrames));
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_sw_params_set_avail_min() failed.", ma_result_from_errno(-resultALSA));
+ }
- return MA_SUCCESS;
-}
+ resultALSA = ((ma_snd_pcm_sw_params_get_boundary_proc)pContext->alsa.snd_pcm_sw_params_get_boundary)(pSWParams, &bufferBoundary);
+ if (resultALSA < 0) {
+ bufferBoundary = internalPeriodSizeInFrames * internalPeriods;
+ }
-ma_result ma_device_init__dsound(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
-{
- ma_result result;
- ma_uint32 bufferSizeInMilliseconds;
+ /*printf("TRACE: bufferBoundary=%ld\n", bufferBoundary);*/
- ma_assert(pDevice != NULL);
- ma_zero_object(&pDevice->dsound);
+ if (deviceType == ma_device_type_playback && !isUsingMMap) { /* Only playback devices in writei/readi mode need a start threshold. */
+ /*
+ Subtle detail here with the start threshold. When in playback-only mode (no full-duplex) we can set the start threshold to
+ the size of a period. But for full-duplex we need to set it such that it is at least two periods.
+ */
+ resultALSA = ((ma_snd_pcm_sw_params_set_start_threshold_proc)pContext->alsa.snd_pcm_sw_params_set_start_threshold)(pPCM, pSWParams, internalPeriodSizeInFrames*2);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set start threshold for playback device. snd_pcm_sw_params_set_start_threshold() failed.", ma_result_from_errno(-resultALSA));
+ }
- bufferSizeInMilliseconds = pConfig->bufferSizeInMilliseconds;
- if (bufferSizeInMilliseconds == 0) {
- bufferSizeInMilliseconds = ma_calculate_buffer_size_in_milliseconds_from_frames(pConfig->bufferSizeInFrames, pConfig->sampleRate);
- }
-
- /* DirectSound should use a latency of about 20ms per period for low latency mode. */
- if (pDevice->usingDefaultBufferSize) {
- if (pConfig->performanceProfile == ma_performance_profile_low_latency) {
- bufferSizeInMilliseconds = 20 * pConfig->periods;
- } else {
- bufferSizeInMilliseconds = 200 * pConfig->periods;
+        resultALSA = ((ma_snd_pcm_sw_params_set_stop_threshold_proc)pContext->alsa.snd_pcm_sw_params_set_stop_threshold)(pPCM, pSWParams, bufferBoundary); /* Set to the boundary so an xrun loops instead of stopping the device. */
+        if (resultALSA < 0) {
+ ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set stop threshold for playback device. snd_pcm_sw_params_set_stop_threshold() failed.", ma_result_from_errno(-resultALSA));
}
}
- /* DirectSound breaks down with tiny buffer sizes (bad glitching and silent output). I am therefore restricting the size of the buffer to a minimum of 20 milliseconds. */
- if ((bufferSizeInMilliseconds/pConfig->periods) < 20) {
- bufferSizeInMilliseconds = pConfig->periods * 20;
+ resultALSA = ((ma_snd_pcm_sw_params_proc)pContext->alsa.snd_pcm_sw_params)(pPCM, pSWParams);
+ if (resultALSA < 0) {
+ ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks);
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set software parameters. snd_pcm_sw_params() failed.", ma_result_from_errno(-resultALSA));
}
- /*
- Unfortunately DirectSound uses different APIs and data structures for playback and catpure devices. We need to initialize
- the capture device first because we'll want to match it's buffer size and period count on the playback side if we're using
- full-duplex mode.
- */
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- WAVEFORMATEXTENSIBLE wf;
- MA_DSCBUFFERDESC descDS;
- ma_uint32 bufferSizeInFrames;
- char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. */
- WAVEFORMATEXTENSIBLE* pActualFormat;
+ ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks);
+ pSWParams = NULL;
- result = ma_config_to_WAVEFORMATEXTENSIBLE(pConfig->capture.format, pConfig->capture.channels, pConfig->sampleRate, pConfig->capture.channelMap, &wf);
- if (result != MA_SUCCESS) {
- return result;
- }
- result = ma_context_create_IDirectSoundCapture__dsound(pContext, pConfig->capture.shareMode, pConfig->capture.pDeviceID, (ma_IDirectSoundCapture**)&pDevice->dsound.pCapture);
- if (result != MA_SUCCESS) {
- ma_device_uninit__dsound(pDevice);
- return result;
- }
+ /* Grab the internal channel map. For now we're not going to bother trying to change the channel map and instead just do it ourselves. */
+ {
+ ma_snd_pcm_chmap_t* pChmap = ((ma_snd_pcm_get_chmap_proc)pContext->alsa.snd_pcm_get_chmap)(pPCM);
+ if (pChmap != NULL) {
+ ma_uint32 iChannel;
- result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pContext, (ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &wf.Format.nChannels, &wf.Format.wBitsPerSample, &wf.Format.nSamplesPerSec);
- if (result != MA_SUCCESS) {
- ma_device_uninit__dsound(pDevice);
- return result;
- }
+ /* There are cases where the returned channel map can have a different channel count than was returned by snd_pcm_hw_params_set_channels_near(). */
+ if (pChmap->channels >= internalChannels) {
+ /* Drop excess channels. */
+ for (iChannel = 0; iChannel < internalChannels; ++iChannel) {
+ internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]);
+ }
+ } else {
+ ma_uint32 i;
- wf.Format.nBlockAlign = (wf.Format.nChannels * wf.Format.wBitsPerSample) / 8;
- wf.Format.nAvgBytesPerSec = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec;
- wf.Samples.wValidBitsPerSample = wf.Format.wBitsPerSample;
- wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;
+ /*
+ Excess channels use defaults. Do an initial fill with defaults, overwrite the first pChmap->channels, validate to ensure there are no duplicate
+ channels. If validation fails, fall back to defaults.
+ */
+ ma_bool32 isValid = MA_TRUE;
- /* The size of the buffer must be a clean multiple of the period count. */
- bufferSizeInFrames = (ma_calculate_buffer_size_in_frames_from_milliseconds(bufferSizeInMilliseconds, wf.Format.nSamplesPerSec) / pConfig->periods) * pConfig->periods;
+ /* Fill with defaults. */
+ ma_get_standard_channel_map(ma_standard_channel_map_alsa, internalChannels, internalChannelMap);
- ma_zero_object(&descDS);
- descDS.dwSize = sizeof(descDS);
- descDS.dwFlags = 0;
- descDS.dwBufferBytes = bufferSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, wf.Format.nChannels);
- descDS.lpwfxFormat = (WAVEFORMATEX*)&wf;
- if (FAILED(ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL))) {
- ma_device_uninit__dsound(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_CreateCaptureBuffer() failed for capture device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ /* Overwrite first pChmap->channels channels. */
+ for (iChannel = 0; iChannel < pChmap->channels; ++iChannel) {
+ internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]);
+ }
- /* Get the _actual_ properties of the buffer. */
- pActualFormat = (WAVEFORMATEXTENSIBLE*)rawdata;
- if (FAILED(ma_IDirectSoundCaptureBuffer_GetFormat((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, (WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL))) {
- ma_device_uninit__dsound(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the capture device's buffer.", MA_FORMAT_NOT_SUPPORTED);
- }
+ /* Validate. */
+ for (i = 0; i < internalChannels && isValid; ++i) {
+ ma_uint32 j;
+ for (j = i+1; j < internalChannels; ++j) {
+ if (internalChannelMap[i] == internalChannelMap[j]) {
+ isValid = MA_FALSE;
+ break;
+ }
+ }
+ }
- pDevice->capture.internalFormat = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)pActualFormat);
- pDevice->capture.internalChannels = pActualFormat->Format.nChannels;
- pDevice->capture.internalSampleRate = pActualFormat->Format.nSamplesPerSec;
+ /* If our channel map is invalid, fall back to defaults. */
+ if (!isValid) {
+ ma_get_standard_channel_map(ma_standard_channel_map_alsa, internalChannels, internalChannelMap);
+ }
+ }
- /* Get the internal channel map based on the channel mask. */
- if (pActualFormat->Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
- ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
+ free(pChmap);
+ pChmap = NULL;
} else {
- ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
+ /* Could not retrieve the channel map. Fall back to a hard-coded assumption. */
+ ma_get_standard_channel_map(ma_standard_channel_map_alsa, internalChannels, internalChannelMap);
}
+ }
- /*
- After getting the actual format the size of the buffer in frames may have actually changed. However, we want this to be as close to what the
- user has asked for as possible, so let's go ahead and release the old capture buffer and create a new one in this case.
- */
- if (bufferSizeInFrames != (descDS.dwBufferBytes / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels))) {
- descDS.dwBufferBytes = bufferSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, wf.Format.nChannels);
- ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
- if (FAILED(ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL))) {
- ma_device_uninit__dsound(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Second attempt at IDirectSoundCapture_CreateCaptureBuffer() failed for capture device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
- }
+ /* We're done. Prepare the device. */
+ resultALSA = ((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)(pPCM);
+ if (resultALSA < 0) {
+ ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to prepare device.", ma_result_from_errno(-resultALSA));
+ }
- /* DirectSound should give us a buffer exactly the size we asked for. */
- pDevice->capture.internalBufferSizeInFrames = bufferSizeInFrames;
- pDevice->capture.internalPeriods = pConfig->periods;
+
+ if (deviceType == ma_device_type_capture) {
+ pDevice->alsa.pPCMCapture = (ma_ptr)pPCM;
+ pDevice->alsa.isUsingMMapCapture = isUsingMMap;
+ pDevice->capture.internalFormat = internalFormat;
+ pDevice->capture.internalChannels = internalChannels;
+ pDevice->capture.internalSampleRate = internalSampleRate;
+ ma_channel_map_copy(pDevice->capture.internalChannelMap, internalChannelMap, internalChannels);
+ pDevice->capture.internalPeriodSizeInFrames = internalPeriodSizeInFrames;
+ pDevice->capture.internalPeriods = internalPeriods;
+ } else {
+ pDevice->alsa.pPCMPlayback = (ma_ptr)pPCM;
+ pDevice->alsa.isUsingMMapPlayback = isUsingMMap;
+ pDevice->playback.internalFormat = internalFormat;
+ pDevice->playback.internalChannels = internalChannels;
+ pDevice->playback.internalSampleRate = internalSampleRate;
+ ma_channel_map_copy(pDevice->playback.internalChannelMap, internalChannelMap, internalChannels);
+ pDevice->playback.internalPeriodSizeInFrames = internalPeriodSizeInFrames;
+ pDevice->playback.internalPeriods = internalPeriods;
}
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- WAVEFORMATEXTENSIBLE wf;
- MA_DSBUFFERDESC descDSPrimary;
- MA_DSCAPS caps;
- char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. */
- WAVEFORMATEXTENSIBLE* pActualFormat;
- ma_uint32 bufferSizeInFrames;
- MA_DSBUFFERDESC descDS;
+ return MA_SUCCESS;
+}
- result = ma_config_to_WAVEFORMATEXTENSIBLE(pConfig->playback.format, pConfig->playback.channels, pConfig->sampleRate, pConfig->playback.channelMap, &wf);
+static ma_result ma_device_init__alsa(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ MA_ZERO_OBJECT(&pDevice->alsa);
+
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
+ }
+
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ ma_result result = ma_device_init_by_type__alsa(pContext, pConfig, ma_device_type_capture, pDevice);
if (result != MA_SUCCESS) {
return result;
}
+ }
- result = ma_context_create_IDirectSound__dsound(pContext, pConfig->playback.shareMode, pConfig->playback.pDeviceID, (ma_IDirectSound**)&pDevice->dsound.pPlayback);
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ma_result result = ma_device_init_by_type__alsa(pContext, pConfig, ma_device_type_playback, pDevice);
if (result != MA_SUCCESS) {
- ma_device_uninit__dsound(pDevice);
return result;
}
+ }
- ma_zero_object(&descDSPrimary);
- descDSPrimary.dwSize = sizeof(MA_DSBUFFERDESC);
- descDSPrimary.dwFlags = MA_DSBCAPS_PRIMARYBUFFER | MA_DSBCAPS_CTRLVOLUME;
- if (FAILED(ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDSPrimary, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackPrimaryBuffer, NULL))) {
- ma_device_uninit__dsound(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's primary buffer.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ return MA_SUCCESS;
+}
+static ma_result ma_device_read__alsa(ma_device* pDevice, void* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead)
+{
+ ma_snd_pcm_sframes_t resultALSA;
- /* We may want to make some adjustments to the format if we are using defaults. */
- ma_zero_object(&caps);
- caps.dwSize = sizeof(caps);
- if (FAILED(ma_IDirectSound_GetCaps((ma_IDirectSound*)pDevice->dsound.pPlayback, &caps))) {
- ma_device_uninit__dsound(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(pFramesOut != NULL);
- if (pDevice->playback.usingDefaultChannels) {
- if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) {
- DWORD speakerConfig;
+ if (pFramesRead != NULL) {
+ *pFramesRead = 0;
+ }
- /* It supports at least stereo, but could support more. */
- wf.Format.nChannels = 2;
+ for (;;) {
+ resultALSA = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, pFramesOut, frameCount);
+ if (resultALSA >= 0) {
+ break; /* Success. */
+ } else {
+ if (resultALSA == -EAGAIN) {
+                /*printf("TRACE: EAGAIN (read)\n");*/
+ continue; /* Try again. */
+ } else if (resultALSA == -EPIPE) {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("TRACE: EPIPE (read)\n");
+ #endif
- /* Look at the speaker configuration to get a better idea on the channel count. */
- if (SUCCEEDED(ma_IDirectSound_GetSpeakerConfig((ma_IDirectSound*)pDevice->dsound.pPlayback, &speakerConfig))) {
- ma_get_channels_from_speaker_config__dsound(speakerConfig, &wf.Format.nChannels, &wf.dwChannelMask);
+ /* Overrun. Recover and try again. If this fails we need to return an error. */
+ resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, resultALSA, MA_TRUE);
+ if (resultALSA < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after overrun.", ma_result_from_errno((int)-resultALSA));
}
- } else {
- /* It does not support stereo, which means we are stuck with mono. */
- wf.Format.nChannels = 1;
- }
- }
- if (pDevice->usingDefaultSampleRate) {
- /* We base the sample rate on the values returned by GetCaps(). */
- if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) {
- wf.Format.nSamplesPerSec = ma_get_best_sample_rate_within_range(caps.dwMinSecondarySampleRate, caps.dwMaxSecondarySampleRate);
- } else {
- wf.Format.nSamplesPerSec = caps.dwMaxSecondarySampleRate;
+ resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
+ if (resultALSA < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after underrun.", ma_result_from_errno((int)-resultALSA));
+ }
+
+ resultALSA = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, pFramesOut, frameCount);
+ if (resultALSA < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to read data from the internal device.", ma_result_from_errno((int)-resultALSA));
+ }
}
}
+ }
- wf.Format.nBlockAlign = (wf.Format.nChannels * wf.Format.wBitsPerSample) / 8;
- wf.Format.nAvgBytesPerSec = wf.Format.nBlockAlign * wf.Format.nSamplesPerSec;
+ if (pFramesRead != NULL) {
+ *pFramesRead = resultALSA;
+ }
- /*
- From MSDN:
-
- The method succeeds even if the hardware does not support the requested format; DirectSound sets the buffer to the closest
- supported format. To determine whether this has happened, an application can call the GetFormat method for the primary buffer
- and compare the result with the format that was requested with the SetFormat method.
- */
- if (FAILED(ma_IDirectSoundBuffer_SetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (WAVEFORMATEX*)&wf))) {
- ma_device_uninit__dsound(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to set format of playback device's primary buffer.", MA_FORMAT_NOT_SUPPORTED);
- }
+ return MA_SUCCESS;
+}
- /* Get the _actual_ properties of the buffer. */
- pActualFormat = (WAVEFORMATEXTENSIBLE*)rawdata;
- if (FAILED(ma_IDirectSoundBuffer_GetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL))) {
- ma_device_uninit__dsound(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the playback device's primary buffer.", MA_FORMAT_NOT_SUPPORTED);
- }
+static ma_result ma_device_write__alsa(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
+{
+ ma_snd_pcm_sframes_t resultALSA;
- pDevice->playback.internalFormat = ma_format_from_WAVEFORMATEX((WAVEFORMATEX*)pActualFormat);
- pDevice->playback.internalChannels = pActualFormat->Format.nChannels;
- pDevice->playback.internalSampleRate = pActualFormat->Format.nSamplesPerSec;
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(pFrames != NULL);
- /* Get the internal channel map based on the channel mask. */
- if (pActualFormat->Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
- ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = 0;
+ }
+
+ for (;;) {
+ resultALSA = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, pFrames, frameCount);
+ if (resultALSA >= 0) {
+ break; /* Success. */
} else {
- ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
- }
+ if (resultALSA == -EAGAIN) {
+                /*printf("TRACE: EAGAIN (write)\n");*/
+ continue; /* Try again. */
+ } else if (resultALSA == -EPIPE) {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("TRACE: EPIPE (write)\n");
+ #endif
- /* The size of the buffer must be a clean multiple of the period count. */
- bufferSizeInFrames = (ma_calculate_buffer_size_in_frames_from_milliseconds(bufferSizeInMilliseconds, pDevice->playback.internalSampleRate) / pConfig->periods) * pConfig->periods;
+ /* Underrun. Recover and try again. If this fails we need to return an error. */
+                resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, resultALSA, MA_TRUE); /* MA_TRUE=silent (don't print anything on error). */
+                if (resultALSA < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after underrun.", ma_result_from_errno((int)-resultALSA));
+ }
- /*
- Meaning of dwFlags (from MSDN):
-
- DSBCAPS_CTRLPOSITIONNOTIFY
- The buffer has position notification capability.
-
- DSBCAPS_GLOBALFOCUS
- With this flag set, an application using DirectSound can continue to play its buffers if the user switches focus to
- another application, even if the new application uses DirectSound.
-
- DSBCAPS_GETCURRENTPOSITION2
- In the first version of DirectSound, the play cursor was significantly ahead of the actual playing sound on emulated
- sound cards; it was directly behind the write cursor. Now, if the DSBCAPS_GETCURRENTPOSITION2 flag is specified, the
- application can get a more accurate play cursor.
- */
- ma_zero_object(&descDS);
- descDS.dwSize = sizeof(descDS);
- descDS.dwFlags = MA_DSBCAPS_CTRLPOSITIONNOTIFY | MA_DSBCAPS_GLOBALFOCUS | MA_DSBCAPS_GETCURRENTPOSITION2;
- descDS.dwBufferBytes = bufferSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- descDS.lpwfxFormat = (WAVEFORMATEX*)&wf;
- if (FAILED(ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDS, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackBuffer, NULL))) {
- ma_device_uninit__dsound(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's secondary buffer.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ /*
+ In my testing I have had a situation where writei() does not automatically restart the device even though I've set it
+ up as such in the software parameters. What will happen is writei() will block indefinitely even though the number of
+ frames is well beyond the auto-start threshold. To work around this I've needed to add an explicit start here. Not sure
+ if this is me just being stupid and not recovering the device properly, but this definitely feels like something isn't
+ quite right here.
+ */
+ resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
+ if (resultALSA < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after underrun.", ma_result_from_errno((int)-resultALSA));
+ }
+
+ resultALSA = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, pFrames, frameCount);
+ if (resultALSA < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to write data to device after underrun.", ma_result_from_errno((int)-resultALSA));
+ }
+ }
}
+ }
- /* DirectSound should give us a buffer exactly the size we asked for. */
- pDevice->playback.internalBufferSizeInFrames = bufferSizeInFrames;
- pDevice->playback.internalPeriods = pConfig->periods;
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = resultALSA;
}
- (void)pContext;
return MA_SUCCESS;
}
-
-ma_result ma_device_main_loop__dsound(ma_device* pDevice)
+static ma_result ma_device_main_loop__alsa(ma_device* pDevice)
{
ma_result result = MA_SUCCESS;
- ma_uint32 bpfCapture = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- ma_uint32 bpfPlayback = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- HRESULT hr;
- DWORD lockOffsetInBytesCapture;
- DWORD lockSizeInBytesCapture;
- DWORD mappedSizeInBytesCapture;
- void* pMappedBufferCapture;
- DWORD lockOffsetInBytesPlayback;
- DWORD lockSizeInBytesPlayback;
- DWORD mappedSizeInBytesPlayback;
- void* pMappedBufferPlayback;
- DWORD prevReadCursorInBytesCapture = 0;
- DWORD prevPlayCursorInBytesPlayback = 0;
- ma_bool32 physicalPlayCursorLoopFlagPlayback = 0;
- DWORD virtualWriteCursorInBytesPlayback = 0;
- ma_bool32 virtualWriteCursorLoopFlagPlayback = 0;
- ma_bool32 isPlaybackDeviceStarted = MA_FALSE;
- ma_uint32 framesWrittenToPlaybackDevice = 0; /* For knowing whether or not the playback device needs to be started. */
- ma_uint32 waitTimeInMilliseconds = 1;
+ int resultALSA;
+ ma_bool32 exitLoop = MA_FALSE;
- ma_assert(pDevice != NULL);
+ MA_ASSERT(pDevice != NULL);
- /* The first thing to do is start the capture device. The playback device is only started after the first period is written. */
+ /* Capture devices need to be started immediately. */
if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- if (FAILED(ma_IDirectSoundCaptureBuffer_Start((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, MA_DSCBSTART_LOOPING))) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Start() failed.", MA_FAILED_TO_START_BACKEND_DEVICE);
+ resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
+ if (resultALSA < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device in preparation for reading.", ma_result_from_errno(-resultALSA));
+ }
+ }
+
+ while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) {
+ switch (pDevice->type)
+ {
+ case ma_device_type_duplex:
+ {
+ if (pDevice->alsa.isUsingMMapCapture || pDevice->alsa.isUsingMMapPlayback) {
+ /* MMAP */
+ return MA_INVALID_OPERATION; /* Not yet implemented. */
+ } else {
+ /* readi() and writei() */
+
+ /* The process is: device_read -> convert -> callback -> convert -> device_write */
+ ma_uint32 totalCapturedDeviceFramesProcessed = 0;
+ ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames);
+
+ while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) {
+ ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 capturedDeviceFramesRemaining;
+ ma_uint32 capturedDeviceFramesProcessed;
+ ma_uint32 capturedDeviceFramesToProcess;
+ ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed;
+ if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) {
+ capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames;
+ }
+
+ result = ma_device_read__alsa(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ capturedDeviceFramesRemaining = capturedDeviceFramesToProcess;
+ capturedDeviceFramesProcessed = 0;
+
+ for (;;) {
+ ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+ ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+ ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames);
+ ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining;
+ ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+
+ /* Convert capture data from device format to client format. */
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ /*
+                        If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small
+ which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE.
+ */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
+
+                        ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast. */
+
+ capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+ capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+
+ /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */
+ for (;;) {
+ ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration;
+ ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames;
+ result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ result = ma_device_write__alsa(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
+ }
+
+ /* In case an error happened from ma_device_write__alsa()... */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+ }
+
+ totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed;
+ }
+ }
+ } break;
+
+ case ma_device_type_capture:
+ {
+ if (pDevice->alsa.isUsingMMapCapture) {
+ /* MMAP */
+ return MA_INVALID_OPERATION; /* Not yet implemented. */
+ } else {
+ /* readi() */
+
+ /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+ ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames;
+ ma_uint32 framesReadThisPeriod = 0;
+ while (framesReadThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToReadThisIteration = framesRemainingInPeriod;
+ if (framesToReadThisIteration > intermediaryBufferSizeInFrames) {
+ framesToReadThisIteration = intermediaryBufferSizeInFrames;
+ }
+
+ result = ma_device_read__alsa(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer);
+
+ framesReadThisPeriod += framesProcessed;
+ }
+ }
+ } break;
+
+ case ma_device_type_playback:
+ {
+ if (pDevice->alsa.isUsingMMapPlayback) {
+ /* MMAP */
+ return MA_INVALID_OPERATION; /* Not yet implemented. */
+ } else {
+ /* writei() */
+
+ /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+ ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
+ ma_uint32 framesWrittenThisPeriod = 0;
+ while (framesWrittenThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod;
+ if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) {
+ framesToWriteThisIteration = intermediaryBufferSizeInFrames;
+ }
+
+ ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer);
+
+ result = ma_device_write__alsa(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ framesWrittenThisPeriod += framesProcessed;
+ }
+ }
+ } break;
+
+ /* To silence a warning. Will never hit this. */
+ case ma_device_type_loopback:
+ default: break;
+ }
+ }
+
+ /* Here is where the device needs to be stopped. */
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ((ma_snd_pcm_drain_proc)pDevice->pContext->alsa.snd_pcm_drain)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
+
+ /* We need to prepare the device again, otherwise we won't be able to restart the device. */
+ if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) < 0) {
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[ALSA] Failed to prepare capture device after stopping.\n");
+ #endif
+ }
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ((ma_snd_pcm_drain_proc)pDevice->pContext->alsa.snd_pcm_drain)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
+
+ /* We need to prepare the device again, otherwise we won't be able to restart the device. */
+ if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) < 0) {
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[ALSA] Failed to prepare playback device after stopping.\n");
+ #endif
}
}
-
- while (ma_device__get_state(pDevice) == MA_STATE_STARTED) {
- switch (pDevice->type)
- {
- case ma_device_type_duplex:
- {
- DWORD physicalCaptureCursorInBytes;
- DWORD physicalReadCursorInBytes;
- if (FAILED(ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes))) {
- return MA_ERROR;
- }
- /* If nothing is available we just sleep for a bit and return from this iteration. */
- if (physicalReadCursorInBytes == prevReadCursorInBytesCapture) {
- ma_sleep(waitTimeInMilliseconds);
- continue; /* Nothing is available in the capture buffer. */
- }
+ return result;
+}
- /*
- The current position has moved. We need to map all of the captured samples and write them to the playback device, making sure
- we don't return until every frame has been copied over.
- */
- if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) {
- /* The capture position has not looped. This is the simple case. */
- lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
- lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture);
- } else {
- /*
- The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything,
- do it again from the start.
- */
- if (prevReadCursorInBytesCapture < pDevice->capture.internalBufferSizeInFrames*bpfCapture) {
- /* Lock up to the end of the buffer. */
- lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
- lockSizeInBytesCapture = (pDevice->capture.internalBufferSizeInFrames*bpfCapture) - prevReadCursorInBytesCapture;
- } else {
- /* Lock starting from the start of the buffer. */
- lockOffsetInBytesCapture = 0;
- lockSizeInBytesCapture = physicalReadCursorInBytes;
- }
- }
+static ma_result ma_context_uninit__alsa(ma_context* pContext)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_alsa);
- if (lockSizeInBytesCapture == 0) {
- ma_sleep(waitTimeInMilliseconds);
- continue; /* Nothing is available in the capture buffer. */
- }
+ /* Clean up memory for memory leak checkers. */
+ ((ma_snd_config_update_free_global_proc)pContext->alsa.snd_config_update_free_global)();
- hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0);
- if (FAILED(hr)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER);
- }
+#ifndef MA_NO_RUNTIME_LINKING
+ ma_dlclose(pContext, pContext->alsa.asoundSO);
+#endif
+ ma_mutex_uninit(&pContext->alsa.internalDeviceEnumLock);
- /* At this point we have some input data that we need to output. We do not return until every mapped frame of the input data is written to the playback device. */
- pDevice->capture._dspFrameCount = mappedSizeInBytesCapture / bpfCapture;
- pDevice->capture._dspFrames = (const ma_uint8*)pMappedBufferCapture;
- for (;;) { /* Keep writing to the playback device. */
- ma_uint8 inputFramesInExternalFormat[4096];
- ma_uint32 inputFramesInExternalFormatCap = sizeof(inputFramesInExternalFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
- ma_uint32 inputFramesInExternalFormatCount;
- ma_uint8 outputFramesInExternalFormat[4096];
- ma_uint32 outputFramesInExternalFormatCap = sizeof(outputFramesInExternalFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
-
- inputFramesInExternalFormatCount = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, inputFramesInExternalFormat, ma_min(inputFramesInExternalFormatCap, outputFramesInExternalFormatCap));
- if (inputFramesInExternalFormatCount == 0) {
- break; /* No more input data. */
- }
+ return MA_SUCCESS;
+}
- pDevice->onData(pDevice, outputFramesInExternalFormat, inputFramesInExternalFormat, inputFramesInExternalFormatCount);
+static ma_result ma_context_init__alsa(const ma_context_config* pConfig, ma_context* pContext)
+{
+#ifndef MA_NO_RUNTIME_LINKING
+ const char* libasoundNames[] = {
+ "libasound.so.2",
+ "libasound.so"
+ };
+ size_t i;
- /* At this point we have input and output data in external format. All we need to do now is convert it to the output format. This may take a few passes. */
- pDevice->playback._dspFrameCount = inputFramesInExternalFormatCount;
- pDevice->playback._dspFrames = (const ma_uint8*)outputFramesInExternalFormat;
- for (;;) {
- ma_uint32 framesWrittenThisIteration;
- DWORD physicalPlayCursorInBytes;
- DWORD physicalWriteCursorInBytes;
- DWORD availableBytesPlayback;
- DWORD silentPaddingInBytes = 0; /* <-- Must be initialized to 0. */
+ for (i = 0; i < ma_countof(libasoundNames); ++i) {
+ pContext->alsa.asoundSO = ma_dlopen(pContext, libasoundNames[i]);
+ if (pContext->alsa.asoundSO != NULL) {
+ break;
+ }
+ }
+
+ if (pContext->alsa.asoundSO == NULL) {
+#ifdef MA_DEBUG_OUTPUT
+ printf("[ALSA] Failed to open shared object.\n");
+#endif
+ return MA_NO_BACKEND;
+ }
+
+ pContext->alsa.snd_pcm_open = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_open");
+ pContext->alsa.snd_pcm_close = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_close");
+ pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_sizeof");
+ pContext->alsa.snd_pcm_hw_params_any = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_any");
+ pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format");
+ pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format_first");
+ pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format_mask");
+ pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels_near");
+ pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_resample");
+ pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_near");
+ pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_buffer_size_near");
+ pContext->alsa.snd_pcm_hw_params_set_periods_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_periods_near");
+ pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_access");
+ pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format");
+ pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels");
+ pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_min");
+ pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_max");
+ pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate");
+ pContext->alsa.snd_pcm_hw_params_get_rate_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_min");
+ pContext->alsa.snd_pcm_hw_params_get_rate_max = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_max");
+ pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_buffer_size");
+ pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_periods");
+ pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_access");
+ pContext->alsa.snd_pcm_hw_params = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params");
+ pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_sizeof");
+ pContext->alsa.snd_pcm_sw_params_current = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_current");
+ pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_get_boundary");
+ pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_avail_min");
+ pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_start_threshold");
+ pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_stop_threshold");
+ pContext->alsa.snd_pcm_sw_params = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params");
+ pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_format_mask_sizeof");
+ pContext->alsa.snd_pcm_format_mask_test = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_format_mask_test");
+ pContext->alsa.snd_pcm_get_chmap = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_get_chmap");
+ pContext->alsa.snd_pcm_state = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_state");
+ pContext->alsa.snd_pcm_prepare = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_prepare");
+ pContext->alsa.snd_pcm_start = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_start");
+ pContext->alsa.snd_pcm_drop = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_drop");
+ pContext->alsa.snd_pcm_drain = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_drain");
+ pContext->alsa.snd_device_name_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_hint");
+ pContext->alsa.snd_device_name_get_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_get_hint");
+ pContext->alsa.snd_card_get_index = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_card_get_index");
+ pContext->alsa.snd_device_name_free_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_free_hint");
+ pContext->alsa.snd_pcm_mmap_begin = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_mmap_begin");
+ pContext->alsa.snd_pcm_mmap_commit = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_mmap_commit");
+ pContext->alsa.snd_pcm_recover = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_recover");
+ pContext->alsa.snd_pcm_readi = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_readi");
+ pContext->alsa.snd_pcm_writei = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_writei");
+ pContext->alsa.snd_pcm_avail = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_avail");
+ pContext->alsa.snd_pcm_avail_update = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_avail_update");
+ pContext->alsa.snd_pcm_wait = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_wait");
+ pContext->alsa.snd_pcm_info = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info");
+ pContext->alsa.snd_pcm_info_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info_sizeof");
+ pContext->alsa.snd_pcm_info_get_name = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info_get_name");
+ pContext->alsa.snd_config_update_free_global = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_config_update_free_global");
+#else
+ /* The system below is just for type safety. */
+ ma_snd_pcm_open_proc _snd_pcm_open = snd_pcm_open;
+ ma_snd_pcm_close_proc _snd_pcm_close = snd_pcm_close;
+ ma_snd_pcm_hw_params_sizeof_proc _snd_pcm_hw_params_sizeof = snd_pcm_hw_params_sizeof;
+ ma_snd_pcm_hw_params_any_proc _snd_pcm_hw_params_any = snd_pcm_hw_params_any;
+ ma_snd_pcm_hw_params_set_format_proc _snd_pcm_hw_params_set_format = snd_pcm_hw_params_set_format;
+ ma_snd_pcm_hw_params_set_format_first_proc _snd_pcm_hw_params_set_format_first = snd_pcm_hw_params_set_format_first;
+ ma_snd_pcm_hw_params_get_format_mask_proc _snd_pcm_hw_params_get_format_mask = snd_pcm_hw_params_get_format_mask;
+ ma_snd_pcm_hw_params_set_channels_near_proc _snd_pcm_hw_params_set_channels_near = snd_pcm_hw_params_set_channels_near;
+ ma_snd_pcm_hw_params_set_rate_resample_proc _snd_pcm_hw_params_set_rate_resample = snd_pcm_hw_params_set_rate_resample;
+ ma_snd_pcm_hw_params_set_rate_near_proc _snd_pcm_hw_params_set_rate_near = snd_pcm_hw_params_set_rate_near;
+ ma_snd_pcm_hw_params_set_buffer_size_near_proc _snd_pcm_hw_params_set_buffer_size_near = snd_pcm_hw_params_set_buffer_size_near;
+ ma_snd_pcm_hw_params_set_periods_near_proc _snd_pcm_hw_params_set_periods_near = snd_pcm_hw_params_set_periods_near;
+ ma_snd_pcm_hw_params_set_access_proc _snd_pcm_hw_params_set_access = snd_pcm_hw_params_set_access;
+ ma_snd_pcm_hw_params_get_format_proc _snd_pcm_hw_params_get_format = snd_pcm_hw_params_get_format;
+ ma_snd_pcm_hw_params_get_channels_proc _snd_pcm_hw_params_get_channels = snd_pcm_hw_params_get_channels;
+ ma_snd_pcm_hw_params_get_channels_min_proc _snd_pcm_hw_params_get_channels_min = snd_pcm_hw_params_get_channels_min;
+ ma_snd_pcm_hw_params_get_channels_max_proc _snd_pcm_hw_params_get_channels_max = snd_pcm_hw_params_get_channels_max;
+ ma_snd_pcm_hw_params_get_rate_proc _snd_pcm_hw_params_get_rate = snd_pcm_hw_params_get_rate;
+ ma_snd_pcm_hw_params_get_rate_min_proc _snd_pcm_hw_params_get_rate_min = snd_pcm_hw_params_get_rate_min;
+ ma_snd_pcm_hw_params_get_rate_max_proc _snd_pcm_hw_params_get_rate_max = snd_pcm_hw_params_get_rate_max;
+ ma_snd_pcm_hw_params_get_buffer_size_proc _snd_pcm_hw_params_get_buffer_size = snd_pcm_hw_params_get_buffer_size;
+ ma_snd_pcm_hw_params_get_periods_proc _snd_pcm_hw_params_get_periods = snd_pcm_hw_params_get_periods;
+ ma_snd_pcm_hw_params_get_access_proc _snd_pcm_hw_params_get_access = snd_pcm_hw_params_get_access;
+ ma_snd_pcm_hw_params_proc _snd_pcm_hw_params = snd_pcm_hw_params;
+ ma_snd_pcm_sw_params_sizeof_proc _snd_pcm_sw_params_sizeof = snd_pcm_sw_params_sizeof;
+ ma_snd_pcm_sw_params_current_proc _snd_pcm_sw_params_current = snd_pcm_sw_params_current;
+ ma_snd_pcm_sw_params_get_boundary_proc _snd_pcm_sw_params_get_boundary = snd_pcm_sw_params_get_boundary;
+ ma_snd_pcm_sw_params_set_avail_min_proc _snd_pcm_sw_params_set_avail_min = snd_pcm_sw_params_set_avail_min;
+ ma_snd_pcm_sw_params_set_start_threshold_proc _snd_pcm_sw_params_set_start_threshold = snd_pcm_sw_params_set_start_threshold;
+ ma_snd_pcm_sw_params_set_stop_threshold_proc _snd_pcm_sw_params_set_stop_threshold = snd_pcm_sw_params_set_stop_threshold;
+ ma_snd_pcm_sw_params_proc _snd_pcm_sw_params = snd_pcm_sw_params;
+ ma_snd_pcm_format_mask_sizeof_proc _snd_pcm_format_mask_sizeof = snd_pcm_format_mask_sizeof;
+ ma_snd_pcm_format_mask_test_proc _snd_pcm_format_mask_test = snd_pcm_format_mask_test;
+ ma_snd_pcm_get_chmap_proc _snd_pcm_get_chmap = snd_pcm_get_chmap;
+ ma_snd_pcm_state_proc _snd_pcm_state = snd_pcm_state;
+ ma_snd_pcm_prepare_proc _snd_pcm_prepare = snd_pcm_prepare;
+ ma_snd_pcm_start_proc _snd_pcm_start = snd_pcm_start;
+ ma_snd_pcm_drop_proc _snd_pcm_drop = snd_pcm_drop;
+ ma_snd_pcm_drain_proc _snd_pcm_drain = snd_pcm_drain;
+ ma_snd_device_name_hint_proc _snd_device_name_hint = snd_device_name_hint;
+ ma_snd_device_name_get_hint_proc _snd_device_name_get_hint = snd_device_name_get_hint;
+ ma_snd_card_get_index_proc _snd_card_get_index = snd_card_get_index;
+ ma_snd_device_name_free_hint_proc _snd_device_name_free_hint = snd_device_name_free_hint;
+ ma_snd_pcm_mmap_begin_proc _snd_pcm_mmap_begin = snd_pcm_mmap_begin;
+ ma_snd_pcm_mmap_commit_proc _snd_pcm_mmap_commit = snd_pcm_mmap_commit;
+ ma_snd_pcm_recover_proc _snd_pcm_recover = snd_pcm_recover;
+ ma_snd_pcm_readi_proc _snd_pcm_readi = snd_pcm_readi;
+ ma_snd_pcm_writei_proc _snd_pcm_writei = snd_pcm_writei;
+ ma_snd_pcm_avail_proc _snd_pcm_avail = snd_pcm_avail;
+ ma_snd_pcm_avail_update_proc _snd_pcm_avail_update = snd_pcm_avail_update;
+ ma_snd_pcm_wait_proc _snd_pcm_wait = snd_pcm_wait;
+ ma_snd_pcm_info_proc _snd_pcm_info = snd_pcm_info;
+ ma_snd_pcm_info_sizeof_proc _snd_pcm_info_sizeof = snd_pcm_info_sizeof;
+ ma_snd_pcm_info_get_name_proc _snd_pcm_info_get_name = snd_pcm_info_get_name;
+ ma_snd_config_update_free_global_proc _snd_config_update_free_global = snd_config_update_free_global;
+
+ pContext->alsa.snd_pcm_open = (ma_proc)_snd_pcm_open;
+ pContext->alsa.snd_pcm_close = (ma_proc)_snd_pcm_close;
+ pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)_snd_pcm_hw_params_sizeof;
+ pContext->alsa.snd_pcm_hw_params_any = (ma_proc)_snd_pcm_hw_params_any;
+ pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)_snd_pcm_hw_params_set_format;
+ pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)_snd_pcm_hw_params_set_format_first;
+ pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)_snd_pcm_hw_params_get_format_mask;
+ pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)_snd_pcm_hw_params_set_channels_near;
+ pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)_snd_pcm_hw_params_set_rate_resample;
+ pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)_snd_pcm_hw_params_set_rate_near;
+ pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)_snd_pcm_hw_params_set_buffer_size_near;
+ pContext->alsa.snd_pcm_hw_params_set_periods_near = (ma_proc)_snd_pcm_hw_params_set_periods_near;
+ pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)_snd_pcm_hw_params_set_access;
+ pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)_snd_pcm_hw_params_get_format;
+ pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)_snd_pcm_hw_params_get_channels;
+ pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)_snd_pcm_hw_params_get_channels_min;
+ pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)_snd_pcm_hw_params_get_channels_max;
+ pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)_snd_pcm_hw_params_get_rate;
+ pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)_snd_pcm_hw_params_get_buffer_size;
+ pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)_snd_pcm_hw_params_get_periods;
+ pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)_snd_pcm_hw_params_get_access;
+ pContext->alsa.snd_pcm_hw_params = (ma_proc)_snd_pcm_hw_params;
+ pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)_snd_pcm_sw_params_sizeof;
+ pContext->alsa.snd_pcm_sw_params_current = (ma_proc)_snd_pcm_sw_params_current;
+ pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)_snd_pcm_sw_params_get_boundary;
+ pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)_snd_pcm_sw_params_set_avail_min;
+ pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)_snd_pcm_sw_params_set_start_threshold;
+ pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)_snd_pcm_sw_params_set_stop_threshold;
+ pContext->alsa.snd_pcm_sw_params = (ma_proc)_snd_pcm_sw_params;
+ pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)_snd_pcm_format_mask_sizeof;
+ pContext->alsa.snd_pcm_format_mask_test = (ma_proc)_snd_pcm_format_mask_test;
+ pContext->alsa.snd_pcm_get_chmap = (ma_proc)_snd_pcm_get_chmap;
+ pContext->alsa.snd_pcm_state = (ma_proc)_snd_pcm_state;
+ pContext->alsa.snd_pcm_prepare = (ma_proc)_snd_pcm_prepare;
+ pContext->alsa.snd_pcm_start = (ma_proc)_snd_pcm_start;
+ pContext->alsa.snd_pcm_drop = (ma_proc)_snd_pcm_drop;
+ pContext->alsa.snd_pcm_drain = (ma_proc)_snd_pcm_drain;
+ pContext->alsa.snd_device_name_hint = (ma_proc)_snd_device_name_hint;
+ pContext->alsa.snd_device_name_get_hint = (ma_proc)_snd_device_name_get_hint;
+ pContext->alsa.snd_card_get_index = (ma_proc)_snd_card_get_index;
+ pContext->alsa.snd_device_name_free_hint = (ma_proc)_snd_device_name_free_hint;
+ pContext->alsa.snd_pcm_mmap_begin = (ma_proc)_snd_pcm_mmap_begin;
+ pContext->alsa.snd_pcm_mmap_commit = (ma_proc)_snd_pcm_mmap_commit;
+ pContext->alsa.snd_pcm_recover = (ma_proc)_snd_pcm_recover;
+ pContext->alsa.snd_pcm_readi = (ma_proc)_snd_pcm_readi;
+ pContext->alsa.snd_pcm_writei = (ma_proc)_snd_pcm_writei;
+ pContext->alsa.snd_pcm_avail = (ma_proc)_snd_pcm_avail;
+ pContext->alsa.snd_pcm_avail_update = (ma_proc)_snd_pcm_avail_update;
+ pContext->alsa.snd_pcm_wait = (ma_proc)_snd_pcm_wait;
+ pContext->alsa.snd_pcm_info = (ma_proc)_snd_pcm_info;
+ pContext->alsa.snd_pcm_info_sizeof = (ma_proc)_snd_pcm_info_sizeof;
+ pContext->alsa.snd_pcm_info_get_name = (ma_proc)_snd_pcm_info_get_name;
+ pContext->alsa.snd_config_update_free_global = (ma_proc)_snd_config_update_free_global;
+#endif
- /* We need the physical play and write cursors. */
- if (FAILED(ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes))) {
- break;
- }
+ pContext->alsa.useVerboseDeviceEnumeration = pConfig->alsa.useVerboseDeviceEnumeration;
- if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) {
- physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback;
- }
- prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes;
+ if (ma_mutex_init(pContext, &pContext->alsa.internalDeviceEnumLock) != MA_SUCCESS) {
+ ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] WARNING: Failed to initialize mutex for internal device enumeration.", MA_ERROR);
+ }
- /* If there's any bytes available for writing we can do that now. The space between the virtual cursor position and play cursor. */
- if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
- /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */
- if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
- availableBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - virtualWriteCursorInBytesPlayback;
- availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */
- } else {
- /* This is an error. */
- #ifdef MA_DEBUG_OUTPUT
- printf("[DirectSound] (Duplex/Playback) WARNING: Play cursor has moved in front of the write cursor (same loop iterations). physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
- #endif
- availableBytesPlayback = 0;
- }
- } else {
- /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */
- if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
- availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
- } else {
- /* This is an error. */
- #ifdef MA_DEBUG_OUTPUT
- printf("[DirectSound] (Duplex/Playback) WARNING: Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
- #endif
- availableBytesPlayback = 0;
- }
- }
+ pContext->onUninit = ma_context_uninit__alsa;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__alsa;
+ pContext->onEnumDevices = ma_context_enumerate_devices__alsa;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__alsa;
+ pContext->onDeviceInit = ma_device_init__alsa;
+ pContext->onDeviceUninit = ma_device_uninit__alsa;
+ pContext->onDeviceStart = NULL; /* Not used. Started in the main loop. */
+ pContext->onDeviceStop = NULL; /* Not used. Started in the main loop. */
+ pContext->onDeviceMainLoop = ma_device_main_loop__alsa;
- #ifdef MA_DEBUG_OUTPUT
- /*printf("[DirectSound] (Duplex/Playback) physicalPlayCursorInBytes=%d, availableBytesPlayback=%d\n", physicalPlayCursorInBytes, availableBytesPlayback);*/
- #endif
+ return MA_SUCCESS;
+}
+#endif /* ALSA */
- /* If there's no room available for writing we need to wait for more. */
- if (availableBytesPlayback == 0) {
- /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */
- if (!isPlaybackDeviceStarted) {
- if (FAILED(ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING))) {
- ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
- isPlaybackDeviceStarted = MA_TRUE;
- } else {
- ma_sleep(waitTimeInMilliseconds);
- continue;
- }
- }
- /* Getting here means there room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */
- lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback;
- if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
- /* Same loop iteration. Go up to the end of the buffer. */
- lockSizeInBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - virtualWriteCursorInBytesPlayback;
- } else {
- /* Different loop iterations. Go up to the physical play cursor. */
- lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
- }
+/******************************************************************************
- hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0);
- if (FAILED(hr)) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER);
- break;
- }
+PulseAudio Backend
- /*
- Experiment: If the playback buffer is being starved, pad it with some silence to get it back in sync. This will cause a glitch, but it may prevent
- endless glitching due to it constantly running out of data.
- */
- if (isPlaybackDeviceStarted) {
- DWORD bytesQueuedForPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - availableBytesPlayback;
- if (bytesQueuedForPlayback < ((pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)*bpfPlayback)) {
- silentPaddingInBytes = ((pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)*2*bpfPlayback) - bytesQueuedForPlayback;
- if (silentPaddingInBytes > lockSizeInBytesPlayback) {
- silentPaddingInBytes = lockSizeInBytesPlayback;
- }
+******************************************************************************/
+#ifdef MA_HAS_PULSEAUDIO
+/*
+It is assumed pulseaudio.h is available when compile-time linking is being used. We use this for type safety when using
+compile time linking (we don't have this luxury when using runtime linking without headers).
- #ifdef MA_DEBUG_OUTPUT
- printf("[DirectSound] (Duplex/Playback) Playback buffer starved. availableBytesPlayback=%d, silentPaddingInBytes=%d\n", availableBytesPlayback, silentPaddingInBytes);
- #endif
- }
- }
+When using compile time linking, each of our ma_* equivalents should use the same types as defined by the header. The
+reason for this is that it allows us to take advantage of proper type safety.
+*/
+#ifdef MA_NO_RUNTIME_LINKING
+#include <pulse/pulseaudio.h>
- /* At this point we have a buffer for output. */
- if (silentPaddingInBytes > 0) {
- ma_zero_memory(pMappedBufferPlayback, silentPaddingInBytes);
- framesWrittenThisIteration = silentPaddingInBytes/bpfPlayback;
- } else {
- framesWrittenThisIteration = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, pMappedBufferPlayback, mappedSizeInBytesPlayback/bpfPlayback);
- }
-
+#define MA_PA_OK PA_OK
+#define MA_PA_ERR_ACCESS PA_ERR_ACCESS
+#define MA_PA_ERR_INVALID PA_ERR_INVALID
+#define MA_PA_ERR_NOENTITY PA_ERR_NOENTITY
- hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedBufferPlayback, framesWrittenThisIteration*bpfPlayback, NULL, 0);
- if (FAILED(hr)) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER);
- break;
- }
+#define MA_PA_CHANNELS_MAX PA_CHANNELS_MAX
+#define MA_PA_RATE_MAX PA_RATE_MAX
- virtualWriteCursorInBytesPlayback += framesWrittenThisIteration*bpfPlayback;
- if ((virtualWriteCursorInBytesPlayback/bpfPlayback) == pDevice->playback.internalBufferSizeInFrames) {
- virtualWriteCursorInBytesPlayback = 0;
- virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback;
- }
-
- /*
- We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds
- a bit of a buffer to prevent the playback buffer from getting starved.
- */
- framesWrittenToPlaybackDevice += framesWrittenThisIteration;
- if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= ((pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)*2)) {
- if (FAILED(ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING))) {
- ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
- isPlaybackDeviceStarted = MA_TRUE;
- }
+typedef pa_context_flags_t ma_pa_context_flags_t;
+#define MA_PA_CONTEXT_NOFLAGS PA_CONTEXT_NOFLAGS
+#define MA_PA_CONTEXT_NOAUTOSPAWN PA_CONTEXT_NOAUTOSPAWN
+#define MA_PA_CONTEXT_NOFAIL PA_CONTEXT_NOFAIL
- if (framesWrittenThisIteration < mappedSizeInBytesPlayback/bpfPlayback) {
- break; /* We're finished with the output data.*/
- }
- }
+typedef pa_stream_flags_t ma_pa_stream_flags_t;
+#define MA_PA_STREAM_NOFLAGS PA_STREAM_NOFLAGS
+#define MA_PA_STREAM_START_CORKED PA_STREAM_START_CORKED
+#define MA_PA_STREAM_INTERPOLATE_TIMING PA_STREAM_INTERPOLATE_TIMING
+#define MA_PA_STREAM_NOT_MONOTONIC PA_STREAM_NOT_MONOTONIC
+#define MA_PA_STREAM_AUTO_TIMING_UPDATE PA_STREAM_AUTO_TIMING_UPDATE
+#define MA_PA_STREAM_NO_REMAP_CHANNELS PA_STREAM_NO_REMAP_CHANNELS
+#define MA_PA_STREAM_NO_REMIX_CHANNELS PA_STREAM_NO_REMIX_CHANNELS
+#define MA_PA_STREAM_FIX_FORMAT PA_STREAM_FIX_FORMAT
+#define MA_PA_STREAM_FIX_RATE PA_STREAM_FIX_RATE
+#define MA_PA_STREAM_FIX_CHANNELS PA_STREAM_FIX_CHANNELS
+#define MA_PA_STREAM_DONT_MOVE PA_STREAM_DONT_MOVE
+#define MA_PA_STREAM_VARIABLE_RATE PA_STREAM_VARIABLE_RATE
+#define MA_PA_STREAM_PEAK_DETECT PA_STREAM_PEAK_DETECT
+#define MA_PA_STREAM_START_MUTED PA_STREAM_START_MUTED
+#define MA_PA_STREAM_ADJUST_LATENCY PA_STREAM_ADJUST_LATENCY
+#define MA_PA_STREAM_EARLY_REQUESTS PA_STREAM_EARLY_REQUESTS
+#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND
+#define MA_PA_STREAM_START_UNMUTED PA_STREAM_START_UNMUTED
+#define MA_PA_STREAM_FAIL_ON_SUSPEND PA_STREAM_FAIL_ON_SUSPEND
+#define MA_PA_STREAM_RELATIVE_VOLUME PA_STREAM_RELATIVE_VOLUME
+#define MA_PA_STREAM_PASSTHROUGH PA_STREAM_PASSTHROUGH
- if (inputFramesInExternalFormatCount < inputFramesInExternalFormatCap) {
- break; /* We just consumed every input sample. */
- }
- }
+typedef pa_sink_flags_t ma_pa_sink_flags_t;
+#define MA_PA_SINK_NOFLAGS PA_SINK_NOFLAGS
+#define MA_PA_SINK_HW_VOLUME_CTRL PA_SINK_HW_VOLUME_CTRL
+#define MA_PA_SINK_LATENCY PA_SINK_LATENCY
+#define MA_PA_SINK_HARDWARE PA_SINK_HARDWARE
+#define MA_PA_SINK_NETWORK PA_SINK_NETWORK
+#define MA_PA_SINK_HW_MUTE_CTRL PA_SINK_HW_MUTE_CTRL
+#define MA_PA_SINK_DECIBEL_VOLUME PA_SINK_DECIBEL_VOLUME
+#define MA_PA_SINK_FLAT_VOLUME PA_SINK_FLAT_VOLUME
+#define MA_PA_SINK_DYNAMIC_LATENCY PA_SINK_DYNAMIC_LATENCY
+#define MA_PA_SINK_SET_FORMATS PA_SINK_SET_FORMATS
+typedef pa_source_flags_t ma_pa_source_flags_t;
+#define MA_PA_SOURCE_NOFLAGS PA_SOURCE_NOFLAGS
+#define MA_PA_SOURCE_HW_VOLUME_CTRL PA_SOURCE_HW_VOLUME_CTRL
+#define MA_PA_SOURCE_LATENCY PA_SOURCE_LATENCY
+#define MA_PA_SOURCE_HARDWARE PA_SOURCE_HARDWARE
+#define MA_PA_SOURCE_NETWORK PA_SOURCE_NETWORK
+#define MA_PA_SOURCE_HW_MUTE_CTRL PA_SOURCE_HW_MUTE_CTRL
+#define MA_PA_SOURCE_DECIBEL_VOLUME PA_SOURCE_DECIBEL_VOLUME
+#define MA_PA_SOURCE_DYNAMIC_LATENCY PA_SOURCE_DYNAMIC_LATENCY
+#define MA_PA_SOURCE_FLAT_VOLUME PA_SOURCE_FLAT_VOLUME
- /* At this point we're done with the mapped portion of the capture buffer. */
- hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedBufferCapture, mappedSizeInBytesCapture, NULL, 0);
- if (FAILED(hr)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER);
- }
- prevReadCursorInBytesCapture = (lockOffsetInBytesCapture + mappedSizeInBytesCapture);
- } break;
+typedef pa_context_state_t ma_pa_context_state_t;
+#define MA_PA_CONTEXT_UNCONNECTED PA_CONTEXT_UNCONNECTED
+#define MA_PA_CONTEXT_CONNECTING PA_CONTEXT_CONNECTING
+#define MA_PA_CONTEXT_AUTHORIZING PA_CONTEXT_AUTHORIZING
+#define MA_PA_CONTEXT_SETTING_NAME PA_CONTEXT_SETTING_NAME
+#define MA_PA_CONTEXT_READY PA_CONTEXT_READY
+#define MA_PA_CONTEXT_FAILED PA_CONTEXT_FAILED
+#define MA_PA_CONTEXT_TERMINATED PA_CONTEXT_TERMINATED
+
+typedef pa_stream_state_t ma_pa_stream_state_t;
+#define MA_PA_STREAM_UNCONNECTED PA_STREAM_UNCONNECTED
+#define MA_PA_STREAM_CREATING PA_STREAM_CREATING
+#define MA_PA_STREAM_READY PA_STREAM_READY
+#define MA_PA_STREAM_FAILED PA_STREAM_FAILED
+#define MA_PA_STREAM_TERMINATED PA_STREAM_TERMINATED
+typedef pa_operation_state_t ma_pa_operation_state_t;
+#define MA_PA_OPERATION_RUNNING PA_OPERATION_RUNNING
+#define MA_PA_OPERATION_DONE PA_OPERATION_DONE
+#define MA_PA_OPERATION_CANCELLED PA_OPERATION_CANCELLED
+typedef pa_sink_state_t ma_pa_sink_state_t;
+#define MA_PA_SINK_INVALID_STATE PA_SINK_INVALID_STATE
+#define MA_PA_SINK_RUNNING PA_SINK_RUNNING
+#define MA_PA_SINK_IDLE PA_SINK_IDLE
+#define MA_PA_SINK_SUSPENDED PA_SINK_SUSPENDED
- case ma_device_type_capture:
- {
- DWORD physicalCaptureCursorInBytes;
- DWORD physicalReadCursorInBytes;
- if (FAILED(ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes))) {
- return MA_ERROR;
- }
+typedef pa_source_state_t ma_pa_source_state_t;
+#define MA_PA_SOURCE_INVALID_STATE PA_SOURCE_INVALID_STATE
+#define MA_PA_SOURCE_RUNNING PA_SOURCE_RUNNING
+#define MA_PA_SOURCE_IDLE PA_SOURCE_IDLE
+#define MA_PA_SOURCE_SUSPENDED PA_SOURCE_SUSPENDED
- /* If the previous capture position is the same as the current position we need to wait a bit longer. */
- if (prevReadCursorInBytesCapture == physicalReadCursorInBytes) {
- ma_sleep(waitTimeInMilliseconds);
- continue;
- }
+typedef pa_seek_mode_t ma_pa_seek_mode_t;
+#define MA_PA_SEEK_RELATIVE PA_SEEK_RELATIVE
+#define MA_PA_SEEK_ABSOLUTE PA_SEEK_ABSOLUTE
+#define MA_PA_SEEK_RELATIVE_ON_READ PA_SEEK_RELATIVE_ON_READ
+#define MA_PA_SEEK_RELATIVE_END PA_SEEK_RELATIVE_END
- /* Getting here means we have capture data available. */
- if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) {
- /* The capture position has not looped. This is the simple case. */
- lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
- lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture);
- } else {
- /*
- The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything,
- do it again from the start.
- */
- if (prevReadCursorInBytesCapture < pDevice->capture.internalBufferSizeInFrames*bpfCapture) {
- /* Lock up to the end of the buffer. */
- lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
- lockSizeInBytesCapture = (pDevice->capture.internalBufferSizeInFrames*bpfCapture) - prevReadCursorInBytesCapture;
- } else {
- /* Lock starting from the start of the buffer. */
- lockOffsetInBytesCapture = 0;
- lockSizeInBytesCapture = physicalReadCursorInBytes;
- }
- }
+typedef pa_channel_position_t ma_pa_channel_position_t;
+#define MA_PA_CHANNEL_POSITION_INVALID PA_CHANNEL_POSITION_INVALID
+#define MA_PA_CHANNEL_POSITION_MONO PA_CHANNEL_POSITION_MONO
+#define MA_PA_CHANNEL_POSITION_FRONT_LEFT PA_CHANNEL_POSITION_FRONT_LEFT
+#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT PA_CHANNEL_POSITION_FRONT_RIGHT
+#define MA_PA_CHANNEL_POSITION_FRONT_CENTER PA_CHANNEL_POSITION_FRONT_CENTER
+#define MA_PA_CHANNEL_POSITION_REAR_CENTER PA_CHANNEL_POSITION_REAR_CENTER
+#define MA_PA_CHANNEL_POSITION_REAR_LEFT PA_CHANNEL_POSITION_REAR_LEFT
+#define MA_PA_CHANNEL_POSITION_REAR_RIGHT PA_CHANNEL_POSITION_REAR_RIGHT
+#define MA_PA_CHANNEL_POSITION_LFE PA_CHANNEL_POSITION_LFE
+#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER
+#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER
+#define MA_PA_CHANNEL_POSITION_SIDE_LEFT PA_CHANNEL_POSITION_SIDE_LEFT
+#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT PA_CHANNEL_POSITION_SIDE_RIGHT
+#define MA_PA_CHANNEL_POSITION_AUX0 PA_CHANNEL_POSITION_AUX0
+#define MA_PA_CHANNEL_POSITION_AUX1 PA_CHANNEL_POSITION_AUX1
+#define MA_PA_CHANNEL_POSITION_AUX2 PA_CHANNEL_POSITION_AUX2
+#define MA_PA_CHANNEL_POSITION_AUX3 PA_CHANNEL_POSITION_AUX3
+#define MA_PA_CHANNEL_POSITION_AUX4 PA_CHANNEL_POSITION_AUX4
+#define MA_PA_CHANNEL_POSITION_AUX5 PA_CHANNEL_POSITION_AUX5
+#define MA_PA_CHANNEL_POSITION_AUX6 PA_CHANNEL_POSITION_AUX6
+#define MA_PA_CHANNEL_POSITION_AUX7 PA_CHANNEL_POSITION_AUX7
+#define MA_PA_CHANNEL_POSITION_AUX8 PA_CHANNEL_POSITION_AUX8
+#define MA_PA_CHANNEL_POSITION_AUX9 PA_CHANNEL_POSITION_AUX9
+#define MA_PA_CHANNEL_POSITION_AUX10 PA_CHANNEL_POSITION_AUX10
+#define MA_PA_CHANNEL_POSITION_AUX11 PA_CHANNEL_POSITION_AUX11
+#define MA_PA_CHANNEL_POSITION_AUX12 PA_CHANNEL_POSITION_AUX12
+#define MA_PA_CHANNEL_POSITION_AUX13 PA_CHANNEL_POSITION_AUX13
+#define MA_PA_CHANNEL_POSITION_AUX14 PA_CHANNEL_POSITION_AUX14
+#define MA_PA_CHANNEL_POSITION_AUX15 PA_CHANNEL_POSITION_AUX15
+#define MA_PA_CHANNEL_POSITION_AUX16 PA_CHANNEL_POSITION_AUX16
+#define MA_PA_CHANNEL_POSITION_AUX17 PA_CHANNEL_POSITION_AUX17
+#define MA_PA_CHANNEL_POSITION_AUX18 PA_CHANNEL_POSITION_AUX18
+#define MA_PA_CHANNEL_POSITION_AUX19 PA_CHANNEL_POSITION_AUX19
+#define MA_PA_CHANNEL_POSITION_AUX20 PA_CHANNEL_POSITION_AUX20
+#define MA_PA_CHANNEL_POSITION_AUX21 PA_CHANNEL_POSITION_AUX21
+#define MA_PA_CHANNEL_POSITION_AUX22 PA_CHANNEL_POSITION_AUX22
+#define MA_PA_CHANNEL_POSITION_AUX23 PA_CHANNEL_POSITION_AUX23
+#define MA_PA_CHANNEL_POSITION_AUX24 PA_CHANNEL_POSITION_AUX24
+#define MA_PA_CHANNEL_POSITION_AUX25 PA_CHANNEL_POSITION_AUX25
+#define MA_PA_CHANNEL_POSITION_AUX26 PA_CHANNEL_POSITION_AUX26
+#define MA_PA_CHANNEL_POSITION_AUX27 PA_CHANNEL_POSITION_AUX27
+#define MA_PA_CHANNEL_POSITION_AUX28 PA_CHANNEL_POSITION_AUX28
+#define MA_PA_CHANNEL_POSITION_AUX29 PA_CHANNEL_POSITION_AUX29
+#define MA_PA_CHANNEL_POSITION_AUX30 PA_CHANNEL_POSITION_AUX30
+#define MA_PA_CHANNEL_POSITION_AUX31 PA_CHANNEL_POSITION_AUX31
+#define MA_PA_CHANNEL_POSITION_TOP_CENTER PA_CHANNEL_POSITION_TOP_CENTER
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT PA_CHANNEL_POSITION_TOP_FRONT_LEFT
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT PA_CHANNEL_POSITION_TOP_FRONT_RIGHT
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER PA_CHANNEL_POSITION_TOP_FRONT_CENTER
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT PA_CHANNEL_POSITION_TOP_REAR_LEFT
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT PA_CHANNEL_POSITION_TOP_REAR_RIGHT
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER PA_CHANNEL_POSITION_TOP_REAR_CENTER
+#define MA_PA_CHANNEL_POSITION_LEFT PA_CHANNEL_POSITION_LEFT
+#define MA_PA_CHANNEL_POSITION_RIGHT PA_CHANNEL_POSITION_RIGHT
+#define MA_PA_CHANNEL_POSITION_CENTER PA_CHANNEL_POSITION_CENTER
+#define MA_PA_CHANNEL_POSITION_SUBWOOFER PA_CHANNEL_POSITION_SUBWOOFER
- #ifdef MA_DEBUG_OUTPUT
- /*printf("[DirectSound] (Capture) physicalCaptureCursorInBytes=%d, physicalReadCursorInBytes=%d\n", physicalCaptureCursorInBytes, physicalReadCursorInBytes);*/
- /*printf("[DirectSound] (Capture) lockOffsetInBytesCapture=%d, lockSizeInBytesCapture=%d\n", lockOffsetInBytesCapture, lockSizeInBytesCapture);*/
- #endif
+typedef pa_channel_map_def_t ma_pa_channel_map_def_t;
+#define MA_PA_CHANNEL_MAP_AIFF PA_CHANNEL_MAP_AIFF
+#define MA_PA_CHANNEL_MAP_ALSA PA_CHANNEL_MAP_ALSA
+#define MA_PA_CHANNEL_MAP_AUX PA_CHANNEL_MAP_AUX
+#define MA_PA_CHANNEL_MAP_WAVEEX PA_CHANNEL_MAP_WAVEEX
+#define MA_PA_CHANNEL_MAP_OSS PA_CHANNEL_MAP_OSS
+#define MA_PA_CHANNEL_MAP_DEFAULT PA_CHANNEL_MAP_DEFAULT
- if (lockSizeInBytesCapture < (pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods)) {
- ma_sleep(waitTimeInMilliseconds);
- continue; /* Nothing is available in the capture buffer. */
- }
+typedef pa_sample_format_t ma_pa_sample_format_t;
+#define MA_PA_SAMPLE_INVALID PA_SAMPLE_INVALID
+#define MA_PA_SAMPLE_U8 PA_SAMPLE_U8
+#define MA_PA_SAMPLE_ALAW PA_SAMPLE_ALAW
+#define MA_PA_SAMPLE_ULAW PA_SAMPLE_ULAW
+#define MA_PA_SAMPLE_S16LE PA_SAMPLE_S16LE
+#define MA_PA_SAMPLE_S16BE PA_SAMPLE_S16BE
+#define MA_PA_SAMPLE_FLOAT32LE PA_SAMPLE_FLOAT32LE
+#define MA_PA_SAMPLE_FLOAT32BE PA_SAMPLE_FLOAT32BE
+#define MA_PA_SAMPLE_S32LE PA_SAMPLE_S32LE
+#define MA_PA_SAMPLE_S32BE PA_SAMPLE_S32BE
+#define MA_PA_SAMPLE_S24LE PA_SAMPLE_S24LE
+#define MA_PA_SAMPLE_S24BE PA_SAMPLE_S24BE
+#define MA_PA_SAMPLE_S24_32LE PA_SAMPLE_S24_32LE
+#define MA_PA_SAMPLE_S24_32BE PA_SAMPLE_S24_32BE
- hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0);
- if (FAILED(hr)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER);
- }
+typedef pa_mainloop ma_pa_mainloop;
+typedef pa_mainloop_api ma_pa_mainloop_api;
+typedef pa_context ma_pa_context;
+typedef pa_operation ma_pa_operation;
+typedef pa_stream ma_pa_stream;
+typedef pa_spawn_api ma_pa_spawn_api;
+typedef pa_buffer_attr ma_pa_buffer_attr;
+typedef pa_channel_map ma_pa_channel_map;
+typedef pa_cvolume ma_pa_cvolume;
+typedef pa_sample_spec ma_pa_sample_spec;
+typedef pa_sink_info ma_pa_sink_info;
+typedef pa_source_info ma_pa_source_info;
- #ifdef MA_DEBUG_OUTPUT
- if (lockSizeInBytesCapture != mappedSizeInBytesCapture) {
- printf("[DirectSound] (Capture) lockSizeInBytesCapture=%d != mappedSizeInBytesCapture=%d\n", lockSizeInBytesCapture, mappedSizeInBytesCapture);
- }
- #endif
+typedef pa_context_notify_cb_t ma_pa_context_notify_cb_t;
+typedef pa_sink_info_cb_t ma_pa_sink_info_cb_t;
+typedef pa_source_info_cb_t ma_pa_source_info_cb_t;
+typedef pa_stream_success_cb_t ma_pa_stream_success_cb_t;
+typedef pa_stream_request_cb_t ma_pa_stream_request_cb_t;
+typedef pa_free_cb_t ma_pa_free_cb_t;
+#else
+#define MA_PA_OK 0
+#define MA_PA_ERR_ACCESS 1
+#define MA_PA_ERR_INVALID 2
+#define MA_PA_ERR_NOENTITY 5
- ma_device__send_frames_to_client(pDevice, mappedSizeInBytesCapture/bpfCapture, pMappedBufferCapture);
+#define MA_PA_CHANNELS_MAX 32
+#define MA_PA_RATE_MAX 384000
- hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedBufferCapture, mappedSizeInBytesCapture, NULL, 0);
- if (FAILED(hr)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER);
- }
- prevReadCursorInBytesCapture = lockOffsetInBytesCapture + mappedSizeInBytesCapture;
+typedef int ma_pa_context_flags_t;
+#define MA_PA_CONTEXT_NOFLAGS 0x00000000
+#define MA_PA_CONTEXT_NOAUTOSPAWN 0x00000001
+#define MA_PA_CONTEXT_NOFAIL 0x00000002
- if (prevReadCursorInBytesCapture == (pDevice->capture.internalBufferSizeInFrames*bpfCapture)) {
- prevReadCursorInBytesCapture = 0;
- }
- } break;
+typedef int ma_pa_stream_flags_t;
+#define MA_PA_STREAM_NOFLAGS 0x00000000
+#define MA_PA_STREAM_START_CORKED 0x00000001
+#define MA_PA_STREAM_INTERPOLATE_TIMING 0x00000002
+#define MA_PA_STREAM_NOT_MONOTONIC 0x00000004
+#define MA_PA_STREAM_AUTO_TIMING_UPDATE 0x00000008
+#define MA_PA_STREAM_NO_REMAP_CHANNELS 0x00000010
+#define MA_PA_STREAM_NO_REMIX_CHANNELS 0x00000020
+#define MA_PA_STREAM_FIX_FORMAT 0x00000040
+#define MA_PA_STREAM_FIX_RATE 0x00000080
+#define MA_PA_STREAM_FIX_CHANNELS 0x00000100
+#define MA_PA_STREAM_DONT_MOVE 0x00000200
+#define MA_PA_STREAM_VARIABLE_RATE 0x00000400
+#define MA_PA_STREAM_PEAK_DETECT 0x00000800
+#define MA_PA_STREAM_START_MUTED 0x00001000
+#define MA_PA_STREAM_ADJUST_LATENCY 0x00002000
+#define MA_PA_STREAM_EARLY_REQUESTS 0x00004000
+#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND 0x00008000
+#define MA_PA_STREAM_START_UNMUTED 0x00010000
+#define MA_PA_STREAM_FAIL_ON_SUSPEND 0x00020000
+#define MA_PA_STREAM_RELATIVE_VOLUME 0x00040000
+#define MA_PA_STREAM_PASSTHROUGH 0x00080000
+typedef int ma_pa_sink_flags_t;
+#define MA_PA_SINK_NOFLAGS 0x00000000
+#define MA_PA_SINK_HW_VOLUME_CTRL 0x00000001
+#define MA_PA_SINK_LATENCY 0x00000002
+#define MA_PA_SINK_HARDWARE 0x00000004
+#define MA_PA_SINK_NETWORK 0x00000008
+#define MA_PA_SINK_HW_MUTE_CTRL 0x00000010
+#define MA_PA_SINK_DECIBEL_VOLUME 0x00000020
+#define MA_PA_SINK_FLAT_VOLUME 0x00000040
+#define MA_PA_SINK_DYNAMIC_LATENCY 0x00000080
+#define MA_PA_SINK_SET_FORMATS 0x00000100
+typedef int ma_pa_source_flags_t;
+#define MA_PA_SOURCE_NOFLAGS 0x00000000
+#define MA_PA_SOURCE_HW_VOLUME_CTRL 0x00000001
+#define MA_PA_SOURCE_LATENCY 0x00000002
+#define MA_PA_SOURCE_HARDWARE 0x00000004
+#define MA_PA_SOURCE_NETWORK 0x00000008
+#define MA_PA_SOURCE_HW_MUTE_CTRL 0x00000010
+#define MA_PA_SOURCE_DECIBEL_VOLUME 0x00000020
+#define MA_PA_SOURCE_DYNAMIC_LATENCY 0x00000040
+#define MA_PA_SOURCE_FLAT_VOLUME 0x00000080
- case ma_device_type_playback:
- {
- DWORD availableBytesPlayback;
- DWORD physicalPlayCursorInBytes;
- DWORD physicalWriteCursorInBytes;
- if (FAILED(ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes))) {
- break;
- }
+typedef int ma_pa_context_state_t;
+#define MA_PA_CONTEXT_UNCONNECTED 0
+#define MA_PA_CONTEXT_CONNECTING 1
+#define MA_PA_CONTEXT_AUTHORIZING 2
+#define MA_PA_CONTEXT_SETTING_NAME 3
+#define MA_PA_CONTEXT_READY 4
+#define MA_PA_CONTEXT_FAILED 5
+#define MA_PA_CONTEXT_TERMINATED 6
- if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) {
- physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback;
- }
- prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes;
+typedef int ma_pa_stream_state_t;
+#define MA_PA_STREAM_UNCONNECTED 0
+#define MA_PA_STREAM_CREATING 1
+#define MA_PA_STREAM_READY 2
+#define MA_PA_STREAM_FAILED 3
+#define MA_PA_STREAM_TERMINATED 4
- /* If there's any bytes available for writing we can do that now. The space between the virtual cursor position and play cursor. */
- if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
- /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */
- if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
- availableBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - virtualWriteCursorInBytesPlayback;
- availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */
- } else {
- /* This is an error. */
- #ifdef MA_DEBUG_OUTPUT
- printf("[DirectSound] (Playback) WARNING: Play cursor has moved in front of the write cursor (same loop iterations). physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
- #endif
- availableBytesPlayback = 0;
- }
- } else {
- /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */
- if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
- availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
- } else {
- /* This is an error. */
- #ifdef MA_DEBUG_OUTPUT
- printf("[DirectSound] (Playback) WARNING: Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%d, virtualWriteCursorInBytes=%d.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
- #endif
- availableBytesPlayback = 0;
- }
- }
+typedef int ma_pa_operation_state_t;
+#define MA_PA_OPERATION_RUNNING 0
+#define MA_PA_OPERATION_DONE 1
+#define MA_PA_OPERATION_CANCELLED 2
- #ifdef MA_DEBUG_OUTPUT
- /*printf("[DirectSound] (Playback) physicalPlayCursorInBytes=%d, availableBytesPlayback=%d\n", physicalPlayCursorInBytes, availableBytesPlayback);*/
- #endif
+typedef int ma_pa_sink_state_t;
+#define MA_PA_SINK_INVALID_STATE -1
+#define MA_PA_SINK_RUNNING 0
+#define MA_PA_SINK_IDLE 1
+#define MA_PA_SINK_SUSPENDED 2
- /* If there's no room available for writing we need to wait for more. */
- if (availableBytesPlayback < (pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)) {
- /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */
- if (availableBytesPlayback == 0 && !isPlaybackDeviceStarted) {
- if (FAILED(ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING))) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
- isPlaybackDeviceStarted = MA_TRUE;
- } else {
- ma_sleep(waitTimeInMilliseconds);
- continue;
- }
- }
+typedef int ma_pa_source_state_t;
+#define MA_PA_SOURCE_INVALID_STATE -1
+#define MA_PA_SOURCE_RUNNING 0
+#define MA_PA_SOURCE_IDLE 1
+#define MA_PA_SOURCE_SUSPENDED 2
- /* Getting here means there room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */
- lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback;
- if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
- /* Same loop iteration. Go up to the end of the buffer. */
- lockSizeInBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - virtualWriteCursorInBytesPlayback;
- } else {
- /* Different loop iterations. Go up to the physical play cursor. */
- lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
- }
+typedef int ma_pa_seek_mode_t;
+#define MA_PA_SEEK_RELATIVE 0
+#define MA_PA_SEEK_ABSOLUTE 1
+#define MA_PA_SEEK_RELATIVE_ON_READ 2
+#define MA_PA_SEEK_RELATIVE_END 3
- hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0);
- if (FAILED(hr)) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER);
- break;
- }
+typedef int ma_pa_channel_position_t;
+#define MA_PA_CHANNEL_POSITION_INVALID -1
+#define MA_PA_CHANNEL_POSITION_MONO 0
+#define MA_PA_CHANNEL_POSITION_FRONT_LEFT 1
+#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT 2
+#define MA_PA_CHANNEL_POSITION_FRONT_CENTER 3
+#define MA_PA_CHANNEL_POSITION_REAR_CENTER 4
+#define MA_PA_CHANNEL_POSITION_REAR_LEFT 5
+#define MA_PA_CHANNEL_POSITION_REAR_RIGHT 6
+#define MA_PA_CHANNEL_POSITION_LFE 7
+#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER 8
+#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER 9
+#define MA_PA_CHANNEL_POSITION_SIDE_LEFT 10
+#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT 11
+#define MA_PA_CHANNEL_POSITION_AUX0 12
+#define MA_PA_CHANNEL_POSITION_AUX1 13
+#define MA_PA_CHANNEL_POSITION_AUX2 14
+#define MA_PA_CHANNEL_POSITION_AUX3 15
+#define MA_PA_CHANNEL_POSITION_AUX4 16
+#define MA_PA_CHANNEL_POSITION_AUX5 17
+#define MA_PA_CHANNEL_POSITION_AUX6 18
+#define MA_PA_CHANNEL_POSITION_AUX7 19
+#define MA_PA_CHANNEL_POSITION_AUX8 20
+#define MA_PA_CHANNEL_POSITION_AUX9 21
+#define MA_PA_CHANNEL_POSITION_AUX10 22
+#define MA_PA_CHANNEL_POSITION_AUX11 23
+#define MA_PA_CHANNEL_POSITION_AUX12 24
+#define MA_PA_CHANNEL_POSITION_AUX13 25
+#define MA_PA_CHANNEL_POSITION_AUX14 26
+#define MA_PA_CHANNEL_POSITION_AUX15 27
+#define MA_PA_CHANNEL_POSITION_AUX16 28
+#define MA_PA_CHANNEL_POSITION_AUX17 29
+#define MA_PA_CHANNEL_POSITION_AUX18 30
+#define MA_PA_CHANNEL_POSITION_AUX19 31
+#define MA_PA_CHANNEL_POSITION_AUX20 32
+#define MA_PA_CHANNEL_POSITION_AUX21 33
+#define MA_PA_CHANNEL_POSITION_AUX22 34
+#define MA_PA_CHANNEL_POSITION_AUX23 35
+#define MA_PA_CHANNEL_POSITION_AUX24 36
+#define MA_PA_CHANNEL_POSITION_AUX25 37
+#define MA_PA_CHANNEL_POSITION_AUX26 38
+#define MA_PA_CHANNEL_POSITION_AUX27 39
+#define MA_PA_CHANNEL_POSITION_AUX28 40
+#define MA_PA_CHANNEL_POSITION_AUX29 41
+#define MA_PA_CHANNEL_POSITION_AUX30 42
+#define MA_PA_CHANNEL_POSITION_AUX31 43
+#define MA_PA_CHANNEL_POSITION_TOP_CENTER 44
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT 45
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT 46
+#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER 47
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT 48
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT 49
+#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER 50
+#define MA_PA_CHANNEL_POSITION_LEFT MA_PA_CHANNEL_POSITION_FRONT_LEFT
+#define MA_PA_CHANNEL_POSITION_RIGHT MA_PA_CHANNEL_POSITION_FRONT_RIGHT
+#define MA_PA_CHANNEL_POSITION_CENTER MA_PA_CHANNEL_POSITION_FRONT_CENTER
+#define MA_PA_CHANNEL_POSITION_SUBWOOFER MA_PA_CHANNEL_POSITION_LFE
- /* At this point we have a buffer for output. */
- ma_device__read_frames_from_client(pDevice, (mappedSizeInBytesPlayback/bpfPlayback), pMappedBufferPlayback);
+typedef int ma_pa_channel_map_def_t;
+#define MA_PA_CHANNEL_MAP_AIFF 0
+#define MA_PA_CHANNEL_MAP_ALSA 1
+#define MA_PA_CHANNEL_MAP_AUX 2
+#define MA_PA_CHANNEL_MAP_WAVEEX 3
+#define MA_PA_CHANNEL_MAP_OSS 4
+#define MA_PA_CHANNEL_MAP_DEFAULT MA_PA_CHANNEL_MAP_AIFF
- hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedBufferPlayback, mappedSizeInBytesPlayback, NULL, 0);
- if (FAILED(hr)) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER);
- break;
- }
+typedef int ma_pa_sample_format_t;
+#define MA_PA_SAMPLE_INVALID -1
+#define MA_PA_SAMPLE_U8 0
+#define MA_PA_SAMPLE_ALAW 1
+#define MA_PA_SAMPLE_ULAW 2
+#define MA_PA_SAMPLE_S16LE 3
+#define MA_PA_SAMPLE_S16BE 4
+#define MA_PA_SAMPLE_FLOAT32LE 5
+#define MA_PA_SAMPLE_FLOAT32BE 6
+#define MA_PA_SAMPLE_S32LE 7
+#define MA_PA_SAMPLE_S32BE 8
+#define MA_PA_SAMPLE_S24LE 9
+#define MA_PA_SAMPLE_S24BE 10
+#define MA_PA_SAMPLE_S24_32LE 11
+#define MA_PA_SAMPLE_S24_32BE 12
- virtualWriteCursorInBytesPlayback += mappedSizeInBytesPlayback;
- if (virtualWriteCursorInBytesPlayback == pDevice->playback.internalBufferSizeInFrames*bpfPlayback) {
- virtualWriteCursorInBytesPlayback = 0;
- virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback;
- }
-
- /*
- We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds
- a bit of a buffer to prevent the playback buffer from getting starved.
- */
- framesWrittenToPlaybackDevice += mappedSizeInBytesPlayback/bpfPlayback;
- if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= (pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)) {
- if (FAILED(ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING))) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
- isPlaybackDeviceStarted = MA_TRUE;
- }
- } break;
+typedef struct ma_pa_mainloop ma_pa_mainloop;
+typedef struct ma_pa_mainloop_api ma_pa_mainloop_api;
+typedef struct ma_pa_context ma_pa_context;
+typedef struct ma_pa_operation ma_pa_operation;
+typedef struct ma_pa_stream ma_pa_stream;
+typedef struct ma_pa_spawn_api ma_pa_spawn_api;
+typedef struct
+{
+ ma_uint32 maxlength;
+ ma_uint32 tlength;
+ ma_uint32 prebuf;
+ ma_uint32 minreq;
+ ma_uint32 fragsize;
+} ma_pa_buffer_attr;
- default: return MA_INVALID_ARGS; /* Invalid device type. */
- }
+typedef struct
+{
+ ma_uint8 channels;
+ ma_pa_channel_position_t map[MA_PA_CHANNELS_MAX];
+} ma_pa_channel_map;
- if (result != MA_SUCCESS) {
- return result;
- }
- }
+typedef struct
+{
+ ma_uint8 channels;
+ ma_uint32 values[MA_PA_CHANNELS_MAX];
+} ma_pa_cvolume;
- /* Getting here means the device is being stopped. */
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- if (FAILED(ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer))) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Stop() failed.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
- }
- }
+typedef struct
+{
+ ma_pa_sample_format_t format;
+ ma_uint32 rate;
+ ma_uint8 channels;
+} ma_pa_sample_spec;
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- /* The playback device should be drained before stopping. All we do is wait until the available bytes is equal to the size of the buffer. */
- if (isPlaybackDeviceStarted) {
- for (;;) {
- DWORD availableBytesPlayback = 0;
- DWORD physicalPlayCursorInBytes;
- DWORD physicalWriteCursorInBytes;
- if (FAILED(ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes))) {
- break;
- }
+typedef struct
+{
+ const char* name;
+ ma_uint32 index;
+ const char* description;
+ ma_pa_sample_spec sample_spec;
+ ma_pa_channel_map channel_map;
+ ma_uint32 owner_module;
+ ma_pa_cvolume volume;
+ int mute;
+ ma_uint32 monitor_source;
+ const char* monitor_source_name;
+ ma_uint64 latency;
+ const char* driver;
+ ma_pa_sink_flags_t flags;
+ void* proplist;
+ ma_uint64 configured_latency;
+ ma_uint32 base_volume;
+ ma_pa_sink_state_t state;
+ ma_uint32 n_volume_steps;
+ ma_uint32 card;
+ ma_uint32 n_ports;
+ void** ports;
+ void* active_port;
+ ma_uint8 n_formats;
+ void** formats;
+} ma_pa_sink_info;
- if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) {
- physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback;
- }
- prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes;
+typedef struct
+{
+ const char *name;
+ ma_uint32 index;
+ const char *description;
+ ma_pa_sample_spec sample_spec;
+ ma_pa_channel_map channel_map;
+ ma_uint32 owner_module;
+ ma_pa_cvolume volume;
+ int mute;
+ ma_uint32 monitor_of_sink;
+ const char *monitor_of_sink_name;
+ ma_uint64 latency;
+ const char *driver;
+ ma_pa_source_flags_t flags;
+ void* proplist;
+ ma_uint64 configured_latency;
+ ma_uint32 base_volume;
+ ma_pa_source_state_t state;
+ ma_uint32 n_volume_steps;
+ ma_uint32 card;
+ ma_uint32 n_ports;
+ void** ports;
+ void* active_port;
+ ma_uint8 n_formats;
+ void** formats;
+} ma_pa_source_info;
- if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
- /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */
- if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
- availableBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - virtualWriteCursorInBytesPlayback;
- availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */
- } else {
- break;
- }
- } else {
- /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */
- if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
- availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
- } else {
- break;
- }
- }
+typedef void (* ma_pa_context_notify_cb_t)(ma_pa_context* c, void* userdata);
+typedef void (* ma_pa_sink_info_cb_t) (ma_pa_context* c, const ma_pa_sink_info* i, int eol, void* userdata);
+typedef void (* ma_pa_source_info_cb_t) (ma_pa_context* c, const ma_pa_source_info* i, int eol, void* userdata);
+typedef void (* ma_pa_stream_success_cb_t)(ma_pa_stream* s, int success, void* userdata);
+typedef void (* ma_pa_stream_request_cb_t)(ma_pa_stream* s, size_t nbytes, void* userdata);
+typedef void (* ma_pa_free_cb_t) (void* p);
+#endif
- if (availableBytesPlayback >= (pDevice->playback.internalBufferSizeInFrames*bpfPlayback)) {
- break;
- }
- ma_sleep(waitTimeInMilliseconds);
- }
- }
+typedef ma_pa_mainloop* (* ma_pa_mainloop_new_proc) ();
+typedef void (* ma_pa_mainloop_free_proc) (ma_pa_mainloop* m);
+typedef ma_pa_mainloop_api* (* ma_pa_mainloop_get_api_proc) (ma_pa_mainloop* m);
+typedef int (* ma_pa_mainloop_iterate_proc) (ma_pa_mainloop* m, int block, int* retval);
+typedef void (* ma_pa_mainloop_wakeup_proc) (ma_pa_mainloop* m);
+typedef ma_pa_context* (* ma_pa_context_new_proc) (ma_pa_mainloop_api* mainloop, const char* name);
+typedef void (* ma_pa_context_unref_proc) (ma_pa_context* c);
+typedef int (* ma_pa_context_connect_proc) (ma_pa_context* c, const char* server, ma_pa_context_flags_t flags, const ma_pa_spawn_api* api);
+typedef void (* ma_pa_context_disconnect_proc) (ma_pa_context* c);
+typedef void (* ma_pa_context_set_state_callback_proc) (ma_pa_context* c, ma_pa_context_notify_cb_t cb, void* userdata);
+typedef ma_pa_context_state_t (* ma_pa_context_get_state_proc) (ma_pa_context* c);
+typedef ma_pa_operation* (* ma_pa_context_get_sink_info_list_proc) (ma_pa_context* c, ma_pa_sink_info_cb_t cb, void* userdata);
+typedef ma_pa_operation* (* ma_pa_context_get_source_info_list_proc) (ma_pa_context* c, ma_pa_source_info_cb_t cb, void* userdata);
+typedef ma_pa_operation* (* ma_pa_context_get_sink_info_by_name_proc) (ma_pa_context* c, const char* name, ma_pa_sink_info_cb_t cb, void* userdata);
+typedef ma_pa_operation* (* ma_pa_context_get_source_info_by_name_proc)(ma_pa_context* c, const char* name, ma_pa_source_info_cb_t cb, void* userdata);
+typedef void (* ma_pa_operation_unref_proc) (ma_pa_operation* o);
+typedef ma_pa_operation_state_t (* ma_pa_operation_get_state_proc) (ma_pa_operation* o);
+typedef ma_pa_channel_map* (* ma_pa_channel_map_init_extend_proc) (ma_pa_channel_map* m, unsigned channels, ma_pa_channel_map_def_t def);
+typedef int (* ma_pa_channel_map_valid_proc) (const ma_pa_channel_map* m);
+typedef int (* ma_pa_channel_map_compatible_proc) (const ma_pa_channel_map* m, const ma_pa_sample_spec* ss);
+typedef ma_pa_stream* (* ma_pa_stream_new_proc) (ma_pa_context* c, const char* name, const ma_pa_sample_spec* ss, const ma_pa_channel_map* map);
+typedef void (* ma_pa_stream_unref_proc) (ma_pa_stream* s);
+typedef int (* ma_pa_stream_connect_playback_proc) (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags, const ma_pa_cvolume* volume, ma_pa_stream* sync_stream);
+typedef int (* ma_pa_stream_connect_record_proc) (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags);
+typedef int (* ma_pa_stream_disconnect_proc) (ma_pa_stream* s);
+typedef ma_pa_stream_state_t (* ma_pa_stream_get_state_proc) (ma_pa_stream* s);
+typedef const ma_pa_sample_spec* (* ma_pa_stream_get_sample_spec_proc) (ma_pa_stream* s);
+typedef const ma_pa_channel_map* (* ma_pa_stream_get_channel_map_proc) (ma_pa_stream* s);
+typedef const ma_pa_buffer_attr* (* ma_pa_stream_get_buffer_attr_proc) (ma_pa_stream* s);
+typedef ma_pa_operation* (* ma_pa_stream_set_buffer_attr_proc) (ma_pa_stream* s, const ma_pa_buffer_attr* attr, ma_pa_stream_success_cb_t cb, void* userdata);
+typedef const char* (* ma_pa_stream_get_device_name_proc) (ma_pa_stream* s);
+typedef void (* ma_pa_stream_set_write_callback_proc) (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata);
+typedef void (* ma_pa_stream_set_read_callback_proc) (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata);
+typedef ma_pa_operation* (* ma_pa_stream_flush_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata);
+typedef ma_pa_operation* (* ma_pa_stream_drain_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata);
+typedef int (* ma_pa_stream_is_corked_proc) (ma_pa_stream* s);
+typedef ma_pa_operation* (* ma_pa_stream_cork_proc) (ma_pa_stream* s, int b, ma_pa_stream_success_cb_t cb, void* userdata);
+typedef ma_pa_operation* (* ma_pa_stream_trigger_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata);
+typedef int (* ma_pa_stream_begin_write_proc) (ma_pa_stream* s, void** data, size_t* nbytes);
+typedef int (* ma_pa_stream_write_proc) (ma_pa_stream* s, const void* data, size_t nbytes, ma_pa_free_cb_t free_cb, int64_t offset, ma_pa_seek_mode_t seek);
+typedef int (* ma_pa_stream_peek_proc) (ma_pa_stream* s, const void** data, size_t* nbytes);
+typedef int (* ma_pa_stream_drop_proc) (ma_pa_stream* s);
+typedef size_t (* ma_pa_stream_writable_size_proc) (ma_pa_stream* s);
+typedef size_t (* ma_pa_stream_readable_size_proc) (ma_pa_stream* s);
- if (FAILED(ma_IDirectSoundBuffer_Stop((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer))) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Stop() failed.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
- }
+typedef struct
+{
+ ma_uint32 count;
+ ma_uint32 capacity;
+ ma_device_info* pInfo;
+} ma_pulse_device_enum_data;
- ma_IDirectSoundBuffer_SetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0);
+static ma_result ma_result_from_pulse(int result)
+{
+ switch (result) {
+ case MA_PA_OK: return MA_SUCCESS;
+ case MA_PA_ERR_ACCESS: return MA_ACCESS_DENIED;
+ case MA_PA_ERR_INVALID: return MA_INVALID_ARGS;
+ case MA_PA_ERR_NOENTITY: return MA_NO_DEVICE;
+ default: return MA_ERROR;
}
-
- return MA_SUCCESS;
}
-ma_result ma_context_uninit__dsound(ma_context* pContext)
+#if 0
+static ma_pa_sample_format_t ma_format_to_pulse(ma_format format)
{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_dsound);
-
- ma_dlclose(pContext, pContext->dsound.hDSoundDLL);
+ if (ma_is_little_endian()) {
+ switch (format) {
+ case ma_format_s16: return MA_PA_SAMPLE_S16LE;
+ case ma_format_s24: return MA_PA_SAMPLE_S24LE;
+ case ma_format_s32: return MA_PA_SAMPLE_S32LE;
+ case ma_format_f32: return MA_PA_SAMPLE_FLOAT32LE;
+ default: break;
+ }
+ } else {
+ switch (format) {
+ case ma_format_s16: return MA_PA_SAMPLE_S16BE;
+ case ma_format_s24: return MA_PA_SAMPLE_S24BE;
+ case ma_format_s32: return MA_PA_SAMPLE_S32BE;
+ case ma_format_f32: return MA_PA_SAMPLE_FLOAT32BE;
+ default: break;
+ }
+ }
- return MA_SUCCESS;
+ /* Endian agnostic. */
+ switch (format) {
+ case ma_format_u8: return MA_PA_SAMPLE_U8;
+ default: return MA_PA_SAMPLE_INVALID;
+ }
}
+#endif
-ma_result ma_context_init__dsound(const ma_context_config* pConfig, ma_context* pContext)
+static ma_format ma_format_from_pulse(ma_pa_sample_format_t format)
{
- ma_assert(pContext != NULL);
-
- (void)pConfig;
-
- pContext->dsound.hDSoundDLL = ma_dlopen(pContext, "dsound.dll");
- if (pContext->dsound.hDSoundDLL == NULL) {
- return MA_API_NOT_FOUND;
+ if (ma_is_little_endian()) {
+ switch (format) {
+ case MA_PA_SAMPLE_S16LE: return ma_format_s16;
+ case MA_PA_SAMPLE_S24LE: return ma_format_s24;
+ case MA_PA_SAMPLE_S32LE: return ma_format_s32;
+ case MA_PA_SAMPLE_FLOAT32LE: return ma_format_f32;
+ default: break;
+ }
+ } else {
+ switch (format) {
+ case MA_PA_SAMPLE_S16BE: return ma_format_s16;
+ case MA_PA_SAMPLE_S24BE: return ma_format_s24;
+ case MA_PA_SAMPLE_S32BE: return ma_format_s32;
+ case MA_PA_SAMPLE_FLOAT32BE: return ma_format_f32;
+ default: break;
+ }
}
- pContext->dsound.DirectSoundCreate = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCreate");
- pContext->dsound.DirectSoundEnumerateA = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundEnumerateA");
- pContext->dsound.DirectSoundCaptureCreate = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCaptureCreate");
- pContext->dsound.DirectSoundCaptureEnumerateA = ma_dlsym(pContext, pContext->dsound.hDSoundDLL, "DirectSoundCaptureEnumerateA");
-
- pContext->onUninit = ma_context_uninit__dsound;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__dsound;
- pContext->onEnumDevices = ma_context_enumerate_devices__dsound;
- pContext->onGetDeviceInfo = ma_context_get_device_info__dsound;
- pContext->onDeviceInit = ma_device_init__dsound;
- pContext->onDeviceUninit = ma_device_uninit__dsound;
- pContext->onDeviceStart = NULL; /* Not used. Started in onDeviceMainLoop. */
- pContext->onDeviceStop = NULL; /* Not used. Stopped in onDeviceMainLoop. */
- pContext->onDeviceWrite = NULL;
- pContext->onDeviceRead = NULL;
- pContext->onDeviceMainLoop = ma_device_main_loop__dsound;
-
- return MA_SUCCESS;
+ /* Endian agnostic. */
+ switch (format) {
+ case MA_PA_SAMPLE_U8: return ma_format_u8;
+ default: return ma_format_unknown;
+ }
}
-#endif
-
-
-
-/******************************************************************************
-
-WinMM Backend
-
-******************************************************************************/
-#ifdef MA_HAS_WINMM
-/*
-Some older compilers don't have WAVEOUTCAPS2A and WAVEINCAPS2A, so we'll need to write this ourselves. These structures
-are exactly the same as the older ones but they have a few GUIDs for manufacturer/product/name identification. I'm keeping
-the names the same as the Win32 library for consistency, but namespaced to avoid naming conflicts with the Win32 version.
-*/
-typedef struct
+static ma_channel ma_channel_position_from_pulse(ma_pa_channel_position_t position)
{
- WORD wMid;
- WORD wPid;
- MMVERSION vDriverVersion;
- CHAR szPname[MAXPNAMELEN];
- DWORD dwFormats;
- WORD wChannels;
- WORD wReserved1;
- DWORD dwSupport;
- GUID ManufacturerGuid;
- GUID ProductGuid;
- GUID NameGuid;
-} MA_WAVEOUTCAPS2A;
-typedef struct
-{
- WORD wMid;
- WORD wPid;
- MMVERSION vDriverVersion;
- CHAR szPname[MAXPNAMELEN];
- DWORD dwFormats;
- WORD wChannels;
- WORD wReserved1;
- GUID ManufacturerGuid;
- GUID ProductGuid;
- GUID NameGuid;
-} MA_WAVEINCAPS2A;
-
-typedef UINT (WINAPI * MA_PFN_waveOutGetNumDevs)(void);
-typedef MMRESULT (WINAPI * MA_PFN_waveOutGetDevCapsA)(ma_uintptr uDeviceID, LPWAVEOUTCAPSA pwoc, UINT cbwoc);
-typedef MMRESULT (WINAPI * MA_PFN_waveOutOpen)(LPHWAVEOUT phwo, UINT uDeviceID, LPCWAVEFORMATEX pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen);
-typedef MMRESULT (WINAPI * MA_PFN_waveOutClose)(HWAVEOUT hwo);
-typedef MMRESULT (WINAPI * MA_PFN_waveOutPrepareHeader)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh);
-typedef MMRESULT (WINAPI * MA_PFN_waveOutUnprepareHeader)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh);
-typedef MMRESULT (WINAPI * MA_PFN_waveOutWrite)(HWAVEOUT hwo, LPWAVEHDR pwh, UINT cbwh);
-typedef MMRESULT (WINAPI * MA_PFN_waveOutReset)(HWAVEOUT hwo);
-typedef UINT (WINAPI * MA_PFN_waveInGetNumDevs)(void);
-typedef MMRESULT (WINAPI * MA_PFN_waveInGetDevCapsA)(ma_uintptr uDeviceID, LPWAVEINCAPSA pwic, UINT cbwic);
-typedef MMRESULT (WINAPI * MA_PFN_waveInOpen)(LPHWAVEIN phwi, UINT uDeviceID, LPCWAVEFORMATEX pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen);
-typedef MMRESULT (WINAPI * MA_PFN_waveInClose)(HWAVEIN hwi);
-typedef MMRESULT (WINAPI * MA_PFN_waveInPrepareHeader)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh);
-typedef MMRESULT (WINAPI * MA_PFN_waveInUnprepareHeader)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh);
-typedef MMRESULT (WINAPI * MA_PFN_waveInAddBuffer)(HWAVEIN hwi, LPWAVEHDR pwh, UINT cbwh);
-typedef MMRESULT (WINAPI * MA_PFN_waveInStart)(HWAVEIN hwi);
-typedef MMRESULT (WINAPI * MA_PFN_waveInReset)(HWAVEIN hwi);
+ switch (position)
+ {
+ case MA_PA_CHANNEL_POSITION_INVALID: return MA_CHANNEL_NONE;
+ case MA_PA_CHANNEL_POSITION_MONO: return MA_CHANNEL_MONO;
+ case MA_PA_CHANNEL_POSITION_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT;
+ case MA_PA_CHANNEL_POSITION_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT;
+ case MA_PA_CHANNEL_POSITION_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER;
+ case MA_PA_CHANNEL_POSITION_REAR_CENTER: return MA_CHANNEL_BACK_CENTER;
+ case MA_PA_CHANNEL_POSITION_REAR_LEFT: return MA_CHANNEL_BACK_LEFT;
+ case MA_PA_CHANNEL_POSITION_REAR_RIGHT: return MA_CHANNEL_BACK_RIGHT;
+ case MA_PA_CHANNEL_POSITION_LFE: return MA_CHANNEL_LFE;
+ case MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER;
+ case MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER;
+ case MA_PA_CHANNEL_POSITION_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT;
+ case MA_PA_CHANNEL_POSITION_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT;
+ case MA_PA_CHANNEL_POSITION_AUX0: return MA_CHANNEL_AUX_0;
+ case MA_PA_CHANNEL_POSITION_AUX1: return MA_CHANNEL_AUX_1;
+ case MA_PA_CHANNEL_POSITION_AUX2: return MA_CHANNEL_AUX_2;
+ case MA_PA_CHANNEL_POSITION_AUX3: return MA_CHANNEL_AUX_3;
+ case MA_PA_CHANNEL_POSITION_AUX4: return MA_CHANNEL_AUX_4;
+ case MA_PA_CHANNEL_POSITION_AUX5: return MA_CHANNEL_AUX_5;
+ case MA_PA_CHANNEL_POSITION_AUX6: return MA_CHANNEL_AUX_6;
+ case MA_PA_CHANNEL_POSITION_AUX7: return MA_CHANNEL_AUX_7;
+ case MA_PA_CHANNEL_POSITION_AUX8: return MA_CHANNEL_AUX_8;
+ case MA_PA_CHANNEL_POSITION_AUX9: return MA_CHANNEL_AUX_9;
+ case MA_PA_CHANNEL_POSITION_AUX10: return MA_CHANNEL_AUX_10;
+ case MA_PA_CHANNEL_POSITION_AUX11: return MA_CHANNEL_AUX_11;
+ case MA_PA_CHANNEL_POSITION_AUX12: return MA_CHANNEL_AUX_12;
+ case MA_PA_CHANNEL_POSITION_AUX13: return MA_CHANNEL_AUX_13;
+ case MA_PA_CHANNEL_POSITION_AUX14: return MA_CHANNEL_AUX_14;
+ case MA_PA_CHANNEL_POSITION_AUX15: return MA_CHANNEL_AUX_15;
+ case MA_PA_CHANNEL_POSITION_AUX16: return MA_CHANNEL_AUX_16;
+ case MA_PA_CHANNEL_POSITION_AUX17: return MA_CHANNEL_AUX_17;
+ case MA_PA_CHANNEL_POSITION_AUX18: return MA_CHANNEL_AUX_18;
+ case MA_PA_CHANNEL_POSITION_AUX19: return MA_CHANNEL_AUX_19;
+ case MA_PA_CHANNEL_POSITION_AUX20: return MA_CHANNEL_AUX_20;
+ case MA_PA_CHANNEL_POSITION_AUX21: return MA_CHANNEL_AUX_21;
+ case MA_PA_CHANNEL_POSITION_AUX22: return MA_CHANNEL_AUX_22;
+ case MA_PA_CHANNEL_POSITION_AUX23: return MA_CHANNEL_AUX_23;
+ case MA_PA_CHANNEL_POSITION_AUX24: return MA_CHANNEL_AUX_24;
+ case MA_PA_CHANNEL_POSITION_AUX25: return MA_CHANNEL_AUX_25;
+ case MA_PA_CHANNEL_POSITION_AUX26: return MA_CHANNEL_AUX_26;
+ case MA_PA_CHANNEL_POSITION_AUX27: return MA_CHANNEL_AUX_27;
+ case MA_PA_CHANNEL_POSITION_AUX28: return MA_CHANNEL_AUX_28;
+ case MA_PA_CHANNEL_POSITION_AUX29: return MA_CHANNEL_AUX_29;
+ case MA_PA_CHANNEL_POSITION_AUX30: return MA_CHANNEL_AUX_30;
+ case MA_PA_CHANNEL_POSITION_AUX31: return MA_CHANNEL_AUX_31;
+ case MA_PA_CHANNEL_POSITION_TOP_CENTER: return MA_CHANNEL_TOP_CENTER;
+ case MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT;
+ case MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT;
+ case MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER;
+ case MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT: return MA_CHANNEL_TOP_BACK_LEFT;
+ case MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT;
+ case MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER: return MA_CHANNEL_TOP_BACK_CENTER;
+ default: return MA_CHANNEL_NONE;
+ }
+}
-ma_result ma_result_from_MMRESULT(MMRESULT resultMM)
+#if 0
+static ma_pa_channel_position_t ma_channel_position_to_pulse(ma_channel position)
{
- switch (resultMM) {
- case MMSYSERR_NOERROR: return MA_SUCCESS;
- case MMSYSERR_BADDEVICEID: return MA_INVALID_ARGS;
- case MMSYSERR_INVALHANDLE: return MA_INVALID_ARGS;
- case MMSYSERR_NOMEM: return MA_OUT_OF_MEMORY;
- case MMSYSERR_INVALFLAG: return MA_INVALID_ARGS;
- case MMSYSERR_INVALPARAM: return MA_INVALID_ARGS;
- case MMSYSERR_HANDLEBUSY: return MA_DEVICE_BUSY;
- case MMSYSERR_ERROR: return MA_ERROR;
- default: return MA_ERROR;
+ switch (position)
+ {
+ case MA_CHANNEL_NONE: return MA_PA_CHANNEL_POSITION_INVALID;
+ case MA_CHANNEL_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_FRONT_LEFT;
+ case MA_CHANNEL_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT;
+ case MA_CHANNEL_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_CENTER;
+ case MA_CHANNEL_LFE: return MA_PA_CHANNEL_POSITION_LFE;
+ case MA_CHANNEL_BACK_LEFT: return MA_PA_CHANNEL_POSITION_REAR_LEFT;
+ case MA_CHANNEL_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_REAR_RIGHT;
+ case MA_CHANNEL_FRONT_LEFT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER;
+ case MA_CHANNEL_FRONT_RIGHT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER;
+ case MA_CHANNEL_BACK_CENTER: return MA_PA_CHANNEL_POSITION_REAR_CENTER;
+ case MA_CHANNEL_SIDE_LEFT: return MA_PA_CHANNEL_POSITION_SIDE_LEFT;
+ case MA_CHANNEL_SIDE_RIGHT: return MA_PA_CHANNEL_POSITION_SIDE_RIGHT;
+ case MA_CHANNEL_TOP_CENTER: return MA_PA_CHANNEL_POSITION_TOP_CENTER;
+ case MA_CHANNEL_TOP_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT;
+ case MA_CHANNEL_TOP_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER;
+ case MA_CHANNEL_TOP_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT;
+ case MA_CHANNEL_TOP_BACK_LEFT: return MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT;
+ case MA_CHANNEL_TOP_BACK_CENTER: return MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER;
+ case MA_CHANNEL_TOP_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT;
+ case MA_CHANNEL_19: return MA_PA_CHANNEL_POSITION_AUX18;
+ case MA_CHANNEL_20: return MA_PA_CHANNEL_POSITION_AUX19;
+ case MA_CHANNEL_21: return MA_PA_CHANNEL_POSITION_AUX20;
+ case MA_CHANNEL_22: return MA_PA_CHANNEL_POSITION_AUX21;
+ case MA_CHANNEL_23: return MA_PA_CHANNEL_POSITION_AUX22;
+ case MA_CHANNEL_24: return MA_PA_CHANNEL_POSITION_AUX23;
+ case MA_CHANNEL_25: return MA_PA_CHANNEL_POSITION_AUX24;
+ case MA_CHANNEL_26: return MA_PA_CHANNEL_POSITION_AUX25;
+ case MA_CHANNEL_27: return MA_PA_CHANNEL_POSITION_AUX26;
+ case MA_CHANNEL_28: return MA_PA_CHANNEL_POSITION_AUX27;
+ case MA_CHANNEL_29: return MA_PA_CHANNEL_POSITION_AUX28;
+ case MA_CHANNEL_30: return MA_PA_CHANNEL_POSITION_AUX29;
+ case MA_CHANNEL_31: return MA_PA_CHANNEL_POSITION_AUX30;
+ case MA_CHANNEL_32: return MA_PA_CHANNEL_POSITION_AUX31;
+ default: return (ma_pa_channel_position_t)position;
}
}
+#endif
-char* ma_find_last_character(char* str, char ch)
+static ma_result ma_wait_for_operation__pulse(ma_context* pContext, ma_pa_mainloop* pMainLoop, ma_pa_operation* pOP)
{
- char* last;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pMainLoop != NULL);
+ MA_ASSERT(pOP != NULL);
- if (str == NULL) {
- return NULL;
+ while (((ma_pa_operation_get_state_proc)pContext->pulse.pa_operation_get_state)(pOP) == MA_PA_OPERATION_RUNNING) {
+ int error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL);
+ if (error < 0) {
+ return ma_result_from_pulse(error);
+ }
}
- last = NULL;
- while (*str != '\0') {
- if (*str == ch) {
- last = str;
- }
+ return MA_SUCCESS;
+}
- str += 1;
- }
+static ma_result ma_device__wait_for_operation__pulse(ma_device* pDevice, ma_pa_operation* pOP)
+{
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(pOP != NULL);
- return last;
+ return ma_wait_for_operation__pulse(pDevice->pContext, (ma_pa_mainloop*)pDevice->pulse.pMainLoop, pOP);
}
-/*
-Our own "WAVECAPS" structure that contains generic information shared between WAVEOUTCAPS2 and WAVEINCAPS2 so
-we can do things generically and typesafely. Names are being kept the same for consistency.
-*/
-typedef struct
+static ma_bool32 ma_context_is_device_id_equal__pulse(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
{
- CHAR szPname[MAXPNAMELEN];
- DWORD dwFormats;
- WORD wChannels;
- GUID NameGuid;
-} MA_WAVECAPSA;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext;
+
+ return ma_strcmp(pID0->pulse, pID1->pulse) == 0;
+}
+
-ma_result ma_get_best_info_from_formats_flags__winmm(DWORD dwFormats, WORD channels, WORD* pBitsPerSample, DWORD* pSampleRate)
+typedef struct
{
- WORD bitsPerSample = 0;
- DWORD sampleRate = 0;
+ ma_context* pContext;
+ ma_enum_devices_callback_proc callback;
+ void* pUserData;
+ ma_bool32 isTerminated;
+} ma_context_enumerate_devices_callback_data__pulse;
- if (pBitsPerSample) {
- *pBitsPerSample = 0;
- }
- if (pSampleRate) {
- *pSampleRate = 0;
- }
+static void ma_context_enumerate_devices_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pSinkInfo, int endOfList, void* pUserData)
+{
+ ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData;
+ ma_device_info deviceInfo;
- if (channels == 1) {
- bitsPerSample = 16;
- if ((dwFormats & WAVE_FORMAT_48M16) != 0) {
- sampleRate = 48000;
- } else if ((dwFormats & WAVE_FORMAT_44M16) != 0) {
- sampleRate = 44100;
- } else if ((dwFormats & WAVE_FORMAT_2M16) != 0) {
- sampleRate = 22050;
- } else if ((dwFormats & WAVE_FORMAT_1M16) != 0) {
- sampleRate = 11025;
- } else if ((dwFormats & WAVE_FORMAT_96M16) != 0) {
- sampleRate = 96000;
- } else {
- bitsPerSample = 8;
- if ((dwFormats & WAVE_FORMAT_48M08) != 0) {
- sampleRate = 48000;
- } else if ((dwFormats & WAVE_FORMAT_44M08) != 0) {
- sampleRate = 44100;
- } else if ((dwFormats & WAVE_FORMAT_2M08) != 0) {
- sampleRate = 22050;
- } else if ((dwFormats & WAVE_FORMAT_1M08) != 0) {
- sampleRate = 11025;
- } else if ((dwFormats & WAVE_FORMAT_96M08) != 0) {
- sampleRate = 96000;
- } else {
- return MA_FORMAT_NOT_SUPPORTED;
- }
- }
- } else {
- bitsPerSample = 16;
- if ((dwFormats & WAVE_FORMAT_48S16) != 0) {
- sampleRate = 48000;
- } else if ((dwFormats & WAVE_FORMAT_44S16) != 0) {
- sampleRate = 44100;
- } else if ((dwFormats & WAVE_FORMAT_2S16) != 0) {
- sampleRate = 22050;
- } else if ((dwFormats & WAVE_FORMAT_1S16) != 0) {
- sampleRate = 11025;
- } else if ((dwFormats & WAVE_FORMAT_96S16) != 0) {
- sampleRate = 96000;
- } else {
- bitsPerSample = 8;
- if ((dwFormats & WAVE_FORMAT_48S08) != 0) {
- sampleRate = 48000;
- } else if ((dwFormats & WAVE_FORMAT_44S08) != 0) {
- sampleRate = 44100;
- } else if ((dwFormats & WAVE_FORMAT_2S08) != 0) {
- sampleRate = 22050;
- } else if ((dwFormats & WAVE_FORMAT_1S08) != 0) {
- sampleRate = 11025;
- } else if ((dwFormats & WAVE_FORMAT_96S08) != 0) {
- sampleRate = 96000;
- } else {
- return MA_FORMAT_NOT_SUPPORTED;
- }
- }
- }
+ MA_ASSERT(pData != NULL);
- if (pBitsPerSample) {
- *pBitsPerSample = bitsPerSample;
- }
- if (pSampleRate) {
- *pSampleRate = sampleRate;
+ if (endOfList || pData->isTerminated) {
+ return;
}
- return MA_SUCCESS;
-}
-
-ma_result ma_formats_flags_to_WAVEFORMATEX__winmm(DWORD dwFormats, WORD channels, WAVEFORMATEX* pWF)
-{
- ma_assert(pWF != NULL);
+ MA_ZERO_OBJECT(&deviceInfo);
- ma_zero_object(pWF);
- pWF->cbSize = sizeof(*pWF);
- pWF->wFormatTag = WAVE_FORMAT_PCM;
- pWF->nChannels = (WORD)channels;
- if (pWF->nChannels > 2) {
- pWF->nChannels = 2;
+ /* The name from PulseAudio is the ID for miniaudio. */
+ if (pSinkInfo->name != NULL) {
+ ma_strncpy_s(deviceInfo.id.pulse, sizeof(deviceInfo.id.pulse), pSinkInfo->name, (size_t)-1);
}
- if (channels == 1) {
- pWF->wBitsPerSample = 16;
- if ((dwFormats & WAVE_FORMAT_48M16) != 0) {
- pWF->nSamplesPerSec = 48000;
- } else if ((dwFormats & WAVE_FORMAT_44M16) != 0) {
- pWF->nSamplesPerSec = 44100;
- } else if ((dwFormats & WAVE_FORMAT_2M16) != 0) {
- pWF->nSamplesPerSec = 22050;
- } else if ((dwFormats & WAVE_FORMAT_1M16) != 0) {
- pWF->nSamplesPerSec = 11025;
- } else if ((dwFormats & WAVE_FORMAT_96M16) != 0) {
- pWF->nSamplesPerSec = 96000;
- } else {
- pWF->wBitsPerSample = 8;
- if ((dwFormats & WAVE_FORMAT_48M08) != 0) {
- pWF->nSamplesPerSec = 48000;
- } else if ((dwFormats & WAVE_FORMAT_44M08) != 0) {
- pWF->nSamplesPerSec = 44100;
- } else if ((dwFormats & WAVE_FORMAT_2M08) != 0) {
- pWF->nSamplesPerSec = 22050;
- } else if ((dwFormats & WAVE_FORMAT_1M08) != 0) {
- pWF->nSamplesPerSec = 11025;
- } else if ((dwFormats & WAVE_FORMAT_96M08) != 0) {
- pWF->nSamplesPerSec = 96000;
- } else {
- return MA_FORMAT_NOT_SUPPORTED;
- }
- }
- } else {
- pWF->wBitsPerSample = 16;
- if ((dwFormats & WAVE_FORMAT_48S16) != 0) {
- pWF->nSamplesPerSec = 48000;
- } else if ((dwFormats & WAVE_FORMAT_44S16) != 0) {
- pWF->nSamplesPerSec = 44100;
- } else if ((dwFormats & WAVE_FORMAT_2S16) != 0) {
- pWF->nSamplesPerSec = 22050;
- } else if ((dwFormats & WAVE_FORMAT_1S16) != 0) {
- pWF->nSamplesPerSec = 11025;
- } else if ((dwFormats & WAVE_FORMAT_96S16) != 0) {
- pWF->nSamplesPerSec = 96000;
- } else {
- pWF->wBitsPerSample = 8;
- if ((dwFormats & WAVE_FORMAT_48S08) != 0) {
- pWF->nSamplesPerSec = 48000;
- } else if ((dwFormats & WAVE_FORMAT_44S08) != 0) {
- pWF->nSamplesPerSec = 44100;
- } else if ((dwFormats & WAVE_FORMAT_2S08) != 0) {
- pWF->nSamplesPerSec = 22050;
- } else if ((dwFormats & WAVE_FORMAT_1S08) != 0) {
- pWF->nSamplesPerSec = 11025;
- } else if ((dwFormats & WAVE_FORMAT_96S08) != 0) {
- pWF->nSamplesPerSec = 96000;
- } else {
- return MA_FORMAT_NOT_SUPPORTED;
- }
- }
+ /* The description from PulseAudio is the name for miniaudio. */
+ if (pSinkInfo->description != NULL) {
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), pSinkInfo->description, (size_t)-1);
}
- pWF->nBlockAlign = (pWF->nChannels * pWF->wBitsPerSample) / 8;
- pWF->nAvgBytesPerSec = pWF->nBlockAlign * pWF->nSamplesPerSec;
+ pData->isTerminated = !pData->callback(pData->pContext, ma_device_type_playback, &deviceInfo, pData->pUserData);
- return MA_SUCCESS;
+ (void)pPulseContext; /* Unused. */
}
-ma_result ma_context_get_device_info_from_WAVECAPS(ma_context* pContext, MA_WAVECAPSA* pCaps, ma_device_info* pDeviceInfo)
+static void ma_context_enumerate_devices_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pSinkInfo, int endOfList, void* pUserData)
{
- WORD bitsPerSample;
- DWORD sampleRate;
- ma_result result;
+ ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData;
+ ma_device_info deviceInfo;
- ma_assert(pContext != NULL);
- ma_assert(pCaps != NULL);
- ma_assert(pDeviceInfo != NULL);
+ MA_ASSERT(pData != NULL);
- /*
- Name / Description
-
- Unfortunately the name specified in WAVE(OUT/IN)CAPS2 is limited to 31 characters. This results in an unprofessional looking
- situation where the names of the devices are truncated. To help work around this, we need to look at the name GUID and try
- looking in the registry for the full name. If we can't find it there, we need to just fall back to the default name.
- */
+ if (endOfList || pData->isTerminated) {
+ return;
+ }
- /* Set the default to begin with. */
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), pCaps->szPname, (size_t)-1);
+ MA_ZERO_OBJECT(&deviceInfo);
- /*
- Now try the registry. There's a few things to consider here:
- - The name GUID can be null, in which we case we just need to stick to the original 31 characters.
- - If the name GUID is not present in the registry we'll also need to stick to the original 31 characters.
- - I like consistency, so I want the returned device names to be consistent with those returned by WASAPI and DirectSound. The
- problem, however is that WASAPI and DirectSound use " ()" format (such as "Speakers (High Definition Audio)"),
- but WinMM does not specificy the component name. From my admittedly limited testing, I've notice the component name seems to
- usually fit within the 31 characters of the fixed sized buffer, so what I'm going to do is parse that string for the component
- name, and then concatenate the name from the registry.
- */
- if (!ma_is_guid_equal(&pCaps->NameGuid, &MA_GUID_NULL)) {
- wchar_t guidStrW[256];
- if (((MA_PFN_StringFromGUID2)pContext->win32.StringFromGUID2)(&pCaps->NameGuid, guidStrW, ma_countof(guidStrW)) > 0) {
- char guidStr[256];
- char keyStr[1024];
- HKEY hKey;
+ /* The name from PulseAudio is the ID for miniaudio. */
+ if (pSinkInfo->name != NULL) {
+ ma_strncpy_s(deviceInfo.id.pulse, sizeof(deviceInfo.id.pulse), pSinkInfo->name, (size_t)-1);
+ }
- WideCharToMultiByte(CP_UTF8, 0, guidStrW, -1, guidStr, sizeof(guidStr), 0, FALSE);
+ /* The description from PulseAudio is the name for miniaudio. */
+ if (pSinkInfo->description != NULL) {
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), pSinkInfo->description, (size_t)-1);
+ }
- ma_strcpy_s(keyStr, sizeof(keyStr), "SYSTEM\\CurrentControlSet\\Control\\MediaCategories\\");
- ma_strcat_s(keyStr, sizeof(keyStr), guidStr);
+ pData->isTerminated = !pData->callback(pData->pContext, ma_device_type_capture, &deviceInfo, pData->pUserData);
- if (((MA_PFN_RegOpenKeyExA)pContext->win32.RegOpenKeyExA)(HKEY_LOCAL_MACHINE, keyStr, 0, KEY_READ, &hKey) == ERROR_SUCCESS) {
- BYTE nameFromReg[512];
- DWORD nameFromRegSize = sizeof(nameFromReg);
- result = ((MA_PFN_RegQueryValueExA)pContext->win32.RegQueryValueExA)(hKey, "Name", 0, NULL, (LPBYTE)nameFromReg, (LPDWORD)&nameFromRegSize);
- ((MA_PFN_RegCloseKey)pContext->win32.RegCloseKey)(hKey);
+ (void)pPulseContext; /* Unused. */
+}
+
+static ma_result ma_context_enumerate_devices__pulse(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ ma_result result = MA_SUCCESS;
+ ma_context_enumerate_devices_callback_data__pulse callbackData;
+ ma_pa_operation* pOP = NULL;
+ ma_pa_mainloop* pMainLoop;
+ ma_pa_mainloop_api* pAPI;
+ ma_pa_context* pPulseContext;
+ int error;
- if (result == ERROR_SUCCESS) {
- /* We have the value from the registry, so now we need to construct the name string. */
- char name[1024];
- if (ma_strcpy_s(name, sizeof(name), pDeviceInfo->name) == 0) {
- char* nameBeg = ma_find_last_character(name, '(');
- if (nameBeg != NULL) {
- size_t leadingLen = (nameBeg - name);
- ma_strncpy_s(nameBeg + 1, sizeof(name) - leadingLen, (const char*)nameFromReg, (size_t)-1);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
- /* The closing ")", if it can fit. */
- if (leadingLen + nameFromRegSize < sizeof(name)-1) {
- ma_strcat_s(name, sizeof(name), ")");
- }
+ callbackData.pContext = pContext;
+ callbackData.callback = callback;
+ callbackData.pUserData = pUserData;
+ callbackData.isTerminated = MA_FALSE;
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), name, (size_t)-1);
- }
- }
- }
- }
- }
+ pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)();
+ if (pMainLoop == NULL) {
+ return MA_FAILED_TO_INIT_BACKEND;
}
+ pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)(pMainLoop);
+ if (pAPI == NULL) {
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ return MA_FAILED_TO_INIT_BACKEND;
+ }
- result = ma_get_best_info_from_formats_flags__winmm(pCaps->dwFormats, pCaps->wChannels, &bitsPerSample, &sampleRate);
- if (result != MA_SUCCESS) {
- return result;
+ pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(pAPI, pContext->pulse.pApplicationName);
+ if (pPulseContext == NULL) {
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ return MA_FAILED_TO_INIT_BACKEND;
}
- pDeviceInfo->minChannels = pCaps->wChannels;
- pDeviceInfo->maxChannels = pCaps->wChannels;
- pDeviceInfo->minSampleRate = sampleRate;
- pDeviceInfo->maxSampleRate = sampleRate;
- pDeviceInfo->formatCount = 1;
- if (bitsPerSample == 8) {
- pDeviceInfo->formats[0] = ma_format_u8;
- } else if (bitsPerSample == 16) {
- pDeviceInfo->formats[0] = ma_format_s16;
- } else if (bitsPerSample == 24) {
- pDeviceInfo->formats[0] = ma_format_s24;
- } else if (bitsPerSample == 32) {
- pDeviceInfo->formats[0] = ma_format_s32;
- } else {
- return MA_FORMAT_NOT_SUPPORTED;
+ error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)(pPulseContext, pContext->pulse.pServerName, (pContext->pulse.tryAutoSpawn) ? 0 : MA_PA_CONTEXT_NOAUTOSPAWN, NULL);
+ if (error != MA_PA_OK) {
+ ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ return ma_result_from_pulse(error);
}
- return MA_SUCCESS;
-}
+ for (;;) {
+ ma_pa_context_state_t state = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)(pPulseContext);
+ if (state == MA_PA_CONTEXT_READY) {
+ break; /* Success. */
+ }
+ if (state == MA_PA_CONTEXT_CONNECTING || state == MA_PA_CONTEXT_AUTHORIZING || state == MA_PA_CONTEXT_SETTING_NAME) {
+ error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL);
+ if (error < 0) {
+ result = ma_result_from_pulse(error);
+ goto done;
+ }
-ma_result ma_context_get_device_info_from_WAVEOUTCAPS2(ma_context* pContext, MA_WAVEOUTCAPS2A* pCaps, ma_device_info* pDeviceInfo)
-{
- MA_WAVECAPSA caps;
+#ifdef MA_DEBUG_OUTPUT
+ printf("[PulseAudio] pa_context_get_state() returned %d. Waiting.\n", state);
+#endif
+ continue; /* Keep trying. */
+ }
+ if (state == MA_PA_CONTEXT_UNCONNECTED || state == MA_PA_CONTEXT_FAILED || state == MA_PA_CONTEXT_TERMINATED) {
+#ifdef MA_DEBUG_OUTPUT
+ printf("[PulseAudio] pa_context_get_state() returned %d. Failed.\n", state);
+#endif
+ goto done; /* Failed. */
+ }
+ }
- ma_assert(pContext != NULL);
- ma_assert(pCaps != NULL);
- ma_assert(pDeviceInfo != NULL);
- ma_copy_memory(caps.szPname, pCaps->szPname, sizeof(caps.szPname));
- caps.dwFormats = pCaps->dwFormats;
- caps.wChannels = pCaps->wChannels;
- caps.NameGuid = pCaps->NameGuid;
- return ma_context_get_device_info_from_WAVECAPS(pContext, &caps, pDeviceInfo);
-}
+ /* Playback. */
+ if (!callbackData.isTerminated) {
+ pOP = ((ma_pa_context_get_sink_info_list_proc)pContext->pulse.pa_context_get_sink_info_list)(pPulseContext, ma_context_enumerate_devices_sink_callback__pulse, &callbackData);
+ if (pOP == NULL) {
+ result = MA_ERROR;
+ goto done;
+ }
-ma_result ma_context_get_device_info_from_WAVEINCAPS2(ma_context* pContext, MA_WAVEINCAPS2A* pCaps, ma_device_info* pDeviceInfo)
-{
- MA_WAVECAPSA caps;
+ result = ma_wait_for_operation__pulse(pContext, pMainLoop, pOP);
+ ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ if (result != MA_SUCCESS) {
+ goto done;
+ }
+ }
- ma_assert(pContext != NULL);
- ma_assert(pCaps != NULL);
- ma_assert(pDeviceInfo != NULL);
- ma_copy_memory(caps.szPname, pCaps->szPname, sizeof(caps.szPname));
- caps.dwFormats = pCaps->dwFormats;
- caps.wChannels = pCaps->wChannels;
- caps.NameGuid = pCaps->NameGuid;
- return ma_context_get_device_info_from_WAVECAPS(pContext, &caps, pDeviceInfo);
+ /* Capture. */
+ if (!callbackData.isTerminated) {
+ pOP = ((ma_pa_context_get_source_info_list_proc)pContext->pulse.pa_context_get_source_info_list)(pPulseContext, ma_context_enumerate_devices_source_callback__pulse, &callbackData);
+ if (pOP == NULL) {
+ result = MA_ERROR;
+ goto done;
+ }
+
+ result = ma_wait_for_operation__pulse(pContext, pMainLoop, pOP);
+ ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ if (result != MA_SUCCESS) {
+ goto done;
+ }
+ }
+
+done:
+ ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)(pPulseContext);
+ ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ return result;
}
-ma_bool32 ma_context_is_device_id_equal__winmm(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+typedef struct
{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
-
- return pID0->winmm == pID1->winmm;
-}
+ ma_device_info* pDeviceInfo;
+ ma_bool32 foundDevice;
+} ma_context_get_device_info_callback_data__pulse;
-ma_result ma_context_enumerate_devices__winmm(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+static void ma_context_get_device_info_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData)
{
- UINT playbackDeviceCount;
- UINT captureDeviceCount;
- UINT iPlaybackDevice;
- UINT iCaptureDevice;
+ ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData;
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
+ if (endOfList > 0) {
+ return;
+ }
- /* Playback. */
- playbackDeviceCount = ((MA_PFN_waveOutGetNumDevs)pContext->winmm.waveOutGetNumDevs)();
- for (iPlaybackDevice = 0; iPlaybackDevice < playbackDeviceCount; ++iPlaybackDevice) {
- MMRESULT result;
- MA_WAVEOUTCAPS2A caps;
+ MA_ASSERT(pData != NULL);
+ pData->foundDevice = MA_TRUE;
- ma_zero_object(&caps);
+ if (pInfo->name != NULL) {
+ ma_strncpy_s(pData->pDeviceInfo->id.pulse, sizeof(pData->pDeviceInfo->id.pulse), pInfo->name, (size_t)-1);
+ }
- result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(iPlaybackDevice, (WAVEOUTCAPSA*)&caps, sizeof(caps));
- if (result == MMSYSERR_NOERROR) {
- ma_device_info deviceInfo;
+ if (pInfo->description != NULL) {
+ ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pInfo->description, (size_t)-1);
+ }
- ma_zero_object(&deviceInfo);
- deviceInfo.id.winmm = iPlaybackDevice;
+ pData->pDeviceInfo->minChannels = pInfo->sample_spec.channels;
+ pData->pDeviceInfo->maxChannels = pInfo->sample_spec.channels;
+ pData->pDeviceInfo->minSampleRate = pInfo->sample_spec.rate;
+ pData->pDeviceInfo->maxSampleRate = pInfo->sample_spec.rate;
+ pData->pDeviceInfo->formatCount = 1;
+ pData->pDeviceInfo->formats[0] = ma_format_from_pulse(pInfo->sample_spec.format);
- if (ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) {
- ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
- if (cbResult == MA_FALSE) {
- return MA_SUCCESS; /* Enumeration was stopped. */
- }
- }
- }
- }
+ (void)pPulseContext; /* Unused. */
+}
- /* Capture. */
- captureDeviceCount = ((MA_PFN_waveInGetNumDevs)pContext->winmm.waveInGetNumDevs)();
- for (iCaptureDevice = 0; iCaptureDevice < captureDeviceCount; ++iCaptureDevice) {
- MMRESULT result;
- MA_WAVEINCAPS2A caps;
+static void ma_context_get_device_info_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData)
+{
+ ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData;
- ma_zero_object(&caps);
+ if (endOfList > 0) {
+ return;
+ }
- result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(iCaptureDevice, (WAVEINCAPSA*)&caps, sizeof(caps));
- if (result == MMSYSERR_NOERROR) {
- ma_device_info deviceInfo;
+ MA_ASSERT(pData != NULL);
+ pData->foundDevice = MA_TRUE;
- ma_zero_object(&deviceInfo);
- deviceInfo.id.winmm = iCaptureDevice;
+ if (pInfo->name != NULL) {
+ ma_strncpy_s(pData->pDeviceInfo->id.pulse, sizeof(pData->pDeviceInfo->id.pulse), pInfo->name, (size_t)-1);
+ }
- if (ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) {
- ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
- if (cbResult == MA_FALSE) {
- return MA_SUCCESS; /* Enumeration was stopped. */
- }
- }
- }
+ if (pInfo->description != NULL) {
+ ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pInfo->description, (size_t)-1);
}
- return MA_SUCCESS;
+ pData->pDeviceInfo->minChannels = pInfo->sample_spec.channels;
+ pData->pDeviceInfo->maxChannels = pInfo->sample_spec.channels;
+ pData->pDeviceInfo->minSampleRate = pInfo->sample_spec.rate;
+ pData->pDeviceInfo->maxSampleRate = pInfo->sample_spec.rate;
+ pData->pDeviceInfo->formatCount = 1;
+ pData->pDeviceInfo->formats[0] = ma_format_from_pulse(pInfo->sample_spec.format);
+
+ (void)pPulseContext; /* Unused. */
}
-ma_result ma_context_get_device_info__winmm(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+static ma_result ma_context_get_device_info__pulse(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
{
- UINT winMMDeviceID;
+ ma_result result = MA_SUCCESS;
+ ma_context_get_device_info_callback_data__pulse callbackData;
+ ma_pa_operation* pOP = NULL;
+ ma_pa_mainloop* pMainLoop;
+ ma_pa_mainloop_api* pAPI;
+ ma_pa_context* pPulseContext;
+ int error;
- ma_assert(pContext != NULL);
+ MA_ASSERT(pContext != NULL);
+ /* No exclusive mode with the PulseAudio backend. */
if (shareMode == ma_share_mode_exclusive) {
return MA_SHARE_MODE_NOT_SUPPORTED;
}
- winMMDeviceID = 0;
- if (pDeviceID != NULL) {
- winMMDeviceID = (UINT)pDeviceID->winmm;
+ callbackData.pDeviceInfo = pDeviceInfo;
+ callbackData.foundDevice = MA_FALSE;
+
+ pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)();
+ if (pMainLoop == NULL) {
+ return MA_FAILED_TO_INIT_BACKEND;
}
- pDeviceInfo->id.winmm = winMMDeviceID;
+ pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)(pMainLoop);
+ if (pAPI == NULL) {
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ return MA_FAILED_TO_INIT_BACKEND;
+ }
- if (deviceType == ma_device_type_playback) {
- MMRESULT result;
- MA_WAVEOUTCAPS2A caps;
+ pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(pAPI, pContext->pulse.pApplicationName);
+ if (pPulseContext == NULL) {
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ return MA_FAILED_TO_INIT_BACKEND;
+ }
- ma_zero_object(&caps);
-
- result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(winMMDeviceID, (WAVEOUTCAPSA*)&caps, sizeof(caps));
- if (result == MMSYSERR_NOERROR) {
- return ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, pDeviceInfo);
+ error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)(pPulseContext, pContext->pulse.pServerName, 0, NULL);
+ if (error != MA_PA_OK) {
+ ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ return ma_result_from_pulse(error);
+ }
+
+ for (;;) {
+ ma_pa_context_state_t state = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)(pPulseContext);
+ if (state == MA_PA_CONTEXT_READY) {
+ break; /* Success. */
}
- } else {
- MMRESULT result;
- MA_WAVEINCAPS2A caps;
+ if (state == MA_PA_CONTEXT_CONNECTING || state == MA_PA_CONTEXT_AUTHORIZING || state == MA_PA_CONTEXT_SETTING_NAME) {
+ error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL);
+ if (error < 0) {
+ result = ma_result_from_pulse(error);
+ goto done;
+ }
- ma_zero_object(&caps);
-
- result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(winMMDeviceID, (WAVEINCAPSA*)&caps, sizeof(caps));
- if (result == MMSYSERR_NOERROR) {
- return ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, pDeviceInfo);
+#ifdef MA_DEBUG_OUTPUT
+ printf("[PulseAudio] pa_context_get_state() returned %d. Waiting.\n", state);
+#endif
+ continue; /* Keep trying. */
+ }
+ if (state == MA_PA_CONTEXT_UNCONNECTED || state == MA_PA_CONTEXT_FAILED || state == MA_PA_CONTEXT_TERMINATED) {
+#ifdef MA_DEBUG_OUTPUT
+ printf("[PulseAudio] pa_context_get_state() returned %d. Failed.\n", state);
+#endif
+ goto done; /* Failed. */
}
}
- return MA_NO_DEVICE;
-}
-
-
-void ma_device_uninit__winmm(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
+ if (deviceType == ma_device_type_playback) {
+ pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)(pPulseContext, pDeviceID->pulse, ma_context_get_device_info_sink_callback__pulse, &callbackData);
+ } else {
+ pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)(pPulseContext, pDeviceID->pulse, ma_context_get_device_info_source_callback__pulse, &callbackData);
+ }
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ((MA_PFN_waveInClose)pDevice->pContext->winmm.waveInClose)((HWAVEIN)pDevice->winmm.hDeviceCapture);
- CloseHandle((HANDLE)pDevice->winmm.hEventCapture);
+ if (pOP != NULL) {
+ ma_wait_for_operation__pulse(pContext, pMainLoop, pOP);
+ ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ } else {
+ result = MA_ERROR;
+ goto done;
}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- ((MA_PFN_waveOutReset)pDevice->pContext->winmm.waveOutReset)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
- ((MA_PFN_waveOutClose)pDevice->pContext->winmm.waveOutClose)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
- CloseHandle((HANDLE)pDevice->winmm.hEventPlayback);
+ if (!callbackData.foundDevice) {
+ result = MA_NO_DEVICE;
+ goto done;
}
- ma_free(pDevice->winmm._pHeapData);
- ma_zero_object(&pDevice->winmm); /* Safety. */
+done:
+ ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)(pPulseContext);
+ ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ return result;
}
-ma_result ma_device_init__winmm(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+
+static void ma_pulse_device_state_callback(ma_pa_context* pPulseContext, void* pUserData)
{
- const char* errorMsg = "";
- ma_result errorCode = MA_ERROR;
- ma_result result = MA_SUCCESS;
- ma_uint32 heapSize;
- UINT winMMDeviceIDPlayback = 0;
- UINT winMMDeviceIDCapture = 0;
- ma_uint32 bufferSizeInMilliseconds;
+ ma_device* pDevice;
+ ma_context* pContext;
- ma_assert(pDevice != NULL);
- ma_zero_object(&pDevice->winmm);
+ pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
- /* No exlusive mode with WinMM. */
- if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
- ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
- }
+ pContext = pDevice->pContext;
+ MA_ASSERT(pContext != NULL);
- bufferSizeInMilliseconds = pConfig->bufferSizeInMilliseconds;
- if (bufferSizeInMilliseconds == 0) {
- bufferSizeInMilliseconds = ma_calculate_buffer_size_in_milliseconds_from_frames(pConfig->bufferSizeInFrames, pConfig->sampleRate);
- }
-
- /* WinMM has horrible latency. */
- if (pDevice->usingDefaultBufferSize) {
- if (pConfig->performanceProfile == ma_performance_profile_low_latency) {
- bufferSizeInMilliseconds = 40 * pConfig->periods;
- } else {
- bufferSizeInMilliseconds = 400 * pConfig->periods;
- }
- }
+ pDevice->pulse.pulseContextState = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)(pPulseContext);
+}
+void ma_device_sink_info_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData)
+{
+ ma_pa_sink_info* pInfoOut;
- if (pConfig->playback.pDeviceID != NULL) {
- winMMDeviceIDPlayback = (UINT)pConfig->playback.pDeviceID->winmm;
- }
- if (pConfig->capture.pDeviceID != NULL) {
- winMMDeviceIDCapture = (UINT)pConfig->capture.pDeviceID->winmm;
+ if (endOfList > 0) {
+ return;
}
- /* The capture device needs to be initialized first. */
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- WAVEINCAPSA caps;
- WAVEFORMATEX wf;
- MMRESULT resultMM;
-
- /* We use an event to know when a new fragment needs to be enqueued. */
- pDevice->winmm.hEventCapture = (ma_handle)CreateEvent(NULL, TRUE, TRUE, NULL);
- if (pDevice->winmm.hEventCapture == NULL) {
- errorMsg = "[WinMM] Failed to create event for fragment enqueing for the capture device.", errorCode = MA_FAILED_TO_CREATE_EVENT;
- goto on_error;
- }
+ pInfoOut = (ma_pa_sink_info*)pUserData;
+ MA_ASSERT(pInfoOut != NULL);
- /* The format should be based on the device's actual format. */
- if (((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(winMMDeviceIDCapture, &caps, sizeof(caps)) != MMSYSERR_NOERROR) {
- errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED;
- goto on_error;
- }
+ *pInfoOut = *pInfo;
- result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf);
- if (result != MA_SUCCESS) {
- errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result;
- goto on_error;
- }
+ (void)pPulseContext; /* Unused. */
+}
- resultMM = ((MA_PFN_waveInOpen)pDevice->pContext->winmm.waveInOpen)((LPHWAVEIN)&pDevice->winmm.hDeviceCapture, winMMDeviceIDCapture, &wf, (DWORD_PTR)pDevice->winmm.hEventCapture, (DWORD_PTR)pDevice, CALLBACK_EVENT | WAVE_ALLOWSYNC);
- if (resultMM != MMSYSERR_NOERROR) {
- errorMsg = "[WinMM] Failed to open capture device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE;
- goto on_error;
- }
+static void ma_device_source_info_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData)
+{
+ ma_pa_source_info* pInfoOut;
- pDevice->capture.internalFormat = ma_format_from_WAVEFORMATEX(&wf);
- pDevice->capture.internalChannels = wf.nChannels;
- pDevice->capture.internalSampleRate = wf.nSamplesPerSec;
- ma_get_standard_channel_map(ma_standard_channel_map_microsoft, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
- pDevice->capture.internalPeriods = pConfig->periods;
- pDevice->capture.internalBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(bufferSizeInMilliseconds, pDevice->capture.internalSampleRate);
+ if (endOfList > 0) {
+ return;
}
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- WAVEOUTCAPSA caps;
- WAVEFORMATEX wf;
- MMRESULT resultMM;
-
- /* We use an event to know when a new fragment needs to be enqueued. */
- pDevice->winmm.hEventPlayback = (ma_handle)CreateEvent(NULL, TRUE, TRUE, NULL);
- if (pDevice->winmm.hEventPlayback == NULL) {
- errorMsg = "[WinMM] Failed to create event for fragment enqueing for the playback device.", errorCode = MA_FAILED_TO_CREATE_EVENT;
- goto on_error;
- }
-
- /* The format should be based on the device's actual format. */
- if (((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(winMMDeviceIDPlayback, &caps, sizeof(caps)) != MMSYSERR_NOERROR) {
- errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED;
- goto on_error;
- }
-
- result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf);
- if (result != MA_SUCCESS) {
- errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result;
- goto on_error;
- }
+ pInfoOut = (ma_pa_source_info*)pUserData;
+ MA_ASSERT(pInfoOut != NULL);
- resultMM = ((MA_PFN_waveOutOpen)pContext->winmm.waveOutOpen)((LPHWAVEOUT)&pDevice->winmm.hDevicePlayback, winMMDeviceIDPlayback, &wf, (DWORD_PTR)pDevice->winmm.hEventPlayback, (DWORD_PTR)pDevice, CALLBACK_EVENT | WAVE_ALLOWSYNC);
- if (resultMM != MMSYSERR_NOERROR) {
- errorMsg = "[WinMM] Failed to open playback device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE;
- goto on_error;
- }
+ *pInfoOut = *pInfo;
- pDevice->playback.internalFormat = ma_format_from_WAVEFORMATEX(&wf);
- pDevice->playback.internalChannels = wf.nChannels;
- pDevice->playback.internalSampleRate = wf.nSamplesPerSec;
- ma_get_standard_channel_map(ma_standard_channel_map_microsoft, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
- pDevice->playback.internalPeriods = pConfig->periods;
- pDevice->playback.internalBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(bufferSizeInMilliseconds, pDevice->playback.internalSampleRate);
- }
+ (void)pPulseContext; /* Unused. */
+}
- /*
- The heap allocated data is allocated like so:
-
- [Capture WAVEHDRs][Playback WAVEHDRs][Capture Intermediary Buffer][Playback Intermediary Buffer]
- */
- heapSize = 0;
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- heapSize += sizeof(WAVEHDR)*pDevice->capture.internalPeriods + (pDevice->capture.internalBufferSizeInFrames*ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
- }
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- heapSize += sizeof(WAVEHDR)*pDevice->playback.internalPeriods + (pDevice->playback.internalBufferSizeInFrames*ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
- }
+static void ma_device_sink_name_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData)
+{
+ ma_device* pDevice;
- pDevice->winmm._pHeapData = (ma_uint8*)ma_malloc(heapSize);
- if (pDevice->winmm._pHeapData == NULL) {
- errorMsg = "[WinMM] Failed to allocate memory for the intermediary buffer.", errorCode = MA_OUT_OF_MEMORY;
- goto on_error;
+ if (endOfList > 0) {
+ return;
}
- ma_zero_memory(pDevice->winmm._pHeapData, heapSize);
-
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- ma_uint32 iPeriod;
+ pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
- if (pConfig->deviceType == ma_device_type_capture) {
- pDevice->winmm.pWAVEHDRCapture = pDevice->winmm._pHeapData;
- pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods));
- } else {
- pDevice->winmm.pWAVEHDRCapture = pDevice->winmm._pHeapData;
- pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods + pDevice->playback.internalPeriods));
- }
+ ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), pInfo->description, (size_t)-1);
- /* Prepare headers. */
- for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) {
- ma_uint32 fragmentSizeInBytes = ma_get_fragment_size_in_bytes(pDevice->capture.internalBufferSizeInFrames, pDevice->capture.internalPeriods, pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ (void)pPulseContext; /* Unused. */
+}
- ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].lpData = (LPSTR)(pDevice->winmm.pIntermediaryBufferCapture + (fragmentSizeInBytes*iPeriod));
- ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwBufferLength = fragmentSizeInBytes;
- ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwFlags = 0L;
- ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwLoops = 0L;
- ((MA_PFN_waveInPrepareHeader)pContext->winmm.waveInPrepareHeader)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(WAVEHDR));
+static void ma_device_source_name_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData)
+{
+ ma_device* pDevice;
- /*
- The user data of the WAVEHDR structure is a single flag the controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means
- it's unlocked and available for writing. A value of 1 means it's locked.
- */
- ((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwUser = 0;
- }
+ if (endOfList > 0) {
+ return;
}
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- ma_uint32 iPeriod;
- if (pConfig->deviceType == ma_device_type_playback) {
- pDevice->winmm.pWAVEHDRPlayback = pDevice->winmm._pHeapData;
- pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*pDevice->playback.internalPeriods);
- } else {
- pDevice->winmm.pWAVEHDRPlayback = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods));
- pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(WAVEHDR)*(pDevice->capture.internalPeriods + pDevice->playback.internalPeriods)) + (pDevice->playback.internalBufferSizeInFrames*ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
- }
+ pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
- /* Prepare headers. */
- for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) {
- ma_uint32 fragmentSizeInBytes = ma_get_fragment_size_in_bytes(pDevice->playback.internalBufferSizeInFrames, pDevice->playback.internalPeriods, pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), pInfo->description, (size_t)-1);
- ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].lpData = (LPSTR)(pDevice->winmm.pIntermediaryBufferPlayback + (fragmentSizeInBytes*iPeriod));
- ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwBufferLength = fragmentSizeInBytes;
- ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwFlags = 0L;
- ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwLoops = 0L;
- ((MA_PFN_waveOutPrepareHeader)pContext->winmm.waveOutPrepareHeader)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(WAVEHDR));
+ (void)pPulseContext; /* Unused. */
+}
- /*
- The user data of the WAVEHDR structure is a single flag the controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means
- it's unlocked and available for writing. A value of 1 means it's locked.
- */
- ((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwUser = 0;
- }
- }
+static void ma_device_uninit__pulse(ma_device* pDevice)
+{
+ ma_context* pContext;
- return MA_SUCCESS;
+ MA_ASSERT(pDevice != NULL);
-on_error:
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- if (pDevice->winmm.pWAVEHDRCapture != NULL) {
- ma_uint32 iPeriod;
- for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) {
- ((MA_PFN_waveInUnprepareHeader)pContext->winmm.waveInUnprepareHeader)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(WAVEHDR));
- }
- }
+ pContext = pDevice->pContext;
+ MA_ASSERT(pContext != NULL);
- ((MA_PFN_waveInClose)pContext->winmm.waveInClose)((HWAVEIN)pDevice->winmm.hDeviceCapture);
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+ ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
}
-
if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- if (pDevice->winmm.pWAVEHDRCapture != NULL) {
- ma_uint32 iPeriod;
- for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) {
- ((MA_PFN_waveOutUnprepareHeader)pContext->winmm.waveOutUnprepareHeader)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &((WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(WAVEHDR));
- }
- }
-
- ((MA_PFN_waveOutClose)pContext->winmm.waveOutClose)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
+ ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+ ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
}
- ma_free(pDevice->winmm._pHeapData);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, errorMsg, errorCode);
+ ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pDevice->pulse.pPulseContext);
+ ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pDevice->pulse.pPulseContext);
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pDevice->pulse.pMainLoop);
}
-ma_result ma_device_stop__winmm(ma_device* pDevice)
+static ma_pa_buffer_attr ma_device__pa_buffer_attr_new(ma_uint32 periodSizeInFrames, ma_uint32 periods, const ma_pa_sample_spec* ss)
{
- MMRESULT resultMM;
-
- ma_assert(pDevice != NULL);
-
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- if (pDevice->winmm.hDeviceCapture == NULL) {
- return MA_INVALID_ARGS;
- }
+ ma_pa_buffer_attr attr;
+ attr.maxlength = periodSizeInFrames * periods * ma_get_bytes_per_frame(ma_format_from_pulse(ss->format), ss->channels);
+ attr.tlength = attr.maxlength / periods;
+ attr.prebuf = (ma_uint32)-1;
+ attr.minreq = (ma_uint32)-1;
+ attr.fragsize = attr.maxlength / periods;
- resultMM = ((MA_PFN_waveInReset)pDevice->pContext->winmm.waveInReset)((HWAVEIN)pDevice->winmm.hDeviceCapture);
- if (resultMM != MMSYSERR_NOERROR) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] WARNING: Failed to reset capture device.", ma_result_from_MMRESULT(resultMM));
- }
- }
+ return attr;
+}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- if (pDevice->winmm.hDevicePlayback == NULL) {
- return MA_INVALID_ARGS;
- }
+static ma_pa_stream* ma_device__pa_stream_new__pulse(ma_device* pDevice, const char* pStreamName, const ma_pa_sample_spec* ss, const ma_pa_channel_map* cmap)
+{
+ static int g_StreamCounter = 0;
+ char actualStreamName[256];
- resultMM = ((MA_PFN_waveOutReset)pDevice->pContext->winmm.waveOutReset)((HWAVEOUT)pDevice->winmm.hDevicePlayback);
- if (resultMM != MMSYSERR_NOERROR) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] WARNING: Failed to reset playback device.", ma_result_from_MMRESULT(resultMM));
- }
+ if (pStreamName != NULL) {
+ ma_strncpy_s(actualStreamName, sizeof(actualStreamName), pStreamName, (size_t)-1);
+ } else {
+ ma_strcpy_s(actualStreamName, sizeof(actualStreamName), "miniaudio:");
+ ma_itoa_s(g_StreamCounter, actualStreamName + 10, sizeof(actualStreamName)-10, 10); /* 10 = strlen("miniaudio:") */
}
+ g_StreamCounter += 1;
- ma_atomic_exchange_32(&pDevice->winmm.isStarted, MA_FALSE);
- return MA_SUCCESS;
+ return ((ma_pa_stream_new_proc)pDevice->pContext->pulse.pa_stream_new)((ma_pa_context*)pDevice->pulse.pPulseContext, actualStreamName, ss, cmap);
}
-ma_result ma_device_write__winmm(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount)
+static ma_result ma_device_init__pulse(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
{
ma_result result = MA_SUCCESS;
- MMRESULT resultMM;
- ma_uint32 totalFramesWritten;
- WAVEHDR* pWAVEHDR;
+ int error = 0;
+ const char* devPlayback = NULL;
+ const char* devCapture = NULL;
+ ma_uint32 periodSizeInMilliseconds;
+ ma_pa_sink_info sinkInfo;
+ ma_pa_source_info sourceInfo;
+ ma_pa_operation* pOP = NULL;
+ ma_pa_sample_spec ss;
+ ma_pa_channel_map cmap;
+ ma_pa_buffer_attr attr;
+ const ma_pa_sample_spec* pActualSS = NULL;
+ const ma_pa_channel_map* pActualCMap = NULL;
+ const ma_pa_buffer_attr* pActualAttr = NULL;
+ ma_uint32 iChannel;
+ ma_pa_stream_flags_t streamFlags;
- ma_assert(pDevice != NULL);
- ma_assert(pPCMFrames != NULL);
+ MA_ASSERT(pDevice != NULL);
+ MA_ZERO_OBJECT(&pDevice->pulse);
- pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback;
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
+ }
- /* Keep processing as much data as possible. */
- totalFramesWritten = 0;
- while (totalFramesWritten < frameCount) {
- /* If the current header has some space available we need to write part of it. */
- if (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser == 0) { /* 0 = unlocked. */
- /*
- This header has room in it. We copy as much of it as we can. If we end up fully consuming the buffer we need to
- write it out and move on to the next iteration.
- */
- ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedPlayback;
+ /* No exclusive mode with the PulseAudio backend. */
+ if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
+ ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
+ }
- ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesWritten));
- const void* pSrc = ma_offset_ptr(pPCMFrames, totalFramesWritten*bpf);
- void* pDst = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].lpData, pDevice->winmm.headerFramesConsumedPlayback*bpf);
- ma_copy_memory(pDst, pSrc, framesToCopy*bpf);
+ if ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID != NULL) {
+ devPlayback = pConfig->playback.pDeviceID->pulse;
+ }
+ if ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.pDeviceID != NULL) {
+ devCapture = pConfig->capture.pDeviceID->pulse;
+ }
- pDevice->winmm.headerFramesConsumedPlayback += framesToCopy;
- totalFramesWritten += framesToCopy;
+ periodSizeInMilliseconds = pConfig->periodSizeInMilliseconds;
+ if (periodSizeInMilliseconds == 0) {
+ periodSizeInMilliseconds = ma_calculate_buffer_size_in_milliseconds_from_frames(pConfig->periodSizeInFrames, pConfig->sampleRate);
+ }
- /* If we've consumed the buffer entirely we need to write it out to the device. */
- if (pDevice->winmm.headerFramesConsumedPlayback == (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf)) {
- pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 1; /* 1 = locked. */
- pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags &= ~WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */
+ pDevice->pulse.pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)();
+ if (pDevice->pulse.pMainLoop == NULL) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create main loop for device.", MA_FAILED_TO_INIT_BACKEND);
+ goto on_error0;
+ }
- /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */
- ResetEvent((HANDLE)pDevice->winmm.hEventPlayback);
+ pDevice->pulse.pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)((ma_pa_mainloop*)pDevice->pulse.pMainLoop);
+ if (pDevice->pulse.pAPI == NULL) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve PulseAudio main loop.", MA_FAILED_TO_INIT_BACKEND);
+ goto on_error1;
+ }
- /* The device will be started here. */
- resultMM = ((MA_PFN_waveOutWrite)pDevice->pContext->winmm.waveOutWrite)((HWAVEOUT)pDevice->winmm.hDevicePlayback, &pWAVEHDR[pDevice->winmm.iNextHeaderPlayback], sizeof(WAVEHDR));
- if (resultMM != MMSYSERR_NOERROR) {
- result = ma_result_from_MMRESULT(resultMM);
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] waveOutWrite() failed.", result);
- break;
- }
- ma_atomic_exchange_32(&pDevice->winmm.isStarted, MA_TRUE);
+ pDevice->pulse.pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)((ma_pa_mainloop_api*)pDevice->pulse.pAPI, pContext->pulse.pApplicationName);
+ if (pDevice->pulse.pPulseContext == NULL) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio context for device.", MA_FAILED_TO_INIT_BACKEND);
+ goto on_error1;
+ }
- /* Make sure we move to the next header. */
- pDevice->winmm.iNextHeaderPlayback = (pDevice->winmm.iNextHeaderPlayback + 1) % pDevice->playback.internalPeriods;
- pDevice->winmm.headerFramesConsumedPlayback = 0;
- }
+ error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)((ma_pa_context*)pDevice->pulse.pPulseContext, pContext->pulse.pServerName, (pContext->pulse.tryAutoSpawn) ? 0 : MA_PA_CONTEXT_NOAUTOSPAWN, NULL);
+ if (error != MA_PA_OK) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio context.", ma_result_from_pulse(error));
+ goto on_error2;
+ }
- /* If at this point we have consumed the entire input buffer we can return. */
- ma_assert(totalFramesWritten <= frameCount);
- if (totalFramesWritten == frameCount) {
- break;
- }
- /* Getting here means there's more to process. */
- continue;
- }
+ pDevice->pulse.pulseContextState = MA_PA_CONTEXT_UNCONNECTED;
+ ((ma_pa_context_set_state_callback_proc)pContext->pulse.pa_context_set_state_callback)((ma_pa_context*)pDevice->pulse.pPulseContext, ma_pulse_device_state_callback, pDevice);
- /* Getting here means there isn't enough room in the buffer and we need to wait for one to become available. */
- if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventPlayback, INFINITE) != WAIT_OBJECT_0) {
- result = MA_ERROR;
+ /* Wait for PulseAudio to get itself ready before returning. */
+ for (;;) {
+ if (pDevice->pulse.pulseContextState == MA_PA_CONTEXT_READY) {
break;
}
- /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. */
- if ((pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags & WHDR_DONE) != 0) {
- pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 0; /* 0 = unlocked (make it available for writing). */
- pDevice->winmm.headerFramesConsumedPlayback = 0;
+ /* An error may have occurred. */
+ if (pDevice->pulse.pulseContextState == MA_PA_CONTEXT_FAILED || pDevice->pulse.pulseContextState == MA_PA_CONTEXT_TERMINATED) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while connecting the PulseAudio context.", MA_ERROR);
+ goto on_error3;
}
- /* If the device has been stopped we need to break. */
- if (!pDevice->winmm.isStarted) {
- break;
+ error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL);
+ if (error < 0) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] The PulseAudio main loop returned an error while connecting the PulseAudio context.", ma_result_from_pulse(error));
+ goto on_error3;
}
}
- return result;
-}
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devCapture, ma_device_source_info_callback, &sourceInfo);
+ if (pOP != NULL) {
+ ma_device__wait_for_operation__pulse(pDevice, pOP);
+ ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ } else {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve source info for capture device.", MA_ERROR); /* "error" is not set on this path, so ma_result_from_pulse(error) would report success. */
+ goto on_error3;
+ }
-ma_result ma_device_read__winmm(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount)
-{
- ma_result result = MA_SUCCESS;
- MMRESULT resultMM;
- ma_uint32 totalFramesRead;
- WAVEHDR* pWAVEHDR;
+ ss = sourceInfo.sample_spec;
+ cmap = sourceInfo.channel_map;
- ma_assert(pDevice != NULL);
- ma_assert(pPCMFrames != NULL);
+ pDevice->capture.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, ss.rate);
+ pDevice->capture.internalPeriods = pConfig->periods;
- pWAVEHDR = (WAVEHDR*)pDevice->winmm.pWAVEHDRCapture;
+ attr = ma_device__pa_buffer_attr_new(pDevice->capture.internalPeriodSizeInFrames, pConfig->periods, &ss);
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[PulseAudio] Capture attr: maxlength=%u, tlength=%u, prebuf=%u, minreq=%u, fragsize=%u; internalPeriodSizeInFrames=%u\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->capture.internalPeriodSizeInFrames);
+ #endif
- /* We want to start the device immediately. */
- if (!pDevice->winmm.isStarted) {
- ma_uint32 iPeriod;
+ pDevice->pulse.pStreamCapture = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNameCapture, &ss, &cmap);
+ if (pDevice->pulse.pStreamCapture == NULL) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio capture stream.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ goto on_error3;
+ }
- /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */
- ResetEvent((HANDLE)pDevice->winmm.hEventCapture);
+ streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_FIX_FORMAT | MA_PA_STREAM_FIX_RATE | MA_PA_STREAM_FIX_CHANNELS;
+ if (devCapture != NULL) {
+ streamFlags |= MA_PA_STREAM_DONT_MOVE;
+ }
- /* To start the device we attach all of the buffers and then start it. As the buffers are filled with data we will get notifications. */
- for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) {
- resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((LPWAVEHDR)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(WAVEHDR));
- if (resultMM != MMSYSERR_NOERROR) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] Failed to attach input buffers to capture device in preparation for capture.", ma_result_from_MMRESULT(resultMM));
+ error = ((ma_pa_stream_connect_record_proc)pContext->pulse.pa_stream_connect_record)((ma_pa_stream*)pDevice->pulse.pStreamCapture, devCapture, &attr, streamFlags);
+ if (error != MA_PA_OK) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio capture stream.", ma_result_from_pulse(error));
+ goto on_error4;
+ }
+
+ while (((ma_pa_stream_get_state_proc)pContext->pulse.pa_stream_get_state)((ma_pa_stream*)pDevice->pulse.pStreamCapture) != MA_PA_STREAM_READY) {
+ error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL);
+ if (error < 0) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] The PulseAudio main loop returned an error while connecting the PulseAudio capture stream.", ma_result_from_pulse(error));
+ goto on_error5;
}
+ }
- /* Make sure all of the buffers start out locked. We don't want to access them until the backend tells us we can. */
- pWAVEHDR[iPeriod].dwUser = 1; /* 1 = locked. */
+ /* Internal format. */
+ pActualSS = ((ma_pa_stream_get_sample_spec_proc)pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+ if (pActualSS != NULL) {
+ /* If anything has changed between the requested and the actual sample spec, we need to update the buffer. */
+ if (ss.format != pActualSS->format || ss.channels != pActualSS->channels || ss.rate != pActualSS->rate) {
+ attr = ma_device__pa_buffer_attr_new(pDevice->capture.internalPeriodSizeInFrames, pConfig->periods, pActualSS);
+
+ pOP = ((ma_pa_stream_set_buffer_attr_proc)pContext->pulse.pa_stream_set_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamCapture, &attr, NULL, NULL);
+ if (pOP != NULL) {
+ ma_device__wait_for_operation__pulse(pDevice, pOP);
+ ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ }
+ }
+
+ ss = *pActualSS;
}
- /* Capture devices need to be explicitly started, unlike playback devices. */
- resultMM = ((MA_PFN_waveInStart)pDevice->pContext->winmm.waveInStart)((HWAVEIN)pDevice->winmm.hDeviceCapture);
- if (resultMM != MMSYSERR_NOERROR) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] Failed to start backend device.", ma_result_from_MMRESULT(resultMM));
+ pDevice->capture.internalFormat = ma_format_from_pulse(ss.format);
+ pDevice->capture.internalChannels = ss.channels;
+ pDevice->capture.internalSampleRate = ss.rate;
+
+ /* Internal channel map. */
+ pActualCMap = ((ma_pa_stream_get_channel_map_proc)pContext->pulse.pa_stream_get_channel_map)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+ if (pActualCMap != NULL) {
+ cmap = *pActualCMap;
+ }
+ for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) {
+ pDevice->capture.internalChannelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]);
+ }
+
+ /* Buffer. */
+ pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+ if (pActualAttr != NULL) {
+ attr = *pActualAttr;
+ }
+ pDevice->capture.internalPeriods = attr.maxlength / attr.fragsize;
+ pDevice->capture.internalPeriodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels) / pDevice->capture.internalPeriods;
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[PulseAudio] Capture actual attr: maxlength=%u, tlength=%u, prebuf=%u, minreq=%u, fragsize=%u; internalPeriodSizeInFrames=%u\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->capture.internalPeriodSizeInFrames);
+ #endif
+
+ /* Name. */
+ devCapture = ((ma_pa_stream_get_device_name_proc)pContext->pulse.pa_stream_get_device_name)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+ if (devCapture != NULL) {
+ ma_pa_operation* pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devCapture, ma_device_source_name_callback, pDevice);
+ if (pOP != NULL) {
+ ma_device__wait_for_operation__pulse(pDevice, pOP);
+ ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ }
}
-
- ma_atomic_exchange_32(&pDevice->winmm.isStarted, MA_TRUE);
}
- /* Keep processing as much data as possible. */
- totalFramesRead = 0;
- while (totalFramesRead < frameCount) {
- /* If the current header has some space available we need to write part of it. */
- if (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser == 0) { /* 0 = unlocked. */
- /* The buffer is available for reading. If we fully consume it we need to add it back to the buffer. */
- ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedCapture;
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devPlayback, ma_device_sink_info_callback, &sinkInfo);
+ if (pOP != NULL) {
+ ma_device__wait_for_operation__pulse(pDevice, pOP);
+ ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ } else {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve sink info for playback device.", MA_ERROR); /* "error" is not set on this path, so ma_result_from_pulse(error) would report success. */
+ goto on_error3;
+ }
- ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesRead));
- const void* pSrc = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderCapture].lpData, pDevice->winmm.headerFramesConsumedCapture*bpf);
- void* pDst = ma_offset_ptr(pPCMFrames, totalFramesRead*bpf);
- ma_copy_memory(pDst, pSrc, framesToCopy*bpf);
+ ss = sinkInfo.sample_spec;
+ cmap = sinkInfo.channel_map;
- pDevice->winmm.headerFramesConsumedCapture += framesToCopy;
- totalFramesRead += framesToCopy;
+ pDevice->playback.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, ss.rate);
+ pDevice->playback.internalPeriods = pConfig->periods;
- /* If we've consumed the buffer entirely we need to add it back to the device. */
- if (pDevice->winmm.headerFramesConsumedCapture == (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf)) {
- pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 1; /* 1 = locked. */
- pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags &= ~WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */
+ attr = ma_device__pa_buffer_attr_new(pDevice->playback.internalPeriodSizeInFrames, pConfig->periods, &ss);
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[PulseAudio] Playback attr: maxlength=%u, tlength=%u, prebuf=%u, minreq=%u, fragsize=%u; internalPeriodSizeInFrames=%u\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->playback.internalPeriodSizeInFrames);
+ #endif
- /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */
- ResetEvent((HANDLE)pDevice->winmm.hEventCapture);
+ pDevice->pulse.pStreamPlayback = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNamePlayback, &ss, &cmap);
+ if (pDevice->pulse.pStreamPlayback == NULL) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio playback stream.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ goto on_error3;
+ }
- /* The device will be started here. */
- resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((HWAVEIN)pDevice->winmm.hDeviceCapture, &((LPWAVEHDR)pDevice->winmm.pWAVEHDRCapture)[pDevice->winmm.iNextHeaderCapture], sizeof(WAVEHDR));
- if (resultMM != MMSYSERR_NOERROR) {
- result = ma_result_from_MMRESULT(resultMM);
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WinMM] waveInAddBuffer() failed.", result);
- break;
- }
+ streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_FIX_FORMAT | MA_PA_STREAM_FIX_RATE | MA_PA_STREAM_FIX_CHANNELS;
+ if (devPlayback != NULL) {
+ streamFlags |= MA_PA_STREAM_DONT_MOVE;
+ }
- /* Make sure we move to the next header. */
- pDevice->winmm.iNextHeaderCapture = (pDevice->winmm.iNextHeaderCapture + 1) % pDevice->capture.internalPeriods;
- pDevice->winmm.headerFramesConsumedCapture = 0;
+ error = ((ma_pa_stream_connect_playback_proc)pContext->pulse.pa_stream_connect_playback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, devPlayback, &attr, streamFlags, NULL, NULL);
+ if (error != MA_PA_OK) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio playback stream.", ma_result_from_pulse(error));
+ goto on_error6;
+ }
+
+ while (((ma_pa_stream_get_state_proc)pContext->pulse.pa_stream_get_state)((ma_pa_stream*)pDevice->pulse.pStreamPlayback) != MA_PA_STREAM_READY) {
+ error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL);
+ if (error < 0) {
+ result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] The PulseAudio main loop returned an error while connecting the PulseAudio playback stream.", ma_result_from_pulse(error));
+ goto on_error7;
}
+ }
- /* If at this point we have filled the entire input buffer we can return. */
- ma_assert(totalFramesRead <= frameCount);
- if (totalFramesRead == frameCount) {
- break;
+ /* Internal format. */
+ pActualSS = ((ma_pa_stream_get_sample_spec_proc)pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+ if (pActualSS != NULL) {
+ /* If anything has changed between the requested and the actual sample spec, we need to update the buffer. */
+ if (ss.format != pActualSS->format || ss.channels != pActualSS->channels || ss.rate != pActualSS->rate) {
+ attr = ma_device__pa_buffer_attr_new(pDevice->playback.internalPeriodSizeInFrames, pConfig->periods, pActualSS);
+
+ pOP = ((ma_pa_stream_set_buffer_attr_proc)pContext->pulse.pa_stream_set_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, &attr, NULL, NULL);
+ if (pOP != NULL) {
+ ma_device__wait_for_operation__pulse(pDevice, pOP);
+ ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ }
}
- /* Getting here means there's more to process. */
- continue;
+ ss = *pActualSS;
}
- /* Getting here means there isn't enough any data left to send to the client which means we need to wait for more. */
- if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventCapture, INFINITE) != WAIT_OBJECT_0) {
- result = MA_ERROR;
- break;
+ pDevice->playback.internalFormat = ma_format_from_pulse(ss.format);
+ pDevice->playback.internalChannels = ss.channels;
+ pDevice->playback.internalSampleRate = ss.rate;
+
+ /* Internal channel map. */
+ pActualCMap = ((ma_pa_stream_get_channel_map_proc)pContext->pulse.pa_stream_get_channel_map)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+ if (pActualCMap != NULL) {
+ cmap = *pActualCMap;
+ }
+ for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) {
+ pDevice->playback.internalChannelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]);
}
- /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. */
- if ((pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags & WHDR_DONE) != 0) {
- pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 0; /* 0 = unlocked (make it available for reading). */
- pDevice->winmm.headerFramesConsumedCapture = 0;
+ /* Buffer. */
+ pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+ if (pActualAttr != NULL) {
+ attr = *pActualAttr;
}
+ pDevice->playback.internalPeriods = attr.maxlength / attr.tlength;
+ pDevice->playback.internalPeriodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels) / pDevice->playback.internalPeriods;
+ #ifdef MA_DEBUG_OUTPUT
+ printf("[PulseAudio] Playback actual attr: maxlength=%u, tlength=%u, prebuf=%u, minreq=%u, fragsize=%u; internalPeriodSizeInFrames=%u\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->playback.internalPeriodSizeInFrames);
+ #endif
- /* If the device has been stopped we need to break. */
- if (!pDevice->winmm.isStarted) {
- break;
+ /* Name. */
+ devPlayback = ((ma_pa_stream_get_device_name_proc)pContext->pulse.pa_stream_get_device_name)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+ if (devPlayback != NULL) {
+ ma_pa_operation* pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devPlayback, ma_device_sink_name_callback, pDevice);
+ if (pOP != NULL) {
+ ma_device__wait_for_operation__pulse(pDevice, pOP);
+ ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ }
}
}
+ return MA_SUCCESS;
+
+
+on_error7:
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+ }
+on_error6:
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+ }
+on_error5:
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+ }
+on_error4:
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+ }
+on_error3: ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pDevice->pulse.pPulseContext);
+on_error2: ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pDevice->pulse.pPulseContext);
+on_error1: ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pDevice->pulse.pMainLoop);
+on_error0:
return result;
}
-ma_result ma_context_uninit__winmm(ma_context* pContext)
+
+static void ma_pulse_operation_complete_callback(ma_pa_stream* pStream, int success, void* pUserData)
{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_winmm);
+ ma_bool32* pIsSuccessful = (ma_bool32*)pUserData;
+ MA_ASSERT(pIsSuccessful != NULL);
- ma_dlclose(pContext, pContext->winmm.hWinMM);
- return MA_SUCCESS;
+ *pIsSuccessful = (ma_bool32)success;
+
+ (void)pStream; /* Unused. */
}
-ma_result ma_context_init__winmm(const ma_context_config* pConfig, ma_context* pContext)
+static ma_result ma_device__cork_stream__pulse(ma_device* pDevice, ma_device_type deviceType, int cork)
{
- ma_assert(pContext != NULL);
-
- (void)pConfig;
+ ma_context* pContext = pDevice->pContext;
+ ma_bool32 wasSuccessful;
+ ma_pa_stream* pStream;
+ ma_pa_operation* pOP;
+ ma_result result;
- pContext->winmm.hWinMM = ma_dlopen(pContext, "winmm.dll");
- if (pContext->winmm.hWinMM == NULL) {
- return MA_NO_BACKEND;
+ /* This should not be called with a duplex device type. */
+ if (deviceType == ma_device_type_duplex) {
+ return MA_INVALID_ARGS;
}
- pContext->winmm.waveOutGetNumDevs = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutGetNumDevs");
- pContext->winmm.waveOutGetDevCapsA = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutGetDevCapsA");
- pContext->winmm.waveOutOpen = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutOpen");
- pContext->winmm.waveOutClose = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutClose");
- pContext->winmm.waveOutPrepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutPrepareHeader");
- pContext->winmm.waveOutUnprepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutUnprepareHeader");
- pContext->winmm.waveOutWrite = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutWrite");
- pContext->winmm.waveOutReset = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveOutReset");
- pContext->winmm.waveInGetNumDevs = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInGetNumDevs");
- pContext->winmm.waveInGetDevCapsA = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInGetDevCapsA");
- pContext->winmm.waveInOpen = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInOpen");
- pContext->winmm.waveInClose = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInClose");
- pContext->winmm.waveInPrepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInPrepareHeader");
- pContext->winmm.waveInUnprepareHeader = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInUnprepareHeader");
- pContext->winmm.waveInAddBuffer = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInAddBuffer");
- pContext->winmm.waveInStart = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInStart");
- pContext->winmm.waveInReset = ma_dlsym(pContext, pContext->winmm.hWinMM, "waveInReset");
-
- pContext->onUninit = ma_context_uninit__winmm;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__winmm;
- pContext->onEnumDevices = ma_context_enumerate_devices__winmm;
- pContext->onGetDeviceInfo = ma_context_get_device_info__winmm;
- pContext->onDeviceInit = ma_device_init__winmm;
- pContext->onDeviceUninit = ma_device_uninit__winmm;
- pContext->onDeviceStart = NULL; /* Not used. Started in onDeviceWrite/onDeviceRead. */
- pContext->onDeviceStop = ma_device_stop__winmm;
- pContext->onDeviceWrite = ma_device_write__winmm;
- pContext->onDeviceRead = ma_device_read__winmm;
+ wasSuccessful = MA_FALSE;
- return MA_SUCCESS;
-}
-#endif
+ pStream = (ma_pa_stream*)((deviceType == ma_device_type_capture) ? pDevice->pulse.pStreamCapture : pDevice->pulse.pStreamPlayback);
+ MA_ASSERT(pStream != NULL);
+ pOP = ((ma_pa_stream_cork_proc)pContext->pulse.pa_stream_cork)(pStream, cork, ma_pulse_operation_complete_callback, &wasSuccessful);
+ if (pOP == NULL) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to cork PulseAudio stream.", (cork == 0) ? MA_FAILED_TO_START_BACKEND_DEVICE : MA_FAILED_TO_STOP_BACKEND_DEVICE);
+ }
+ result = ma_device__wait_for_operation__pulse(pDevice, pOP);
+ ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ if (result != MA_SUCCESS) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while waiting for the PulseAudio stream to cork.", result);
+ }
-/******************************************************************************
+ if (!wasSuccessful) {
+ if (cork) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to stop PulseAudio stream.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
+ } else {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to start PulseAudio stream.", MA_FAILED_TO_START_BACKEND_DEVICE);
+ }
+ }
-ALSA Backend
+ return MA_SUCCESS;
+}
-******************************************************************************/
-#ifdef MA_HAS_ALSA
+static ma_result ma_device_stop__pulse(ma_device* pDevice)
+{
+ ma_result result;
+ ma_bool32 wasSuccessful;
+ ma_pa_operation* pOP;
-#ifdef MA_NO_RUNTIME_LINKING
-#include
-typedef snd_pcm_uframes_t ma_snd_pcm_uframes_t;
-typedef snd_pcm_sframes_t ma_snd_pcm_sframes_t;
-typedef snd_pcm_stream_t ma_snd_pcm_stream_t;
-typedef snd_pcm_format_t ma_snd_pcm_format_t;
-typedef snd_pcm_access_t ma_snd_pcm_access_t;
-typedef snd_pcm_t ma_snd_pcm_t;
-typedef snd_pcm_hw_params_t ma_snd_pcm_hw_params_t;
-typedef snd_pcm_sw_params_t ma_snd_pcm_sw_params_t;
-typedef snd_pcm_format_mask_t ma_snd_pcm_format_mask_t;
-typedef snd_pcm_info_t ma_snd_pcm_info_t;
-typedef snd_pcm_channel_area_t ma_snd_pcm_channel_area_t;
-typedef snd_pcm_chmap_t ma_snd_pcm_chmap_t;
+ MA_ASSERT(pDevice != NULL);
-/* snd_pcm_stream_t */
-#define MA_SND_PCM_STREAM_PLAYBACK SND_PCM_STREAM_PLAYBACK
-#define MA_SND_PCM_STREAM_CAPTURE SND_PCM_STREAM_CAPTURE
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 1);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
-/* snd_pcm_format_t */
-#define MA_SND_PCM_FORMAT_UNKNOWN SND_PCM_FORMAT_UNKNOWN
-#define MA_SND_PCM_FORMAT_U8 SND_PCM_FORMAT_U8
-#define MA_SND_PCM_FORMAT_S16_LE SND_PCM_FORMAT_S16_LE
-#define MA_SND_PCM_FORMAT_S16_BE SND_PCM_FORMAT_S16_BE
-#define MA_SND_PCM_FORMAT_S24_LE SND_PCM_FORMAT_S24_LE
-#define MA_SND_PCM_FORMAT_S24_BE SND_PCM_FORMAT_S24_BE
-#define MA_SND_PCM_FORMAT_S32_LE SND_PCM_FORMAT_S32_LE
-#define MA_SND_PCM_FORMAT_S32_BE SND_PCM_FORMAT_S32_BE
-#define MA_SND_PCM_FORMAT_FLOAT_LE SND_PCM_FORMAT_FLOAT_LE
-#define MA_SND_PCM_FORMAT_FLOAT_BE SND_PCM_FORMAT_FLOAT_BE
-#define MA_SND_PCM_FORMAT_FLOAT64_LE SND_PCM_FORMAT_FLOAT64_LE
-#define MA_SND_PCM_FORMAT_FLOAT64_BE SND_PCM_FORMAT_FLOAT64_BE
-#define MA_SND_PCM_FORMAT_MU_LAW SND_PCM_FORMAT_MU_LAW
-#define MA_SND_PCM_FORMAT_A_LAW SND_PCM_FORMAT_A_LAW
-#define MA_SND_PCM_FORMAT_S24_3LE SND_PCM_FORMAT_S24_3LE
-#define MA_SND_PCM_FORMAT_S24_3BE SND_PCM_FORMAT_S24_3BE
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ /* The stream needs to be drained if it's a playback device. */
+ pOP = ((ma_pa_stream_drain_proc)pDevice->pContext->pulse.pa_stream_drain)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_pulse_operation_complete_callback, &wasSuccessful);
+ if (pOP != NULL) {
+ ma_device__wait_for_operation__pulse(pDevice, pOP);
+ ((ma_pa_operation_unref_proc)pDevice->pContext->pulse.pa_operation_unref)(pOP);
+ }
-/* ma_snd_pcm_access_t */
-#define MA_SND_PCM_ACCESS_MMAP_INTERLEAVED SND_PCM_ACCESS_MMAP_INTERLEAVED
-#define MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED SND_PCM_ACCESS_MMAP_NONINTERLEAVED
-#define MA_SND_PCM_ACCESS_MMAP_COMPLEX SND_PCM_ACCESS_MMAP_COMPLEX
-#define MA_SND_PCM_ACCESS_RW_INTERLEAVED SND_PCM_ACCESS_RW_INTERLEAVED
-#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED SND_PCM_ACCESS_RW_NONINTERLEAVED
+ result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 1);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
-/* Channel positions. */
-#define MA_SND_CHMAP_UNKNOWN SND_CHMAP_UNKNOWN
-#define MA_SND_CHMAP_NA SND_CHMAP_NA
-#define MA_SND_CHMAP_MONO SND_CHMAP_MONO
-#define MA_SND_CHMAP_FL SND_CHMAP_FL
-#define MA_SND_CHMAP_FR SND_CHMAP_FR
-#define MA_SND_CHMAP_RL SND_CHMAP_RL
-#define MA_SND_CHMAP_RR SND_CHMAP_RR
-#define MA_SND_CHMAP_FC SND_CHMAP_FC
-#define MA_SND_CHMAP_LFE SND_CHMAP_LFE
-#define MA_SND_CHMAP_SL SND_CHMAP_SL
-#define MA_SND_CHMAP_SR SND_CHMAP_SR
-#define MA_SND_CHMAP_RC SND_CHMAP_RC
-#define MA_SND_CHMAP_FLC SND_CHMAP_FLC
-#define MA_SND_CHMAP_FRC SND_CHMAP_FRC
-#define MA_SND_CHMAP_RLC SND_CHMAP_RLC
-#define MA_SND_CHMAP_RRC SND_CHMAP_RRC
-#define MA_SND_CHMAP_FLW SND_CHMAP_FLW
-#define MA_SND_CHMAP_FRW SND_CHMAP_FRW
-#define MA_SND_CHMAP_FLH SND_CHMAP_FLH
-#define MA_SND_CHMAP_FCH SND_CHMAP_FCH
-#define MA_SND_CHMAP_FRH SND_CHMAP_FRH
-#define MA_SND_CHMAP_TC SND_CHMAP_TC
-#define MA_SND_CHMAP_TFL SND_CHMAP_TFL
-#define MA_SND_CHMAP_TFR SND_CHMAP_TFR
-#define MA_SND_CHMAP_TFC SND_CHMAP_TFC
-#define MA_SND_CHMAP_TRL SND_CHMAP_TRL
-#define MA_SND_CHMAP_TRR SND_CHMAP_TRR
-#define MA_SND_CHMAP_TRC SND_CHMAP_TRC
-#define MA_SND_CHMAP_TFLC SND_CHMAP_TFLC
-#define MA_SND_CHMAP_TFRC SND_CHMAP_TFRC
-#define MA_SND_CHMAP_TSL SND_CHMAP_TSL
-#define MA_SND_CHMAP_TSR SND_CHMAP_TSR
-#define MA_SND_CHMAP_LLFE SND_CHMAP_LLFE
-#define MA_SND_CHMAP_RLFE SND_CHMAP_RLFE
-#define MA_SND_CHMAP_BC SND_CHMAP_BC
-#define MA_SND_CHMAP_BLC SND_CHMAP_BLC
-#define MA_SND_CHMAP_BRC SND_CHMAP_BRC
+ return MA_SUCCESS;
+}
-/* Open mode flags. */
-#define MA_SND_PCM_NO_AUTO_RESAMPLE SND_PCM_NO_AUTO_RESAMPLE
-#define MA_SND_PCM_NO_AUTO_CHANNELS SND_PCM_NO_AUTO_CHANNELS
-#define MA_SND_PCM_NO_AUTO_FORMAT SND_PCM_NO_AUTO_FORMAT
-#else
-#include /* For EPIPE, etc. */
-typedef unsigned long ma_snd_pcm_uframes_t;
-typedef long ma_snd_pcm_sframes_t;
-typedef int ma_snd_pcm_stream_t;
-typedef int ma_snd_pcm_format_t;
-typedef int ma_snd_pcm_access_t;
-typedef struct ma_snd_pcm_t ma_snd_pcm_t;
-typedef struct ma_snd_pcm_hw_params_t ma_snd_pcm_hw_params_t;
-typedef struct ma_snd_pcm_sw_params_t ma_snd_pcm_sw_params_t;
-typedef struct ma_snd_pcm_format_mask_t ma_snd_pcm_format_mask_t;
-typedef struct ma_snd_pcm_info_t ma_snd_pcm_info_t;
-typedef struct
-{
- void* addr;
- unsigned int first;
- unsigned int step;
-} ma_snd_pcm_channel_area_t;
-typedef struct
+static ma_result ma_device_write__pulse(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
{
- unsigned int channels;
- unsigned int pos[1];
-} ma_snd_pcm_chmap_t;
+ ma_uint32 totalFramesWritten;
-/* snd_pcm_state_t */
-#define MA_SND_PCM_STATE_OPEN 0
-#define MA_SND_PCM_STATE_SETUP 1
-#define MA_SND_PCM_STATE_PREPARED 2
-#define MA_SND_PCM_STATE_RUNNING 3
-#define MA_SND_PCM_STATE_XRUN 4
-#define MA_SND_PCM_STATE_DRAINING 5
-#define MA_SND_PCM_STATE_PAUSED 6
-#define MA_SND_PCM_STATE_SUSPENDED 7
-#define MA_SND_PCM_STATE_DISCONNECTED 8
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(pPCMFrames != NULL);
+ MA_ASSERT(frameCount > 0);
-/* snd_pcm_stream_t */
-#define MA_SND_PCM_STREAM_PLAYBACK 0
-#define MA_SND_PCM_STREAM_CAPTURE 1
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = 0;
+ }
-/* snd_pcm_format_t */
-#define MA_SND_PCM_FORMAT_UNKNOWN -1
-#define MA_SND_PCM_FORMAT_U8 1
-#define MA_SND_PCM_FORMAT_S16_LE 2
-#define MA_SND_PCM_FORMAT_S16_BE 3
-#define MA_SND_PCM_FORMAT_S24_LE 6
-#define MA_SND_PCM_FORMAT_S24_BE 7
-#define MA_SND_PCM_FORMAT_S32_LE 10
-#define MA_SND_PCM_FORMAT_S32_BE 11
-#define MA_SND_PCM_FORMAT_FLOAT_LE 14
-#define MA_SND_PCM_FORMAT_FLOAT_BE 15
-#define MA_SND_PCM_FORMAT_FLOAT64_LE 16
-#define MA_SND_PCM_FORMAT_FLOAT64_BE 17
-#define MA_SND_PCM_FORMAT_MU_LAW 20
-#define MA_SND_PCM_FORMAT_A_LAW 21
-#define MA_SND_PCM_FORMAT_S24_3LE 32
-#define MA_SND_PCM_FORMAT_S24_3BE 33
+ totalFramesWritten = 0;
+ while (totalFramesWritten < frameCount) {
+ if (ma_device__get_state(pDevice) != MA_STATE_STARTED) {
+ return MA_DEVICE_NOT_STARTED;
+ }
-/* snd_pcm_access_t */
-#define MA_SND_PCM_ACCESS_MMAP_INTERLEAVED 0
-#define MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED 1
-#define MA_SND_PCM_ACCESS_MMAP_COMPLEX 2
-#define MA_SND_PCM_ACCESS_RW_INTERLEAVED 3
-#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED 4
+ /* Place the data into the mapped buffer if we have one. */
+ if (pDevice->pulse.pMappedBufferPlayback != NULL && pDevice->pulse.mappedBufferFramesRemainingPlayback > 0) {
+ ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 mappedBufferFramesConsumed = pDevice->pulse.mappedBufferFramesCapacityPlayback - pDevice->pulse.mappedBufferFramesRemainingPlayback;
-/* Channel positions. */
-#define MA_SND_CHMAP_UNKNOWN 0
-#define MA_SND_CHMAP_NA 1
-#define MA_SND_CHMAP_MONO 2
-#define MA_SND_CHMAP_FL 3
-#define MA_SND_CHMAP_FR 4
-#define MA_SND_CHMAP_RL 5
-#define MA_SND_CHMAP_RR 6
-#define MA_SND_CHMAP_FC 7
-#define MA_SND_CHMAP_LFE 8
-#define MA_SND_CHMAP_SL 9
-#define MA_SND_CHMAP_SR 10
-#define MA_SND_CHMAP_RC 11
-#define MA_SND_CHMAP_FLC 12
-#define MA_SND_CHMAP_FRC 13
-#define MA_SND_CHMAP_RLC 14
-#define MA_SND_CHMAP_RRC 15
-#define MA_SND_CHMAP_FLW 16
-#define MA_SND_CHMAP_FRW 17
-#define MA_SND_CHMAP_FLH 18
-#define MA_SND_CHMAP_FCH 19
-#define MA_SND_CHMAP_FRH 20
-#define MA_SND_CHMAP_TC 21
-#define MA_SND_CHMAP_TFL 22
-#define MA_SND_CHMAP_TFR 23
-#define MA_SND_CHMAP_TFC 24
-#define MA_SND_CHMAP_TRL 25
-#define MA_SND_CHMAP_TRR 26
-#define MA_SND_CHMAP_TRC 27
-#define MA_SND_CHMAP_TFLC 28
-#define MA_SND_CHMAP_TFRC 29
-#define MA_SND_CHMAP_TSL 30
-#define MA_SND_CHMAP_TSR 31
-#define MA_SND_CHMAP_LLFE 32
-#define MA_SND_CHMAP_RLFE 33
-#define MA_SND_CHMAP_BC 34
-#define MA_SND_CHMAP_BLC 35
-#define MA_SND_CHMAP_BRC 36
+ void* pDst = (ma_uint8*)pDevice->pulse.pMappedBufferPlayback + (mappedBufferFramesConsumed * bpf);
+ const void* pSrc = (const ma_uint8*)pPCMFrames + (totalFramesWritten * bpf);
+ ma_uint32 framesToCopy = ma_min(pDevice->pulse.mappedBufferFramesRemainingPlayback, (frameCount - totalFramesWritten));
+ MA_COPY_MEMORY(pDst, pSrc, framesToCopy * bpf);
-/* Open mode flags. */
-#define MA_SND_PCM_NO_AUTO_RESAMPLE 0x00010000
-#define MA_SND_PCM_NO_AUTO_CHANNELS 0x00020000
-#define MA_SND_PCM_NO_AUTO_FORMAT 0x00040000
-#endif
+ pDevice->pulse.mappedBufferFramesRemainingPlayback -= framesToCopy;
+ totalFramesWritten += framesToCopy;
+ }
-typedef int (* ma_snd_pcm_open_proc) (ma_snd_pcm_t **pcm, const char *name, ma_snd_pcm_stream_t stream, int mode);
-typedef int (* ma_snd_pcm_close_proc) (ma_snd_pcm_t *pcm);
-typedef size_t (* ma_snd_pcm_hw_params_sizeof_proc) (void);
-typedef int (* ma_snd_pcm_hw_params_any_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params);
-typedef int (* ma_snd_pcm_hw_params_set_format_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t val);
-typedef int (* ma_snd_pcm_hw_params_set_format_first_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t *format);
-typedef void (* ma_snd_pcm_hw_params_get_format_mask_proc) (ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_mask_t *mask);
-typedef int (* ma_snd_pcm_hw_params_set_channels_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val);
-typedef int (* ma_snd_pcm_hw_params_set_rate_resample_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val);
-typedef int (* ma_snd_pcm_hw_params_set_rate_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir);
-typedef int (* ma_snd_pcm_hw_params_set_buffer_size_near_proc)(ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val);
-typedef int (* ma_snd_pcm_hw_params_set_periods_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir);
-typedef int (* ma_snd_pcm_hw_params_set_access_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t _access);
-typedef int (* ma_snd_pcm_hw_params_get_format_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t *format);
-typedef int (* ma_snd_pcm_hw_params_get_channels_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val);
-typedef int (* ma_snd_pcm_hw_params_get_channels_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val);
-typedef int (* ma_snd_pcm_hw_params_get_channels_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val);
-typedef int (* ma_snd_pcm_hw_params_get_rate_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir);
-typedef int (* ma_snd_pcm_hw_params_get_rate_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir);
-typedef int (* ma_snd_pcm_hw_params_get_rate_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir);
-typedef int (* ma_snd_pcm_hw_params_get_buffer_size_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val);
-typedef int (* ma_snd_pcm_hw_params_get_periods_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir);
-typedef int (* ma_snd_pcm_hw_params_get_access_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t *_access);
-typedef int (* ma_snd_pcm_hw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params);
-typedef size_t (* ma_snd_pcm_sw_params_sizeof_proc) (void);
-typedef int (* ma_snd_pcm_sw_params_current_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params);
-typedef int (* ma_snd_pcm_sw_params_get_boundary_proc) (ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t* val);
-typedef int (* ma_snd_pcm_sw_params_set_avail_min_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val);
-typedef int (* ma_snd_pcm_sw_params_set_start_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val);
-typedef int (* ma_snd_pcm_sw_params_set_stop_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val);
-typedef int (* ma_snd_pcm_sw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params);
-typedef size_t (* ma_snd_pcm_format_mask_sizeof_proc) (void);
-typedef int (* ma_snd_pcm_format_mask_test_proc) (const ma_snd_pcm_format_mask_t *mask, ma_snd_pcm_format_t val);
-typedef ma_snd_pcm_chmap_t * (* ma_snd_pcm_get_chmap_proc) (ma_snd_pcm_t *pcm);
-typedef int (* ma_snd_pcm_state_proc) (ma_snd_pcm_t *pcm);
-typedef int (* ma_snd_pcm_prepare_proc) (ma_snd_pcm_t *pcm);
-typedef int (* ma_snd_pcm_start_proc) (ma_snd_pcm_t *pcm);
-typedef int (* ma_snd_pcm_drop_proc) (ma_snd_pcm_t *pcm);
-typedef int (* ma_snd_pcm_drain_proc) (ma_snd_pcm_t *pcm);
-typedef int (* ma_snd_device_name_hint_proc) (int card, const char *iface, void ***hints);
-typedef char * (* ma_snd_device_name_get_hint_proc) (const void *hint, const char *id);
-typedef int (* ma_snd_card_get_index_proc) (const char *name);
-typedef int (* ma_snd_device_name_free_hint_proc) (void **hints);
-typedef int (* ma_snd_pcm_mmap_begin_proc) (ma_snd_pcm_t *pcm, const ma_snd_pcm_channel_area_t **areas, ma_snd_pcm_uframes_t *offset, ma_snd_pcm_uframes_t *frames);
-typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_mmap_commit_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_uframes_t offset, ma_snd_pcm_uframes_t frames);
-typedef int (* ma_snd_pcm_recover_proc) (ma_snd_pcm_t *pcm, int err, int silent);
-typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_readi_proc) (ma_snd_pcm_t *pcm, void *buffer, ma_snd_pcm_uframes_t size);
-typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_writei_proc) (ma_snd_pcm_t *pcm, const void *buffer, ma_snd_pcm_uframes_t size);
-typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_proc) (ma_snd_pcm_t *pcm);
-typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_update_proc) (ma_snd_pcm_t *pcm);
-typedef int (* ma_snd_pcm_wait_proc) (ma_snd_pcm_t *pcm, int timeout);
-typedef int (* ma_snd_pcm_info_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_info_t* info);
-typedef size_t (* ma_snd_pcm_info_sizeof_proc) ();
-typedef const char* (* ma_snd_pcm_info_get_name_proc) (const ma_snd_pcm_info_t* info);
-typedef int (* ma_snd_config_update_free_global_proc) ();
+ /*
+ Getting here means we've run out of data in the currently mapped chunk. We need to write this to the device and then try
+ mapping another chunk. If this fails we need to wait for space to become available.
+ */
+ if (pDevice->pulse.mappedBufferFramesCapacityPlayback > 0 && pDevice->pulse.mappedBufferFramesRemainingPlayback == 0) {
+ size_t nbytes = pDevice->pulse.mappedBufferFramesCapacityPlayback * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+
+ int error = ((ma_pa_stream_write_proc)pDevice->pContext->pulse.pa_stream_write)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, pDevice->pulse.pMappedBufferPlayback, nbytes, NULL, 0, MA_PA_SEEK_RELATIVE);
+ if (error < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to write data to the PulseAudio stream.", ma_result_from_pulse(error));
+ }
+
+ pDevice->pulse.pMappedBufferPlayback = NULL;
+ pDevice->pulse.mappedBufferFramesRemainingPlayback = 0;
+ pDevice->pulse.mappedBufferFramesCapacityPlayback = 0;
+ }
+
+ MA_ASSERT(totalFramesWritten <= frameCount);
+ if (totalFramesWritten == frameCount) {
+ break;
+ }
+
+ /* Getting here means we need to map a new buffer. If we don't have enough space we need to wait for more. */
+ for (;;) {
+ size_t writableSizeInBytes;
+
+ /* If the device has been corked, don't try to continue. */
+ if (((ma_pa_stream_is_corked_proc)pDevice->pContext->pulse.pa_stream_is_corked)((ma_pa_stream*)pDevice->pulse.pStreamPlayback)) {
+ break;
+ }
-/* This array specifies each of the common devices that can be used for both playback and capture. */
-const char* g_maCommonDeviceNamesALSA[] = {
- "default",
- "null",
- "pulse",
- "jack"
-};
+ writableSizeInBytes = ((ma_pa_stream_writable_size_proc)pDevice->pContext->pulse.pa_stream_writable_size)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+ if (writableSizeInBytes != (size_t)-1) {
+ if (writableSizeInBytes > 0) {
+                    /* Data is available. */
+ size_t bytesToMap = writableSizeInBytes;
+ int error = ((ma_pa_stream_begin_write_proc)pDevice->pContext->pulse.pa_stream_begin_write)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, &pDevice->pulse.pMappedBufferPlayback, &bytesToMap);
+ if (error < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to map write buffer.", ma_result_from_pulse(error));
+ }
-/* This array allows us to blacklist specific playback devices. */
-const char* g_maBlacklistedPlaybackDeviceNamesALSA[] = {
- ""
-};
+ pDevice->pulse.mappedBufferFramesCapacityPlayback = bytesToMap / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ pDevice->pulse.mappedBufferFramesRemainingPlayback = pDevice->pulse.mappedBufferFramesCapacityPlayback;
-/* This array allows us to blacklist specific capture devices. */
-const char* g_maBlacklistedCaptureDeviceNamesALSA[] = {
- ""
-};
+ break;
+ } else {
+ /* No data available. Need to wait for more. */
+ int error = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL);
+ if (error < 0) {
+ return ma_result_from_pulse(error);
+ }
+
+ continue;
+ }
+ } else {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to query the stream's writable size.", MA_ERROR);
+ }
+ }
+ }
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = totalFramesWritten;
+ }
-/*
-This array allows miniaudio to control device-specific default buffer sizes. This uses a scaling factor. Order is important. If
-any part of the string is present in the device's name, the associated scale will be used.
-*/
-static struct
-{
- const char* name;
- float scale;
-} g_maDefaultBufferSizeScalesALSA[] = {
- {"bcm2835 IEC958/HDMI", 2.0f},
- {"bcm2835 ALSA", 2.0f}
-};
+ return MA_SUCCESS;
+}
-float ma_find_default_buffer_size_scale__alsa(const char* deviceName)
+static ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead)
{
- size_t i;
+ ma_uint32 totalFramesRead;
- if (deviceName == NULL) {
- return 1;
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(pPCMFrames != NULL);
+ MA_ASSERT(frameCount > 0);
+
+ if (pFramesRead != NULL) {
+ *pFramesRead = 0;
}
- for (i = 0; i < ma_countof(g_maDefaultBufferSizeScalesALSA); ++i) {
- if (strstr(g_maDefaultBufferSizeScalesALSA[i].name, deviceName) != NULL) {
- return g_maDefaultBufferSizeScalesALSA[i].scale;
+ totalFramesRead = 0;
+ while (totalFramesRead < frameCount) {
+ if (ma_device__get_state(pDevice) != MA_STATE_STARTED) {
+ return MA_DEVICE_NOT_STARTED;
+ }
+
+ /*
+ If a buffer is mapped we need to read from that first. Once it's consumed we need to drop it. Note that pDevice->pulse.pMappedBufferCapture can be null in which
+ case it could be a hole. In this case we just write zeros into the output buffer.
+ */
+ if (pDevice->pulse.mappedBufferFramesRemainingCapture > 0) {
+ ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 mappedBufferFramesConsumed = pDevice->pulse.mappedBufferFramesCapacityCapture - pDevice->pulse.mappedBufferFramesRemainingCapture;
+
+ ma_uint32 framesToCopy = ma_min(pDevice->pulse.mappedBufferFramesRemainingCapture, (frameCount - totalFramesRead));
+ void* pDst = (ma_uint8*)pPCMFrames + (totalFramesRead * bpf);
+
+ /*
+            This little bit of logic here is specifically for PulseAudio and its hole management. The buffer pointer will be set to NULL
+ when the current fragment is a hole. For a hole we just output silence.
+ */
+ if (pDevice->pulse.pMappedBufferCapture != NULL) {
+ const void* pSrc = (const ma_uint8*)pDevice->pulse.pMappedBufferCapture + (mappedBufferFramesConsumed * bpf);
+ MA_COPY_MEMORY(pDst, pSrc, framesToCopy * bpf);
+ } else {
+ MA_ZERO_MEMORY(pDst, framesToCopy * bpf);
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[PulseAudio] ma_device_read__pulse: Filling hole with silence.\n");
+ #endif
+ }
+
+ pDevice->pulse.mappedBufferFramesRemainingCapture -= framesToCopy;
+ totalFramesRead += framesToCopy;
+ }
+
+ /*
+ Getting here means we've run out of data in the currently mapped chunk. We need to drop this from the device and then try
+ mapping another chunk. If this fails we need to wait for data to become available.
+ */
+ if (pDevice->pulse.mappedBufferFramesCapacityCapture > 0 && pDevice->pulse.mappedBufferFramesRemainingCapture == 0) {
+ int error;
+
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[PulseAudio] ma_device_read__pulse: Call pa_stream_drop()\n");
+ #endif
+
+ error = ((ma_pa_stream_drop_proc)pDevice->pContext->pulse.pa_stream_drop)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+ if (error != 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to drop fragment.", ma_result_from_pulse(error));
+ }
+
+ pDevice->pulse.pMappedBufferCapture = NULL;
+ pDevice->pulse.mappedBufferFramesRemainingCapture = 0;
+ pDevice->pulse.mappedBufferFramesCapacityCapture = 0;
+ }
+
+ MA_ASSERT(totalFramesRead <= frameCount);
+ if (totalFramesRead == frameCount) {
+ break;
+ }
+
+ /* Getting here means we need to map a new buffer. If we don't have enough data we wait for more. */
+ for (;;) {
+ int error;
+ size_t bytesMapped;
+
+ if (ma_device__get_state(pDevice) != MA_STATE_STARTED) {
+ break;
+ }
+
+ /* If the device has been corked, don't try to continue. */
+ if (((ma_pa_stream_is_corked_proc)pDevice->pContext->pulse.pa_stream_is_corked)((ma_pa_stream*)pDevice->pulse.pStreamCapture)) {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[PulseAudio] ma_device_read__pulse: Corked.\n");
+ #endif
+ break;
+ }
+
+ MA_ASSERT(pDevice->pulse.pMappedBufferCapture == NULL); /* <-- We're about to map a buffer which means we shouldn't have an existing mapping. */
+
+ error = ((ma_pa_stream_peek_proc)pDevice->pContext->pulse.pa_stream_peek)((ma_pa_stream*)pDevice->pulse.pStreamCapture, &pDevice->pulse.pMappedBufferCapture, &bytesMapped);
+ if (error < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to peek capture buffer.", ma_result_from_pulse(error));
+ }
+
+ if (bytesMapped > 0) {
+ pDevice->pulse.mappedBufferFramesCapacityCapture = bytesMapped / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ pDevice->pulse.mappedBufferFramesRemainingCapture = pDevice->pulse.mappedBufferFramesCapacityCapture;
+
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[PulseAudio] ma_device_read__pulse: Mapped. mappedBufferFramesCapacityCapture=%d, mappedBufferFramesRemainingCapture=%d\n", pDevice->pulse.mappedBufferFramesCapacityCapture, pDevice->pulse.mappedBufferFramesRemainingCapture);
+ #endif
+
+ if (pDevice->pulse.pMappedBufferCapture == NULL) {
+ /* It's a hole. */
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[PulseAudio] ma_device_read__pulse: Call pa_stream_peek(). Hole.\n");
+ #endif
+ }
+
+ break;
+ } else {
+ if (pDevice->pulse.pMappedBufferCapture == NULL) {
+ /* Nothing available yet. Need to wait for more. */
+
+ /*
+ I have had reports of a deadlock in this part of the code. I have reproduced this when using the "Built-in Audio Analogue Stereo" device without
+ an actual microphone connected. I'm experimenting here by not blocking in pa_mainloop_iterate() and instead sleep for a bit when there are no
+ dispatches.
+ */
+ error = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 0, NULL);
+ if (error < 0) {
+ return ma_result_from_pulse(error);
+ }
+
+ /* Sleep for a bit if nothing was dispatched. */
+ if (error == 0) {
+ ma_sleep(1);
+ }
+
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[PulseAudio] ma_device_read__pulse: No data available. Waiting. mappedBufferFramesCapacityCapture=%d, mappedBufferFramesRemainingCapture=%d\n", pDevice->pulse.mappedBufferFramesCapacityCapture, pDevice->pulse.mappedBufferFramesRemainingCapture);
+ #endif
+ } else {
+ /* Getting here means we mapped 0 bytes, but have a non-NULL buffer. I don't think this should ever happen. */
+ MA_ASSERT(MA_FALSE);
+ }
+ }
}
}
- return 1;
+ if (pFramesRead != NULL) {
+ *pFramesRead = totalFramesRead;
+ }
+
+ return MA_SUCCESS;
}
-ma_snd_pcm_format_t ma_convert_ma_format_to_alsa_format(ma_format format)
+static ma_result ma_device_main_loop__pulse(ma_device* pDevice)
{
- ma_snd_pcm_format_t ALSAFormats[] = {
- MA_SND_PCM_FORMAT_UNKNOWN, /* ma_format_unknown */
- MA_SND_PCM_FORMAT_U8, /* ma_format_u8 */
- MA_SND_PCM_FORMAT_S16_LE, /* ma_format_s16 */
- MA_SND_PCM_FORMAT_S24_3LE, /* ma_format_s24 */
- MA_SND_PCM_FORMAT_S32_LE, /* ma_format_s32 */
- MA_SND_PCM_FORMAT_FLOAT_LE /* ma_format_f32 */
- };
+ ma_result result = MA_SUCCESS;
+ ma_bool32 exitLoop = MA_FALSE;
- if (ma_is_big_endian()) {
- ALSAFormats[0] = MA_SND_PCM_FORMAT_UNKNOWN;
- ALSAFormats[1] = MA_SND_PCM_FORMAT_U8;
- ALSAFormats[2] = MA_SND_PCM_FORMAT_S16_BE;
- ALSAFormats[3] = MA_SND_PCM_FORMAT_S24_3BE;
- ALSAFormats[4] = MA_SND_PCM_FORMAT_S32_BE;
- ALSAFormats[5] = MA_SND_PCM_FORMAT_FLOAT_BE;
+ MA_ASSERT(pDevice != NULL);
+
+ /* The stream needs to be uncorked first. We do this at the top for both capture and playback for PulseAudio. */
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 0);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 0);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
}
- return ALSAFormats[format];
-}
-ma_format ma_format_from_alsa(ma_snd_pcm_format_t formatALSA)
-{
- if (ma_is_little_endian()) {
- switch (formatALSA) {
- case MA_SND_PCM_FORMAT_S16_LE: return ma_format_s16;
- case MA_SND_PCM_FORMAT_S24_3LE: return ma_format_s24;
- case MA_SND_PCM_FORMAT_S32_LE: return ma_format_s32;
- case MA_SND_PCM_FORMAT_FLOAT_LE: return ma_format_f32;
- default: break;
- }
- } else {
- switch (formatALSA) {
- case MA_SND_PCM_FORMAT_S16_BE: return ma_format_s16;
- case MA_SND_PCM_FORMAT_S24_3BE: return ma_format_s24;
- case MA_SND_PCM_FORMAT_S32_BE: return ma_format_s32;
- case MA_SND_PCM_FORMAT_FLOAT_BE: return ma_format_f32;
+ while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) {
+ switch (pDevice->type)
+ {
+ case ma_device_type_duplex:
+ {
+ /* The process is: device_read -> convert -> callback -> convert -> device_write */
+ ma_uint32 totalCapturedDeviceFramesProcessed = 0;
+ ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames);
+
+ while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) {
+ ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 capturedDeviceFramesRemaining;
+ ma_uint32 capturedDeviceFramesProcessed;
+ ma_uint32 capturedDeviceFramesToProcess;
+ ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed;
+ if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) {
+ capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames;
+ }
+
+ result = ma_device_read__pulse(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ capturedDeviceFramesRemaining = capturedDeviceFramesToProcess;
+ capturedDeviceFramesProcessed = 0;
+
+ for (;;) {
+ ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+ ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+ ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames);
+ ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining;
+ ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+
+ /* Convert capture data from device format to client format. */
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ /*
+                            If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small
+ which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE.
+ */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
+
+                        ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration);   /* Safe cast. */
+
+ capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+ capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+
+ /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */
+ for (;;) {
+ ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration;
+ ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames;
+ result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ result = ma_device_write__pulse(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
+ }
+
+ /* In case an error happened from ma_device_write__pulse()... */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+ }
+
+ totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed;
+ }
+ } break;
+
+ case ma_device_type_capture:
+ {
+ ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames;
+ ma_uint32 framesReadThisPeriod = 0;
+ while (framesReadThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToReadThisIteration = framesRemainingInPeriod;
+ if (framesToReadThisIteration > intermediaryBufferSizeInFrames) {
+ framesToReadThisIteration = intermediaryBufferSizeInFrames;
+ }
+
+ result = ma_device_read__pulse(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer);
+
+ framesReadThisPeriod += framesProcessed;
+ }
+ } break;
+
+ case ma_device_type_playback:
+ {
+ ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
+ ma_uint32 framesWrittenThisPeriod = 0;
+ while (framesWrittenThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod;
+ if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) {
+ framesToWriteThisIteration = intermediaryBufferSizeInFrames;
+ }
+
+ ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer);
+
+ result = ma_device_write__pulse(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ framesWrittenThisPeriod += framesProcessed;
+ }
+ } break;
+
+ /* To silence a warning. Will never hit this. */
+ case ma_device_type_loopback:
default: break;
}
}
- /* Endian agnostic. */
- switch (formatALSA) {
- case MA_SND_PCM_FORMAT_U8: return ma_format_u8;
- default: return ma_format_unknown;
- }
+ /* Here is where the device needs to be stopped. */
+ ma_device_stop__pulse(pDevice);
+
+ return result;
}
-ma_channel ma_convert_alsa_channel_position_to_ma_channel(unsigned int alsaChannelPos)
+
+static ma_result ma_context_uninit__pulse(ma_context* pContext)
{
- switch (alsaChannelPos)
- {
- case MA_SND_CHMAP_MONO: return MA_CHANNEL_MONO;
- case MA_SND_CHMAP_FL: return MA_CHANNEL_FRONT_LEFT;
- case MA_SND_CHMAP_FR: return MA_CHANNEL_FRONT_RIGHT;
- case MA_SND_CHMAP_RL: return MA_CHANNEL_BACK_LEFT;
- case MA_SND_CHMAP_RR: return MA_CHANNEL_BACK_RIGHT;
- case MA_SND_CHMAP_FC: return MA_CHANNEL_FRONT_CENTER;
- case MA_SND_CHMAP_LFE: return MA_CHANNEL_LFE;
- case MA_SND_CHMAP_SL: return MA_CHANNEL_SIDE_LEFT;
- case MA_SND_CHMAP_SR: return MA_CHANNEL_SIDE_RIGHT;
- case MA_SND_CHMAP_RC: return MA_CHANNEL_BACK_CENTER;
- case MA_SND_CHMAP_FLC: return MA_CHANNEL_FRONT_LEFT_CENTER;
- case MA_SND_CHMAP_FRC: return MA_CHANNEL_FRONT_RIGHT_CENTER;
- case MA_SND_CHMAP_RLC: return 0;
- case MA_SND_CHMAP_RRC: return 0;
- case MA_SND_CHMAP_FLW: return 0;
- case MA_SND_CHMAP_FRW: return 0;
- case MA_SND_CHMAP_FLH: return 0;
- case MA_SND_CHMAP_FCH: return 0;
- case MA_SND_CHMAP_FRH: return 0;
- case MA_SND_CHMAP_TC: return MA_CHANNEL_TOP_CENTER;
- case MA_SND_CHMAP_TFL: return MA_CHANNEL_TOP_FRONT_LEFT;
- case MA_SND_CHMAP_TFR: return MA_CHANNEL_TOP_FRONT_RIGHT;
- case MA_SND_CHMAP_TFC: return MA_CHANNEL_TOP_FRONT_CENTER;
- case MA_SND_CHMAP_TRL: return MA_CHANNEL_TOP_BACK_LEFT;
- case MA_SND_CHMAP_TRR: return MA_CHANNEL_TOP_BACK_RIGHT;
- case MA_SND_CHMAP_TRC: return MA_CHANNEL_TOP_BACK_CENTER;
- default: break;
- }
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_pulseaudio);
- return 0;
+ ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks);
+ pContext->pulse.pServerName = NULL;
+
+ ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks);
+ pContext->pulse.pApplicationName = NULL;
+
+#ifndef MA_NO_RUNTIME_LINKING
+ ma_dlclose(pContext, pContext->pulse.pulseSO);
+#endif
+
+ return MA_SUCCESS;
}
-ma_bool32 ma_is_common_device_name__alsa(const char* name)
+static ma_result ma_context_init__pulse(const ma_context_config* pConfig, ma_context* pContext)
{
- size_t iName;
- for (iName = 0; iName < ma_countof(g_maCommonDeviceNamesALSA); ++iName) {
- if (ma_strcmp(name, g_maCommonDeviceNamesALSA[iName]) == 0) {
- return MA_TRUE;
+#ifndef MA_NO_RUNTIME_LINKING
+ const char* libpulseNames[] = {
+ "libpulse.so",
+ "libpulse.so.0"
+ };
+ size_t i;
+
+ for (i = 0; i < ma_countof(libpulseNames); ++i) {
+ pContext->pulse.pulseSO = ma_dlopen(pContext, libpulseNames[i]);
+ if (pContext->pulse.pulseSO != NULL) {
+ break;
}
}
- return MA_FALSE;
-}
+ if (pContext->pulse.pulseSO == NULL) {
+ return MA_NO_BACKEND;
+ }
+ pContext->pulse.pa_mainloop_new = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_new");
+ pContext->pulse.pa_mainloop_free = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_free");
+ pContext->pulse.pa_mainloop_get_api = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_get_api");
+ pContext->pulse.pa_mainloop_iterate = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_iterate");
+ pContext->pulse.pa_mainloop_wakeup = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_wakeup");
+ pContext->pulse.pa_context_new = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_new");
+ pContext->pulse.pa_context_unref = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_unref");
+ pContext->pulse.pa_context_connect = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_connect");
+ pContext->pulse.pa_context_disconnect = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_disconnect");
+ pContext->pulse.pa_context_set_state_callback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_set_state_callback");
+ pContext->pulse.pa_context_get_state = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_state");
+ pContext->pulse.pa_context_get_sink_info_list = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_sink_info_list");
+ pContext->pulse.pa_context_get_source_info_list = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_source_info_list");
+ pContext->pulse.pa_context_get_sink_info_by_name = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_sink_info_by_name");
+ pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_source_info_by_name");
+ pContext->pulse.pa_operation_unref = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_operation_unref");
+ pContext->pulse.pa_operation_get_state = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_operation_get_state");
+ pContext->pulse.pa_channel_map_init_extend = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_init_extend");
+ pContext->pulse.pa_channel_map_valid = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_valid");
+ pContext->pulse.pa_channel_map_compatible = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_compatible");
+ pContext->pulse.pa_stream_new = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_new");
+ pContext->pulse.pa_stream_unref = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_unref");
+ pContext->pulse.pa_stream_connect_playback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_connect_playback");
+ pContext->pulse.pa_stream_connect_record = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_connect_record");
+ pContext->pulse.pa_stream_disconnect = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_disconnect");
+ pContext->pulse.pa_stream_get_state = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_state");
+ pContext->pulse.pa_stream_get_sample_spec = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_sample_spec");
+ pContext->pulse.pa_stream_get_channel_map = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_channel_map");
+ pContext->pulse.pa_stream_get_buffer_attr = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_buffer_attr");
+ pContext->pulse.pa_stream_set_buffer_attr = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_buffer_attr");
+ pContext->pulse.pa_stream_get_device_name = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_device_name");
+ pContext->pulse.pa_stream_set_write_callback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_write_callback");
+ pContext->pulse.pa_stream_set_read_callback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_read_callback");
+ pContext->pulse.pa_stream_flush = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_flush");
+ pContext->pulse.pa_stream_drain = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_drain");
+ pContext->pulse.pa_stream_is_corked = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_is_corked");
+ pContext->pulse.pa_stream_cork = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_cork");
+ pContext->pulse.pa_stream_trigger = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_trigger");
+ pContext->pulse.pa_stream_begin_write = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_begin_write");
+ pContext->pulse.pa_stream_write = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_write");
+ pContext->pulse.pa_stream_peek = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_peek");
+ pContext->pulse.pa_stream_drop = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_drop");
+ pContext->pulse.pa_stream_writable_size = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_writable_size");
+ pContext->pulse.pa_stream_readable_size = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_readable_size");
+#else
+ /* This strange assignment system is just for type safety. */
+ ma_pa_mainloop_new_proc _pa_mainloop_new = pa_mainloop_new;
+ ma_pa_mainloop_free_proc _pa_mainloop_free = pa_mainloop_free;
+ ma_pa_mainloop_get_api_proc _pa_mainloop_get_api = pa_mainloop_get_api;
+ ma_pa_mainloop_iterate_proc _pa_mainloop_iterate = pa_mainloop_iterate;
+ ma_pa_mainloop_wakeup_proc _pa_mainloop_wakeup = pa_mainloop_wakeup;
+ ma_pa_context_new_proc _pa_context_new = pa_context_new;
+ ma_pa_context_unref_proc _pa_context_unref = pa_context_unref;
+ ma_pa_context_connect_proc _pa_context_connect = pa_context_connect;
+ ma_pa_context_disconnect_proc _pa_context_disconnect = pa_context_disconnect;
+ ma_pa_context_set_state_callback_proc _pa_context_set_state_callback = pa_context_set_state_callback;
+ ma_pa_context_get_state_proc _pa_context_get_state = pa_context_get_state;
+ ma_pa_context_get_sink_info_list_proc _pa_context_get_sink_info_list = pa_context_get_sink_info_list;
+ ma_pa_context_get_source_info_list_proc _pa_context_get_source_info_list = pa_context_get_source_info_list;
+ ma_pa_context_get_sink_info_by_name_proc _pa_context_get_sink_info_by_name = pa_context_get_sink_info_by_name;
+ ma_pa_context_get_source_info_by_name_proc _pa_context_get_source_info_by_name= pa_context_get_source_info_by_name;
+ ma_pa_operation_unref_proc _pa_operation_unref = pa_operation_unref;
+ ma_pa_operation_get_state_proc _pa_operation_get_state = pa_operation_get_state;
+ ma_pa_channel_map_init_extend_proc _pa_channel_map_init_extend = pa_channel_map_init_extend;
+ ma_pa_channel_map_valid_proc _pa_channel_map_valid = pa_channel_map_valid;
+ ma_pa_channel_map_compatible_proc _pa_channel_map_compatible = pa_channel_map_compatible;
+ ma_pa_stream_new_proc _pa_stream_new = pa_stream_new;
+ ma_pa_stream_unref_proc _pa_stream_unref = pa_stream_unref;
+ ma_pa_stream_connect_playback_proc _pa_stream_connect_playback = pa_stream_connect_playback;
+ ma_pa_stream_connect_record_proc _pa_stream_connect_record = pa_stream_connect_record;
+ ma_pa_stream_disconnect_proc _pa_stream_disconnect = pa_stream_disconnect;
+ ma_pa_stream_get_state_proc _pa_stream_get_state = pa_stream_get_state;
+ ma_pa_stream_get_sample_spec_proc _pa_stream_get_sample_spec = pa_stream_get_sample_spec;
+ ma_pa_stream_get_channel_map_proc _pa_stream_get_channel_map = pa_stream_get_channel_map;
+ ma_pa_stream_get_buffer_attr_proc _pa_stream_get_buffer_attr = pa_stream_get_buffer_attr;
+ ma_pa_stream_set_buffer_attr_proc _pa_stream_set_buffer_attr = pa_stream_set_buffer_attr;
+ ma_pa_stream_get_device_name_proc _pa_stream_get_device_name = pa_stream_get_device_name;
+ ma_pa_stream_set_write_callback_proc _pa_stream_set_write_callback = pa_stream_set_write_callback;
+ ma_pa_stream_set_read_callback_proc _pa_stream_set_read_callback = pa_stream_set_read_callback;
+ ma_pa_stream_flush_proc _pa_stream_flush = pa_stream_flush;
+ ma_pa_stream_drain_proc _pa_stream_drain = pa_stream_drain;
+ ma_pa_stream_is_corked_proc _pa_stream_is_corked = pa_stream_is_corked;
+ ma_pa_stream_cork_proc _pa_stream_cork = pa_stream_cork;
+ ma_pa_stream_trigger_proc _pa_stream_trigger = pa_stream_trigger;
+ ma_pa_stream_begin_write_proc _pa_stream_begin_write = pa_stream_begin_write;
+ ma_pa_stream_write_proc _pa_stream_write = pa_stream_write;
+ ma_pa_stream_peek_proc _pa_stream_peek = pa_stream_peek;
+ ma_pa_stream_drop_proc _pa_stream_drop = pa_stream_drop;
+ ma_pa_stream_writable_size_proc _pa_stream_writable_size = pa_stream_writable_size;
+ ma_pa_stream_readable_size_proc _pa_stream_readable_size = pa_stream_readable_size;
-ma_bool32 ma_is_playback_device_blacklisted__alsa(const char* name)
-{
- size_t iName;
- for (iName = 0; iName < ma_countof(g_maBlacklistedPlaybackDeviceNamesALSA); ++iName) {
- if (ma_strcmp(name, g_maBlacklistedPlaybackDeviceNamesALSA[iName]) == 0) {
- return MA_TRUE;
- }
- }
+ pContext->pulse.pa_mainloop_new = (ma_proc)_pa_mainloop_new;
+ pContext->pulse.pa_mainloop_free = (ma_proc)_pa_mainloop_free;
+ pContext->pulse.pa_mainloop_get_api = (ma_proc)_pa_mainloop_get_api;
+ pContext->pulse.pa_mainloop_iterate = (ma_proc)_pa_mainloop_iterate;
+ pContext->pulse.pa_mainloop_wakeup = (ma_proc)_pa_mainloop_wakeup;
+ pContext->pulse.pa_context_new = (ma_proc)_pa_context_new;
+ pContext->pulse.pa_context_unref = (ma_proc)_pa_context_unref;
+ pContext->pulse.pa_context_connect = (ma_proc)_pa_context_connect;
+ pContext->pulse.pa_context_disconnect = (ma_proc)_pa_context_disconnect;
+ pContext->pulse.pa_context_set_state_callback = (ma_proc)_pa_context_set_state_callback;
+ pContext->pulse.pa_context_get_state = (ma_proc)_pa_context_get_state;
+ pContext->pulse.pa_context_get_sink_info_list = (ma_proc)_pa_context_get_sink_info_list;
+ pContext->pulse.pa_context_get_source_info_list = (ma_proc)_pa_context_get_source_info_list;
+ pContext->pulse.pa_context_get_sink_info_by_name = (ma_proc)_pa_context_get_sink_info_by_name;
+ pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)_pa_context_get_source_info_by_name;
+ pContext->pulse.pa_operation_unref = (ma_proc)_pa_operation_unref;
+ pContext->pulse.pa_operation_get_state = (ma_proc)_pa_operation_get_state;
+ pContext->pulse.pa_channel_map_init_extend = (ma_proc)_pa_channel_map_init_extend;
+ pContext->pulse.pa_channel_map_valid = (ma_proc)_pa_channel_map_valid;
+ pContext->pulse.pa_channel_map_compatible = (ma_proc)_pa_channel_map_compatible;
+ pContext->pulse.pa_stream_new = (ma_proc)_pa_stream_new;
+ pContext->pulse.pa_stream_unref = (ma_proc)_pa_stream_unref;
+ pContext->pulse.pa_stream_connect_playback = (ma_proc)_pa_stream_connect_playback;
+ pContext->pulse.pa_stream_connect_record = (ma_proc)_pa_stream_connect_record;
+ pContext->pulse.pa_stream_disconnect = (ma_proc)_pa_stream_disconnect;
+ pContext->pulse.pa_stream_get_state = (ma_proc)_pa_stream_get_state;
+ pContext->pulse.pa_stream_get_sample_spec = (ma_proc)_pa_stream_get_sample_spec;
+ pContext->pulse.pa_stream_get_channel_map = (ma_proc)_pa_stream_get_channel_map;
+ pContext->pulse.pa_stream_get_buffer_attr = (ma_proc)_pa_stream_get_buffer_attr;
+ pContext->pulse.pa_stream_set_buffer_attr = (ma_proc)_pa_stream_set_buffer_attr;
+ pContext->pulse.pa_stream_get_device_name = (ma_proc)_pa_stream_get_device_name;
+ pContext->pulse.pa_stream_set_write_callback = (ma_proc)_pa_stream_set_write_callback;
+ pContext->pulse.pa_stream_set_read_callback = (ma_proc)_pa_stream_set_read_callback;
+ pContext->pulse.pa_stream_flush = (ma_proc)_pa_stream_flush;
+ pContext->pulse.pa_stream_drain = (ma_proc)_pa_stream_drain;
+ pContext->pulse.pa_stream_is_corked = (ma_proc)_pa_stream_is_corked;
+ pContext->pulse.pa_stream_cork = (ma_proc)_pa_stream_cork;
+ pContext->pulse.pa_stream_trigger = (ma_proc)_pa_stream_trigger;
+ pContext->pulse.pa_stream_begin_write = (ma_proc)_pa_stream_begin_write;
+ pContext->pulse.pa_stream_write = (ma_proc)_pa_stream_write;
+ pContext->pulse.pa_stream_peek = (ma_proc)_pa_stream_peek;
+ pContext->pulse.pa_stream_drop = (ma_proc)_pa_stream_drop;
+ pContext->pulse.pa_stream_writable_size = (ma_proc)_pa_stream_writable_size;
+ pContext->pulse.pa_stream_readable_size = (ma_proc)_pa_stream_readable_size;
+#endif
- return MA_FALSE;
-}
+ pContext->onUninit = ma_context_uninit__pulse;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__pulse;
+ pContext->onEnumDevices = ma_context_enumerate_devices__pulse;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__pulse;
+ pContext->onDeviceInit = ma_device_init__pulse;
+ pContext->onDeviceUninit = ma_device_uninit__pulse;
+ pContext->onDeviceStart = NULL;
+ pContext->onDeviceStop = NULL;
+ pContext->onDeviceMainLoop = ma_device_main_loop__pulse;
-ma_bool32 ma_is_capture_device_blacklisted__alsa(const char* name)
-{
- size_t iName;
- for (iName = 0; iName < ma_countof(g_maBlacklistedCaptureDeviceNamesALSA); ++iName) {
- if (ma_strcmp(name, g_maBlacklistedCaptureDeviceNamesALSA[iName]) == 0) {
- return MA_TRUE;
- }
+ if (pConfig->pulse.pApplicationName) {
+ pContext->pulse.pApplicationName = ma_copy_string(pConfig->pulse.pApplicationName, &pContext->allocationCallbacks);
}
+ if (pConfig->pulse.pServerName) {
+ pContext->pulse.pServerName = ma_copy_string(pConfig->pulse.pServerName, &pContext->allocationCallbacks);
+ }
+ pContext->pulse.tryAutoSpawn = pConfig->pulse.tryAutoSpawn;
+
+ /*
+ Although we have found the libpulse library, it doesn't necessarily mean PulseAudio is useable. We need to initialize
+ and connect a dummy PulseAudio context to test PulseAudio's usability.
+ */
+ {
+ ma_pa_mainloop* pMainLoop;
+ ma_pa_mainloop_api* pAPI;
+ ma_pa_context* pPulseContext;
+ int error;
- return MA_FALSE;
-}
-
-ma_bool32 ma_is_device_blacklisted__alsa(ma_device_type deviceType, const char* name)
-{
- if (deviceType == ma_device_type_playback) {
- return ma_is_playback_device_blacklisted__alsa(name);
- } else {
- return ma_is_capture_device_blacklisted__alsa(name);
- }
-}
+ pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)();
+ if (pMainLoop == NULL) {
+ ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks);
+ ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks);
+ #ifndef MA_NO_RUNTIME_LINKING
+ ma_dlclose(pContext, pContext->pulse.pulseSO);
+ #endif
+ return MA_NO_BACKEND;
+ }
+ pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)(pMainLoop);
+ if (pAPI == NULL) {
+ ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks);
+ ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks);
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ #ifndef MA_NO_RUNTIME_LINKING
+ ma_dlclose(pContext, pContext->pulse.pulseSO);
+ #endif
+ return MA_NO_BACKEND;
+ }
-const char* ma_find_char(const char* str, char c, int* index)
-{
- int i = 0;
- for (;;) {
- if (str[i] == '\0') {
- if (index) *index = -1;
- return NULL;
+ pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(pAPI, pContext->pulse.pApplicationName);
+ if (pPulseContext == NULL) {
+ ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks);
+ ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks);
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ #ifndef MA_NO_RUNTIME_LINKING
+ ma_dlclose(pContext, pContext->pulse.pulseSO);
+ #endif
+ return MA_NO_BACKEND;
}
- if (str[i] == c) {
- if (index) *index = i;
- return str + i;
+ error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)(pPulseContext, pContext->pulse.pServerName, 0, NULL);
+ if (error != MA_PA_OK) {
+ ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks);
+ ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks);
+ ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ #ifndef MA_NO_RUNTIME_LINKING
+ ma_dlclose(pContext, pContext->pulse.pulseSO);
+ #endif
+ return MA_NO_BACKEND;
}
- i += 1;
+ ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)(pPulseContext);
+ ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
+ ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
}
- /* Should never get here, but treat it as though the character was not found to make me feel better inside. */
- if (index) *index = -1;
- return NULL;
+ return MA_SUCCESS;
}
+#endif
-ma_bool32 ma_is_device_name_in_hw_format__alsa(const char* hwid)
-{
- /* This function is just checking whether or not hwid is in "hw:%d,%d" format. */
-
- int commaPos;
- const char* dev;
- int i;
-
- if (hwid == NULL) {
- return MA_FALSE;
- }
- if (hwid[0] != 'h' || hwid[1] != 'w' || hwid[2] != ':') {
- return MA_FALSE;
- }
+/******************************************************************************
- hwid += 3;
+JACK Backend
- dev = ma_find_char(hwid, ',', &commaPos);
- if (dev == NULL) {
- return MA_FALSE;
- } else {
- dev += 1; /* Skip past the ",". */
- }
+******************************************************************************/
+#ifdef MA_HAS_JACK
- /* Check if the part between the ":" and the "," contains only numbers. If not, return false. */
- for (i = 0; i < commaPos; ++i) {
- if (hwid[i] < '0' || hwid[i] > '9') {
- return MA_FALSE;
- }
- }
+/* It is assumed jack.h is available when compile-time linking is being used. */
+#ifdef MA_NO_RUNTIME_LINKING
+#include <jack/jack.h>
- /* Check if everything after the "," is numeric. If not, return false. */
- i = 0;
- while (dev[i] != '\0') {
- if (dev[i] < '0' || dev[i] > '9') {
- return MA_FALSE;
- }
- i += 1;
- }
+typedef jack_nframes_t ma_jack_nframes_t;
+typedef jack_options_t ma_jack_options_t;
+typedef jack_status_t ma_jack_status_t;
+typedef jack_client_t ma_jack_client_t;
+typedef jack_port_t ma_jack_port_t;
+typedef JackProcessCallback ma_JackProcessCallback;
+typedef JackBufferSizeCallback ma_JackBufferSizeCallback;
+typedef JackShutdownCallback ma_JackShutdownCallback;
+#define MA_JACK_DEFAULT_AUDIO_TYPE JACK_DEFAULT_AUDIO_TYPE
+#define ma_JackNoStartServer JackNoStartServer
+#define ma_JackPortIsInput JackPortIsInput
+#define ma_JackPortIsOutput JackPortIsOutput
+#define ma_JackPortIsPhysical JackPortIsPhysical
+#else
+typedef ma_uint32 ma_jack_nframes_t;
+typedef int ma_jack_options_t;
+typedef int ma_jack_status_t;
+typedef struct ma_jack_client_t ma_jack_client_t;
+typedef struct ma_jack_port_t ma_jack_port_t;
+typedef int (* ma_JackProcessCallback) (ma_jack_nframes_t nframes, void* arg);
+typedef int (* ma_JackBufferSizeCallback)(ma_jack_nframes_t nframes, void* arg);
+typedef void (* ma_JackShutdownCallback) (void* arg);
+#define MA_JACK_DEFAULT_AUDIO_TYPE "32 bit float mono audio"
+#define ma_JackNoStartServer 1
+#define ma_JackPortIsInput 1
+#define ma_JackPortIsOutput 2
+#define ma_JackPortIsPhysical 4
+#endif
- return MA_TRUE;
-}
+typedef ma_jack_client_t* (* ma_jack_client_open_proc) (const char* client_name, ma_jack_options_t options, ma_jack_status_t* status, ...);
+typedef int (* ma_jack_client_close_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_client_name_size_proc) ();
+typedef int (* ma_jack_set_process_callback_proc) (ma_jack_client_t* client, ma_JackProcessCallback process_callback, void* arg);
+typedef int (* ma_jack_set_buffer_size_callback_proc)(ma_jack_client_t* client, ma_JackBufferSizeCallback bufsize_callback, void* arg);
+typedef void (* ma_jack_on_shutdown_proc) (ma_jack_client_t* client, ma_JackShutdownCallback function, void* arg);
+typedef ma_jack_nframes_t (* ma_jack_get_sample_rate_proc) (ma_jack_client_t* client);
+typedef ma_jack_nframes_t (* ma_jack_get_buffer_size_proc) (ma_jack_client_t* client);
+typedef const char** (* ma_jack_get_ports_proc) (ma_jack_client_t* client, const char* port_name_pattern, const char* type_name_pattern, unsigned long flags);
+typedef int (* ma_jack_activate_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_deactivate_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_connect_proc) (ma_jack_client_t* client, const char* source_port, const char* destination_port);
+typedef ma_jack_port_t* (* ma_jack_port_register_proc) (ma_jack_client_t* client, const char* port_name, const char* port_type, unsigned long flags, unsigned long buffer_size);
+typedef const char* (* ma_jack_port_name_proc) (const ma_jack_port_t* port);
+typedef void* (* ma_jack_port_get_buffer_proc) (ma_jack_port_t* port, ma_jack_nframes_t nframes);
+typedef void (* ma_jack_free_proc) (void* ptr);
-int ma_convert_device_name_to_hw_format__alsa(ma_context* pContext, char* dst, size_t dstSize, const char* src) /* Returns 0 on success, non-0 on error. */
+static ma_result ma_context_open_client__jack(ma_context* pContext, ma_jack_client_t** ppClient)
{
- /* src should look something like this: "hw:CARD=I82801AAICH,DEV=0" */
+ size_t maxClientNameSize;
+ char clientName[256];
+ ma_jack_status_t status;
+ ma_jack_client_t* pClient;
- int colonPos;
- int commaPos;
- char card[256];
- const char* dev;
- int cardIndex;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(ppClient != NULL);
- if (dst == NULL) {
- return -1;
- }
- if (dstSize < 7) {
- return -1; /* Absolute minimum size of the output buffer is 7 bytes. */
+ if (ppClient) {
+ *ppClient = NULL;
}
- *dst = '\0'; /* Safety. */
- if (src == NULL) {
- return -1;
- }
+ maxClientNameSize = ((ma_jack_client_name_size_proc)pContext->jack.jack_client_name_size)(); /* Includes null terminator. */
+ ma_strncpy_s(clientName, ma_min(sizeof(clientName), maxClientNameSize), (pContext->jack.pClientName != NULL) ? pContext->jack.pClientName : "miniaudio", (size_t)-1);
- /* If the input name is already in "hw:%d,%d" format, just return that verbatim. */
- if (ma_is_device_name_in_hw_format__alsa(src)) {
- return ma_strcpy_s(dst, dstSize, src);
+ pClient = ((ma_jack_client_open_proc)pContext->jack.jack_client_open)(clientName, (pContext->jack.tryStartServer) ? 0 : ma_JackNoStartServer, &status, NULL);
+ if (pClient == NULL) {
+ return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
}
- src = ma_find_char(src, ':', &colonPos);
- if (src == NULL) {
- return -1; /* Couldn't find a colon */
+ if (ppClient) {
+ *ppClient = pClient;
}
- dev = ma_find_char(src, ',', &commaPos);
- if (dev == NULL) {
- dev = "0";
- ma_strncpy_s(card, sizeof(card), src+6, (size_t)-1); /* +6 = ":CARD=" */
- } else {
- dev = dev + 5; /* +5 = ",DEV=" */
- ma_strncpy_s(card, sizeof(card), src+6, commaPos-6); /* +6 = ":CARD=" */
- }
+ return MA_SUCCESS;
+}
- cardIndex = ((ma_snd_card_get_index_proc)pContext->alsa.snd_card_get_index)(card);
- if (cardIndex < 0) {
- return -2; /* Failed to retrieve the card index. */
- }
+static ma_bool32 ma_context_is_device_id_equal__jack(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext;
- /*printf("TESTING: CARD=%s,DEV=%s\n", card, dev); */
+ return pID0->jack == pID1->jack;
+}
+
+static ma_result ma_context_enumerate_devices__jack(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ ma_bool32 cbResult = MA_TRUE;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
- /* Construction. */
- dst[0] = 'h'; dst[1] = 'w'; dst[2] = ':';
- if (ma_itoa_s(cardIndex, dst+3, dstSize-3, 10) != 0) {
- return -3;
- }
- if (ma_strcat_s(dst, dstSize, ",") != 0) {
- return -3;
+ /* Playback. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
}
- if (ma_strcat_s(dst, dstSize, dev) != 0) {
- return -3;
+
+ /* Capture. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
}
- return 0;
+ return MA_SUCCESS;
}
-ma_bool32 ma_does_id_exist_in_list__alsa(ma_device_id* pUniqueIDs, ma_uint32 count, const char* pHWID)
+static ma_result ma_context_get_device_info__jack(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
{
- ma_uint32 i;
+ ma_jack_client_t* pClient;
+ ma_result result;
+ const char** ppPorts;
- ma_assert(pHWID != NULL);
+ MA_ASSERT(pContext != NULL);
- for (i = 0; i < count; ++i) {
- if (ma_strcmp(pUniqueIDs[i].alsa, pHWID) == 0) {
- return MA_TRUE;
- }
+ /* No exclusive mode with the JACK backend. */
+ if (shareMode == ma_share_mode_exclusive) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
}
- return MA_FALSE;
-}
-
+ if (pDeviceID != NULL && pDeviceID->jack != 0) {
+ return MA_NO_DEVICE; /* Don't know the device. */
+ }
-ma_result ma_context_open_pcm__alsa(ma_context* pContext, ma_share_mode shareMode, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_snd_pcm_t** ppPCM)
-{
- ma_snd_pcm_t* pPCM;
- ma_snd_pcm_stream_t stream;
- int openMode;
+ /* Name / Description */
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ }
- ma_assert(pContext != NULL);
- ma_assert(ppPCM != NULL);
+ /* Jack only supports f32 and has a specific channel count and sample rate. */
+ pDeviceInfo->formatCount = 1;
+ pDeviceInfo->formats[0] = ma_format_f32;
- *ppPCM = NULL;
- pPCM = NULL;
+ /* The channel count and sample rate can only be determined by opening the device. */
+ result = ma_context_open_client__jack(pContext, &pClient);
+ if (result != MA_SUCCESS) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client.", result);
+ }
- stream = (deviceType == ma_device_type_playback) ? MA_SND_PCM_STREAM_PLAYBACK : MA_SND_PCM_STREAM_CAPTURE;
- openMode = MA_SND_PCM_NO_AUTO_RESAMPLE | MA_SND_PCM_NO_AUTO_CHANNELS | MA_SND_PCM_NO_AUTO_FORMAT;
+ pDeviceInfo->minSampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pClient);
+ pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate;
- if (pDeviceID == NULL) {
- ma_bool32 isDeviceOpen;
- size_t i;
+ pDeviceInfo->minChannels = 0;
+ pDeviceInfo->maxChannels = 0;
- /*
- We're opening the default device. I don't know if trying anything other than "default" is necessary, but it makes
- me feel better to try as hard as we can get to get _something_ working.
- */
- const char* defaultDeviceNames[] = {
- "default",
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL
- };
+ ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ((deviceType == ma_device_type_playback) ? ma_JackPortIsInput : ma_JackPortIsOutput));
+ if (ppPorts == NULL) {
+ ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ }
- if (shareMode == ma_share_mode_exclusive) {
- defaultDeviceNames[1] = "hw";
- defaultDeviceNames[2] = "hw:0";
- defaultDeviceNames[3] = "hw:0,0";
- } else {
- if (deviceType == ma_device_type_playback) {
- defaultDeviceNames[1] = "dmix";
- defaultDeviceNames[2] = "dmix:0";
- defaultDeviceNames[3] = "dmix:0,0";
- } else {
- defaultDeviceNames[1] = "dsnoop";
- defaultDeviceNames[2] = "dsnoop:0";
- defaultDeviceNames[3] = "dsnoop:0,0";
- }
- defaultDeviceNames[4] = "hw";
- defaultDeviceNames[5] = "hw:0";
- defaultDeviceNames[6] = "hw:0,0";
- }
+ while (ppPorts[pDeviceInfo->minChannels] != NULL) {
+ pDeviceInfo->minChannels += 1;
+ pDeviceInfo->maxChannels += 1;
+ }
- isDeviceOpen = MA_FALSE;
- for (i = 0; i < ma_countof(defaultDeviceNames); ++i) {
- if (defaultDeviceNames[i] != NULL && defaultDeviceNames[i][0] != '\0') {
- if (((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, defaultDeviceNames[i], stream, openMode) == 0) {
- isDeviceOpen = MA_TRUE;
- break;
- }
- }
- }
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
+ ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient);
- if (!isDeviceOpen) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed when trying to open an appropriate default device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
- } else {
- /*
- We're trying to open a specific device. There's a few things to consider here:
-
- miniaudio recongnizes a special format of device id that excludes the "hw", "dmix", etc. prefix. It looks like this: ":0,0", ":0,1", etc. When
- an ID of this format is specified, it indicates to miniaudio that it can try different combinations of plugins ("hw", "dmix", etc.) until it
- finds an appropriate one that works. This comes in very handy when trying to open a device in shared mode ("dmix"), vs exclusive mode ("hw").
- */
+ (void)pContext;
+ return MA_SUCCESS;
+}
- /* May end up needing to make small adjustments to the ID, so make a copy. */
- ma_device_id deviceID = *pDeviceID;
- ma_bool32 isDeviceOpen = MA_FALSE;
- if (deviceID.alsa[0] != ':') {
- /* The ID is not in ":0,0" format. Use the ID exactly as-is. */
- if (((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, deviceID.alsa, stream, openMode) == 0) {
- isDeviceOpen = MA_TRUE;
- }
- } else {
- char hwid[256];
+static void ma_device_uninit__jack(ma_device* pDevice)
+{
+ ma_context* pContext;
- /* The ID is in ":0,0" format. Try different plugins depending on the shared mode. */
- if (deviceID.alsa[1] == '\0') {
- deviceID.alsa[0] = '\0'; /* An ID of ":" should be converted to "". */
- }
+ MA_ASSERT(pDevice != NULL);
- if (shareMode == ma_share_mode_shared) {
- if (deviceType == ma_device_type_playback) {
- ma_strcpy_s(hwid, sizeof(hwid), "dmix");
- } else {
- ma_strcpy_s(hwid, sizeof(hwid), "dsnoop");
- }
+ pContext = pDevice->pContext;
+ MA_ASSERT(pContext != NULL);
- if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) {
- if (((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode) == 0) {
- isDeviceOpen = MA_TRUE;
- }
- }
- }
+ if (pDevice->jack.pClient != NULL) {
+ ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDevice->jack.pClient);
+ }
- /* If at this point we still don't have an open device it means we're either preferencing exclusive mode or opening with "dmix"/"dsnoop" failed. */
- if (!isDeviceOpen) {
- ma_strcpy_s(hwid, sizeof(hwid), "hw");
- if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) {
- if (((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode) == 0) {
- isDeviceOpen = MA_TRUE;
- }
- }
- }
- }
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks);
+ }
- if (!isDeviceOpen) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferPlayback, &pDevice->pContext->allocationCallbacks);
}
- *ppPCM = pPCM;
- return MA_SUCCESS;
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_pcm_rb_uninit(&pDevice->jack.duplexRB);
+ }
}
-
-ma_bool32 ma_context_is_device_id_equal__alsa(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+static void ma_device__jack_shutdown_callback(void* pUserData)
{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
+ /* JACK died. Stop the device. */
+ ma_device* pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
- return ma_strcmp(pID0->alsa, pID1->alsa) == 0;
+ ma_device_stop(pDevice);
}
-ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+static int ma_device__jack_buffer_size_callback(ma_jack_nframes_t frameCount, void* pUserData)
{
- ma_bool32 cbResult = MA_TRUE;
- char** ppDeviceHints;
- ma_device_id* pUniqueIDs = NULL;
- ma_uint32 uniqueIDCount = 0;
- char** ppNextDeviceHint;
+ ma_device* pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        size_t newBufferSize = frameCount * (pDevice->capture.internalChannels * ma_get_bytes_per_sample(pDevice->capture.internalFormat));
+        float* pNewBuffer = (float*)ma__calloc_from_callbacks(newBufferSize, &pDevice->pContext->allocationCallbacks);
+        if (pNewBuffer == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
-    ma_mutex_lock(&pContext->alsa.internalDeviceEnumLock);
+        ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks);
-    if (((ma_snd_device_name_hint_proc)pContext->alsa.snd_device_name_hint)(-1, "pcm", (void***)&ppDeviceHints) < 0) {
-        ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock);
-        return MA_NO_BACKEND;
+        pDevice->jack.pIntermediaryBufferCapture = pNewBuffer;
+        pDevice->capture.internalPeriodSizeInFrames = frameCount; /* Fix: was playback.internalPeriodSizeInFrames — copy/paste from the playback branch below. */
    }
- ppNextDeviceHint = ppDeviceHints;
- while (*ppNextDeviceHint != NULL) {
- char* NAME = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "NAME");
- char* DESC = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "DESC");
- char* IOID = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "IOID");
- ma_device_type deviceType = ma_device_type_playback;
- ma_bool32 stopEnumeration = MA_FALSE;
- char hwid[sizeof(pUniqueIDs->alsa)];
- ma_device_info deviceInfo;
-
- if ((IOID == NULL || ma_strcmp(IOID, "Output") == 0)) {
- deviceType = ma_device_type_playback;
- }
- if ((IOID != NULL && ma_strcmp(IOID, "Input" ) == 0)) {
- deviceType = ma_device_type_capture;
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ size_t newBufferSize = frameCount * (pDevice->playback.internalChannels * ma_get_bytes_per_sample(pDevice->playback.internalFormat));
+ float* pNewBuffer = (float*)ma__calloc_from_callbacks(newBufferSize, &pDevice->pContext->allocationCallbacks);
+ if (pNewBuffer == NULL) {
+ return MA_OUT_OF_MEMORY;
}
- if (NAME != NULL) {
- if (pContext->alsa.useVerboseDeviceEnumeration) {
- /* Verbose mode. Use the name exactly as-is. */
- ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1);
- } else {
- /* Simplified mode. Use ":%d,%d" format. */
- if (ma_convert_device_name_to_hw_format__alsa(pContext, hwid, sizeof(hwid), NAME) == 0) {
- /*
- At this point, hwid looks like "hw:0,0". In simplified enumeration mode, we actually want to strip off the
- plugin name so it looks like ":0,0". The reason for this is that this special format is detected at device
- initialization time and is used as an indicator to try and use the most appropriate plugin depending on the
- device type and sharing mode.
- */
- char* dst = hwid;
- char* src = hwid+2;
- while ((*dst++ = *src++));
- } else {
- /* Conversion to "hw:%d,%d" failed. Just use the name as-is. */
- ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1);
- }
-
- if (ma_does_id_exist_in_list__alsa(pUniqueIDs, uniqueIDCount, hwid)) {
- goto next_device; /* The device has already been enumerated. Move on to the next one. */
- } else {
- /* The device has not yet been enumerated. Make sure it's added to our list so that it's not enumerated again. */
- ma_device_id* pNewUniqueIDs = (ma_device_id*)ma_realloc(pUniqueIDs, sizeof(*pUniqueIDs) * (uniqueIDCount + 1));
- if (pNewUniqueIDs == NULL) {
- goto next_device; /* Failed to allocate memory. */
- }
+ ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferPlayback, &pDevice->pContext->allocationCallbacks);
- pUniqueIDs = pNewUniqueIDs;
- ma_copy_memory(pUniqueIDs[uniqueIDCount].alsa, hwid, sizeof(hwid));
- uniqueIDCount += 1;
- }
- }
- } else {
- ma_zero_memory(hwid, sizeof(hwid));
- }
+ pDevice->jack.pIntermediaryBufferPlayback = pNewBuffer;
+ pDevice->playback.internalPeriodSizeInFrames = frameCount;
+ }
- ma_zero_object(&deviceInfo);
- ma_strncpy_s(deviceInfo.id.alsa, sizeof(deviceInfo.id.alsa), hwid, (size_t)-1);
+ return 0;
+}
- /*
- DESC is the friendly name. We treat this slightly differently depending on whether or not we are using verbose
- device enumeration. In verbose mode we want to take the entire description so that the end-user can distinguish
- between the subdevices of each card/dev pair. In simplified mode, however, we only want the first part of the
- description.
-
- The value in DESC seems to be split into two lines, with the first line being the name of the device and the
- second line being a description of the device. I don't like having the description be across two lines because
- it makes formatting ugly and annoying. I'm therefore deciding to put it all on a single line with the second line
- being put into parentheses. In simplified mode I'm just stripping the second line entirely.
- */
- if (DESC != NULL) {
- int lfPos;
- const char* line2 = ma_find_char(DESC, '\n', &lfPos);
- if (line2 != NULL) {
- line2 += 1; /* Skip past the new-line character. */
+static int ma_device__jack_process_callback(ma_jack_nframes_t frameCount, void* pUserData)
+{
+ ma_device* pDevice;
+ ma_context* pContext;
+ ma_uint32 iChannel;
- if (pContext->alsa.useVerboseDeviceEnumeration) {
- /* Verbose mode. Put the second line in brackets. */
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos);
- ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), " (");
- ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), line2);
- ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), ")");
- } else {
- /* Simplified mode. Strip the second line entirely. */
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos);
- }
- } else {
- /* There's no second line. Just copy the whole description. */
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, (size_t)-1);
- }
- }
+ pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
- if (!ma_is_device_blacklisted__alsa(deviceType, NAME)) {
- cbResult = callback(pContext, deviceType, &deviceInfo, pUserData);
- }
+ pContext = pDevice->pContext;
+ MA_ASSERT(pContext != NULL);
- /*
- Some devices are both playback and capture, but they are only enumerated by ALSA once. We need to fire the callback
- again for the other device type in this case. We do this for known devices.
- */
- if (cbResult) {
- if (ma_is_common_device_name__alsa(NAME)) {
- if (deviceType == ma_device_type_playback) {
- if (!ma_is_capture_device_blacklisted__alsa(NAME)) {
- cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
- }
- } else {
- if (!ma_is_playback_device_blacklisted__alsa(NAME)) {
- cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
- }
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ /* Channels need to be interleaved. */
+ for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) {
+ const float* pSrc = (const float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.pPortsCapture[iChannel], frameCount);
+ if (pSrc != NULL) {
+ float* pDst = pDevice->jack.pIntermediaryBufferCapture + iChannel;
+ ma_jack_nframes_t iFrame;
+ for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+ *pDst = *pSrc;
+
+ pDst += pDevice->capture.internalChannels;
+ pSrc += 1;
}
}
}
- if (cbResult == MA_FALSE) {
- stopEnumeration = MA_TRUE;
- }
-
- next_device:
- free(NAME);
- free(DESC);
- free(IOID);
- ppNextDeviceHint += 1;
-
- /* We need to stop enumeration if the callback returned false. */
- if (stopEnumeration) {
- break;
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_device__handle_duplex_callback_capture(pDevice, frameCount, pDevice->jack.pIntermediaryBufferCapture, &pDevice->jack.duplexRB);
+ } else {
+ ma_device__send_frames_to_client(pDevice, frameCount, pDevice->jack.pIntermediaryBufferCapture);
}
}
- ma_free(pUniqueIDs);
- ((ma_snd_device_name_free_hint_proc)pContext->alsa.snd_device_name_free_hint)((void**)ppDeviceHints);
-
- ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock);
-
- return MA_SUCCESS;
-}
-
-
-typedef struct
-{
- ma_device_type deviceType;
- const ma_device_id* pDeviceID;
- ma_share_mode shareMode;
- ma_device_info* pDeviceInfo;
- ma_bool32 foundDevice;
-} ma_context_get_device_info_enum_callback_data__alsa;
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_device__handle_duplex_callback_playback(pDevice, frameCount, pDevice->jack.pIntermediaryBufferPlayback, &pDevice->jack.duplexRB);
+ } else {
+ ma_device__read_frames_from_client(pDevice, frameCount, pDevice->jack.pIntermediaryBufferPlayback);
+ }
-ma_bool32 ma_context_get_device_info_enum_callback__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData)
-{
- ma_context_get_device_info_enum_callback_data__alsa* pData = (ma_context_get_device_info_enum_callback_data__alsa*)pUserData;
- ma_assert(pData != NULL);
+ /* Channels need to be deinterleaved. */
+ for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) {
+ float* pDst = (float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.pPortsPlayback[iChannel], frameCount);
+ if (pDst != NULL) {
+ const float* pSrc = pDevice->jack.pIntermediaryBufferPlayback + iChannel;
+ ma_jack_nframes_t iFrame;
+ for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+ *pDst = *pSrc;
- if (pData->pDeviceID == NULL && ma_strcmp(pDeviceInfo->id.alsa, "default") == 0) {
- ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1);
- pData->foundDevice = MA_TRUE;
- } else {
- if (pData->deviceType == deviceType && ma_context_is_device_id_equal__alsa(pContext, pData->pDeviceID, &pDeviceInfo->id)) {
- ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1);
- pData->foundDevice = MA_TRUE;
+ pDst += 1;
+ pSrc += pDevice->playback.internalChannels;
+ }
+ }
}
}
- /* Keep enumerating until we have found the device. */
- return !pData->foundDevice;
+ return 0;
}
-ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+static ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
{
- ma_context_get_device_info_enum_callback_data__alsa data;
ma_result result;
- ma_snd_pcm_t* pPCM;
- ma_snd_pcm_hw_params_t* pHWParams;
- ma_snd_pcm_format_mask_t* pFormatMask;
- int sampleRateDir = 0;
+ ma_uint32 periods;
+ ma_uint32 periodSizeInFrames;
- ma_assert(pContext != NULL);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(pDevice != NULL);
- /* We just enumerate to find basic information about the device. */
- data.deviceType = deviceType;
- data.pDeviceID = pDeviceID;
- data.shareMode = shareMode;
- data.pDeviceInfo = pDeviceInfo;
- data.foundDevice = MA_FALSE;
- result = ma_context_enumerate_devices__alsa(pContext, ma_context_get_device_info_enum_callback__alsa, &data);
- if (result != MA_SUCCESS) {
- return result;
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
}
- if (!data.foundDevice) {
+ /* Only supporting default devices with JACK. */
+ if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID != NULL && pConfig->playback.pDeviceID->jack != 0) ||
+ ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.pDeviceID != NULL && pConfig->capture.pDeviceID->jack != 0)) {
return MA_NO_DEVICE;
}
- /* For detailed info we need to open the device. */
- result = ma_context_open_pcm__alsa(pContext, shareMode, deviceType, pDeviceID, &pPCM);
- if (result != MA_SUCCESS) {
- return result;
+ /* No exclusive mode with the JACK backend. */
+ if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
+ ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
}
- /* We need to initialize a HW parameters object in order to know what formats are supported. */
- pHWParams = (ma_snd_pcm_hw_params_t*)calloc(1, ((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)());
- if (pHWParams == NULL) {
- return MA_OUT_OF_MEMORY;
+ /* Open the client. */
+ result = ma_context_open_client__jack(pContext, (ma_jack_client_t**)&pDevice->jack.pClient);
+ if (result != MA_SUCCESS) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client.", result);
}
- if (((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams) < 0) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. snd_pcm_hw_params_any() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE);
+ /* Callbacks. */
+ if (((ma_jack_set_process_callback_proc)pContext->jack.jack_set_process_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_process_callback, pDevice) != 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to set process callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ }
+ if (((ma_jack_set_buffer_size_callback_proc)pContext->jack.jack_set_buffer_size_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_buffer_size_callback, pDevice) != 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to set buffer size callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
}
- ((ma_snd_pcm_hw_params_get_channels_min_proc)pContext->alsa.snd_pcm_hw_params_get_channels_min)(pHWParams, &pDeviceInfo->minChannels);
- ((ma_snd_pcm_hw_params_get_channels_max_proc)pContext->alsa.snd_pcm_hw_params_get_channels_max)(pHWParams, &pDeviceInfo->maxChannels);
- ((ma_snd_pcm_hw_params_get_rate_min_proc)pContext->alsa.snd_pcm_hw_params_get_rate_min)(pHWParams, &pDeviceInfo->minSampleRate, &sampleRateDir);
- ((ma_snd_pcm_hw_params_get_rate_max_proc)pContext->alsa.snd_pcm_hw_params_get_rate_max)(pHWParams, &pDeviceInfo->maxSampleRate, &sampleRateDir);
+ ((ma_jack_on_shutdown_proc)pContext->jack.jack_on_shutdown)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_shutdown_callback, pDevice);
- /* Formats. */
- pFormatMask = (ma_snd_pcm_format_mask_t*)calloc(1, ((ma_snd_pcm_format_mask_sizeof_proc)pContext->alsa.snd_pcm_format_mask_sizeof)());
- if (pFormatMask == NULL) {
- return MA_OUT_OF_MEMORY;
- }
- ((ma_snd_pcm_hw_params_get_format_mask_proc)pContext->alsa.snd_pcm_hw_params_get_format_mask)(pHWParams, pFormatMask);
+ /* The buffer size in frames can change. */
+ periods = pConfig->periods;
+ periodSizeInFrames = ((ma_jack_get_buffer_size_proc)pContext->jack.jack_get_buffer_size)((ma_jack_client_t*)pDevice->jack.pClient);
+
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ const char** ppPorts;
- pDeviceInfo->formatCount = 0;
- if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_U8)) {
- pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_u8;
- }
- if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_S16_LE)) {
- pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s16;
- }
- if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_S24_3LE)) {
- pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s24;
- }
- if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_S32_LE)) {
- pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s32;
+ pDevice->capture.internalFormat = ma_format_f32;
+ pDevice->capture.internalChannels = 0;
+ pDevice->capture.internalSampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient);
+ ma_get_standard_channel_map(ma_standard_channel_map_alsa, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
+
+ ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput);
+ if (ppPorts == NULL) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ }
+
+ while (ppPorts[pDevice->capture.internalChannels] != NULL) {
+ char name[64];
+ ma_strcpy_s(name, sizeof(name), "capture");
+ ma_itoa_s((int)pDevice->capture.internalChannels, name+7, sizeof(name)-7, 10); /* 7 = length of "capture" */
+
+ pDevice->jack.pPortsCapture[pDevice->capture.internalChannels] = ((ma_jack_port_register_proc)pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsInput, 0);
+ if (pDevice->jack.pPortsCapture[pDevice->capture.internalChannels] == NULL) {
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
+ ma_device_uninit__jack(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ }
+
+ pDevice->capture.internalChannels += 1;
+ }
+
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
+
+ pDevice->capture.internalPeriodSizeInFrames = periodSizeInFrames;
+ pDevice->capture.internalPeriods = periods;
+
+ pDevice->jack.pIntermediaryBufferCapture = (float*)ma__calloc_from_callbacks(pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels), &pContext->allocationCallbacks);
+ if (pDevice->jack.pIntermediaryBufferCapture == NULL) {
+ ma_device_uninit__jack(pDevice);
+ return MA_OUT_OF_MEMORY;
+ }
}
- if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, MA_SND_PCM_FORMAT_FLOAT_LE)) {
- pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_f32;
+
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ const char** ppPorts;
+
+ pDevice->playback.internalFormat = ma_format_f32;
+ pDevice->playback.internalChannels = 0;
+ pDevice->playback.internalSampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient);
+ ma_get_standard_channel_map(ma_standard_channel_map_alsa, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
+
+ ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput);
+ if (ppPorts == NULL) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ }
+
+ while (ppPorts[pDevice->playback.internalChannels] != NULL) {
+ char name[64];
+ ma_strcpy_s(name, sizeof(name), "playback");
+ ma_itoa_s((int)pDevice->playback.internalChannels, name+8, sizeof(name)-8, 10); /* 8 = length of "playback" */
+
+ pDevice->jack.pPortsPlayback[pDevice->playback.internalChannels] = ((ma_jack_port_register_proc)pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsOutput, 0);
+ if (pDevice->jack.pPortsPlayback[pDevice->playback.internalChannels] == NULL) {
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
+ ma_device_uninit__jack(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ }
+
+ pDevice->playback.internalChannels += 1;
+ }
+
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
+
+ pDevice->playback.internalPeriodSizeInFrames = periodSizeInFrames;
+ pDevice->playback.internalPeriods = periods;
+
+ pDevice->jack.pIntermediaryBufferPlayback = (float*)ma__calloc_from_callbacks(pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels), &pContext->allocationCallbacks);
+ if (pDevice->jack.pIntermediaryBufferPlayback == NULL) {
+ ma_device_uninit__jack(pDevice);
+ return MA_OUT_OF_MEMORY;
+ }
}
- ma_free(pFormatMask);
- ma_free(pHWParams);
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames * pDevice->capture.internalPeriods);
+ result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->jack.duplexRB);
+ if (result != MA_SUCCESS) {
+ ma_device_uninit__jack(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to initialize ring buffer.", result);
+ }
+
+        /* We need a period to act as a buffer for cases where the playback and capture devices end up desyncing. */
+ {
+ ma_uint32 marginSizeInFrames = rbSizeInFrames / pDevice->capture.internalPeriods;
+ void* pMarginData;
+ ma_pcm_rb_acquire_write(&pDevice->jack.duplexRB, &marginSizeInFrames, &pMarginData);
+ {
+ MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels));
+ }
+ ma_pcm_rb_commit_write(&pDevice->jack.duplexRB, marginSizeInFrames, pMarginData);
+ }
+ }
- ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM);
return MA_SUCCESS;
}
-#if 0
-/*
-Waits for a number of frames to become available for either capture or playback. The return
-value is the number of frames available.
-
-This will return early if the main loop is broken with ma_device__break_main_loop().
-*/
-ma_uint32 ma_device__wait_for_frames__alsa(ma_device* pDevice, ma_bool32* pRequiresRestart)
+static ma_result ma_device_start__jack(ma_device* pDevice)
{
- ma_assert(pDevice != NULL);
-
- if (pRequiresRestart) *pRequiresRestart = MA_FALSE;
+ ma_context* pContext = pDevice->pContext;
+ int resultJACK;
+ size_t i;
- /* I want it so that this function returns the period size in frames. We just wait until that number of frames are available and then return. */
- ma_uint32 periodSizeInFrames = pDevice->bufferSizeInFrames / pDevice->periods;
- while (!pDevice->alsa.breakFromMainLoop) {
- ma_snd_pcm_sframes_t framesAvailable = ((ma_snd_pcm_avail_update_proc)pDevice->pContext->alsa.snd_pcm_avail_update)((ma_snd_pcm_t*)pDevice->alsa.pPCM);
- if (framesAvailable < 0) {
- if (framesAvailable == -EPIPE) {
- if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, framesAvailable, MA_TRUE) < 0) {
- return 0;
- }
+ resultJACK = ((ma_jack_activate_proc)pContext->jack.jack_activate)((ma_jack_client_t*)pDevice->jack.pClient);
+ if (resultJACK != 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to activate the JACK client.", MA_FAILED_TO_START_BACKEND_DEVICE);
+ }
- /* A device recovery means a restart for mmap mode. */
- if (pRequiresRestart) {
- *pRequiresRestart = MA_TRUE;
- }
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput);
+ if (ppServerPorts == NULL) {
+ ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports.", MA_ERROR);
+ }
- /* Try again, but if it fails this time just return an error. */
- framesAvailable = ((ma_snd_pcm_avail_update_proc)pDevice->pContext->alsa.snd_pcm_avail_update)((ma_snd_pcm_t*)pDevice->alsa.pPCM);
- if (framesAvailable < 0) {
- return 0;
- }
+ for (i = 0; ppServerPorts[i] != NULL; ++i) {
+ const char* pServerPort = ppServerPorts[i];
+ const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.pPortsCapture[i]);
+
+ resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pServerPort, pClientPort);
+ if (resultJACK != 0) {
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
+ ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports.", MA_ERROR);
}
}
- if (framesAvailable >= periodSizeInFrames) {
- return periodSizeInFrames;
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput);
+ if (ppServerPorts == NULL) {
+ ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports.", MA_ERROR);
}
- if (framesAvailable < periodSizeInFrames) {
- /* Less than a whole period is available so keep waiting. */
- int waitResult = ((ma_snd_pcm_wait_proc)pDevice->pContext->alsa.snd_pcm_wait)((ma_snd_pcm_t*)pDevice->alsa.pPCM, -1);
- if (waitResult < 0) {
- if (waitResult == -EPIPE) {
- if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, waitResult, MA_TRUE) < 0) {
- return 0;
- }
+ for (i = 0; ppServerPorts[i] != NULL; ++i) {
+ const char* pServerPort = ppServerPorts[i];
+ const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.pPortsPlayback[i]);
- /* A device recovery means a restart for mmap mode. */
- if (pRequiresRestart) {
- *pRequiresRestart = MA_TRUE;
- }
- }
+ resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pClientPort, pServerPort);
+ if (resultJACK != 0) {
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
+ ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports.", MA_ERROR);
}
}
- }
- /* We'll get here if the loop was terminated. Just return whatever's available. */
- ma_snd_pcm_sframes_t framesAvailable = ((ma_snd_pcm_avail_update_proc)pDevice->pContext->alsa.snd_pcm_avail_update)((ma_snd_pcm_t*)pDevice->alsa.pPCM);
- if (framesAvailable < 0) {
- return 0;
+ ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
}
- return framesAvailable;
+ return MA_SUCCESS;
}
-ma_bool32 ma_device_read_from_client_and_write__alsa(ma_device* pDevice)
+static ma_result ma_device_stop__jack(ma_device* pDevice)
{
- ma_assert(pDevice != NULL);
- if (!ma_device_is_started(pDevice) && ma_device__get_state(pDevice) != MA_STATE_STARTING) {
- return MA_FALSE;
+ ma_context* pContext = pDevice->pContext;
+ ma_stop_proc onStop;
+
+ if (((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient) != 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] An error occurred when deactivating the JACK client.", MA_ERROR);
}
- if (pDevice->alsa.breakFromMainLoop) {
- return MA_FALSE;
+
+ onStop = pDevice->onStop;
+ if (onStop) {
+ onStop(pDevice);
}
- if (pDevice->alsa.isUsingMMap) {
- /* mmap. */
- ma_bool32 requiresRestart;
- ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, &requiresRestart);
- if (framesAvailable == 0) {
- return MA_FALSE;
- }
-
- /* Don't bother asking the client for more audio data if we're just stopping the device anyway. */
- if (pDevice->alsa.breakFromMainLoop) {
- return MA_FALSE;
- }
-
- const ma_snd_pcm_channel_area_t* pAreas;
- ma_snd_pcm_uframes_t mappedOffset;
- ma_snd_pcm_uframes_t mappedFrames = framesAvailable;
- while (framesAvailable > 0) {
- int result = ((ma_snd_pcm_mmap_begin_proc)pDevice->pContext->alsa.snd_pcm_mmap_begin)((ma_snd_pcm_t*)pDevice->alsa.pPCM, &pAreas, &mappedOffset, &mappedFrames);
- if (result < 0) {
- return MA_FALSE;
- }
-
- if (mappedFrames > 0) {
- void* pBuffer = (ma_uint8*)pAreas[0].addr + ((pAreas[0].first + (mappedOffset * pAreas[0].step)) / 8);
- ma_device__read_frames_from_client(pDevice, mappedFrames, pBuffer);
- }
-
- result = ((ma_snd_pcm_mmap_commit_proc)pDevice->pContext->alsa.snd_pcm_mmap_commit)((ma_snd_pcm_t*)pDevice->alsa.pPCM, mappedOffset, mappedFrames);
- if (result < 0 || (ma_snd_pcm_uframes_t)result != mappedFrames) {
- ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, result, MA_TRUE);
- return MA_FALSE;
- }
+ return MA_SUCCESS;
+}
- if (requiresRestart) {
- if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) {
- return MA_FALSE;
- }
- }
- if (framesAvailable >= mappedFrames) {
- framesAvailable -= mappedFrames;
- } else {
- framesAvailable = 0;
- }
- }
- } else {
- /* readi/writei. */
- while (!pDevice->alsa.breakFromMainLoop) {
- ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, NULL);
- if (framesAvailable == 0) {
- continue;
- }
+static ma_result ma_context_uninit__jack(ma_context* pContext)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_jack);
- /* Don't bother asking the client for more audio data if we're just stopping the device anyway. */
- if (pDevice->alsa.breakFromMainLoop) {
- return MA_FALSE;
- }
+ ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks);
+ pContext->jack.pClientName = NULL;
- ma_device__read_frames_from_client(pDevice, framesAvailable, pDevice->alsa.pIntermediaryBuffer);
+#ifndef MA_NO_RUNTIME_LINKING
+ ma_dlclose(pContext, pContext->jack.jackSO);
+#endif
- ma_snd_pcm_sframes_t framesWritten = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable);
- if (framesWritten < 0) {
- if (framesWritten == -EAGAIN) {
- continue; /* Just keep trying... */
- } else if (framesWritten == -EPIPE) {
- /* Underrun. Just recover and try writing again. */
- if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, framesWritten, MA_TRUE) < 0) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after underrun.", MA_FAILED_TO_START_BACKEND_DEVICE);
- return MA_FALSE;
- }
+ return MA_SUCCESS;
+}
- framesWritten = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable);
- if (framesWritten < 0) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to write data to the internal device.", MA_FAILED_TO_SEND_DATA_TO_DEVICE);
- return MA_FALSE;
- }
+static ma_result ma_context_init__jack(const ma_context_config* pConfig, ma_context* pContext)
+{
+#ifndef MA_NO_RUNTIME_LINKING
+ const char* libjackNames[] = {
+#ifdef MA_WIN32
+ "libjack.dll"
+#else
+ "libjack.so",
+ "libjack.so.0"
+#endif
+ };
+ size_t i;
- break; /* Success. */
- } else {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_writei() failed when writing initial data.", MA_FAILED_TO_SEND_DATA_TO_DEVICE);
- return MA_FALSE;
- }
- } else {
- break; /* Success. */
- }
+ for (i = 0; i < ma_countof(libjackNames); ++i) {
+ pContext->jack.jackSO = ma_dlopen(pContext, libjackNames[i]);
+ if (pContext->jack.jackSO != NULL) {
+ break;
}
}
- return MA_TRUE;
-}
-
-ma_bool32 ma_device_read_and_send_to_client__alsa(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
- if (!ma_device_is_started(pDevice)) {
- return MA_FALSE;
- }
- if (pDevice->alsa.breakFromMainLoop) {
- return MA_FALSE;
+ if (pContext->jack.jackSO == NULL) {
+ return MA_NO_BACKEND;
}
- ma_uint32 framesToSend = 0;
- void* pBuffer = NULL;
- if (pDevice->alsa.pIntermediaryBuffer == NULL) {
- /* mmap. */
- ma_bool32 requiresRestart;
- ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, &requiresRestart);
- if (framesAvailable == 0) {
- return MA_FALSE;
- }
+ pContext->jack.jack_client_open = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_open");
+ pContext->jack.jack_client_close = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_close");
+ pContext->jack.jack_client_name_size = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_name_size");
+ pContext->jack.jack_set_process_callback = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_set_process_callback");
+ pContext->jack.jack_set_buffer_size_callback = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_set_buffer_size_callback");
+ pContext->jack.jack_on_shutdown = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_on_shutdown");
+ pContext->jack.jack_get_sample_rate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_sample_rate");
+ pContext->jack.jack_get_buffer_size = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_buffer_size");
+ pContext->jack.jack_get_ports = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_ports");
+ pContext->jack.jack_activate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_activate");
+ pContext->jack.jack_deactivate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_deactivate");
+ pContext->jack.jack_connect = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_connect");
+ pContext->jack.jack_port_register = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_register");
+ pContext->jack.jack_port_name = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_name");
+ pContext->jack.jack_port_get_buffer = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_get_buffer");
+ pContext->jack.jack_free = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_free");
+#else
+ /*
+ This strange assignment system is here just to ensure type safety of miniaudio's function pointer
+ types. If anything differs slightly the compiler should throw a warning.
+ */
+ ma_jack_client_open_proc _jack_client_open = jack_client_open;
+ ma_jack_client_close_proc _jack_client_close = jack_client_close;
+ ma_jack_client_name_size_proc _jack_client_name_size = jack_client_name_size;
+ ma_jack_set_process_callback_proc _jack_set_process_callback = jack_set_process_callback;
+ ma_jack_set_buffer_size_callback_proc _jack_set_buffer_size_callback = jack_set_buffer_size_callback;
+ ma_jack_on_shutdown_proc _jack_on_shutdown = jack_on_shutdown;
+ ma_jack_get_sample_rate_proc _jack_get_sample_rate = jack_get_sample_rate;
+ ma_jack_get_buffer_size_proc _jack_get_buffer_size = jack_get_buffer_size;
+ ma_jack_get_ports_proc _jack_get_ports = jack_get_ports;
+ ma_jack_activate_proc _jack_activate = jack_activate;
+ ma_jack_deactivate_proc _jack_deactivate = jack_deactivate;
+ ma_jack_connect_proc _jack_connect = jack_connect;
+ ma_jack_port_register_proc _jack_port_register = jack_port_register;
+ ma_jack_port_name_proc _jack_port_name = jack_port_name;
+ ma_jack_port_get_buffer_proc _jack_port_get_buffer = jack_port_get_buffer;
+ ma_jack_free_proc _jack_free = jack_free;
- const ma_snd_pcm_channel_area_t* pAreas;
- ma_snd_pcm_uframes_t mappedOffset;
- ma_snd_pcm_uframes_t mappedFrames = framesAvailable;
- while (framesAvailable > 0) {
- int result = ((ma_snd_pcm_mmap_begin_proc)pDevice->pContext->alsa.snd_pcm_mmap_begin)((ma_snd_pcm_t*)pDevice->alsa.pPCM, &pAreas, &mappedOffset, &mappedFrames);
- if (result < 0) {
- return MA_FALSE;
- }
+ pContext->jack.jack_client_open = (ma_proc)_jack_client_open;
+ pContext->jack.jack_client_close = (ma_proc)_jack_client_close;
+ pContext->jack.jack_client_name_size = (ma_proc)_jack_client_name_size;
+ pContext->jack.jack_set_process_callback = (ma_proc)_jack_set_process_callback;
+ pContext->jack.jack_set_buffer_size_callback = (ma_proc)_jack_set_buffer_size_callback;
+ pContext->jack.jack_on_shutdown = (ma_proc)_jack_on_shutdown;
+ pContext->jack.jack_get_sample_rate = (ma_proc)_jack_get_sample_rate;
+ pContext->jack.jack_get_buffer_size = (ma_proc)_jack_get_buffer_size;
+ pContext->jack.jack_get_ports = (ma_proc)_jack_get_ports;
+ pContext->jack.jack_activate = (ma_proc)_jack_activate;
+ pContext->jack.jack_deactivate = (ma_proc)_jack_deactivate;
+ pContext->jack.jack_connect = (ma_proc)_jack_connect;
+ pContext->jack.jack_port_register = (ma_proc)_jack_port_register;
+ pContext->jack.jack_port_name = (ma_proc)_jack_port_name;
+ pContext->jack.jack_port_get_buffer = (ma_proc)_jack_port_get_buffer;
+ pContext->jack.jack_free = (ma_proc)_jack_free;
+#endif
- if (mappedFrames > 0) {
- void* pBuffer = (ma_uint8*)pAreas[0].addr + ((pAreas[0].first + (mappedOffset * pAreas[0].step)) / 8);
- ma_device__send_frames_to_client(pDevice, mappedFrames, pBuffer);
- }
+ pContext->isBackendAsynchronous = MA_TRUE;
- result = ((ma_snd_pcm_mmap_commit_proc)pDevice->pContext->alsa.snd_pcm_mmap_commit)((ma_snd_pcm_t*)pDevice->alsa.pPCM, mappedOffset, mappedFrames);
- if (result < 0 || (ma_snd_pcm_uframes_t)result != mappedFrames) {
- ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, result, MA_TRUE);
- return MA_FALSE;
- }
+ pContext->onUninit = ma_context_uninit__jack;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__jack;
+ pContext->onEnumDevices = ma_context_enumerate_devices__jack;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__jack;
+ pContext->onDeviceInit = ma_device_init__jack;
+ pContext->onDeviceUninit = ma_device_uninit__jack;
+ pContext->onDeviceStart = ma_device_start__jack;
+ pContext->onDeviceStop = ma_device_stop__jack;
- if (requiresRestart) {
- if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) {
- return MA_FALSE;
- }
- }
+ if (pConfig->jack.pClientName != NULL) {
+ pContext->jack.pClientName = ma_copy_string(pConfig->jack.pClientName, &pContext->allocationCallbacks);
+ }
+ pContext->jack.tryStartServer = pConfig->jack.tryStartServer;
- if (framesAvailable >= mappedFrames) {
- framesAvailable -= mappedFrames;
- } else {
- framesAvailable = 0;
- }
+ /*
+ Getting here means the JACK library is installed, but it doesn't necessarily mean it's usable. We need to quickly test this by connecting
+ a temporary client.
+ */
+ {
+ ma_jack_client_t* pDummyClient;
+ ma_result result = ma_context_open_client__jack(pContext, &pDummyClient);
+ if (result != MA_SUCCESS) {
+ ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks);
+ #ifndef MA_NO_RUNTIME_LINKING
+ ma_dlclose(pContext, pContext->jack.jackSO);
+ #endif
+ return MA_NO_BACKEND;
}
- } else {
- /* readi/writei. */
- ma_snd_pcm_sframes_t framesRead = 0;
- while (!pDevice->alsa.breakFromMainLoop) {
- ma_uint32 framesAvailable = ma_device__wait_for_frames__alsa(pDevice, NULL);
- if (framesAvailable == 0) {
- continue;
- }
- framesRead = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable);
- if (framesRead < 0) {
- if (framesRead == -EAGAIN) {
- continue; /* Just keep trying... */
- } else if (framesRead == -EPIPE) {
- /* Overrun. Just recover and try reading again. */
- if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCM, framesRead, MA_TRUE) < 0) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after overrun.", MA_FAILED_TO_START_BACKEND_DEVICE);
- return MA_FALSE;
- }
+ ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDummyClient);
+ }
- framesRead = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCM, pDevice->alsa.pIntermediaryBuffer, framesAvailable);
- if (framesRead < 0) {
- ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to read data from the internal device.", MA_FAILED_TO_READ_DATA_FROM_DEVICE);
- return MA_FALSE;
- }
+ return MA_SUCCESS;
+}
+#endif /* JACK */
- break; /* Success. */
- } else {
- return MA_FALSE;
- }
- } else {
- break; /* Success. */
- }
- }
- framesToSend = framesRead;
- pBuffer = pDevice->alsa.pIntermediaryBuffer;
- }
- if (framesToSend > 0) {
- ma_device__send_frames_to_client(pDevice, framesToSend, pBuffer);
- }
+/******************************************************************************
- return MA_TRUE;
-}
-#endif /* 0 */
+Core Audio Backend
-void ma_device_uninit__alsa(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
+******************************************************************************/
+#ifdef MA_HAS_COREAUDIO
+#include <TargetConditionals.h>
- if ((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) {
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
- }
+#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE == 1
+ #define MA_APPLE_MOBILE
+ #if defined(TARGET_OS_TV) && TARGET_OS_TV == 1
+ #define MA_APPLE_TV
+ #endif
+ #if defined(TARGET_OS_WATCH) && TARGET_OS_WATCH == 1
+ #define MA_APPLE_WATCH
+ #endif
+#else
+ #define MA_APPLE_DESKTOP
+#endif
- if ((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) {
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
- }
-}
+#if defined(MA_APPLE_DESKTOP)
+#include <CoreAudio/CoreAudio.h>
+#else
+#include <AVFoundation/AVFoundation.h>
+#endif
-ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice)
-{
- ma_result result;
- ma_snd_pcm_t* pPCM;
- ma_bool32 isUsingMMap;
- ma_snd_pcm_format_t formatALSA;
- ma_share_mode shareMode;
- ma_device_id* pDeviceID;
- ma_format internalFormat;
- ma_uint32 internalChannels;
- ma_uint32 internalSampleRate;
- ma_channel internalChannelMap[MA_MAX_CHANNELS];
- ma_uint32 internalBufferSizeInFrames;
- ma_uint32 internalPeriods;
- ma_snd_pcm_hw_params_t* pHWParams;
- ma_snd_pcm_sw_params_t* pSWParams;
- ma_snd_pcm_uframes_t bufferBoundary;
- float bufferSizeScaleFactor;
+#include <AudioToolbox/AudioToolbox.h>
- ma_assert(pContext != NULL);
- ma_assert(pConfig != NULL);
- ma_assert(deviceType != ma_device_type_duplex); /* This function should only be called for playback _or_ capture, never duplex. */
- ma_assert(pDevice != NULL);
+/* CoreFoundation */
+typedef Boolean (* ma_CFStringGetCString_proc)(CFStringRef theString, char* buffer, CFIndex bufferSize, CFStringEncoding encoding);
+typedef void (* ma_CFRelease_proc)(CFTypeRef cf);
- formatALSA = ma_convert_ma_format_to_alsa_format((deviceType == ma_device_type_capture) ? pConfig->capture.format : pConfig->playback.format);
- shareMode = (deviceType == ma_device_type_capture) ? pConfig->capture.shareMode : pConfig->playback.shareMode;
- pDeviceID = (deviceType == ma_device_type_capture) ? pConfig->capture.pDeviceID : pConfig->playback.pDeviceID;
+/* CoreAudio */
+#if defined(MA_APPLE_DESKTOP)
+typedef OSStatus (* ma_AudioObjectGetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* ioDataSize, void* outData);
+typedef OSStatus (* ma_AudioObjectGetPropertyDataSize_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* outDataSize);
+typedef OSStatus (* ma_AudioObjectSetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32 inDataSize, const void* inData);
+typedef OSStatus (* ma_AudioObjectAddPropertyListener_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, AudioObjectPropertyListenerProc inListener, void* inClientData);
+typedef OSStatus (* ma_AudioObjectRemovePropertyListener_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, AudioObjectPropertyListenerProc inListener, void* inClientData);
+#endif
- result = ma_context_open_pcm__alsa(pContext, shareMode, deviceType, pDeviceID, &pPCM);
- if (result != MA_SUCCESS) {
- return result;
- }
+/* AudioToolbox */
+typedef AudioComponent (* ma_AudioComponentFindNext_proc)(AudioComponent inComponent, const AudioComponentDescription* inDesc);
+typedef OSStatus (* ma_AudioComponentInstanceDispose_proc)(AudioComponentInstance inInstance);
+typedef OSStatus (* ma_AudioComponentInstanceNew_proc)(AudioComponent inComponent, AudioComponentInstance* outInstance);
+typedef OSStatus (* ma_AudioOutputUnitStart_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioOutputUnitStop_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioUnitAddPropertyListener_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitPropertyListenerProc inProc, void* inProcUserData);
+typedef OSStatus (* ma_AudioUnitGetPropertyInfo_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, UInt32* outDataSize, Boolean* outWriteable);
+typedef OSStatus (* ma_AudioUnitGetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, void* outData, UInt32* ioDataSize);
+typedef OSStatus (* ma_AudioUnitSetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, const void* inData, UInt32 inDataSize);
+typedef OSStatus (* ma_AudioUnitInitialize_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioUnitRender_proc)(AudioUnit inUnit, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inOutputBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData);
- /* If using the default buffer size we may want to apply some device-specific scaling for known devices that have peculiar latency characteristics */
- bufferSizeScaleFactor = 1;
- if (pDevice->usingDefaultBufferSize) {
- ma_snd_pcm_info_t* pInfo = (ma_snd_pcm_info_t*)calloc(1, ((ma_snd_pcm_info_sizeof_proc)pContext->alsa.snd_pcm_info_sizeof)());
- if (pInfo == NULL) {
- return MA_OUT_OF_MEMORY;
- }
- /* We may need to scale the size of the buffer depending on the device. */
- if (((ma_snd_pcm_info_proc)pContext->alsa.snd_pcm_info)(pPCM, pInfo) == 0) {
- const char* deviceName = ((ma_snd_pcm_info_get_name_proc)pContext->alsa.snd_pcm_info_get_name)(pInfo);
- if (deviceName != NULL) {
- if (ma_strcmp(deviceName, "default") == 0) {
- char** ppDeviceHints;
- char** ppNextDeviceHint;
+#define MA_COREAUDIO_OUTPUT_BUS 0
+#define MA_COREAUDIO_INPUT_BUS 1
- /* It's the default device. We need to use DESC from snd_device_name_hint(). */
- if (((ma_snd_device_name_hint_proc)pContext->alsa.snd_device_name_hint)(-1, "pcm", (void***)&ppDeviceHints) < 0) {
- ma_free(pInfo);
- return MA_NO_BACKEND;
- }
+#if defined(MA_APPLE_DESKTOP)
+static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit);
+#endif
- ppNextDeviceHint = ppDeviceHints;
- while (*ppNextDeviceHint != NULL) {
- char* NAME = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "NAME");
- char* DESC = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "DESC");
- char* IOID = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "IOID");
+/*
+Core Audio
- ma_bool32 foundDevice = MA_FALSE;
- if ((deviceType == ma_device_type_playback && (IOID == NULL || ma_strcmp(IOID, "Output") == 0)) ||
- (deviceType == ma_device_type_capture && (IOID != NULL && ma_strcmp(IOID, "Input" ) == 0))) {
- if (ma_strcmp(NAME, deviceName) == 0) {
- bufferSizeScaleFactor = ma_find_default_buffer_size_scale__alsa(DESC);
- foundDevice = MA_TRUE;
- }
- }
+So far, Core Audio has been the worst backend to work with due to being both unintuitive and having almost no documentation
+apart from comments in the headers (which admittedly are quite good). For my own purposes, and for anybody out there who's
+needing to figure out how this darn thing works, I'm going to outline a few things here.
- free(NAME);
- free(DESC);
- free(IOID);
- ppNextDeviceHint += 1;
+Since miniaudio is a fairly low-level API, one of the things it needs is control over specific devices, and it needs to be
+able to identify whether or not it can be used as playback and/or capture. The AudioObject API is the only one I've seen
+that supports this level of detail. There was some public domain sample code I stumbled across that used the AudioComponent
+and AudioUnit APIs, but I couldn't see anything that gave low-level control over device selection and capabilities (the
+distinction between playback and capture in particular). Therefore, miniaudio is using the AudioObject API.
- if (foundDevice) {
- break;
- }
- }
+Most (all?) functions in the AudioObject API take an AudioObjectID as its input. This is the device identifier. When
+retrieving global information, such as the device list, you use kAudioObjectSystemObject. When retrieving device-specific
+data, you pass in the ID for that device. In order to retrieve device-specific IDs you need to enumerate over each of the
+devices. This is done using the AudioObjectGetPropertyDataSize() and AudioObjectGetPropertyData() APIs which seem to be
+the central APIs for retrieving information about the system and specific devices.
- ((ma_snd_device_name_free_hint_proc)pContext->alsa.snd_device_name_free_hint)((void**)ppDeviceHints);
- } else {
- bufferSizeScaleFactor = ma_find_default_buffer_size_scale__alsa(deviceName);
- }
- }
- }
+To use the AudioObjectGetPropertyData() API you need to use the notion of a property address. A property address is a
+structure with three variables and is used to identify which property you are getting or setting. The first is the "selector"
+which is basically the specific property that you're wanting to retrieve or set. The second is the "scope", which is
+typically set to kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyScopeInput for input-specific properties and
+kAudioObjectPropertyScopeOutput for output-specific properties. The last is the "element" which is always set to
+kAudioObjectPropertyElementMaster in miniaudio's case. I don't know of any cases where this would be set to anything different.
- ma_free(pInfo);
- }
+Back to the earlier issue of device retrieval, you first use the AudioObjectGetPropertyDataSize() API to retrieve the size
+of the raw data which is just a list of AudioDeviceID's. You use the kAudioObjectSystemObject AudioObjectID, and a property
+address with the kAudioHardwarePropertyDevices selector and the kAudioObjectPropertyScopeGlobal scope. Once you have the
+size, allocate a block of memory of that size and then call AudioObjectGetPropertyData(). The data is just a list of
+AudioDeviceID's so just do "dataSize/sizeof(AudioDeviceID)" to know the device count.
+*/
+static ma_result ma_result_from_OSStatus(OSStatus status)
+{
+ switch (status)
+ {
+ case noErr: return MA_SUCCESS;
+ #if defined(MA_APPLE_DESKTOP)
+ case kAudioHardwareNotRunningError: return MA_DEVICE_NOT_STARTED;
+ case kAudioHardwareUnspecifiedError: return MA_ERROR;
+ case kAudioHardwareUnknownPropertyError: return MA_INVALID_ARGS;
+ case kAudioHardwareBadPropertySizeError: return MA_INVALID_OPERATION;
+ case kAudioHardwareIllegalOperationError: return MA_INVALID_OPERATION;
+ case kAudioHardwareBadObjectError: return MA_INVALID_ARGS;
+ case kAudioHardwareBadDeviceError: return MA_INVALID_ARGS;
+ case kAudioHardwareBadStreamError: return MA_INVALID_ARGS;
+ case kAudioHardwareUnsupportedOperationError: return MA_INVALID_OPERATION;
+ case kAudioDeviceUnsupportedFormatError: return MA_FORMAT_NOT_SUPPORTED;
+ case kAudioDevicePermissionsError: return MA_ACCESS_DENIED;
+ #endif
+ default: return MA_ERROR;
+ }
+}
- /* Hardware parameters. */
- pHWParams = (ma_snd_pcm_hw_params_t*)calloc(1, ((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)());
- if (pHWParams == NULL) {
- return MA_OUT_OF_MEMORY;
+#if 0
+static ma_channel ma_channel_from_AudioChannelBitmap(AudioChannelBitmap bit)
+{
+ switch (bit)
+ {
+ case kAudioChannelBit_Left: return MA_CHANNEL_LEFT;
+ case kAudioChannelBit_Right: return MA_CHANNEL_RIGHT;
+ case kAudioChannelBit_Center: return MA_CHANNEL_FRONT_CENTER;
+ case kAudioChannelBit_LFEScreen: return MA_CHANNEL_LFE;
+ case kAudioChannelBit_LeftSurround: return MA_CHANNEL_BACK_LEFT;
+ case kAudioChannelBit_RightSurround: return MA_CHANNEL_BACK_RIGHT;
+ case kAudioChannelBit_LeftCenter: return MA_CHANNEL_FRONT_LEFT_CENTER;
+ case kAudioChannelBit_RightCenter: return MA_CHANNEL_FRONT_RIGHT_CENTER;
+ case kAudioChannelBit_CenterSurround: return MA_CHANNEL_BACK_CENTER;
+ case kAudioChannelBit_LeftSurroundDirect: return MA_CHANNEL_SIDE_LEFT;
+ case kAudioChannelBit_RightSurroundDirect: return MA_CHANNEL_SIDE_RIGHT;
+ case kAudioChannelBit_TopCenterSurround: return MA_CHANNEL_TOP_CENTER;
+ case kAudioChannelBit_VerticalHeightLeft: return MA_CHANNEL_TOP_FRONT_LEFT;
+ case kAudioChannelBit_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER;
+ case kAudioChannelBit_VerticalHeightRight: return MA_CHANNEL_TOP_FRONT_RIGHT;
+ case kAudioChannelBit_TopBackLeft: return MA_CHANNEL_TOP_BACK_LEFT;
+ case kAudioChannelBit_TopBackCenter: return MA_CHANNEL_TOP_BACK_CENTER;
+ case kAudioChannelBit_TopBackRight: return MA_CHANNEL_TOP_BACK_RIGHT;
+ default: return MA_CHANNEL_NONE;
}
+}
+#endif
- if (((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams) < 0) {
- ma_free(pHWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. snd_pcm_hw_params_any() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE);
+static ma_result ma_format_from_AudioStreamBasicDescription(const AudioStreamBasicDescription* pDescription, ma_format* pFormatOut)
+{
+ MA_ASSERT(pDescription != NULL);
+ MA_ASSERT(pFormatOut != NULL);
+
+ *pFormatOut = ma_format_unknown; /* Safety. */
+
+    /* There are a few things miniaudio doesn't support. */
+ if (pDescription->mFormatID != kAudioFormatLinearPCM) {
+ return MA_FORMAT_NOT_SUPPORTED;
}
-
- /* MMAP Mode. Try using interleaved MMAP access. If this fails, fall back to standard readi/writei. */
- isUsingMMap = MA_FALSE;
-#if 0 /* NOTE: MMAP mode temporarily disabled. */
- if (deviceType != ma_device_type_capture) { /* <-- Disabling MMAP mode for capture devices because I apparently do not have a device that supports it which means I can't test it... Contributions welcome. */
- if (!pConfig->alsa.noMMap && ma_device__is_async(pDevice)) {
- if (((ma_snd_pcm_hw_params_set_access_proc)pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_MMAP_INTERLEAVED) == 0) {
- pDevice->alsa.isUsingMMap = MA_TRUE;
- }
- }
+
+ /* We don't support any non-packed formats that are aligned high. */
+ if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsAlignedHigh) != 0) {
+ return MA_FORMAT_NOT_SUPPORTED;
}
-#endif
- if (!isUsingMMap) {
- if (((ma_snd_pcm_hw_params_set_access_proc)pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_RW_INTERLEAVED) < 0) {
- ma_free(pHWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set access mode to neither SND_PCM_ACCESS_MMAP_INTERLEAVED nor SND_PCM_ACCESS_RW_INTERLEAVED. snd_pcm_hw_params_set_access() failed.", MA_FORMAT_NOT_SUPPORTED);
- }
+ /* Only supporting native-endian. */
+ if ((ma_is_little_endian() && (pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) != 0) || (ma_is_big_endian() && (pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) == 0)) {
+ return MA_FORMAT_NOT_SUPPORTED;
}
+
+ /* We are not currently supporting non-interleaved formats (this will be added in a future version of miniaudio). */
+ /*if ((pDescription->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0) {
+ return MA_FORMAT_NOT_SUPPORTED;
+ }*/
- /*
- Most important properties first. The documentation for OSS (yes, I know this is ALSA!) recommends format, channels, then sample rate. I can't
- find any documentation for ALSA specifically, so I'm going to copy the recommendation for OSS.
- */
-
- /* Format. */
- {
- ma_snd_pcm_format_mask_t* pFormatMask;
-
- /* Try getting every supported format first. */
- pFormatMask = (ma_snd_pcm_format_mask_t*)calloc(1, ((ma_snd_pcm_format_mask_sizeof_proc)pContext->alsa.snd_pcm_format_mask_sizeof)());
- if (pFormatMask == NULL) {
- ma_free(pHWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return MA_OUT_OF_MEMORY;
+ if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsFloat) != 0) {
+ if (pDescription->mBitsPerChannel == 32) {
+ *pFormatOut = ma_format_f32;
+ return MA_SUCCESS;
}
-
- ((ma_snd_pcm_hw_params_get_format_mask_proc)pContext->alsa.snd_pcm_hw_params_get_format_mask)(pHWParams, pFormatMask);
-
- /*
- At this point we should have a list of supported formats, so now we need to find the best one. We first check if the requested format is
- supported, and if so, use that one. If it's not supported, we just run though a list of formats and try to find the best one.
- */
- if (!((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, formatALSA)) {
- size_t i;
-
- /* The requested format is not supported so now try running through the list of formats and return the best one. */
- ma_snd_pcm_format_t preferredFormatsALSA[] = {
- MA_SND_PCM_FORMAT_S16_LE, /* ma_format_s16 */
- MA_SND_PCM_FORMAT_FLOAT_LE, /* ma_format_f32 */
- MA_SND_PCM_FORMAT_S32_LE, /* ma_format_s32 */
- MA_SND_PCM_FORMAT_S24_3LE, /* ma_format_s24 */
- MA_SND_PCM_FORMAT_U8 /* ma_format_u8 */
- };
-
- if (ma_is_big_endian()) {
- preferredFormatsALSA[0] = MA_SND_PCM_FORMAT_S16_BE;
- preferredFormatsALSA[1] = MA_SND_PCM_FORMAT_FLOAT_BE;
- preferredFormatsALSA[2] = MA_SND_PCM_FORMAT_S32_BE;
- preferredFormatsALSA[3] = MA_SND_PCM_FORMAT_S24_3BE;
- preferredFormatsALSA[4] = MA_SND_PCM_FORMAT_U8;
- }
-
- formatALSA = MA_SND_PCM_FORMAT_UNKNOWN;
- for (i = 0; i < (sizeof(preferredFormatsALSA) / sizeof(preferredFormatsALSA[0])); ++i) {
- if (((ma_snd_pcm_format_mask_test_proc)pContext->alsa.snd_pcm_format_mask_test)(pFormatMask, preferredFormatsALSA[i])) {
- formatALSA = preferredFormatsALSA[i];
- break;
+ } else {
+ if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsSignedInteger) != 0) {
+ if (pDescription->mBitsPerChannel == 16) {
+ *pFormatOut = ma_format_s16;
+ return MA_SUCCESS;
+ } else if (pDescription->mBitsPerChannel == 24) {
+ if (pDescription->mBytesPerFrame == (pDescription->mBitsPerChannel/8 * pDescription->mChannelsPerFrame)) {
+ *pFormatOut = ma_format_s24;
+ return MA_SUCCESS;
+ } else {
+ if (pDescription->mBytesPerFrame/pDescription->mChannelsPerFrame == sizeof(ma_int32)) {
+ /* TODO: Implement ma_format_s24_32. */
+ /**pFormatOut = ma_format_s24_32;*/
+ /*return MA_SUCCESS;*/
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
}
+ } else if (pDescription->mBitsPerChannel == 32) {
+ *pFormatOut = ma_format_s32;
+ return MA_SUCCESS;
}
-
- if (formatALSA == MA_SND_PCM_FORMAT_UNKNOWN) {
- ma_free(pHWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. The device does not support any miniaudio formats.", MA_FORMAT_NOT_SUPPORTED);
+ } else {
+ if (pDescription->mBitsPerChannel == 8) {
+ *pFormatOut = ma_format_u8;
+ return MA_SUCCESS;
}
}
-
- ma_free(pFormatMask);
- pFormatMask = NULL;
-
- if (((ma_snd_pcm_hw_params_set_format_proc)pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, formatALSA) < 0) {
- ma_free(pHWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. snd_pcm_hw_params_set_format() failed.", MA_FORMAT_NOT_SUPPORTED);
- }
-
- internalFormat = ma_format_from_alsa(formatALSA);
- if (internalFormat == ma_format_unknown) {
- ma_free(pHWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] The chosen format is not supported by miniaudio.", MA_FORMAT_NOT_SUPPORTED);
- }
- }
-
- /* Channels. */
- {
- unsigned int channels = (deviceType == ma_device_type_capture) ? pConfig->capture.channels : pConfig->playback.channels;
- if (((ma_snd_pcm_hw_params_set_channels_near_proc)pContext->alsa.snd_pcm_hw_params_set_channels_near)(pPCM, pHWParams, &channels) < 0) {
- ma_free(pHWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set channel count. snd_pcm_hw_params_set_channels_near() failed.", MA_FORMAT_NOT_SUPPORTED);
- }
- internalChannels = (ma_uint32)channels;
}
+
+ /* Getting here means the format is not supported. */
+ return MA_FORMAT_NOT_SUPPORTED;
+}
- /* Sample Rate */
+#if defined(MA_APPLE_DESKTOP)
+static ma_channel ma_channel_from_AudioChannelLabel(AudioChannelLabel label)
+{
+ switch (label)
{
- unsigned int sampleRate;
-
- /*
- It appears there's either a bug in ALSA, a bug in some drivers, or I'm doing something silly; but having resampling enabled causes
- problems with some device configurations when used in conjunction with MMAP access mode. To fix this problem we need to disable
- resampling.
-
- To reproduce this problem, open the "plug:dmix" device, and set the sample rate to 44100. Internally, it looks like dmix uses a
- sample rate of 48000. The hardware parameters will get set correctly with no errors, but it looks like the 44100 -> 48000 resampling
- doesn't work properly - but only with MMAP access mode. You will notice skipping/crackling in the audio, and it'll run at a slightly
- faster rate.
-
- miniaudio has built-in support for sample rate conversion (albeit low quality at the moment), so disabling resampling should be fine
- for us. The only problem is that it won't be taking advantage of any kind of hardware-accelerated resampling and it won't be very
- good quality until I get a chance to improve the quality of miniaudio's software sample rate conversion.
+ case kAudioChannelLabel_Unknown: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_Unused: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_UseCoordinates: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_Left: return MA_CHANNEL_LEFT;
+ case kAudioChannelLabel_Right: return MA_CHANNEL_RIGHT;
+ case kAudioChannelLabel_Center: return MA_CHANNEL_FRONT_CENTER;
+ case kAudioChannelLabel_LFEScreen: return MA_CHANNEL_LFE;
+ case kAudioChannelLabel_LeftSurround: return MA_CHANNEL_BACK_LEFT;
+ case kAudioChannelLabel_RightSurround: return MA_CHANNEL_BACK_RIGHT;
+ case kAudioChannelLabel_LeftCenter: return MA_CHANNEL_FRONT_LEFT_CENTER;
+ case kAudioChannelLabel_RightCenter: return MA_CHANNEL_FRONT_RIGHT_CENTER;
+ case kAudioChannelLabel_CenterSurround: return MA_CHANNEL_BACK_CENTER;
+ case kAudioChannelLabel_LeftSurroundDirect: return MA_CHANNEL_SIDE_LEFT;
+ case kAudioChannelLabel_RightSurroundDirect: return MA_CHANNEL_SIDE_RIGHT;
+ case kAudioChannelLabel_TopCenterSurround: return MA_CHANNEL_TOP_CENTER;
+ case kAudioChannelLabel_VerticalHeightLeft: return MA_CHANNEL_TOP_FRONT_LEFT;
+ case kAudioChannelLabel_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER;
+ case kAudioChannelLabel_VerticalHeightRight: return MA_CHANNEL_TOP_FRONT_RIGHT;
+ case kAudioChannelLabel_TopBackLeft: return MA_CHANNEL_TOP_BACK_LEFT;
+ case kAudioChannelLabel_TopBackCenter: return MA_CHANNEL_TOP_BACK_CENTER;
+ case kAudioChannelLabel_TopBackRight: return MA_CHANNEL_TOP_BACK_RIGHT;
+ case kAudioChannelLabel_RearSurroundLeft: return MA_CHANNEL_BACK_LEFT;
+ case kAudioChannelLabel_RearSurroundRight: return MA_CHANNEL_BACK_RIGHT;
+ case kAudioChannelLabel_LeftWide: return MA_CHANNEL_SIDE_LEFT;
+ case kAudioChannelLabel_RightWide: return MA_CHANNEL_SIDE_RIGHT;
+ case kAudioChannelLabel_LFE2: return MA_CHANNEL_LFE;
+ case kAudioChannelLabel_LeftTotal: return MA_CHANNEL_LEFT;
+ case kAudioChannelLabel_RightTotal: return MA_CHANNEL_RIGHT;
+ case kAudioChannelLabel_HearingImpaired: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_Narration: return MA_CHANNEL_MONO;
+ case kAudioChannelLabel_Mono: return MA_CHANNEL_MONO;
+ case kAudioChannelLabel_DialogCentricMix: return MA_CHANNEL_MONO;
+ case kAudioChannelLabel_CenterSurroundDirect: return MA_CHANNEL_BACK_CENTER;
+ case kAudioChannelLabel_Haptic: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_Ambisonic_W: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_Ambisonic_X: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_Ambisonic_Y: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_Ambisonic_Z: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_MS_Mid: return MA_CHANNEL_LEFT;
+ case kAudioChannelLabel_MS_Side: return MA_CHANNEL_RIGHT;
+ case kAudioChannelLabel_XY_X: return MA_CHANNEL_LEFT;
+ case kAudioChannelLabel_XY_Y: return MA_CHANNEL_RIGHT;
+ case kAudioChannelLabel_HeadphonesLeft: return MA_CHANNEL_LEFT;
+ case kAudioChannelLabel_HeadphonesRight: return MA_CHANNEL_RIGHT;
+ case kAudioChannelLabel_ClickTrack: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_ForeignLanguage: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_Discrete: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_Discrete_0: return MA_CHANNEL_AUX_0;
+ case kAudioChannelLabel_Discrete_1: return MA_CHANNEL_AUX_1;
+ case kAudioChannelLabel_Discrete_2: return MA_CHANNEL_AUX_2;
+ case kAudioChannelLabel_Discrete_3: return MA_CHANNEL_AUX_3;
+ case kAudioChannelLabel_Discrete_4: return MA_CHANNEL_AUX_4;
+ case kAudioChannelLabel_Discrete_5: return MA_CHANNEL_AUX_5;
+ case kAudioChannelLabel_Discrete_6: return MA_CHANNEL_AUX_6;
+ case kAudioChannelLabel_Discrete_7: return MA_CHANNEL_AUX_7;
+ case kAudioChannelLabel_Discrete_8: return MA_CHANNEL_AUX_8;
+ case kAudioChannelLabel_Discrete_9: return MA_CHANNEL_AUX_9;
+ case kAudioChannelLabel_Discrete_10: return MA_CHANNEL_AUX_10;
+ case kAudioChannelLabel_Discrete_11: return MA_CHANNEL_AUX_11;
+ case kAudioChannelLabel_Discrete_12: return MA_CHANNEL_AUX_12;
+ case kAudioChannelLabel_Discrete_13: return MA_CHANNEL_AUX_13;
+ case kAudioChannelLabel_Discrete_14: return MA_CHANNEL_AUX_14;
+ case kAudioChannelLabel_Discrete_15: return MA_CHANNEL_AUX_15;
+ case kAudioChannelLabel_Discrete_65535: return MA_CHANNEL_NONE;
- I don't currently know if the dmix plugin is the only one with this error. Indeed, this is the only one I've been able to reproduce
- this error with. In the future, we may want to restrict the disabling of resampling to only known bad plugins.
- */
- ((ma_snd_pcm_hw_params_set_rate_resample_proc)pContext->alsa.snd_pcm_hw_params_set_rate_resample)(pPCM, pHWParams, 0);
-
- sampleRate = pConfig->sampleRate;
- if (((ma_snd_pcm_hw_params_set_rate_near_proc)pContext->alsa.snd_pcm_hw_params_set_rate_near)(pPCM, pHWParams, &sampleRate, 0) < 0) {
- ma_free(pHWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Sample rate not supported. snd_pcm_hw_params_set_rate_near() failed.", MA_FORMAT_NOT_SUPPORTED);
- }
- internalSampleRate = (ma_uint32)sampleRate;
+ #if 0 /* Introduced in a later version of macOS. */
+ case kAudioChannelLabel_HOA_ACN: return MA_CHANNEL_NONE;
+ case kAudioChannelLabel_HOA_ACN_0: return MA_CHANNEL_AUX_0;
+ case kAudioChannelLabel_HOA_ACN_1: return MA_CHANNEL_AUX_1;
+ case kAudioChannelLabel_HOA_ACN_2: return MA_CHANNEL_AUX_2;
+ case kAudioChannelLabel_HOA_ACN_3: return MA_CHANNEL_AUX_3;
+ case kAudioChannelLabel_HOA_ACN_4: return MA_CHANNEL_AUX_4;
+ case kAudioChannelLabel_HOA_ACN_5: return MA_CHANNEL_AUX_5;
+ case kAudioChannelLabel_HOA_ACN_6: return MA_CHANNEL_AUX_6;
+ case kAudioChannelLabel_HOA_ACN_7: return MA_CHANNEL_AUX_7;
+ case kAudioChannelLabel_HOA_ACN_8: return MA_CHANNEL_AUX_8;
+ case kAudioChannelLabel_HOA_ACN_9: return MA_CHANNEL_AUX_9;
+ case kAudioChannelLabel_HOA_ACN_10: return MA_CHANNEL_AUX_10;
+ case kAudioChannelLabel_HOA_ACN_11: return MA_CHANNEL_AUX_11;
+ case kAudioChannelLabel_HOA_ACN_12: return MA_CHANNEL_AUX_12;
+ case kAudioChannelLabel_HOA_ACN_13: return MA_CHANNEL_AUX_13;
+ case kAudioChannelLabel_HOA_ACN_14: return MA_CHANNEL_AUX_14;
+ case kAudioChannelLabel_HOA_ACN_15: return MA_CHANNEL_AUX_15;
+ case kAudioChannelLabel_HOA_ACN_65024: return MA_CHANNEL_NONE;
+ #endif
+
+ default: return MA_CHANNEL_NONE;
}
+}
- /* Buffer Size */
- {
- ma_snd_pcm_uframes_t actualBufferSizeInFrames = pConfig->bufferSizeInFrames;
- if (actualBufferSizeInFrames == 0) {
- actualBufferSizeInFrames = ma_scale_buffer_size(ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, internalSampleRate), bufferSizeScaleFactor);
+static ma_result ma_get_channel_map_from_AudioChannelLayout(AudioChannelLayout* pChannelLayout, ma_channel channelMap[MA_MAX_CHANNELS])
+{
+ MA_ASSERT(pChannelLayout != NULL);
+
+ if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) {
+ UInt32 iChannel;
+ for (iChannel = 0; iChannel < pChannelLayout->mNumberChannelDescriptions; ++iChannel) {
+ channelMap[iChannel] = ma_channel_from_AudioChannelLabel(pChannelLayout->mChannelDescriptions[iChannel].mChannelLabel);
}
-
- if (((ma_snd_pcm_hw_params_set_buffer_size_near_proc)pContext->alsa.snd_pcm_hw_params_set_buffer_size_near)(pPCM, pHWParams, &actualBufferSizeInFrames) < 0) {
- ma_free(pHWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set buffer size for device. snd_pcm_hw_params_set_buffer_size() failed.", MA_FORMAT_NOT_SUPPORTED);
+ } else
+#if 0
+ if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) {
+ /* This is the same kind of system that's used by Windows audio APIs. */
+ UInt32 iChannel = 0;
+ UInt32 iBit;
+ AudioChannelBitmap bitmap = pChannelLayout->mChannelBitmap;
+ for (iBit = 0; iBit < 32; ++iBit) {
+ AudioChannelBitmap bit = bitmap & (1 << iBit);
+ if (bit != 0) {
+                channelMap[iChannel++] = ma_channel_from_AudioChannelBitmap(bit);
+ }
}
- internalBufferSizeInFrames = actualBufferSizeInFrames;
- }
-
- /* Periods. */
+ } else
+#endif
{
- ma_uint32 periods = pConfig->periods;
- if (((ma_snd_pcm_hw_params_set_periods_near_proc)pContext->alsa.snd_pcm_hw_params_set_periods_near)(pPCM, pHWParams, &periods, NULL) < 0) {
- ma_free(pHWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set period count. snd_pcm_hw_params_set_periods_near() failed.", MA_FORMAT_NOT_SUPPORTED);
+ /*
+ Need to use the tag to determine the channel map. For now I'm just assuming a default channel map, but later on this should
+ be updated to determine the mapping based on the tag.
+ */
+ UInt32 channelCount = AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag);
+ switch (pChannelLayout->mChannelLayoutTag)
+ {
+ case kAudioChannelLayoutTag_Mono:
+ case kAudioChannelLayoutTag_Stereo:
+ case kAudioChannelLayoutTag_StereoHeadphones:
+ case kAudioChannelLayoutTag_MatrixStereo:
+ case kAudioChannelLayoutTag_MidSide:
+ case kAudioChannelLayoutTag_XY:
+ case kAudioChannelLayoutTag_Binaural:
+ case kAudioChannelLayoutTag_Ambisonic_B_Format:
+ {
+ ma_get_standard_channel_map(ma_standard_channel_map_default, channelCount, channelMap);
+ } break;
+
+ case kAudioChannelLayoutTag_Octagonal:
+ {
+ channelMap[7] = MA_CHANNEL_SIDE_RIGHT;
+ channelMap[6] = MA_CHANNEL_SIDE_LEFT;
+ } /* Intentional fallthrough. */
+ case kAudioChannelLayoutTag_Hexagonal:
+ {
+ channelMap[5] = MA_CHANNEL_BACK_CENTER;
+ } /* Intentional fallthrough. */
+ case kAudioChannelLayoutTag_Pentagonal:
+ {
+ channelMap[4] = MA_CHANNEL_FRONT_CENTER;
+            } /* Intentional fallthrough. */
+ case kAudioChannelLayoutTag_Quadraphonic:
+ {
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[1] = MA_CHANNEL_RIGHT;
+ channelMap[0] = MA_CHANNEL_LEFT;
+ } break;
+
+ /* TODO: Add support for more tags here. */
+
+ default:
+ {
+ ma_get_standard_channel_map(ma_standard_channel_map_default, channelCount, channelMap);
+ } break;
}
- internalPeriods = periods;
}
+
+ return MA_SUCCESS;
+}
- /* Apply hardware parameters. */
- if (((ma_snd_pcm_hw_params_proc)pContext->alsa.snd_pcm_hw_params)(pPCM, pHWParams) < 0) {
- ma_free(pHWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set hardware parameters. snd_pcm_hw_params() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE);
- }
+static ma_result ma_get_device_object_ids__coreaudio(ma_context* pContext, UInt32* pDeviceCount, AudioObjectID** ppDeviceObjectIDs) /* NOTE: Free the returned buffer with ma_free(). */
+{
+ AudioObjectPropertyAddress propAddressDevices;
+ UInt32 deviceObjectsDataSize;
+ OSStatus status;
+ AudioObjectID* pDeviceObjectIDs;
- ma_free(pHWParams);
- pHWParams = NULL;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pDeviceCount != NULL);
+ MA_ASSERT(ppDeviceObjectIDs != NULL);
+ /* Safety. */
+ *pDeviceCount = 0;
+ *ppDeviceObjectIDs = NULL;
+
+ propAddressDevices.mSelector = kAudioHardwarePropertyDevices;
+ propAddressDevices.mScope = kAudioObjectPropertyScopeGlobal;
+ propAddressDevices.mElement = kAudioObjectPropertyElementMaster;
- /* Software parameters. */
- pSWParams = (ma_snd_pcm_sw_params_t*)calloc(1, ((ma_snd_pcm_sw_params_sizeof_proc)pContext->alsa.snd_pcm_sw_params_sizeof)());
- if (pSWParams == NULL) {
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+ status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(kAudioObjectSystemObject, &propAddressDevices, 0, NULL, &deviceObjectsDataSize);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+ pDeviceObjectIDs = (AudioObjectID*)ma_malloc(deviceObjectsDataSize, &pContext->allocationCallbacks);
+ if (pDeviceObjectIDs == NULL) {
return MA_OUT_OF_MEMORY;
}
-
- if (((ma_snd_pcm_sw_params_current_proc)pContext->alsa.snd_pcm_sw_params_current)(pPCM, pSWParams) != 0) {
- ma_free(pSWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize software parameters. snd_pcm_sw_params_current() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE);
+
+ status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDevices, 0, NULL, &deviceObjectsDataSize, pDeviceObjectIDs);
+ if (status != noErr) {
+ ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+ return ma_result_from_OSStatus(status);
}
+
+ *pDeviceCount = deviceObjectsDataSize / sizeof(AudioObjectID);
+ *ppDeviceObjectIDs = pDeviceObjectIDs;
- if (deviceType == ma_device_type_capture) {
- if (((ma_snd_pcm_sw_params_set_avail_min_proc)pContext->alsa.snd_pcm_sw_params_set_avail_min)(pPCM, pSWParams, 1) != 0) {
- ma_free(pSWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_sw_params_set_avail_min() failed.", MA_FORMAT_NOT_SUPPORTED);
- }
- } else {
- if (((ma_snd_pcm_sw_params_set_avail_min_proc)pContext->alsa.snd_pcm_sw_params_set_avail_min)(pPCM, pSWParams, 1) != 0) {
- ma_free(pSWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_sw_params_set_avail_min() failed.", MA_FORMAT_NOT_SUPPORTED);
- }
+ return MA_SUCCESS;
+}
+
+static ma_result ma_get_AudioObject_uid_as_CFStringRef(ma_context* pContext, AudioObjectID objectID, CFStringRef* pUID)
+{
+ AudioObjectPropertyAddress propAddress;
+ UInt32 dataSize;
+ OSStatus status;
+
+ MA_ASSERT(pContext != NULL);
+
+ propAddress.mSelector = kAudioDevicePropertyDeviceUID;
+ propAddress.mScope = kAudioObjectPropertyScopeGlobal;
+ propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ dataSize = sizeof(*pUID);
+ status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, pUID);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
}
+ return MA_SUCCESS;
+}
- if (((ma_snd_pcm_sw_params_get_boundary_proc)pContext->alsa.snd_pcm_sw_params_get_boundary)(pSWParams, &bufferBoundary) < 0) {
- bufferBoundary = internalBufferSizeInFrames;
- }
+static ma_result ma_get_AudioObject_uid(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut)
+{
+ CFStringRef uid;
+ ma_result result;
- /*printf("TRACE: bufferBoundary=%ld\n", bufferBoundary);*/
+ MA_ASSERT(pContext != NULL);
- if (deviceType == ma_device_type_playback && !isUsingMMap) { /* Only playback devices in writei/readi mode need a start threshold. */
- /*
- Subtle detail here with the start threshold. When in playback-only mode (no full-duplex) we can set the start threshold to
- the size of a period. But for full-duplex we need to set it such that it is at least two periods.
- */
- if (((ma_snd_pcm_sw_params_set_start_threshold_proc)pContext->alsa.snd_pcm_sw_params_set_start_threshold)(pPCM, pSWParams, internalBufferSizeInFrames) != 0) {
- ma_free(pSWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set start threshold for playback device. snd_pcm_sw_params_set_start_threshold() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE);
- }
- if (((ma_snd_pcm_sw_params_set_stop_threshold_proc)pContext->alsa.snd_pcm_sw_params_set_stop_threshold)(pPCM, pSWParams, bufferBoundary) != 0) { /* Set to boundary to loop instead of stop in the event of an xrun. */
- ma_free(pSWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set stop threshold for playback device. snd_pcm_sw_params_set_stop_threshold() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE);
- }
+ result = ma_get_AudioObject_uid_as_CFStringRef(pContext, objectID, &uid);
+ if (result != MA_SUCCESS) {
+ return result;
}
-
- if (((ma_snd_pcm_sw_params_proc)pContext->alsa.snd_pcm_sw_params)(pPCM, pSWParams) != 0) {
- ma_free(pSWParams);
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set software parameters. snd_pcm_sw_params() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE);
+
+ if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(uid, bufferOut, bufferSize, kCFStringEncodingUTF8)) {
+ return MA_ERROR;
}
+
+ ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(uid);
+ return MA_SUCCESS;
+}
- ma_free(pSWParams);
- pSWParams = NULL;
+static ma_result ma_get_AudioObject_name(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut)
+{
+ AudioObjectPropertyAddress propAddress;
+ CFStringRef deviceName = NULL;
+ UInt32 dataSize;
+ OSStatus status;
+ MA_ASSERT(pContext != NULL);
- /* Grab the internal channel map. For now we're not going to bother trying to change the channel map and instead just do it ourselves. */
- {
- ma_snd_pcm_chmap_t* pChmap = ((ma_snd_pcm_get_chmap_proc)pContext->alsa.snd_pcm_get_chmap)(pPCM);
- if (pChmap != NULL) {
- ma_uint32 iChannel;
+ propAddress.mSelector = kAudioDevicePropertyDeviceNameCFString;
+ propAddress.mScope = kAudioObjectPropertyScopeGlobal;
+ propAddress.mElement = kAudioObjectPropertyElementMaster;
- /* There are cases where the returned channel map can have a different channel count than was returned by snd_pcm_hw_params_set_channels_near(). */
- if (pChmap->channels >= internalChannels) {
- /* Drop excess channels. */
- for (iChannel = 0; iChannel < internalChannels; ++iChannel) {
- internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]);
- }
- } else {
- ma_uint32 i;
+ dataSize = sizeof(deviceName);
+ status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, &deviceName);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+ if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(deviceName, bufferOut, bufferSize, kCFStringEncodingUTF8)) {
+ return MA_ERROR;
+ }
+
+ ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(deviceName);
+ return MA_SUCCESS;
+}
- /*
- Excess channels use defaults. Do an initial fill with defaults, overwrite the first pChmap->channels, validate to ensure there are no duplicate
- channels. If validation fails, fall back to defaults.
- */
- ma_bool32 isValid = MA_TRUE;
+static ma_bool32 ma_does_AudioObject_support_scope(ma_context* pContext, AudioObjectID deviceObjectID, AudioObjectPropertyScope scope)
+{
+ AudioObjectPropertyAddress propAddress;
+ UInt32 dataSize;
+ OSStatus status;
+ AudioBufferList* pBufferList;
+ ma_bool32 isSupported;
- /* Fill with defaults. */
- ma_get_standard_channel_map(ma_standard_channel_map_alsa, internalChannels, internalChannelMap);
+ MA_ASSERT(pContext != NULL);
- /* Overwrite first pChmap->channels channels. */
- for (iChannel = 0; iChannel < pChmap->channels; ++iChannel) {
- internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]);
- }
+    /* To know whether or not a device is an input device we need to look at the stream configuration. If it has an output channel it's a playback device. */
+ propAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
+ propAddress.mScope = scope;
+ propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
+ if (status != noErr) {
+ return MA_FALSE;
+ }
+
+ pBufferList = (AudioBufferList*)ma__malloc_from_callbacks(dataSize, &pContext->allocationCallbacks);
+ if (pBufferList == NULL) {
+ return MA_FALSE; /* Out of memory. */
+ }
+
+ status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pBufferList);
+ if (status != noErr) {
+ ma__free_from_callbacks(pBufferList, &pContext->allocationCallbacks);
+ return MA_FALSE;
+ }
- /* Validate. */
- for (i = 0; i < internalChannels && isValid; ++i) {
- ma_uint32 j;
- for (j = i+1; j < internalChannels; ++j) {
- if (internalChannelMap[i] == internalChannelMap[j]) {
- isValid = MA_FALSE;
- break;
- }
- }
- }
+ isSupported = MA_FALSE;
+ if (pBufferList->mNumberBuffers > 0) {
+ isSupported = MA_TRUE;
+ }
+
+ ma__free_from_callbacks(pBufferList, &pContext->allocationCallbacks);
+ return isSupported;
+}
+
+static ma_bool32 ma_does_AudioObject_support_playback(ma_context* pContext, AudioObjectID deviceObjectID)
+{
+ return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeOutput);
+}
- /* If our channel map is invalid, fall back to defaults. */
- if (!isValid) {
- ma_get_standard_channel_map(ma_standard_channel_map_alsa, internalChannels, internalChannelMap);
- }
- }
+static ma_bool32 ma_does_AudioObject_support_capture(ma_context* pContext, AudioObjectID deviceObjectID)
+{
+ return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeInput);
+}
- free(pChmap);
- pChmap = NULL;
- } else {
- /* Could not retrieve the channel map. Fall back to a hard-coded assumption. */
- ma_get_standard_channel_map(ma_standard_channel_map_alsa, internalChannels, internalChannelMap);
- }
- }
+static ma_result ma_get_AudioObject_stream_descriptions(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pDescriptionCount, AudioStreamRangedDescription** ppDescriptions) /* NOTE: Free the returned pointer with ma_free(). */
+{
+ AudioObjectPropertyAddress propAddress;
+ UInt32 dataSize;
+ OSStatus status;
+ AudioStreamRangedDescription* pDescriptions;
- /* We're done. Prepare the device. */
- if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)(pPCM) < 0) {
- ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to prepare device.", MA_FAILED_TO_START_BACKEND_DEVICE);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pDescriptionCount != NULL);
+ MA_ASSERT(ppDescriptions != NULL);
+
+ /*
+    TODO: Experiment with kAudioStreamPropertyAvailablePhysicalFormats instead of (or in addition to) kAudioStreamPropertyAvailableVirtualFormats. Note, however, that my
+    MacBook Pro uses an s24/32 format, which miniaudio does not currently support.
+ */
+ propAddress.mSelector = kAudioStreamPropertyAvailableVirtualFormats; /*kAudioStreamPropertyAvailablePhysicalFormats;*/
+ propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+ propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+ pDescriptions = (AudioStreamRangedDescription*)ma_malloc(dataSize, &pContext->allocationCallbacks);
+ if (pDescriptions == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
+
+ status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pDescriptions);
+ if (status != noErr) {
+ ma_free(pDescriptions, &pContext->allocationCallbacks);
+ return ma_result_from_OSStatus(status);
}
+
+ *pDescriptionCount = dataSize / sizeof(*pDescriptions);
+ *ppDescriptions = pDescriptions;
+ return MA_SUCCESS;
+}
- if (deviceType == ma_device_type_capture) {
- pDevice->alsa.pPCMCapture = (ma_ptr)pPCM;
- pDevice->alsa.isUsingMMapCapture = isUsingMMap;
- pDevice->capture.internalFormat = internalFormat;
- pDevice->capture.internalChannels = internalChannels;
- pDevice->capture.internalSampleRate = internalSampleRate;
- ma_channel_map_copy(pDevice->capture.internalChannelMap, internalChannelMap, internalChannels);
- pDevice->capture.internalBufferSizeInFrames = internalBufferSizeInFrames;
- pDevice->capture.internalPeriods = internalPeriods;
- } else {
- pDevice->alsa.pPCMPlayback = (ma_ptr)pPCM;
- pDevice->alsa.isUsingMMapPlayback = isUsingMMap;
- pDevice->playback.internalFormat = internalFormat;
- pDevice->playback.internalChannels = internalChannels;
- pDevice->playback.internalSampleRate = internalSampleRate;
- ma_channel_map_copy(pDevice->playback.internalChannelMap, internalChannelMap, internalChannels);
- pDevice->playback.internalBufferSizeInFrames = internalBufferSizeInFrames;
- pDevice->playback.internalPeriods = internalPeriods;
- }
+static ma_result ma_get_AudioObject_channel_layout(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, AudioChannelLayout** ppChannelLayout) /* NOTE: Free the returned pointer with ma_free(). */
+{
+ AudioObjectPropertyAddress propAddress;
+ UInt32 dataSize;
+ OSStatus status;
+ AudioChannelLayout* pChannelLayout;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(ppChannelLayout != NULL);
+
+ *ppChannelLayout = NULL; /* Safety. */
+
+ propAddress.mSelector = kAudioDevicePropertyPreferredChannelLayout;
+ propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+ propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+ pChannelLayout = (AudioChannelLayout*)ma_malloc(dataSize, &pContext->allocationCallbacks);
+ if (pChannelLayout == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
+
+ status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pChannelLayout);
+ if (status != noErr) {
+ ma_free(pChannelLayout, &pContext->allocationCallbacks);
+ return ma_result_from_OSStatus(status);
+ }
+
+ *ppChannelLayout = pChannelLayout;
return MA_SUCCESS;
}
-ma_result ma_device_init__alsa(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+static ma_result ma_get_AudioObject_channel_count(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pChannelCount)
{
- ma_assert(pDevice != NULL);
+ AudioChannelLayout* pChannelLayout;
+ ma_result result;
- ma_zero_object(&pDevice->alsa);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pChannelCount != NULL);
+
+ *pChannelCount = 0; /* Safety. */
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- ma_result result = ma_device_init_by_type__alsa(pContext, pConfig, ma_device_type_capture, pDevice);
- if (result != MA_SUCCESS) {
- return result;
- }
+ result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) {
+ *pChannelCount = pChannelLayout->mNumberChannelDescriptions;
+ } else if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) {
+ *pChannelCount = ma_count_set_bits(pChannelLayout->mChannelBitmap);
+ } else {
+ *pChannelCount = AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag);
}
+
+ ma_free(pChannelLayout, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+}
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- ma_result result = ma_device_init_by_type__alsa(pContext, pConfig, ma_device_type_playback, pDevice);
- if (result != MA_SUCCESS) {
- return result;
- }
+#if 0
+static ma_result ma_get_AudioObject_channel_map(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_channel channelMap[MA_MAX_CHANNELS])
+{
+ AudioChannelLayout* pChannelLayout;
+ ma_result result;
+
+ MA_ASSERT(pContext != NULL);
+
+ result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout);
+ if (result != MA_SUCCESS) {
+ return result; /* Rather than always failing here, would it be more robust to simply assume a default? */
+ }
+
+ result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, channelMap);
+ if (result != MA_SUCCESS) {
+ ma_free(pChannelLayout, &pContext->allocationCallbacks);
+ return result;
}
+
+ ma_free(pChannelLayout, &pContext->allocationCallbacks);
+ return result;
+}
+#endif
+
+static ma_result ma_get_AudioObject_sample_rates(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pSampleRateRangesCount, AudioValueRange** ppSampleRateRanges) /* NOTE: Free the returned pointer with ma_free(). */
+{
+ AudioObjectPropertyAddress propAddress;
+ UInt32 dataSize;
+ OSStatus status;
+ AudioValueRange* pSampleRateRanges;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pSampleRateRangesCount != NULL);
+ MA_ASSERT(ppSampleRateRanges != NULL);
+
+ /* Safety. */
+ *pSampleRateRangesCount = 0;
+ *ppSampleRateRanges = NULL;
+
+ propAddress.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
+ propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+ propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+ pSampleRateRanges = (AudioValueRange*)ma_malloc(dataSize, &pContext->allocationCallbacks);
+ if (pSampleRateRanges == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
+
+ status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pSampleRateRanges);
+ if (status != noErr) {
+ ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+ return ma_result_from_OSStatus(status);
+ }
+
+ *pSampleRateRangesCount = dataSize / sizeof(*pSampleRateRanges);
+ *ppSampleRateRanges = pSampleRateRanges;
return MA_SUCCESS;
}
#if 0
-ma_result ma_device_start__alsa(ma_device* pDevice)
+static ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 sampleRateIn, ma_uint32* pSampleRateOut)
{
- ma_assert(pDevice != NULL);
+ UInt32 sampleRateRangeCount;
+ AudioValueRange* pSampleRateRanges;
+ ma_result result;
- /* Prepare the device first... */
- if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to prepare device.", MA_FAILED_TO_START_BACKEND_DEVICE);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pSampleRateOut != NULL);
+
+ *pSampleRateOut = 0; /* Safety. */
+
+ result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges);
+ if (result != MA_SUCCESS) {
+ return result;
}
-
- /*
- ... and then grab an initial chunk from the client. After this is done, the device should
- automatically start playing, since that's how we configured the software parameters.
- */
- if (pDevice->type == ma_device_type_playback) {
- if (!ma_device_read_from_client_and_write__alsa(pDevice)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to write initial chunk of data to the playback device.", MA_FAILED_TO_SEND_DATA_TO_DEVICE);
+
+ if (sampleRateRangeCount == 0) {
+ ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+        return MA_ERROR;    /* Should never hit this case, should we? */
+ }
+
+ if (sampleRateIn == 0) {
+ /* Search in order of miniaudio's preferred priority. */
+ UInt32 iMALSampleRate;
+ for (iMALSampleRate = 0; iMALSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iMALSampleRate) {
+ ma_uint32 malSampleRate = g_maStandardSampleRatePriorities[iMALSampleRate];
+ UInt32 iCASampleRate;
+ for (iCASampleRate = 0; iCASampleRate < sampleRateRangeCount; ++iCASampleRate) {
+ AudioValueRange caSampleRate = pSampleRateRanges[iCASampleRate];
+ if (caSampleRate.mMinimum <= malSampleRate && caSampleRate.mMaximum >= malSampleRate) {
+ *pSampleRateOut = malSampleRate;
+ ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+ }
+ }
}
-
- /* mmap mode requires an explicit start. */
- if (pDevice->alsa.isUsingMMap) {
- if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start capture device.", MA_FAILED_TO_START_BACKEND_DEVICE);
+
+ /*
+ If we get here it means none of miniaudio's standard sample rates matched any of the supported sample rates from the device. In this
+ case we just fall back to the first one reported by Core Audio.
+ */
+ MA_ASSERT(sampleRateRangeCount > 0);
+
+ *pSampleRateOut = pSampleRateRanges[0].mMinimum;
+ ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+ } else {
+ /* Find the closest match to this sample rate. */
+ UInt32 currentAbsoluteDifference = INT32_MAX;
+ UInt32 iCurrentClosestRange = (UInt32)-1;
+ UInt32 iRange;
+ for (iRange = 0; iRange < sampleRateRangeCount; ++iRange) {
+ if (pSampleRateRanges[iRange].mMinimum <= sampleRateIn && pSampleRateRanges[iRange].mMaximum >= sampleRateIn) {
+ *pSampleRateOut = sampleRateIn;
+ ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+ } else {
+ UInt32 absoluteDifference;
+ if (pSampleRateRanges[iRange].mMinimum > sampleRateIn) {
+ absoluteDifference = pSampleRateRanges[iRange].mMinimum - sampleRateIn;
+ } else {
+ absoluteDifference = sampleRateIn - pSampleRateRanges[iRange].mMaximum;
+ }
+
+ if (currentAbsoluteDifference > absoluteDifference) {
+ currentAbsoluteDifference = absoluteDifference;
+ iCurrentClosestRange = iRange;
+ }
}
}
+
+ MA_ASSERT(iCurrentClosestRange != (UInt32)-1);
+
+ *pSampleRateOut = pSampleRateRanges[iCurrentClosestRange].mMinimum;
+ ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+ }
+
+    /* Should never get here; reaching this point would mean we weren't able to find any suitable sample rates. */
+ /*ma_free(pSampleRateRanges, &pContext->allocationCallbacks);*/
+ /*return MA_ERROR;*/
+}
+#endif
+
+static ma_result ma_get_AudioObject_closest_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 bufferSizeInFramesIn, ma_uint32* pBufferSizeInFramesOut)
+{
+ AudioObjectPropertyAddress propAddress;
+ AudioValueRange bufferSizeRange;
+ UInt32 dataSize;
+ OSStatus status;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pBufferSizeInFramesOut != NULL);
+
+ *pBufferSizeInFramesOut = 0; /* Safety. */
+
+ propAddress.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
+ propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+ propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ dataSize = sizeof(bufferSizeRange);
+ status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &bufferSizeRange);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+ /* This is just a clamp. */
+ if (bufferSizeInFramesIn < bufferSizeRange.mMinimum) {
+ *pBufferSizeInFramesOut = (ma_uint32)bufferSizeRange.mMinimum;
+ } else if (bufferSizeInFramesIn > bufferSizeRange.mMaximum) {
+ *pBufferSizeInFramesOut = (ma_uint32)bufferSizeRange.mMaximum;
} else {
- if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start capture device.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
+ *pBufferSizeInFramesOut = bufferSizeInFramesIn;
}
return MA_SUCCESS;
}
-#endif /* 0 */
-ma_result ma_device_stop__alsa(ma_device* pDevice)
+static ma_result ma_set_AudioObject_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pPeriodSizeInOut)
{
- ma_assert(pDevice != NULL);
-
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ((ma_snd_pcm_drain_proc)pDevice->pContext->alsa.snd_pcm_drain)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
-
- /* We need to prepare the device again, otherwise we won't be able to restart the device. */
- if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) < 0) {
- #ifdef MA_DEBUG_OUTPUT
- printf("[ALSA] Failed to prepare capture device after stopping.\n");
- #endif
- }
- }
+ ma_result result;
+ ma_uint32 chosenBufferSizeInFrames;
+ AudioObjectPropertyAddress propAddress;
+ UInt32 dataSize;
+ OSStatus status;
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- /* Using drain instead of drop because ma_device_stop() is defined such that pending frames are processed before returning. */
- ((ma_snd_pcm_drain_proc)pDevice->pContext->alsa.snd_pcm_drain)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
+ MA_ASSERT(pContext != NULL);
- /* We need to prepare the device again, otherwise we won't be able to restart the device. */
- if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) < 0) {
- #ifdef MA_DEBUG_OUTPUT
- printf("[ALSA] Failed to prepare playback device after stopping.\n");
- #endif
- }
+ result = ma_get_AudioObject_closest_buffer_size_in_frames(pContext, deviceObjectID, deviceType, *pPeriodSizeInOut, &chosenBufferSizeInFrames);
+ if (result != MA_SUCCESS) {
+ return result;
}
+ /* Try setting the size of the buffer... If this fails we just use whatever is currently set. */
+ propAddress.mSelector = kAudioDevicePropertyBufferFrameSize;
+ propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+ propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ ((ma_AudioObjectSetPropertyData_proc)pContext->coreaudio.AudioObjectSetPropertyData)(deviceObjectID, &propAddress, 0, NULL, sizeof(chosenBufferSizeInFrames), &chosenBufferSizeInFrames);
+ /* Get the actual size of the buffer. */
+ dataSize = sizeof(*pPeriodSizeInOut);
+ status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &chosenBufferSizeInFrames);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+ *pPeriodSizeInOut = chosenBufferSizeInFrames;
return MA_SUCCESS;
}
-ma_result ma_device_write__alsa(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount)
-{
- ma_snd_pcm_sframes_t resultALSA;
- ma_uint32 totalPCMFramesProcessed;
-
- ma_assert(pDevice != NULL);
- ma_assert(pPCMFrames != NULL);
-
- /*printf("TRACE: Enter write()\n");*/
-
- totalPCMFramesProcessed = 0;
- while (totalPCMFramesProcessed < frameCount) {
- const void* pSrc = ma_offset_ptr(pPCMFrames, totalPCMFramesProcessed * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
- ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed);
- /*printf("TRACE: Writing %d frames (frameCount=%d)\n", framesRemaining, frameCount);*/
+static ma_result ma_find_AudioObjectID(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, AudioObjectID* pDeviceObjectID)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pDeviceObjectID != NULL);
- resultALSA = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, pSrc, framesRemaining);
- if (resultALSA < 0) {
- if (resultALSA == -EAGAIN) {
- /*printf("TRACE: EGAIN (write)\n");*/
- continue; /* Try again. */
- } else if (resultALSA == -EPIPE) {
- /*printf("TRACE: EPIPE (write)\n");*/
+ /* Safety. */
+ *pDeviceObjectID = 0;
+
+ if (pDeviceID == NULL) {
+ /* Default device. */
+ AudioObjectPropertyAddress propAddressDefaultDevice;
+ UInt32 defaultDeviceObjectIDSize = sizeof(AudioObjectID);
+ AudioObjectID defaultDeviceObjectID;
+ OSStatus status;
- /* Underrun. Recover and try again. If this fails we need to return an error. */
- if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, resultALSA, MA_TRUE) < 0) { /* MA_TRUE=silent (don't print anything on error). */
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after underrun.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
+ propAddressDefaultDevice.mScope = kAudioObjectPropertyScopeGlobal;
+ propAddressDefaultDevice.mElement = kAudioObjectPropertyElementMaster;
+ if (deviceType == ma_device_type_playback) {
+ propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ } else {
+ propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+ }
+
+ defaultDeviceObjectIDSize = sizeof(AudioObjectID);
+ status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDefaultDevice, 0, NULL, &defaultDeviceObjectIDSize, &defaultDeviceObjectID);
+ if (status == noErr) {
+ *pDeviceObjectID = defaultDeviceObjectID;
+ return MA_SUCCESS;
+ }
+ } else {
+ /* Explicit device. */
+ UInt32 deviceCount;
+ AudioObjectID* pDeviceObjectIDs;
+ ma_result result;
+ UInt32 iDevice;
- /*
- In my testing I have had a situation where writei() does not automatically restart the device even though I've set it
- up as such in the software parameters. What will happen is writei() will block indefinitely even though the number of
- frames is well beyond the auto-start threshold. To work around this I've needed to add an explicit start here. Not sure
- if this is me just being stupid and not recovering the device properly, but this definitely feels like something isn't
- quite right here.
- */
- if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after underrun.", MA_FAILED_TO_START_BACKEND_DEVICE);
+ result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
+ AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice];
+
+ char uid[256];
+ if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(uid), uid) != MA_SUCCESS) {
+ continue;
+ }
+
+ if (deviceType == ma_device_type_playback) {
+ if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) {
+ if (strcmp(uid, pDeviceID->coreaudio) == 0) {
+ *pDeviceObjectID = deviceObjectID;
+ ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+ }
}
-
- resultALSA = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, pSrc, framesRemaining);
- if (resultALSA < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to write data to device after underrun.", MA_FAILED_TO_START_BACKEND_DEVICE);
+ } else {
+ if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) {
+ if (strcmp(uid, pDeviceID->coreaudio) == 0) {
+ *pDeviceObjectID = deviceObjectID;
+ ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+ }
}
}
}
- totalPCMFramesProcessed += resultALSA;
+ ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
}
-
- return MA_SUCCESS;
+
+ /* If we get here it means we couldn't find the device. */
+ return MA_NO_DEVICE;
}
-ma_result ma_device_read__alsa(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount)
-{
- ma_snd_pcm_sframes_t resultALSA;
- ma_uint32 totalPCMFramesProcessed;
- ma_assert(pDevice != NULL);
- ma_assert(pPCMFrames != NULL);
+static ma_result ma_find_best_format__coreaudio(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_bool32 usingDefaultFormat, ma_bool32 usingDefaultChannels, ma_bool32 usingDefaultSampleRate, AudioStreamBasicDescription* pFormat)
+{
+ UInt32 deviceFormatDescriptionCount;
+ AudioStreamRangedDescription* pDeviceFormatDescriptions;
+ ma_result result;
+ ma_uint32 desiredSampleRate;
+ ma_uint32 desiredChannelCount;
+ ma_format desiredFormat;
+ AudioStreamBasicDescription bestDeviceFormatSoFar;
+ ma_bool32 hasSupportedFormat;
+ UInt32 iFormat;
- /* We need to explicitly start the device if it isn't already. */
- if (((ma_snd_pcm_state_proc)pDevice->pContext->alsa.snd_pcm_state)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) != MA_SND_PCM_STATE_RUNNING) {
- if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device in preparation for reading.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
+ result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &deviceFormatDescriptionCount, &pDeviceFormatDescriptions);
+ if (result != MA_SUCCESS) {
+ return result;
}
+
+ desiredSampleRate = sampleRate;
+ if (usingDefaultSampleRate) {
+ /*
+ When using the device's default sample rate, we get the highest priority standard rate supported by the device. Otherwise
+ we just use the pre-set rate.
+ */
+ ma_uint32 iStandardRate;
+ for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) {
+ ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate];
+ ma_bool32 foundRate = MA_FALSE;
+ UInt32 iDeviceRate;
- totalPCMFramesProcessed = 0;
- while (totalPCMFramesProcessed < frameCount) {
- void* pDst = ma_offset_ptr(pPCMFrames, totalPCMFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
- ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed);
-
- /*printf("TRACE: snd_pcm_readi(framesRemaining=%d)\n", framesRemaining);*/
-
- resultALSA = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, pDst, framesRemaining);
- if (resultALSA < 0) {
- if (resultALSA == -EAGAIN) {
- /*printf("TRACE: EGAIN (read)\n");*/
- continue;
- } else if (resultALSA == -EPIPE) {
- /*printf("TRACE: EPIPE (read)\n");*/
-
- /* Overrun. Recover and try again. If this fails we need to return an error. */
- if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, resultALSA, MA_TRUE) < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after overrun.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
-
- if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after underrun.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
-
- resultALSA = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, pDst, framesRemaining);
- if (resultALSA < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to read data from the internal device.", MA_FAILED_TO_READ_DATA_FROM_DEVICE);
+ for (iDeviceRate = 0; iDeviceRate < deviceFormatDescriptionCount; ++iDeviceRate) {
+ ma_uint32 deviceRate = (ma_uint32)pDeviceFormatDescriptions[iDeviceRate].mFormat.mSampleRate;
+
+ if (deviceRate == standardRate) {
+ desiredSampleRate = standardRate;
+ foundRate = MA_TRUE;
+ break;
}
}
+
+ if (foundRate) {
+ break;
+ }
}
-
- totalPCMFramesProcessed += resultALSA;
}
-
- return MA_SUCCESS;
-}
-
-#if 0
-ma_result ma_device_break_main_loop__alsa(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
-
- pDevice->alsa.breakFromMainLoop = MA_TRUE;
- return MA_SUCCESS;
-}
-
-ma_result ma_device_main_loop__alsa(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
-
- pDevice->alsa.breakFromMainLoop = MA_FALSE;
- if (pDevice->type == ma_device_type_playback) {
- /* Playback. Read from client, write to device. */
- while (!pDevice->alsa.breakFromMainLoop && ma_device_read_from_client_and_write__alsa(pDevice)) {
- }
- } else {
- /* Capture. Read from device, write to client. */
- while (!pDevice->alsa.breakFromMainLoop && ma_device_read_and_send_to_client__alsa(pDevice)) {
- }
+
+ desiredChannelCount = channels;
+ if (usingDefaultChannels) {
+ ma_get_AudioObject_channel_count(pContext, deviceObjectID, deviceType, &desiredChannelCount); /* <-- Not critical if this fails. */
}
-
- return MA_SUCCESS;
-}
-#endif /* 0 */
-
-ma_result ma_context_uninit__alsa(ma_context* pContext)
-{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_alsa);
-
- /* Clean up memory for memory leak checkers. */
- ((ma_snd_config_update_free_global_proc)pContext->alsa.snd_config_update_free_global)();
-
-#ifndef MA_NO_RUNTIME_LINKING
- ma_dlclose(pContext, pContext->alsa.asoundSO);
-#endif
-
- ma_mutex_uninit(&pContext->alsa.internalDeviceEnumLock);
-
- return MA_SUCCESS;
-}
-
-ma_result ma_context_init__alsa(const ma_context_config* pConfig, ma_context* pContext)
-{
-#ifndef MA_NO_RUNTIME_LINKING
- const char* libasoundNames[] = {
- "libasound.so.2",
- "libasound.so"
- };
- size_t i;
-
- for (i = 0; i < ma_countof(libasoundNames); ++i) {
- pContext->alsa.asoundSO = ma_dlopen(pContext, libasoundNames[i]);
- if (pContext->alsa.asoundSO != NULL) {
+
+ desiredFormat = format;
+ if (usingDefaultFormat) {
+ desiredFormat = g_maFormatPriorities[0];
+ }
+
+ /*
+ If we get here it means we don't have an exact match to what the client is asking for. We'll need to find the closest one. The next
+    loop will check for formats that have the same sample rate as what we're asking for. If one exists, we prefer it in all cases.
+ */
+ MA_ZERO_OBJECT(&bestDeviceFormatSoFar);
+
+ hasSupportedFormat = MA_FALSE;
+ for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) {
+ ma_format format;
+ ma_result formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &format);
+ if (formatResult == MA_SUCCESS && format != ma_format_unknown) {
+ hasSupportedFormat = MA_TRUE;
+ bestDeviceFormatSoFar = pDeviceFormatDescriptions[iFormat].mFormat;
break;
}
}
-
- if (pContext->alsa.asoundSO == NULL) {
-#ifdef MA_DEBUG_OUTPUT
- printf("[ALSA] Failed to open shared object.\n");
-#endif
- return MA_NO_BACKEND;
+
+ if (!hasSupportedFormat) {
+ ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks);
+ return MA_FORMAT_NOT_SUPPORTED;
}
+
+
+ for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) {
+ AudioStreamBasicDescription thisDeviceFormat = pDeviceFormatDescriptions[iFormat].mFormat;
+ ma_format thisSampleFormat;
+ ma_result formatResult;
+ ma_format bestSampleFormatSoFar;
- pContext->alsa.snd_pcm_open = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_open");
- pContext->alsa.snd_pcm_close = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_close");
- pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_sizeof");
- pContext->alsa.snd_pcm_hw_params_any = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_any");
- pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format");
- pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format_first");
- pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format_mask");
- pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels_near");
- pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_resample");
- pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_near");
- pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_buffer_size_near");
- pContext->alsa.snd_pcm_hw_params_set_periods_near = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_periods_near");
- pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_access");
- pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format");
- pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels");
- pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_min");
- pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_max");
- pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate");
- pContext->alsa.snd_pcm_hw_params_get_rate_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_min");
- pContext->alsa.snd_pcm_hw_params_get_rate_max = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_max");
- pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_buffer_size");
- pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_periods");
- pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_get_access");
- pContext->alsa.snd_pcm_hw_params = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params");
- pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_sizeof");
- pContext->alsa.snd_pcm_sw_params_current = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_current");
- pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_get_boundary");
- pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_avail_min");
- pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_start_threshold");
- pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params_set_stop_threshold");
- pContext->alsa.snd_pcm_sw_params = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_sw_params");
- pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_format_mask_sizeof");
- pContext->alsa.snd_pcm_format_mask_test = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_format_mask_test");
- pContext->alsa.snd_pcm_get_chmap = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_get_chmap");
- pContext->alsa.snd_pcm_state = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_state");
- pContext->alsa.snd_pcm_prepare = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_prepare");
- pContext->alsa.snd_pcm_start = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_start");
- pContext->alsa.snd_pcm_drop = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_drop");
- pContext->alsa.snd_pcm_drain = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_drain");
- pContext->alsa.snd_device_name_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_hint");
- pContext->alsa.snd_device_name_get_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_get_hint");
- pContext->alsa.snd_card_get_index = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_card_get_index");
- pContext->alsa.snd_device_name_free_hint = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_device_name_free_hint");
- pContext->alsa.snd_pcm_mmap_begin = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_mmap_begin");
- pContext->alsa.snd_pcm_mmap_commit = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_mmap_commit");
- pContext->alsa.snd_pcm_recover = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_recover");
- pContext->alsa.snd_pcm_readi = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_readi");
- pContext->alsa.snd_pcm_writei = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_writei");
- pContext->alsa.snd_pcm_avail = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_avail");
- pContext->alsa.snd_pcm_avail_update = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_avail_update");
- pContext->alsa.snd_pcm_wait = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_wait");
- pContext->alsa.snd_pcm_info = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info");
- pContext->alsa.snd_pcm_info_sizeof = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info_sizeof");
- pContext->alsa.snd_pcm_info_get_name = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_info_get_name");
- pContext->alsa.snd_config_update_free_global = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_config_update_free_global");
-#else
- /* The system below is just for type safety. */
- ma_snd_pcm_open_proc _snd_pcm_open = snd_pcm_open;
- ma_snd_pcm_close_proc _snd_pcm_close = snd_pcm_close;
- ma_snd_pcm_hw_params_sizeof_proc _snd_pcm_hw_params_sizeof = snd_pcm_hw_params_sizeof;
- ma_snd_pcm_hw_params_any_proc _snd_pcm_hw_params_any = snd_pcm_hw_params_any;
- ma_snd_pcm_hw_params_set_format_proc _snd_pcm_hw_params_set_format = snd_pcm_hw_params_set_format;
- ma_snd_pcm_hw_params_set_format_first_proc _snd_pcm_hw_params_set_format_first = snd_pcm_hw_params_set_format_first;
- ma_snd_pcm_hw_params_get_format_mask_proc _snd_pcm_hw_params_get_format_mask = snd_pcm_hw_params_get_format_mask;
- ma_snd_pcm_hw_params_set_channels_near_proc _snd_pcm_hw_params_set_channels_near = snd_pcm_hw_params_set_channels_near;
- ma_snd_pcm_hw_params_set_rate_resample_proc _snd_pcm_hw_params_set_rate_resample = snd_pcm_hw_params_set_rate_resample;
- ma_snd_pcm_hw_params_set_rate_near_proc _snd_pcm_hw_params_set_rate_near = snd_pcm_hw_params_set_rate_near;
- ma_snd_pcm_hw_params_set_buffer_size_near_proc _snd_pcm_hw_params_set_buffer_size_near = snd_pcm_hw_params_set_buffer_size_near;
- ma_snd_pcm_hw_params_set_periods_near_proc _snd_pcm_hw_params_set_periods_near = snd_pcm_hw_params_set_periods_near;
- ma_snd_pcm_hw_params_set_access_proc _snd_pcm_hw_params_set_access = snd_pcm_hw_params_set_access;
- ma_snd_pcm_hw_params_get_format_proc _snd_pcm_hw_params_get_format = snd_pcm_hw_params_get_format;
- ma_snd_pcm_hw_params_get_channels_proc _snd_pcm_hw_params_get_channels = snd_pcm_hw_params_get_channels;
- ma_snd_pcm_hw_params_get_channels_min_proc _snd_pcm_hw_params_get_channels_min = snd_pcm_hw_params_get_channels_min;
- ma_snd_pcm_hw_params_get_channels_max_proc _snd_pcm_hw_params_get_channels_max = snd_pcm_hw_params_get_channels_max;
- ma_snd_pcm_hw_params_get_rate_proc _snd_pcm_hw_params_get_rate = snd_pcm_hw_params_get_rate;
- ma_snd_pcm_hw_params_get_rate_min_proc _snd_pcm_hw_params_get_rate_min = snd_pcm_hw_params_get_rate_min;
- ma_snd_pcm_hw_params_get_rate_max_proc _snd_pcm_hw_params_get_rate_max = snd_pcm_hw_params_get_rate_max;
- ma_snd_pcm_hw_params_get_buffer_size_proc _snd_pcm_hw_params_get_buffer_size = snd_pcm_hw_params_get_buffer_size;
- ma_snd_pcm_hw_params_get_periods_proc _snd_pcm_hw_params_get_periods = snd_pcm_hw_params_get_periods;
- ma_snd_pcm_hw_params_get_access_proc _snd_pcm_hw_params_get_access = snd_pcm_hw_params_get_access;
- ma_snd_pcm_hw_params_proc _snd_pcm_hw_params = snd_pcm_hw_params;
- ma_snd_pcm_sw_params_sizeof_proc _snd_pcm_sw_params_sizeof = snd_pcm_sw_params_sizeof;
- ma_snd_pcm_sw_params_current_proc _snd_pcm_sw_params_current = snd_pcm_sw_params_current;
- ma_snd_pcm_sw_params_get_boundary_proc _snd_pcm_sw_params_get_boundary = snd_pcm_sw_params_get_boundary;
- ma_snd_pcm_sw_params_set_avail_min_proc _snd_pcm_sw_params_set_avail_min = snd_pcm_sw_params_set_avail_min;
- ma_snd_pcm_sw_params_set_start_threshold_proc _snd_pcm_sw_params_set_start_threshold = snd_pcm_sw_params_set_start_threshold;
- ma_snd_pcm_sw_params_set_stop_threshold_proc _snd_pcm_sw_params_set_stop_threshold = snd_pcm_sw_params_set_stop_threshold;
- ma_snd_pcm_sw_params_proc _snd_pcm_sw_params = snd_pcm_sw_params;
- ma_snd_pcm_format_mask_sizeof_proc _snd_pcm_format_mask_sizeof = snd_pcm_format_mask_sizeof;
- ma_snd_pcm_format_mask_test_proc _snd_pcm_format_mask_test = snd_pcm_format_mask_test;
- ma_snd_pcm_get_chmap_proc _snd_pcm_get_chmap = snd_pcm_get_chmap;
- ma_snd_pcm_state_proc _snd_pcm_state = snd_pcm_state;
- ma_snd_pcm_prepare_proc _snd_pcm_prepare = snd_pcm_prepare;
- ma_snd_pcm_start_proc _snd_pcm_start = snd_pcm_start;
- ma_snd_pcm_drop_proc _snd_pcm_drop = snd_pcm_drop;
- ma_snd_pcm_drain_proc _snd_pcm_drain = snd_pcm_drain;
- ma_snd_device_name_hint_proc _snd_device_name_hint = snd_device_name_hint;
- ma_snd_device_name_get_hint_proc _snd_device_name_get_hint = snd_device_name_get_hint;
- ma_snd_card_get_index_proc _snd_card_get_index = snd_card_get_index;
- ma_snd_device_name_free_hint_proc _snd_device_name_free_hint = snd_device_name_free_hint;
- ma_snd_pcm_mmap_begin_proc _snd_pcm_mmap_begin = snd_pcm_mmap_begin;
- ma_snd_pcm_mmap_commit_proc _snd_pcm_mmap_commit = snd_pcm_mmap_commit;
- ma_snd_pcm_recover_proc _snd_pcm_recover = snd_pcm_recover;
- ma_snd_pcm_readi_proc _snd_pcm_readi = snd_pcm_readi;
- ma_snd_pcm_writei_proc _snd_pcm_writei = snd_pcm_writei;
- ma_snd_pcm_avail_proc _snd_pcm_avail = snd_pcm_avail;
- ma_snd_pcm_avail_update_proc _snd_pcm_avail_update = snd_pcm_avail_update;
- ma_snd_pcm_wait_proc _snd_pcm_wait = snd_pcm_wait;
- ma_snd_pcm_info_proc _snd_pcm_info = snd_pcm_info;
- ma_snd_pcm_info_sizeof_proc _snd_pcm_info_sizeof = snd_pcm_info_sizeof;
- ma_snd_pcm_info_get_name_proc _snd_pcm_info_get_name = snd_pcm_info_get_name;
- ma_snd_config_update_free_global_proc _snd_config_update_free_global = snd_config_update_free_global;
-
- pContext->alsa.snd_pcm_open = (ma_proc)_snd_pcm_open;
- pContext->alsa.snd_pcm_close = (ma_proc)_snd_pcm_close;
- pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)_snd_pcm_hw_params_sizeof;
- pContext->alsa.snd_pcm_hw_params_any = (ma_proc)_snd_pcm_hw_params_any;
- pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)_snd_pcm_hw_params_set_format;
- pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)_snd_pcm_hw_params_set_format_first;
- pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)_snd_pcm_hw_params_get_format_mask;
- pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)_snd_pcm_hw_params_set_channels_near;
- pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)_snd_pcm_hw_params_set_rate_resample;
- pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)_snd_pcm_hw_params_set_rate_near;
- pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)_snd_pcm_hw_params_set_buffer_size_near;
- pContext->alsa.snd_pcm_hw_params_set_periods_near = (ma_proc)_snd_pcm_hw_params_set_periods_near;
- pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)_snd_pcm_hw_params_set_access;
- pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)_snd_pcm_hw_params_get_format;
- pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)_snd_pcm_hw_params_get_channels;
- pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)_snd_pcm_hw_params_get_channels_min;
- pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)_snd_pcm_hw_params_get_channels_max;
- pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)_snd_pcm_hw_params_get_rate;
- pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)_snd_pcm_hw_params_get_buffer_size;
- pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)_snd_pcm_hw_params_get_periods;
- pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)_snd_pcm_hw_params_get_access;
- pContext->alsa.snd_pcm_hw_params = (ma_proc)_snd_pcm_hw_params;
- pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)_snd_pcm_sw_params_sizeof;
- pContext->alsa.snd_pcm_sw_params_current = (ma_proc)_snd_pcm_sw_params_current;
- pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)_snd_pcm_sw_params_get_boundary;
- pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)_snd_pcm_sw_params_set_avail_min;
- pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)_snd_pcm_sw_params_set_start_threshold;
- pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)_snd_pcm_sw_params_set_stop_threshold;
- pContext->alsa.snd_pcm_sw_params = (ma_proc)_snd_pcm_sw_params;
- pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)_snd_pcm_format_mask_sizeof;
- pContext->alsa.snd_pcm_format_mask_test = (ma_proc)_snd_pcm_format_mask_test;
- pContext->alsa.snd_pcm_get_chmap = (ma_proc)_snd_pcm_get_chmap;
- pContext->alsa.snd_pcm_state = (ma_proc)_snd_pcm_state;
- pContext->alsa.snd_pcm_prepare = (ma_proc)_snd_pcm_prepare;
- pContext->alsa.snd_pcm_start = (ma_proc)_snd_pcm_start;
- pContext->alsa.snd_pcm_drop = (ma_proc)_snd_pcm_drop;
- pContext->alsa.snd_pcm_drain = (ma_proc)_snd_pcm_drain;
- pContext->alsa.snd_device_name_hint = (ma_proc)_snd_device_name_hint;
- pContext->alsa.snd_device_name_get_hint = (ma_proc)_snd_device_name_get_hint;
- pContext->alsa.snd_card_get_index = (ma_proc)_snd_card_get_index;
- pContext->alsa.snd_device_name_free_hint = (ma_proc)_snd_device_name_free_hint;
- pContext->alsa.snd_pcm_mmap_begin = (ma_proc)_snd_pcm_mmap_begin;
- pContext->alsa.snd_pcm_mmap_commit = (ma_proc)_snd_pcm_mmap_commit;
- pContext->alsa.snd_pcm_recover = (ma_proc)_snd_pcm_recover;
- pContext->alsa.snd_pcm_readi = (ma_proc)_snd_pcm_readi;
- pContext->alsa.snd_pcm_writei = (ma_proc)_snd_pcm_writei;
- pContext->alsa.snd_pcm_avail = (ma_proc)_snd_pcm_avail;
- pContext->alsa.snd_pcm_avail_update = (ma_proc)_snd_pcm_avail_update;
- pContext->alsa.snd_pcm_wait = (ma_proc)_snd_pcm_wait;
- pContext->alsa.snd_pcm_info = (ma_proc)_snd_pcm_info;
- pContext->alsa.snd_pcm_info_sizeof = (ma_proc)_snd_pcm_info_sizeof;
- pContext->alsa.snd_pcm_info_get_name = (ma_proc)_snd_pcm_info_get_name;
- pContext->alsa.snd_config_update_free_global = (ma_proc)_snd_config_update_free_global;
-#endif
-
- pContext->alsa.useVerboseDeviceEnumeration = pConfig->alsa.useVerboseDeviceEnumeration;
-
- if (ma_mutex_init(pContext, &pContext->alsa.internalDeviceEnumLock) != MA_SUCCESS) {
- ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[ALSA] WARNING: Failed to initialize mutex for internal device enumeration.", MA_ERROR);
+ /* If the format is not supported by miniaudio we need to skip this one entirely. */
+ formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &thisSampleFormat);
+ if (formatResult != MA_SUCCESS || thisSampleFormat == ma_format_unknown) {
+ continue; /* The format is not supported by miniaudio. Skip. */
+ }
+
+ ma_format_from_AudioStreamBasicDescription(&bestDeviceFormatSoFar, &bestSampleFormatSoFar);
+
+ /* Getting here means the format is supported by miniaudio which makes this format a candidate. */
+ if (thisDeviceFormat.mSampleRate != desiredSampleRate) {
+ /*
+ The sample rate does not match, but this format could still be usable, although it's a very low priority. If the best format
+ so far has an equal sample rate we can just ignore this one.
+ */
+ if (bestDeviceFormatSoFar.mSampleRate == desiredSampleRate) {
+ continue; /* The best sample rate so far has the same sample rate as what we requested which means it's still the best so far. Skip this format. */
+ } else {
+ /* In this case, neither the best format so far nor this one have the same sample rate. Check the channel count next. */
+ if (thisDeviceFormat.mChannelsPerFrame != desiredChannelCount) {
+ /* This format has a different sample rate _and_ a different channel count. */
+ if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) {
+ continue; /* No change to the best format. */
+ } else {
+ /*
+ Both this format and the best so far have different sample rates and different channel counts. Whichever has the
+ best format is the new best.
+ */
+ if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ continue; /* No change to the best format. */
+ }
+ }
+ } else {
+ /* This format has a different sample rate but the desired channel count. */
+ if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) {
+ /* Both this format and the best so far have the desired channel count. Whichever has the best format is the new best. */
+ if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ continue; /* No change to the best format for now. */
+ }
+ } else {
+ /* This format has the desired channel count, but the best so far does not. We have a new best. */
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ }
+ }
+ }
+ } else {
+ /*
+ The sample rates match which makes this format a very high priority contender. If the best format so far has a different
+ sample rate it needs to be replaced with this one.
+ */
+ if (bestDeviceFormatSoFar.mSampleRate != desiredSampleRate) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ /* In this case both this format and the best format so far have the same sample rate. Check the channel count next. */
+ if (thisDeviceFormat.mChannelsPerFrame == desiredChannelCount) {
+ /*
+ In this case this format has the same channel count as what the client is requesting. If the best format so far has
+ a different count, this one becomes the new best.
+ */
+ if (bestDeviceFormatSoFar.mChannelsPerFrame != desiredChannelCount) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ /* In this case both this format and the best so far have the ideal sample rate and channel count. Check the format. */
+ if (thisSampleFormat == desiredFormat) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ break; /* Found the exact match. */
+ } else {
+ /* The formats are different. The new best format is the one with the highest priority format according to miniaudio. */
+ if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ continue; /* No change to the best format for now. */
+ }
+ }
+ }
+ } else {
+ /*
+ In this case the channel count is different to what the client has requested. If the best so far has the same channel
+ count as the requested count then it remains the best.
+ */
+ if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) {
+ continue;
+ } else {
+ /*
+ This is the case where both have the same sample rate (good) but different channel counts. Right now both have about
+ the same priority, but we need to compare the format now.
+ */
+ if (thisSampleFormat == bestSampleFormatSoFar) {
+ if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
+ bestDeviceFormatSoFar = thisDeviceFormat;
+ continue;
+ } else {
+ continue; /* No change to the best format for now. */
+ }
+ }
+ }
+ }
+ }
+ }
}
+
+ *pFormat = bestDeviceFormatSoFar;
- pContext->onUninit = ma_context_uninit__alsa;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__alsa;
- pContext->onEnumDevices = ma_context_enumerate_devices__alsa;
- pContext->onGetDeviceInfo = ma_context_get_device_info__alsa;
- pContext->onDeviceInit = ma_device_init__alsa;
- pContext->onDeviceUninit = ma_device_uninit__alsa;
- pContext->onDeviceStart = NULL; /*ma_device_start__alsa;*/
- pContext->onDeviceStop = ma_device_stop__alsa;
- pContext->onDeviceWrite = ma_device_write__alsa;
- pContext->onDeviceRead = ma_device_read__alsa;
-
+ ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks);
return MA_SUCCESS;
}
-#endif /* ALSA */
-
-
-
-/******************************************************************************
-
-PulseAudio Backend
-
-******************************************************************************/
-#ifdef MA_HAS_PULSEAUDIO
-/*
-It is assumed pulseaudio.h is available when compile-time linking is being used. We use this for type safety when using
-compile time linking (we don't have this luxury when using runtime linking without headers).
-
-When using compile time linking, each of our ma_* equivalents should use the sames types as defined by the header. The
-reason for this is that it allow us to take advantage of proper type safety.
-*/
-#ifdef MA_NO_RUNTIME_LINKING
-#include <pulse/pulseaudio.h>
-
-#define MA_PA_OK PA_OK
-#define MA_PA_ERR_ACCESS PA_ERR_ACCESS
-#define MA_PA_ERR_INVALID PA_ERR_INVALID
-#define MA_PA_ERR_NOENTITY PA_ERR_NOENTITY
-
-#define MA_PA_CHANNELS_MAX PA_CHANNELS_MAX
-#define MA_PA_RATE_MAX PA_RATE_MAX
-
-typedef pa_context_flags_t ma_pa_context_flags_t;
-#define MA_PA_CONTEXT_NOFLAGS PA_CONTEXT_NOFLAGS
-#define MA_PA_CONTEXT_NOAUTOSPAWN PA_CONTEXT_NOAUTOSPAWN
-#define MA_PA_CONTEXT_NOFAIL PA_CONTEXT_NOFAIL
-
-typedef pa_stream_flags_t ma_pa_stream_flags_t;
-#define MA_PA_STREAM_NOFLAGS PA_STREAM_NOFLAGS
-#define MA_PA_STREAM_START_CORKED PA_STREAM_START_CORKED
-#define MA_PA_STREAM_INTERPOLATE_TIMING PA_STREAM_INTERPOLATE_TIMING
-#define MA_PA_STREAM_NOT_MONOTONIC PA_STREAM_NOT_MONOTONIC
-#define MA_PA_STREAM_AUTO_TIMING_UPDATE PA_STREAM_AUTO_TIMING_UPDATE
-#define MA_PA_STREAM_NO_REMAP_CHANNELS PA_STREAM_NO_REMAP_CHANNELS
-#define MA_PA_STREAM_NO_REMIX_CHANNELS PA_STREAM_NO_REMIX_CHANNELS
-#define MA_PA_STREAM_FIX_FORMAT PA_STREAM_FIX_FORMAT
-#define MA_PA_STREAM_FIX_RATE PA_STREAM_FIX_RATE
-#define MA_PA_STREAM_FIX_CHANNELS PA_STREAM_FIX_CHANNELS
-#define MA_PA_STREAM_DONT_MOVE PA_STREAM_DONT_MOVE
-#define MA_PA_STREAM_VARIABLE_RATE PA_STREAM_VARIABLE_RATE
-#define MA_PA_STREAM_PEAK_DETECT PA_STREAM_PEAK_DETECT
-#define MA_PA_STREAM_START_MUTED PA_STREAM_START_MUTED
-#define MA_PA_STREAM_ADJUST_LATENCY PA_STREAM_ADJUST_LATENCY
-#define MA_PA_STREAM_EARLY_REQUESTS PA_STREAM_EARLY_REQUESTS
-#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND
-#define MA_PA_STREAM_START_UNMUTED PA_STREAM_START_UNMUTED
-#define MA_PA_STREAM_FAIL_ON_SUSPEND PA_STREAM_FAIL_ON_SUSPEND
-#define MA_PA_STREAM_RELATIVE_VOLUME PA_STREAM_RELATIVE_VOLUME
-#define MA_PA_STREAM_PASSTHROUGH PA_STREAM_PASSTHROUGH
-
-typedef pa_sink_flags_t ma_pa_sink_flags_t;
-#define MA_PA_SINK_NOFLAGS PA_SINK_NOFLAGS
-#define MA_PA_SINK_HW_VOLUME_CTRL PA_SINK_HW_VOLUME_CTRL
-#define MA_PA_SINK_LATENCY PA_SINK_LATENCY
-#define MA_PA_SINK_HARDWARE PA_SINK_HARDWARE
-#define MA_PA_SINK_NETWORK PA_SINK_NETWORK
-#define MA_PA_SINK_HW_MUTE_CTRL PA_SINK_HW_MUTE_CTRL
-#define MA_PA_SINK_DECIBEL_VOLUME PA_SINK_DECIBEL_VOLUME
-#define MA_PA_SINK_FLAT_VOLUME PA_SINK_FLAT_VOLUME
-#define MA_PA_SINK_DYNAMIC_LATENCY PA_SINK_DYNAMIC_LATENCY
-#define MA_PA_SINK_SET_FORMATS PA_SINK_SET_FORMATS
-
-typedef pa_source_flags_t ma_pa_source_flags_t;
-#define MA_PA_SOURCE_NOFLAGS PA_SOURCE_NOFLAGS
-#define MA_PA_SOURCE_HW_VOLUME_CTRL PA_SOURCE_HW_VOLUME_CTRL
-#define MA_PA_SOURCE_LATENCY PA_SOURCE_LATENCY
-#define MA_PA_SOURCE_HARDWARE PA_SOURCE_HARDWARE
-#define MA_PA_SOURCE_NETWORK PA_SOURCE_NETWORK
-#define MA_PA_SOURCE_HW_MUTE_CTRL PA_SOURCE_HW_MUTE_CTRL
-#define MA_PA_SOURCE_DECIBEL_VOLUME PA_SOURCE_DECIBEL_VOLUME
-#define MA_PA_SOURCE_DYNAMIC_LATENCY PA_SOURCE_DYNAMIC_LATENCY
-#define MA_PA_SOURCE_FLAT_VOLUME PA_SOURCE_FLAT_VOLUME
-
-typedef pa_context_state_t ma_pa_context_state_t;
-#define MA_PA_CONTEXT_UNCONNECTED PA_CONTEXT_UNCONNECTED
-#define MA_PA_CONTEXT_CONNECTING PA_CONTEXT_CONNECTING
-#define MA_PA_CONTEXT_AUTHORIZING PA_CONTEXT_AUTHORIZING
-#define MA_PA_CONTEXT_SETTING_NAME PA_CONTEXT_SETTING_NAME
-#define MA_PA_CONTEXT_READY PA_CONTEXT_READY
-#define MA_PA_CONTEXT_FAILED PA_CONTEXT_FAILED
-#define MA_PA_CONTEXT_TERMINATED PA_CONTEXT_TERMINATED
-
-typedef pa_stream_state_t ma_pa_stream_state_t;
-#define MA_PA_STREAM_UNCONNECTED PA_STREAM_UNCONNECTED
-#define MA_PA_STREAM_CREATING PA_STREAM_CREATING
-#define MA_PA_STREAM_READY PA_STREAM_READY
-#define MA_PA_STREAM_FAILED PA_STREAM_FAILED
-#define MA_PA_STREAM_TERMINATED PA_STREAM_TERMINATED
-
-typedef pa_operation_state_t ma_pa_operation_state_t;
-#define MA_PA_OPERATION_RUNNING PA_OPERATION_RUNNING
-#define MA_PA_OPERATION_DONE PA_OPERATION_DONE
-#define MA_PA_OPERATION_CANCELLED PA_OPERATION_CANCELLED
-
-typedef pa_sink_state_t ma_pa_sink_state_t;
-#define MA_PA_SINK_INVALID_STATE PA_SINK_INVALID_STATE
-#define MA_PA_SINK_RUNNING PA_SINK_RUNNING
-#define MA_PA_SINK_IDLE PA_SINK_IDLE
-#define MA_PA_SINK_SUSPENDED PA_SINK_SUSPENDED
-
-typedef pa_source_state_t ma_pa_source_state_t;
-#define MA_PA_SOURCE_INVALID_STATE PA_SOURCE_INVALID_STATE
-#define MA_PA_SOURCE_RUNNING PA_SOURCE_RUNNING
-#define MA_PA_SOURCE_IDLE PA_SOURCE_IDLE
-#define MA_PA_SOURCE_SUSPENDED PA_SOURCE_SUSPENDED
-
-typedef pa_seek_mode_t ma_pa_seek_mode_t;
-#define MA_PA_SEEK_RELATIVE PA_SEEK_RELATIVE
-#define MA_PA_SEEK_ABSOLUTE PA_SEEK_ABSOLUTE
-#define MA_PA_SEEK_RELATIVE_ON_READ PA_SEEK_RELATIVE_ON_READ
-#define MA_PA_SEEK_RELATIVE_END PA_SEEK_RELATIVE_END
-
-typedef pa_channel_position_t ma_pa_channel_position_t;
-#define MA_PA_CHANNEL_POSITION_INVALID PA_CHANNEL_POSITION_INVALID
-#define MA_PA_CHANNEL_POSITION_MONO PA_CHANNEL_POSITION_MONO
-#define MA_PA_CHANNEL_POSITION_FRONT_LEFT PA_CHANNEL_POSITION_FRONT_LEFT
-#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT PA_CHANNEL_POSITION_FRONT_RIGHT
-#define MA_PA_CHANNEL_POSITION_FRONT_CENTER PA_CHANNEL_POSITION_FRONT_CENTER
-#define MA_PA_CHANNEL_POSITION_REAR_CENTER PA_CHANNEL_POSITION_REAR_CENTER
-#define MA_PA_CHANNEL_POSITION_REAR_LEFT PA_CHANNEL_POSITION_REAR_LEFT
-#define MA_PA_CHANNEL_POSITION_REAR_RIGHT PA_CHANNEL_POSITION_REAR_RIGHT
-#define MA_PA_CHANNEL_POSITION_LFE PA_CHANNEL_POSITION_LFE
-#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER
-#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER
-#define MA_PA_CHANNEL_POSITION_SIDE_LEFT PA_CHANNEL_POSITION_SIDE_LEFT
-#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT PA_CHANNEL_POSITION_SIDE_RIGHT
-#define MA_PA_CHANNEL_POSITION_AUX0 PA_CHANNEL_POSITION_AUX0
-#define MA_PA_CHANNEL_POSITION_AUX1 PA_CHANNEL_POSITION_AUX1
-#define MA_PA_CHANNEL_POSITION_AUX2 PA_CHANNEL_POSITION_AUX2
-#define MA_PA_CHANNEL_POSITION_AUX3 PA_CHANNEL_POSITION_AUX3
-#define MA_PA_CHANNEL_POSITION_AUX4 PA_CHANNEL_POSITION_AUX4
-#define MA_PA_CHANNEL_POSITION_AUX5 PA_CHANNEL_POSITION_AUX5
-#define MA_PA_CHANNEL_POSITION_AUX6 PA_CHANNEL_POSITION_AUX6
-#define MA_PA_CHANNEL_POSITION_AUX7 PA_CHANNEL_POSITION_AUX7
-#define MA_PA_CHANNEL_POSITION_AUX8 PA_CHANNEL_POSITION_AUX8
-#define MA_PA_CHANNEL_POSITION_AUX9 PA_CHANNEL_POSITION_AUX9
-#define MA_PA_CHANNEL_POSITION_AUX10 PA_CHANNEL_POSITION_AUX10
-#define MA_PA_CHANNEL_POSITION_AUX11 PA_CHANNEL_POSITION_AUX11
-#define MA_PA_CHANNEL_POSITION_AUX12 PA_CHANNEL_POSITION_AUX12
-#define MA_PA_CHANNEL_POSITION_AUX13 PA_CHANNEL_POSITION_AUX13
-#define MA_PA_CHANNEL_POSITION_AUX14 PA_CHANNEL_POSITION_AUX14
-#define MA_PA_CHANNEL_POSITION_AUX15 PA_CHANNEL_POSITION_AUX15
-#define MA_PA_CHANNEL_POSITION_AUX16 PA_CHANNEL_POSITION_AUX16
-#define MA_PA_CHANNEL_POSITION_AUX17 PA_CHANNEL_POSITION_AUX17
-#define MA_PA_CHANNEL_POSITION_AUX18 PA_CHANNEL_POSITION_AUX18
-#define MA_PA_CHANNEL_POSITION_AUX19 PA_CHANNEL_POSITION_AUX19
-#define MA_PA_CHANNEL_POSITION_AUX20 PA_CHANNEL_POSITION_AUX20
-#define MA_PA_CHANNEL_POSITION_AUX21 PA_CHANNEL_POSITION_AUX21
-#define MA_PA_CHANNEL_POSITION_AUX22 PA_CHANNEL_POSITION_AUX22
-#define MA_PA_CHANNEL_POSITION_AUX23 PA_CHANNEL_POSITION_AUX23
-#define MA_PA_CHANNEL_POSITION_AUX24 PA_CHANNEL_POSITION_AUX24
-#define MA_PA_CHANNEL_POSITION_AUX25 PA_CHANNEL_POSITION_AUX25
-#define MA_PA_CHANNEL_POSITION_AUX26 PA_CHANNEL_POSITION_AUX26
-#define MA_PA_CHANNEL_POSITION_AUX27 PA_CHANNEL_POSITION_AUX27
-#define MA_PA_CHANNEL_POSITION_AUX28 PA_CHANNEL_POSITION_AUX28
-#define MA_PA_CHANNEL_POSITION_AUX29 PA_CHANNEL_POSITION_AUX29
-#define MA_PA_CHANNEL_POSITION_AUX30 PA_CHANNEL_POSITION_AUX30
-#define MA_PA_CHANNEL_POSITION_AUX31 PA_CHANNEL_POSITION_AUX31
-#define MA_PA_CHANNEL_POSITION_TOP_CENTER PA_CHANNEL_POSITION_TOP_CENTER
-#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT PA_CHANNEL_POSITION_TOP_FRONT_LEFT
-#define MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT PA_CHANNEL_POSITION_TOP_FRONT_RIGHT
-#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER PA_CHANNEL_POSITION_TOP_FRONT_CENTER
-#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT PA_CHANNEL_POSITION_TOP_REAR_LEFT
-#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT PA_CHANNEL_POSITION_TOP_REAR_RIGHT
-#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER PA_CHANNEL_POSITION_TOP_REAR_CENTER
-#define MA_PA_CHANNEL_POSITION_LEFT PA_CHANNEL_POSITION_LEFT
-#define MA_PA_CHANNEL_POSITION_RIGHT PA_CHANNEL_POSITION_RIGHT
-#define MA_PA_CHANNEL_POSITION_CENTER PA_CHANNEL_POSITION_CENTER
-#define MA_PA_CHANNEL_POSITION_SUBWOOFER PA_CHANNEL_POSITION_SUBWOOFER
-typedef pa_channel_map_def_t ma_pa_channel_map_def_t;
-#define MA_PA_CHANNEL_MAP_AIFF PA_CHANNEL_MAP_AIFF
-#define MA_PA_CHANNEL_MAP_ALSA PA_CHANNEL_MAP_ALSA
-#define MA_PA_CHANNEL_MAP_AUX PA_CHANNEL_MAP_AUX
-#define MA_PA_CHANNEL_MAP_WAVEEX PA_CHANNEL_MAP_WAVEEX
-#define MA_PA_CHANNEL_MAP_OSS PA_CHANNEL_MAP_OSS
-#define MA_PA_CHANNEL_MAP_DEFAULT PA_CHANNEL_MAP_DEFAULT
+static ma_result ma_get_AudioUnit_channel_map(ma_context* pContext, AudioUnit audioUnit, ma_device_type deviceType, ma_channel channelMap[MA_MAX_CHANNELS])
+{
+ AudioUnitScope deviceScope;
+ AudioUnitElement deviceBus;
+ UInt32 channelLayoutSize;
+ OSStatus status;
+ AudioChannelLayout* pChannelLayout;
+ ma_result result;
-typedef pa_sample_format_t ma_pa_sample_format_t;
-#define MA_PA_SAMPLE_INVALID PA_SAMPLE_INVALID
-#define MA_PA_SAMPLE_U8 PA_SAMPLE_U8
-#define MA_PA_SAMPLE_ALAW PA_SAMPLE_ALAW
-#define MA_PA_SAMPLE_ULAW PA_SAMPLE_ULAW
-#define MA_PA_SAMPLE_S16LE PA_SAMPLE_S16LE
-#define MA_PA_SAMPLE_S16BE PA_SAMPLE_S16BE
-#define MA_PA_SAMPLE_FLOAT32LE PA_SAMPLE_FLOAT32LE
-#define MA_PA_SAMPLE_FLOAT32BE PA_SAMPLE_FLOAT32BE
-#define MA_PA_SAMPLE_S32LE PA_SAMPLE_S32LE
-#define MA_PA_SAMPLE_S32BE PA_SAMPLE_S32BE
-#define MA_PA_SAMPLE_S24LE PA_SAMPLE_S24LE
-#define MA_PA_SAMPLE_S24BE PA_SAMPLE_S24BE
-#define MA_PA_SAMPLE_S24_32LE PA_SAMPLE_S24_32LE
-#define MA_PA_SAMPLE_S24_32BE PA_SAMPLE_S24_32BE
+ MA_ASSERT(pContext != NULL);
+
+ if (deviceType == ma_device_type_playback) {
+ deviceScope = kAudioUnitScope_Output;
+ deviceBus = MA_COREAUDIO_OUTPUT_BUS;
+ } else {
+ deviceScope = kAudioUnitScope_Input;
+ deviceBus = MA_COREAUDIO_INPUT_BUS;
+ }
+
+ status = ((ma_AudioUnitGetPropertyInfo_proc)pContext->coreaudio.AudioUnitGetPropertyInfo)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, &channelLayoutSize, NULL);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+ pChannelLayout = (AudioChannelLayout*)ma__malloc_from_callbacks(channelLayoutSize, &pContext->allocationCallbacks);
+ if (pChannelLayout == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
+
+ status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, pChannelLayout, &channelLayoutSize);
+ if (status != noErr) {
+ ma__free_from_callbacks(pChannelLayout, &pContext->allocationCallbacks);
+ return ma_result_from_OSStatus(status);
+ }
+
+ result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, channelMap);
+ if (result != MA_SUCCESS) {
+ ma__free_from_callbacks(pChannelLayout, &pContext->allocationCallbacks);
+ return result;
+ }
-typedef pa_mainloop ma_pa_mainloop;
-typedef pa_mainloop_api ma_pa_mainloop_api;
-typedef pa_context ma_pa_context;
-typedef pa_operation ma_pa_operation;
-typedef pa_stream ma_pa_stream;
-typedef pa_spawn_api ma_pa_spawn_api;
-typedef pa_buffer_attr ma_pa_buffer_attr;
-typedef pa_channel_map ma_pa_channel_map;
-typedef pa_cvolume ma_pa_cvolume;
-typedef pa_sample_spec ma_pa_sample_spec;
-typedef pa_sink_info ma_pa_sink_info;
-typedef pa_source_info ma_pa_source_info;
+ ma__free_from_callbacks(pChannelLayout, &pContext->allocationCallbacks);
+ return MA_SUCCESS;
+}
+#endif /* MA_APPLE_DESKTOP */
-typedef pa_context_notify_cb_t ma_pa_context_notify_cb_t;
-typedef pa_sink_info_cb_t ma_pa_sink_info_cb_t;
-typedef pa_source_info_cb_t ma_pa_source_info_cb_t;
-typedef pa_stream_success_cb_t ma_pa_stream_success_cb_t;
-typedef pa_stream_request_cb_t ma_pa_stream_request_cb_t;
-typedef pa_free_cb_t ma_pa_free_cb_t;
-#else
-#define MA_PA_OK 0
-#define MA_PA_ERR_ACCESS 1
-#define MA_PA_ERR_INVALID 2
-#define MA_PA_ERR_NOENTITY 5
+static ma_bool32 ma_context_is_device_id_equal__coreaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)    /* Returns MA_TRUE when the two Core Audio device IDs (NUL-terminated UID strings) are equal. */
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pID0 != NULL);
+    MA_ASSERT(pID1 != NULL);
+    (void)pContext;    /* Context is not needed for a string compare; parameter exists for cross-backend signature consistency. */
-#define MA_PA_CHANNELS_MAX 32
-#define MA_PA_RATE_MAX 384000
+    return strcmp(pID0->coreaudio, pID1->coreaudio) == 0;    /* IDs are plain C strings, so strcmp suffices. */
+}
-typedef int ma_pa_context_flags_t;
-#define MA_PA_CONTEXT_NOFLAGS 0x00000000
-#define MA_PA_CONTEXT_NOAUTOSPAWN 0x00000001
-#define MA_PA_CONTEXT_NOFAIL 0x00000002
+static ma_result ma_context_enumerate_devices__coreaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)    /* Invokes callback once per device/type; desktop iterates all AudioObjectIDs, other platforms report only the defaults. */
+{
+#if defined(MA_APPLE_DESKTOP)
+    UInt32 deviceCount;
+    AudioObjectID* pDeviceObjectIDs;
+    ma_result result;
+    UInt32 iDevice;
-typedef int ma_pa_stream_flags_t;
-#define MA_PA_STREAM_NOFLAGS 0x00000000
-#define MA_PA_STREAM_START_CORKED 0x00000001
-#define MA_PA_STREAM_INTERPOLATE_TIMING 0x00000002
-#define MA_PA_STREAM_NOT_MONOTONIC 0x00000004
-#define MA_PA_STREAM_AUTO_TIMING_UPDATE 0x00000008
-#define MA_PA_STREAM_NO_REMAP_CHANNELS 0x00000010
-#define MA_PA_STREAM_NO_REMIX_CHANNELS 0x00000020
-#define MA_PA_STREAM_FIX_FORMAT 0x00000040
-#define MA_PA_STREAM_FIX_RATE 0x00000080
-#define MA_PA_STREAM_FIX_CHANNELS 0x00000100
-#define MA_PA_STREAM_DONT_MOVE 0x00000200
-#define MA_PA_STREAM_VARIABLE_RATE 0x00000400
-#define MA_PA_STREAM_PEAK_DETECT 0x00000800
-#define MA_PA_STREAM_START_MUTED 0x00001000
-#define MA_PA_STREAM_ADJUST_LATENCY 0x00002000
-#define MA_PA_STREAM_EARLY_REQUESTS 0x00004000
-#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND 0x00008000
-#define MA_PA_STREAM_START_UNMUTED 0x00010000
-#define MA_PA_STREAM_FAIL_ON_SUSPEND 0x00020000
-#define MA_PA_STREAM_RELATIVE_VOLUME 0x00040000
-#define MA_PA_STREAM_PASSTHROUGH 0x00080000
+    result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs);    /* Allocates pDeviceObjectIDs; freed at the bottom of this branch. */
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
+        AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice];
+        ma_device_info info;
-typedef int ma_pa_sink_flags_t;
-#define MA_PA_SINK_NOFLAGS 0x00000000
-#define MA_PA_SINK_HW_VOLUME_CTRL 0x00000001
-#define MA_PA_SINK_LATENCY 0x00000002
-#define MA_PA_SINK_HARDWARE 0x00000004
-#define MA_PA_SINK_NETWORK 0x00000008
-#define MA_PA_SINK_HW_MUTE_CTRL 0x00000010
-#define MA_PA_SINK_DECIBEL_VOLUME 0x00000020
-#define MA_PA_SINK_FLAT_VOLUME 0x00000040
-#define MA_PA_SINK_DYNAMIC_LATENCY 0x00000080
-#define MA_PA_SINK_SET_FORMATS 0x00000100
+        MA_ZERO_OBJECT(&info);
+        if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(info.id.coreaudio), info.id.coreaudio) != MA_SUCCESS) {
+            continue;    /* Devices whose UID or name cannot be read are silently skipped rather than failing enumeration. */
+        }
+        if (ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(info.name), info.name) != MA_SUCCESS) {
+            continue;
+        }
-typedef int ma_pa_source_flags_t;
-#define MA_PA_SOURCE_NOFLAGS 0x00000000
-#define MA_PA_SOURCE_HW_VOLUME_CTRL 0x00000001
-#define MA_PA_SOURCE_LATENCY 0x00000002
-#define MA_PA_SOURCE_HARDWARE 0x00000004
-#define MA_PA_SOURCE_NETWORK 0x00000008
-#define MA_PA_SOURCE_HW_MUTE_CTRL 0x00000010
-#define MA_PA_SOURCE_DECIBEL_VOLUME 0x00000020
-#define MA_PA_SOURCE_DYNAMIC_LATENCY 0x00000040
-#define MA_PA_SOURCE_FLAT_VOLUME 0x00000080
+        if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) {    /* A single device may be reported as both playback and capture. */
+            if (!callback(pContext, ma_device_type_playback, &info, pUserData)) {
+                break;    /* Callback requested termination; pDeviceObjectIDs is still freed below. */
+            }
+        }
+        if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) {
+            if (!callback(pContext, ma_device_type_capture, &info, pUserData)) {
+                break;
+            }
+        }
+    }
+
+    ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+#else
+    /* Only supporting default devices on non-Desktop platforms. */
+    ma_device_info info;
+
+    MA_ZERO_OBJECT(&info);
+    ma_strncpy_s(info.name, sizeof(info.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+    if (!callback(pContext, ma_device_type_playback, &info, pUserData)) {
+        return MA_SUCCESS;    /* Early termination by the callback is still a success. */
+    }
+
+    MA_ZERO_OBJECT(&info);
+    ma_strncpy_s(info.name, sizeof(info.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+    if (!callback(pContext, ma_device_type_capture, &info, pUserData)) {
+        return MA_SUCCESS;
+    }
+#endif
+
+    return MA_SUCCESS;
+}
-typedef int ma_pa_context_state_t;
-#define MA_PA_CONTEXT_UNCONNECTED 0
-#define MA_PA_CONTEXT_CONNECTING 1
-#define MA_PA_CONTEXT_AUTHORIZING 2
-#define MA_PA_CONTEXT_SETTING_NAME 3
-#define MA_PA_CONTEXT_READY 4
-#define MA_PA_CONTEXT_FAILED 5
-#define MA_PA_CONTEXT_TERMINATED 6
+static ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)    /* Fills pDeviceInfo (id, name, formats, channel count, sample-rate range) for the given device. */
+{
+    ma_result result;
-typedef int ma_pa_stream_state_t;
-#define MA_PA_STREAM_UNCONNECTED 0
-#define MA_PA_STREAM_CREATING 1
-#define MA_PA_STREAM_READY 2
-#define MA_PA_STREAM_FAILED 3
-#define MA_PA_STREAM_TERMINATED 4
+    MA_ASSERT(pContext != NULL);
-typedef int ma_pa_operation_state_t;
-#define MA_PA_OPERATION_RUNNING 0
-#define MA_PA_OPERATION_DONE 1
-#define MA_PA_OPERATION_CANCELLED 2
+    /* No exclusive mode with the Core Audio backend for now. */
+    if (shareMode == ma_share_mode_exclusive) {
+        return MA_SHARE_MODE_NOT_SUPPORTED;
+    }
+
+#if defined(MA_APPLE_DESKTOP)
+    /* Desktop */
+    {
+        AudioObjectID deviceObjectID;
+        UInt32 streamDescriptionCount;
+        AudioStreamRangedDescription* pStreamDescriptions;
+        UInt32 iStreamDescription;
+        UInt32 sampleRateRangeCount;
+        AudioValueRange* pSampleRateRanges;
-typedef int ma_pa_sink_state_t;
-#define MA_PA_SINK_INVALID_STATE -1
-#define MA_PA_SINK_RUNNING 0
-#define MA_PA_SINK_IDLE 1
-#define MA_PA_SINK_SUSPENDED 2
+        result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID);    /* Resolves a NULL pDeviceID to the default device for deviceType. NOTE(review): assumed from helper name -- confirm. */
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        result = ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(pDeviceInfo->id.coreaudio), pDeviceInfo->id.coreaudio);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        result = ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pDeviceInfo->name), pDeviceInfo->name);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        /* Formats. */
+        result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &streamDescriptionCount, &pStreamDescriptions);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        for (iStreamDescription = 0; iStreamDescription < streamDescriptionCount; ++iStreamDescription) {
+            ma_format format;
+            ma_bool32 formatExists = MA_FALSE;
+            ma_uint32 iOutputFormat;
-typedef int ma_pa_source_state_t;
-#define MA_PA_SOURCE_INVALID_STATE -1
-#define MA_PA_SOURCE_RUNNING 0
-#define MA_PA_SOURCE_IDLE 1
-#define MA_PA_SOURCE_SUSPENDED 2
+            result = ma_format_from_AudioStreamBasicDescription(&pStreamDescriptions[iStreamDescription].mFormat, &format);
+            if (result != MA_SUCCESS) {
+                continue;    /* Unmappable physical format: skip it instead of failing the whole query. */
+            }
+
+            MA_ASSERT(format != ma_format_unknown);
+
+            /* Make sure the format isn't already in the output list. */
+            for (iOutputFormat = 0; iOutputFormat < pDeviceInfo->formatCount; ++iOutputFormat) {
+                if (pDeviceInfo->formats[iOutputFormat] == format) {
+                    formatExists = MA_TRUE;
+                    break;
+                }
+            }
+
+            if (!formatExists) {
+                pDeviceInfo->formats[pDeviceInfo->formatCount++] = format;
+            }
+        }
+
+        ma_free(pStreamDescriptions, &pContext->allocationCallbacks);
+
+
+        /* Channels. */
+        result = ma_get_AudioObject_channel_count(pContext, deviceObjectID, deviceType, &pDeviceInfo->minChannels);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+        pDeviceInfo->maxChannels = pDeviceInfo->minChannels;    /* A single channel count is reported, so min == max. */
+
+
+        /* Sample rates. */
+        result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        if (sampleRateRangeCount > 0) {
+            UInt32 iSampleRate;
+            pDeviceInfo->minSampleRate = UINT32_MAX;    /* Seed min/max so the loop below computes the overall range across all reported ranges. */
+            pDeviceInfo->maxSampleRate = 0;
+            for (iSampleRate = 0; iSampleRate < sampleRateRangeCount; ++iSampleRate) {
+                if (pDeviceInfo->minSampleRate > pSampleRateRanges[iSampleRate].mMinimum) {
+                    pDeviceInfo->minSampleRate = pSampleRateRanges[iSampleRate].mMinimum;
+                }
+                if (pDeviceInfo->maxSampleRate < pSampleRateRanges[iSampleRate].mMaximum) {
+                    pDeviceInfo->maxSampleRate = pSampleRateRanges[iSampleRate].mMaximum;
+                }
+            }
+        }    /* NOTE(review): pSampleRateRanges is never freed on this path -- looks like a leak; confirm ownership of the buffer returned by ma_get_AudioObject_sample_rates(). */
+    }
+#else
+    /* Mobile */
+    {
+        AudioComponentDescription desc;
+        AudioComponent component;
+        AudioUnit audioUnit;
+        OSStatus status;
+        AudioUnitScope formatScope;
+        AudioUnitElement formatElement;
+        AudioStreamBasicDescription bestFormat;
+        UInt32 propSize;
-typedef int ma_pa_seek_mode_t;
-#define MA_PA_SEEK_RELATIVE 0
-#define MA_PA_SEEK_ABSOLUTE 1
-#define MA_PA_SEEK_RELATIVE_ON_READ 2
-#define MA_PA_SEEK_RELATIVE_END 3
+        if (deviceType == ma_device_type_playback) {
+            ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+        } else {
+            ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+        }
+
+        /*
+        Retrieving device information is more annoying on mobile than desktop. For simplicity I'm locking this down to whatever format is
+        reported on a temporary I/O unit. The problem, however, is that this doesn't return a value for the sample rate which we need to
+        retrieve from the AVAudioSession shared instance.
+        */
+        desc.componentType = kAudioUnitType_Output;
+        desc.componentSubType = kAudioUnitSubType_RemoteIO;
+        desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+        desc.componentFlags = 0;
+        desc.componentFlagsMask = 0;
+
+        component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc);
+        if (component == NULL) {
+            return MA_FAILED_TO_INIT_BACKEND;
+        }
+
+        status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)(component, &audioUnit);    /* Temporary unit; disposed before returning on every path below. */
+        if (status != noErr) {
+            return ma_result_from_OSStatus(status);
+        }
+
+        formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;    /* For an output unit the client-side format lives on the input scope, and vice versa. */
+        formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;
+
+        propSize = sizeof(bestFormat);
+        status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, &propSize);
+        if (status != noErr) {
+            ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit);
+            return ma_result_from_OSStatus(status);
+        }
+
+        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit);
+        audioUnit = NULL;
+
+
+        pDeviceInfo->minChannels = bestFormat.mChannelsPerFrame;
+        pDeviceInfo->maxChannels = bestFormat.mChannelsPerFrame;
+
+        pDeviceInfo->formatCount = 1;
+        result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pDeviceInfo->formats[0]);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        /*
+        It looks like Apple are wanting to push the whole AVAudioSession thing. Thus, we need to use that to determine device settings. To do
+        this we just get the shared instance and inspect.
+        */
+        @autoreleasepool {
+            AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
+            MA_ASSERT(pAudioSession != NULL);
-typedef int ma_pa_channel_position_t;
-#define MA_PA_CHANNEL_POSITION_INVALID -1
-#define MA_PA_CHANNEL_POSITION_MONO 0
-#define MA_PA_CHANNEL_POSITION_FRONT_LEFT 1
-#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT 2
-#define MA_PA_CHANNEL_POSITION_FRONT_CENTER 3
-#define MA_PA_CHANNEL_POSITION_REAR_CENTER 4
-#define MA_PA_CHANNEL_POSITION_REAR_LEFT 5
-#define MA_PA_CHANNEL_POSITION_REAR_RIGHT 6
-#define MA_PA_CHANNEL_POSITION_LFE 7
-#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER 8
-#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER 9
-#define MA_PA_CHANNEL_POSITION_SIDE_LEFT 10
-#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT 11
-#define MA_PA_CHANNEL_POSITION_AUX0 12
-#define MA_PA_CHANNEL_POSITION_AUX1 13
-#define MA_PA_CHANNEL_POSITION_AUX2 14
-#define MA_PA_CHANNEL_POSITION_AUX3 15
-#define MA_PA_CHANNEL_POSITION_AUX4 16
-#define MA_PA_CHANNEL_POSITION_AUX5 17
-#define MA_PA_CHANNEL_POSITION_AUX6 18
-#define MA_PA_CHANNEL_POSITION_AUX7 19
-#define MA_PA_CHANNEL_POSITION_AUX8 20
-#define MA_PA_CHANNEL_POSITION_AUX9 21
-#define MA_PA_CHANNEL_POSITION_AUX10 22
-#define MA_PA_CHANNEL_POSITION_AUX11 23
-#define MA_PA_CHANNEL_POSITION_AUX12 24
-#define MA_PA_CHANNEL_POSITION_AUX13 25
-#define MA_PA_CHANNEL_POSITION_AUX14 26
-#define MA_PA_CHANNEL_POSITION_AUX15 27
-#define MA_PA_CHANNEL_POSITION_AUX16 28
-#define MA_PA_CHANNEL_POSITION_AUX17 29
-#define MA_PA_CHANNEL_POSITION_AUX18 30
-#define MA_PA_CHANNEL_POSITION_AUX19 31
-#define MA_PA_CHANNEL_POSITION_AUX20 32
-#define MA_PA_CHANNEL_POSITION_AUX21 33
-#define MA_PA_CHANNEL_POSITION_AUX22 34
-#define MA_PA_CHANNEL_POSITION_AUX23 35
-#define MA_PA_CHANNEL_POSITION_AUX24 36
-#define MA_PA_CHANNEL_POSITION_AUX25 37
-#define MA_PA_CHANNEL_POSITION_AUX26 38
-#define MA_PA_CHANNEL_POSITION_AUX27 39
-#define MA_PA_CHANNEL_POSITION_AUX28 40
-#define MA_PA_CHANNEL_POSITION_AUX29 41
-#define MA_PA_CHANNEL_POSITION_AUX30 42
-#define MA_PA_CHANNEL_POSITION_AUX31 43
-#define MA_PA_CHANNEL_POSITION_TOP_CENTER 44
-#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT 45
-#define MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT 46
-#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER 47
-#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT 48
-#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT 49
-#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER 50
-#define MA_PA_CHANNEL_POSITION_LEFT MA_PA_CHANNEL_POSITION_FRONT_LEFT
-#define MA_PA_CHANNEL_POSITION_RIGHT MA_PA_CHANNEL_POSITION_FRONT_RIGHT
-#define MA_PA_CHANNEL_POSITION_CENTER MA_PA_CHANNEL_POSITION_FRONT_CENTER
-#define MA_PA_CHANNEL_POSITION_SUBWOOFER MA_PA_CHANNEL_POSITION_LFE
+            pDeviceInfo->minSampleRate = (ma_uint32)pAudioSession.sampleRate;    /* AVAudioSession reports a double; truncated to whole Hz here. */
+            pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate;
+        }
+    }
+#endif
+
+    (void)pDeviceInfo; /* Not actually unused -- both branches above write to it. Presumably kept defensively to silence warnings on some configurations. */
+    return MA_SUCCESS;
+}
-typedef int ma_pa_channel_map_def_t;
-#define MA_PA_CHANNEL_MAP_AIFF 0
-#define MA_PA_CHANNEL_MAP_ALSA 1
-#define MA_PA_CHANNEL_MAP_AUX 2
-#define MA_PA_CHANNEL_MAP_WAVEEX 3
-#define MA_PA_CHANNEL_MAP_OSS 4
-#define MA_PA_CHANNEL_MAP_DEFAULT MA_PA_CHANNEL_MAP_AIFF
-typedef int ma_pa_sample_format_t;
-#define MA_PA_SAMPLE_INVALID -1
-#define MA_PA_SAMPLE_U8 0
-#define MA_PA_SAMPLE_ALAW 1
-#define MA_PA_SAMPLE_ULAW 2
-#define MA_PA_SAMPLE_S16LE 3
-#define MA_PA_SAMPLE_S16BE 4
-#define MA_PA_SAMPLE_FLOAT32LE 5
-#define MA_PA_SAMPLE_FLOAT32BE 6
-#define MA_PA_SAMPLE_S32LE 7
-#define MA_PA_SAMPLE_S32BE 8
-#define MA_PA_SAMPLE_S24LE 9
-#define MA_PA_SAMPLE_S24BE 10
-#define MA_PA_SAMPLE_S24_32LE 11
-#define MA_PA_SAMPLE_S24_32BE 12
+static OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pBufferList)    /* AudioUnit playback render callback: fills pBufferList from the client (or duplex ring buffer). */
+{
+    ma_device* pDevice = (ma_device*)pUserData;
+    ma_stream_layout layout;
-typedef struct ma_pa_mainloop ma_pa_mainloop;
-typedef struct ma_pa_mainloop_api ma_pa_mainloop_api;
-typedef struct ma_pa_context ma_pa_context;
-typedef struct ma_pa_operation ma_pa_operation;
-typedef struct ma_pa_stream ma_pa_stream;
-typedef struct ma_pa_spawn_api ma_pa_spawn_api;
+    MA_ASSERT(pDevice != NULL);
-typedef struct
-{
-    ma_uint32 maxlength;
-    ma_uint32 tlength;
-    ma_uint32 prebuf;
-    ma_uint32 minreq;
-    ma_uint32 fragsize;
-} ma_pa_buffer_attr;
+#if defined(MA_DEBUG_OUTPUT)
+    printf("INFO: Output Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", busNumber, frameCount, pBufferList->mNumberBuffers);
+#endif
-typedef struct
-{
-    ma_uint8 channels;
-    ma_pa_channel_position_t map[MA_PA_CHANNELS_MAX];
-} ma_pa_channel_map;
+    /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */
+    layout = ma_stream_layout_interleaved;
+    if (pBufferList->mBuffers[0].mNumberChannels != pDevice->playback.internalChannels) {    /* Heuristic: a per-buffer channel count differing from the device's implies one buffer per channel. */
+        layout = ma_stream_layout_deinterleaved;
+    }
+
+    if (layout == ma_stream_layout_interleaved) {
+        /* For now we can assume everything is interleaved. */
+        UInt32 iBuffer;
+        for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) {
+            if (pBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->playback.internalChannels) {
+                ma_uint32 frameCountForThisBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+                if (frameCountForThisBuffer > 0) {
+                    if (pDevice->type == ma_device_type_duplex) {
+                        ma_device__handle_duplex_callback_playback(pDevice, frameCountForThisBuffer, pBufferList->mBuffers[iBuffer].mData, &pDevice->coreaudio.duplexRB);
+                    } else {
+                        ma_device__read_frames_from_client(pDevice, frameCountForThisBuffer, pBufferList->mBuffers[iBuffer].mData);
+                    }
+                }
+
+            #if defined(MA_DEBUG_OUTPUT)
+                printf(" frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize);
+            #endif
+            } else {
+                /*
+                This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's
+                not interleaved, in which case we can't handle right now since miniaudio does not yet support non-interleaved streams. We just
+                output silence here.
+                */
+                MA_ZERO_MEMORY(pBufferList->mBuffers[iBuffer].mData, pBufferList->mBuffers[iBuffer].mDataByteSize);
-typedef struct
-{
-    ma_uint8 channels;
-    ma_uint32 values[MA_PA_CHANNELS_MAX];
-} ma_pa_cvolume;
+            #if defined(MA_DEBUG_OUTPUT)
+                printf(" WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize);
+            #endif
+            }
+        }
+    } else {
+        /* This is the deinterleaved case. We need to update each buffer in groups of internalChannels. This assumes each buffer is the same size. */
+
+        /*
+        For safety we'll check that the internal channels is a multiple of the buffer count. If it's not it means something
+        very strange has happened and we're not going to support it.
+        */
+        if ((pBufferList->mNumberBuffers % pDevice->playback.internalChannels) == 0) {
+            ma_uint8 tempBuffer[4096];
+            UInt32 iBuffer;
+
+            for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; iBuffer += pDevice->playback.internalChannels) {
+                ma_uint32 frameCountPerBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_sample(pDevice->playback.internalFormat);    /* Per-sample, not per-frame: each buffer carries a single channel here. */
+                ma_uint32 framesRemaining = frameCountPerBuffer;
-typedef struct
-{
-    ma_pa_sample_format_t format;
-    ma_uint32 rate;
-    ma_uint8 channels;
-} ma_pa_sample_spec;
+                while (framesRemaining > 0) {
+                    void* ppDeinterleavedBuffers[MA_MAX_CHANNELS];
+                    ma_uint32 iChannel;
+                    ma_uint32 framesToRead = sizeof(tempBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);    /* Chunked through a fixed 4KB interleaved staging buffer. */
+                    if (framesToRead > framesRemaining) {
+                        framesToRead = framesRemaining;
+                    }
+
+                    if (pDevice->type == ma_device_type_duplex) {
+                        ma_device__handle_duplex_callback_playback(pDevice, framesToRead, tempBuffer, &pDevice->coreaudio.duplexRB);
+                    } else {
+                        ma_device__read_frames_from_client(pDevice, framesToRead, tempBuffer);
+                    }
+
+                    for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) {
+                        ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pBufferList->mBuffers[iBuffer+iChannel].mData, (frameCountPerBuffer - framesRemaining) * ma_get_bytes_per_sample(pDevice->playback.internalFormat));
+                    }
+
+                    ma_deinterleave_pcm_frames(pDevice->playback.internalFormat, pDevice->playback.internalChannels, framesToRead, tempBuffer, ppDeinterleavedBuffers);
+
+                    framesRemaining -= framesToRead;
+                }
+            }
+        }
+    }
+
+    (void)pActionFlags;
+    (void)pTimeStamp;
+    (void)busNumber;
-typedef struct
-{
-    const char* name;
-    ma_uint32 index;
-    const char* description;
-    ma_pa_sample_spec sample_spec;
-    ma_pa_channel_map channel_map;
-    ma_uint32 owner_module;
-    ma_pa_cvolume volume;
-    int mute;
-    ma_uint32 monitor_source;
-    const char* monitor_source_name;
-    ma_uint64 latency;
-    const char* driver;
-    ma_pa_sink_flags_t flags;
-    void* proplist;
-    ma_uint64 configured_latency;
-    ma_uint32 base_volume;
-    ma_pa_sink_state_t state;
-    ma_uint32 n_volume_steps;
-    ma_uint32 card;
-    ma_uint32 n_ports;
-    void** ports;
-    void* active_port;
-    ma_uint8 n_formats;
-    void** formats;
-} ma_pa_sink_info;
+    return noErr;
+}
-typedef struct
+static OSStatus ma_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pUnusedBufferList)    /* AudioUnit capture callback: renders into a pre-allocated buffer list, then delivers frames to the client or duplex ring buffer. */
{
-    const char *name;
-    ma_uint32 index;
-    const char *description;
-    ma_pa_sample_spec sample_spec;
-    ma_pa_channel_map channel_map;
-    ma_uint32 owner_module;
-    ma_pa_cvolume volume;
-    int mute;
-    ma_uint32 monitor_of_sink;
-    const char *monitor_of_sink_name;
-    ma_uint64 latency;
-    const char *driver;
-    ma_pa_source_flags_t flags;
-    void* proplist;
-    ma_uint64 configured_latency;
-    ma_uint32 base_volume;
-    ma_pa_source_state_t state;
-    ma_uint32 n_volume_steps;
-    ma_uint32 card;
-    ma_uint32 n_ports;
-    void** ports;
-    void* active_port;
-    ma_uint8 n_formats;
-    void** formats;
-} ma_pa_source_info;
+    ma_device* pDevice = (ma_device*)pUserData;
+    AudioBufferList* pRenderedBufferList;
+    ma_stream_layout layout;
+    OSStatus status;
-typedef void (* ma_pa_context_notify_cb_t)(ma_pa_context* c, void* userdata);
-typedef void (* ma_pa_sink_info_cb_t) (ma_pa_context* c, const ma_pa_sink_info* i, int eol, void* userdata);
-typedef void (* ma_pa_source_info_cb_t) (ma_pa_context* c, const ma_pa_source_info* i, int eol, void* userdata);
-typedef void (* ma_pa_stream_success_cb_t)(ma_pa_stream* s, int success, void* userdata);
-typedef void (* ma_pa_stream_request_cb_t)(ma_pa_stream* s, size_t nbytes, void* userdata);
-typedef void (* ma_pa_free_cb_t) (void* p);
+    MA_ASSERT(pDevice != NULL);
+
+    pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList;    /* Device-owned scratch buffer list; AudioUnitRender() below fills it (pUnusedBufferList from the callback is ignored). */
+    MA_ASSERT(pRenderedBufferList);
+
+    /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */
+    layout = ma_stream_layout_interleaved;
+    if (pRenderedBufferList->mBuffers[0].mNumberChannels != pDevice->capture.internalChannels) {
+        layout = ma_stream_layout_deinterleaved;
+    }
+
+#if defined(MA_DEBUG_OUTPUT)
+    printf("INFO: Input Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", busNumber, frameCount, pRenderedBufferList->mNumberBuffers);
#endif
+
+    status = ((ma_AudioUnitRender_proc)pDevice->pContext->coreaudio.AudioUnitRender)((AudioUnit)pDevice->coreaudio.audioUnitCapture, pActionFlags, pTimeStamp, busNumber, frameCount, pRenderedBufferList);
+    if (status != noErr) {
+    #if defined(MA_DEBUG_OUTPUT)
+        printf("  ERROR: AudioUnitRender() failed with %d\n", status);
+    #endif
+        return status;    /* Propagate the raw OSStatus back to Core Audio; no frames are delivered on failure. */
+    }
+
+    if (layout == ma_stream_layout_interleaved) {
+        UInt32 iBuffer;
+        for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) {
+            if (pRenderedBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->capture.internalChannels) {
+                if (pDevice->type == ma_device_type_duplex) {
+                    ma_device__handle_duplex_callback_capture(pDevice, frameCount, pRenderedBufferList->mBuffers[iBuffer].mData, &pDevice->coreaudio.duplexRB);
+                } else {
+                    ma_device__send_frames_to_client(pDevice, frameCount, pRenderedBufferList->mBuffers[iBuffer].mData);
+                }
+            #if defined(MA_DEBUG_OUTPUT)
+                printf("  mDataByteSize=%d\n", pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);
+            #endif
+            } else {
+                /*
+                This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's
+                not interleaved, in which case we can't handle right now since miniaudio does not yet support non-interleaved streams.
+                */
+                ma_uint8 silentBuffer[4096];
+                ma_uint32 framesRemaining;
+
+                MA_ZERO_MEMORY(silentBuffer, sizeof(silentBuffer));    /* Client still receives frameCount frames, just all-zero, so its timing stays consistent. */
+
+                framesRemaining = frameCount;
+                while (framesRemaining > 0) {
+                    ma_uint32 framesToSend = sizeof(silentBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+                    if (framesToSend > framesRemaining) {
+                        framesToSend = framesRemaining;
+                    }
+
+                    if (pDevice->type == ma_device_type_duplex) {
+                        ma_device__handle_duplex_callback_capture(pDevice, framesToSend, silentBuffer, &pDevice->coreaudio.duplexRB);
+                    } else {
+                        ma_device__send_frames_to_client(pDevice, framesToSend, silentBuffer);
+                    }
+
+                    framesRemaining -= framesToSend;
+                }
+
+            #if defined(MA_DEBUG_OUTPUT)
+                printf("  WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pRenderedBufferList->mBuffers[iBuffer].mNumberChannels, pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);
+            #endif
+            }
+        }
+    } else {
+        /* This is the deinterleaved case. We need to interleave the audio data before sending it to the client. This assumes each buffer is the same size. */
+
+        /*
+        For safety we'll check that the internal channels is a multiple of the buffer count. If it's not it means something
+        very strange has happened and we're not going to support it.
+        */
+        if ((pRenderedBufferList->mNumberBuffers % pDevice->capture.internalChannels) == 0) {
+            ma_uint8 tempBuffer[4096];
+            UInt32 iBuffer;
+            for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; iBuffer += pDevice->capture.internalChannels) {
+                ma_uint32 framesRemaining = frameCount;
+                while (framesRemaining > 0) {
+                    void* ppDeinterleavedBuffers[MA_MAX_CHANNELS];
+                    ma_uint32 iChannel;
+                    ma_uint32 framesToSend = sizeof(tempBuffer) / ma_get_bytes_per_sample(pDevice->capture.internalFormat);    /* NOTE(review): per-sample sizing here vs per-frame in the playback path -- confirm tempBuffer cannot be overrun after interleaving. */
+                    if (framesToSend > framesRemaining) {
+                        framesToSend = framesRemaining;
+                    }
+
+                    for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) {
+                        ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pRenderedBufferList->mBuffers[iBuffer+iChannel].mData, (frameCount - framesRemaining) * ma_get_bytes_per_sample(pDevice->capture.internalFormat));
+                    }
+
+                    ma_interleave_pcm_frames(pDevice->capture.internalFormat, pDevice->capture.internalChannels, framesToSend, (const void**)ppDeinterleavedBuffers, tempBuffer);
+                    if (pDevice->type == ma_device_type_duplex) {
+                        ma_device__handle_duplex_callback_capture(pDevice, framesToSend, tempBuffer, &pDevice->coreaudio.duplexRB);
+                    } else {
+                        ma_device__send_frames_to_client(pDevice, framesToSend, tempBuffer);
+                    }
-typedef ma_pa_mainloop* (* ma_pa_mainloop_new_proc) ();
-typedef void (* ma_pa_mainloop_free_proc) (ma_pa_mainloop* m);
-typedef ma_pa_mainloop_api* (* ma_pa_mainloop_get_api_proc) (ma_pa_mainloop* m);
-typedef int (* ma_pa_mainloop_iterate_proc) (ma_pa_mainloop* m, int block, int* retval);
-typedef void (* ma_pa_mainloop_wakeup_proc) (ma_pa_mainloop* m);
-typedef ma_pa_context* (* ma_pa_context_new_proc) (ma_pa_mainloop_api* mainloop, const char* name);
-typedef void (* ma_pa_context_unref_proc) (ma_pa_context* c);
-typedef int (* ma_pa_context_connect_proc) (ma_pa_context* c, const char* server, ma_pa_context_flags_t flags, const ma_pa_spawn_api* api);
-typedef void (* ma_pa_context_disconnect_proc) (ma_pa_context* c);
-typedef void (* ma_pa_context_set_state_callback_proc) (ma_pa_context* c, ma_pa_context_notify_cb_t cb, void* userdata);
-typedef ma_pa_context_state_t (* ma_pa_context_get_state_proc) (ma_pa_context* c);
-typedef ma_pa_operation* (* ma_pa_context_get_sink_info_list_proc) (ma_pa_context* c, ma_pa_sink_info_cb_t cb, void* userdata);
-typedef ma_pa_operation* (* ma_pa_context_get_source_info_list_proc) (ma_pa_context* c, ma_pa_source_info_cb_t cb, void* userdata);
-typedef ma_pa_operation* (* ma_pa_context_get_sink_info_by_name_proc) (ma_pa_context* c, const char* name, ma_pa_sink_info_cb_t cb, void* userdata);
-typedef ma_pa_operation* (* ma_pa_context_get_source_info_by_name_proc)(ma_pa_context* c, const char* name, ma_pa_source_info_cb_t cb, void* userdata);
-typedef void (* ma_pa_operation_unref_proc) (ma_pa_operation* o);
-typedef ma_pa_operation_state_t (* ma_pa_operation_get_state_proc) (ma_pa_operation* o);
-typedef ma_pa_channel_map* (* ma_pa_channel_map_init_extend_proc) (ma_pa_channel_map* m, unsigned channels, ma_pa_channel_map_def_t def);
-typedef int (* ma_pa_channel_map_valid_proc) (const ma_pa_channel_map* m);
-typedef int (* ma_pa_channel_map_compatible_proc) (const ma_pa_channel_map* m, const ma_pa_sample_spec* ss);
-typedef ma_pa_stream* (* ma_pa_stream_new_proc) (ma_pa_context* c, const char* name, const ma_pa_sample_spec* ss, const ma_pa_channel_map* map);
-typedef void (* ma_pa_stream_unref_proc) (ma_pa_stream* s);
-typedef int (* ma_pa_stream_connect_playback_proc) (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags, const ma_pa_cvolume* volume, ma_pa_stream* sync_stream);
-typedef int (* ma_pa_stream_connect_record_proc) (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags);
-typedef int (* ma_pa_stream_disconnect_proc) (ma_pa_stream* s);
-typedef ma_pa_stream_state_t (* ma_pa_stream_get_state_proc) (ma_pa_stream* s);
-typedef const ma_pa_sample_spec* (* ma_pa_stream_get_sample_spec_proc) (ma_pa_stream* s);
-typedef const ma_pa_channel_map* (* ma_pa_stream_get_channel_map_proc) (ma_pa_stream* s);
-typedef const ma_pa_buffer_attr* (* ma_pa_stream_get_buffer_attr_proc) (ma_pa_stream* s);
-typedef ma_pa_operation* (* ma_pa_stream_set_buffer_attr_proc) (ma_pa_stream* s, const ma_pa_buffer_attr* attr, ma_pa_stream_success_cb_t cb, void* userdata);
-typedef const char* (* ma_pa_stream_get_device_name_proc) (ma_pa_stream* s);
-typedef void (* ma_pa_stream_set_write_callback_proc) (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata);
-typedef void (* ma_pa_stream_set_read_callback_proc) (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata);
-typedef ma_pa_operation* (* ma_pa_stream_flush_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata);
-typedef ma_pa_operation* (* ma_pa_stream_drain_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata);
-typedef int (* ma_pa_stream_is_corked_proc) (ma_pa_stream* s);
-typedef ma_pa_operation* (* ma_pa_stream_cork_proc) (ma_pa_stream* s, int b, ma_pa_stream_success_cb_t cb, void* userdata);
-typedef ma_pa_operation* (* ma_pa_stream_trigger_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata);
-typedef int (* ma_pa_stream_begin_write_proc) (ma_pa_stream* s, void** data, size_t* nbytes);
-typedef int (* ma_pa_stream_write_proc) (ma_pa_stream* s, const void* data, size_t nbytes, ma_pa_free_cb_t free_cb, int64_t offset, ma_pa_seek_mode_t seek);
-typedef int (* ma_pa_stream_peek_proc) (ma_pa_stream* s, const void** data, size_t* nbytes);
-typedef int (* ma_pa_stream_drop_proc) (ma_pa_stream* s);
-typedef size_t (* ma_pa_stream_writable_size_proc) (ma_pa_stream* s);
-typedef size_t (* ma_pa_stream_readable_size_proc) (ma_pa_stream* s);
+                    framesRemaining -= framesToSend;
+                }
+            }
+        }
+    }
-typedef struct
-{
-    ma_uint32 count;
-    ma_uint32 capacity;
-    ma_device_info* pInfo;
-} ma_pulse_device_enum_data;
+    (void)pActionFlags;
+    (void)pTimeStamp;
+    (void)busNumber;
+    (void)frameCount;
+    (void)pUnusedBufferList;
+
+    return noErr;
+}
-ma_result ma_result_from_pulse(int result)
+static void on_start_stop__coreaudio(void* pUserData, AudioUnit audioUnit, AudioUnitPropertyID propertyID, AudioUnitScope scope, AudioUnitElement element)
{
- switch (result) {
- case MA_PA_OK: return MA_SUCCESS;
- case MA_PA_ERR_ACCESS: return MA_ACCESS_DENIED;
- case MA_PA_ERR_INVALID: return MA_INVALID_ARGS;
- case MA_PA_ERR_NOENTITY: return MA_NO_DEVICE;
- default: return MA_ERROR;
+ ma_device* pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
+
+ /*
+ There's been a report of a deadlock here when triggered by ma_device_uninit(). It looks like
+    AudioUnitGetProperty (called below) and AudioComponentInstanceDispose (called in ma_device_uninit)
+ can try waiting on the same lock. I'm going to try working around this by not calling any Core
+ Audio APIs in the callback when the device has been stopped or uninitialized.
+ */
+ if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED || ma_device__get_state(pDevice) == MA_STATE_STOPPING || ma_device__get_state(pDevice) == MA_STATE_STOPPED) {
+ ma_stop_proc onStop = pDevice->onStop;
+ if (onStop) {
+ onStop(pDevice);
+ }
+
+ ma_event_signal(&pDevice->coreaudio.stopEvent);
+ } else {
+ UInt32 isRunning;
+ UInt32 isRunningSize = sizeof(isRunning);
+ OSStatus status = ((ma_AudioUnitGetProperty_proc)pDevice->pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioOutputUnitProperty_IsRunning, scope, element, &isRunning, &isRunningSize);
+ if (status != noErr) {
+ return; /* Don't really know what to do in this case... just ignore it, I suppose... */
+ }
+
+ if (!isRunning) {
+ ma_stop_proc onStop;
+
+ /*
+ The stop event is a bit annoying in Core Audio because it will be called when we automatically switch the default device. Some scenarios to consider:
+
+ 1) When the device is unplugged, this will be called _before_ the default device change notification.
+ 2) When the device is changed via the default device change notification, this will be called _after_ the switch.
+
+ For case #1, we just check if there's a new default device available. If so, we just ignore the stop event. For case #2 we check a flag.
+ */
+ if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isDefaultPlaybackDevice) ||
+ ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isDefaultCaptureDevice)) {
+ /*
+ It looks like the device is switching through an external event, such as the user unplugging the device or changing the default device
+ via the operating system's sound settings. If we're re-initializing the device, we just terminate because we want the stopping of the
+ device to be seamless to the client (we don't want them receiving the onStop event and thinking that the device has stopped when it
+ hasn't!).
+ */
+ if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isSwitchingPlaybackDevice) ||
+ ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isSwitchingCaptureDevice)) {
+ return;
+ }
+
+ /*
+ Getting here means the device is not reinitializing which means it may have been unplugged. From what I can see, it looks like Core Audio
+ will try switching to the new default device seamlessly. We need to somehow find a way to determine whether or not Core Audio will most
+ likely be successful in switching to the new device.
+
+ TODO: Try to predict if Core Audio will switch devices. If not, the onStop callback needs to be posted.
+ */
+ return;
+ }
+
+ /* Getting here means we need to stop the device. */
+ onStop = pDevice->onStop;
+ if (onStop) {
+ onStop(pDevice);
+ }
+ }
}
+
+ (void)propertyID; /* Unused. */
}
-#if 0
-ma_pa_sample_format_t ma_format_to_pulse(ma_format format)
+#if defined(MA_APPLE_DESKTOP)
+static ma_uint32 g_DeviceTrackingInitCounter_CoreAudio = 0;
+static ma_mutex g_DeviceTrackingMutex_CoreAudio;
+static ma_device** g_ppTrackedDevices_CoreAudio = NULL;
+static ma_uint32 g_TrackedDeviceCap_CoreAudio = 0;
+static ma_uint32 g_TrackedDeviceCount_CoreAudio = 0;
+
+static OSStatus ma_default_device_changed__coreaudio(AudioObjectID objectID, UInt32 addressCount, const AudioObjectPropertyAddress* pAddresses, void* pUserData)
{
- if (ma_is_little_endian()) {
- switch (format) {
- case ma_format_s16: return MA_PA_SAMPLE_S16LE;
- case ma_format_s24: return MA_PA_SAMPLE_S24LE;
- case ma_format_s32: return MA_PA_SAMPLE_S32LE;
- case ma_format_f32: return MA_PA_SAMPLE_FLOAT32LE;
- default: break;
- }
+ ma_device_type deviceType;
+
+ /* Not sure if I really need to check this, but it makes me feel better. */
+ if (addressCount == 0) {
+ return noErr;
+ }
+
+ if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultOutputDevice) {
+ deviceType = ma_device_type_playback;
+ } else if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultInputDevice) {
+ deviceType = ma_device_type_capture;
} else {
- switch (format) {
- case ma_format_s16: return MA_PA_SAMPLE_S16BE;
- case ma_format_s24: return MA_PA_SAMPLE_S24BE;
- case ma_format_s32: return MA_PA_SAMPLE_S32BE;
- case ma_format_f32: return MA_PA_SAMPLE_FLOAT32BE;
- default: break;
- }
+ return noErr; /* Should never hit this. */
}
-
- /* Endian agnostic. */
- switch (format) {
- case ma_format_u8: return MA_PA_SAMPLE_U8;
- default: return MA_PA_SAMPLE_INVALID;
+
+ ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio);
+ {
+ ma_uint32 iDevice;
+ for (iDevice = 0; iDevice < g_TrackedDeviceCount_CoreAudio; iDevice += 1) {
+ ma_result reinitResult;
+ ma_device* pDevice;
+
+ pDevice = g_ppTrackedDevices_CoreAudio[iDevice];
+ if (pDevice->type == deviceType || pDevice->type == ma_device_type_duplex) {
+ if (deviceType == ma_device_type_playback) {
+ pDevice->coreaudio.isSwitchingPlaybackDevice = MA_TRUE;
+ reinitResult = ma_device_reinit_internal__coreaudio(pDevice, deviceType, MA_TRUE);
+ pDevice->coreaudio.isSwitchingPlaybackDevice = MA_FALSE;
+ } else {
+ pDevice->coreaudio.isSwitchingCaptureDevice = MA_TRUE;
+ reinitResult = ma_device_reinit_internal__coreaudio(pDevice, deviceType, MA_TRUE);
+ pDevice->coreaudio.isSwitchingCaptureDevice = MA_FALSE;
+ }
+
+ if (reinitResult == MA_SUCCESS) {
+ ma_device__post_init_setup(pDevice, deviceType);
+
+ /* Restart the device if required. If this fails we need to stop the device entirely. */
+ if (ma_device__get_state(pDevice) == MA_STATE_STARTED) {
+ OSStatus status;
+ if (deviceType == ma_device_type_playback) {
+ status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+ if (status != noErr) {
+ if (pDevice->type == ma_device_type_duplex) {
+ ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+ }
+ ma_device__set_state(pDevice, MA_STATE_STOPPED);
+ }
+ } else if (deviceType == ma_device_type_capture) {
+ status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+ if (status != noErr) {
+ if (pDevice->type == ma_device_type_duplex) {
+ ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+ }
+ ma_device__set_state(pDevice, MA_STATE_STOPPED);
+ }
+ }
+ }
+ }
+ }
+ }
}
+ ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio);
+
+ (void)objectID; /* Unused. */
+ return noErr;
}
-#endif
-ma_format ma_format_from_pulse(ma_pa_sample_format_t format)
+static ma_result ma_context__init_device_tracking__coreaudio(ma_context* pContext)
{
- if (ma_is_little_endian()) {
- switch (format) {
- case MA_PA_SAMPLE_S16LE: return ma_format_s16;
- case MA_PA_SAMPLE_S24LE: return ma_format_s24;
- case MA_PA_SAMPLE_S32LE: return ma_format_s32;
- case MA_PA_SAMPLE_FLOAT32LE: return ma_format_f32;
- default: break;
- }
- } else {
- switch (format) {
- case MA_PA_SAMPLE_S16BE: return ma_format_s16;
- case MA_PA_SAMPLE_S24BE: return ma_format_s24;
- case MA_PA_SAMPLE_S32BE: return ma_format_s32;
- case MA_PA_SAMPLE_FLOAT32BE: return ma_format_f32;
- default: break;
- }
+ MA_ASSERT(pContext != NULL);
+
+ if (ma_atomic_increment_32(&g_DeviceTrackingInitCounter_CoreAudio) == 1) {
+ AudioObjectPropertyAddress propAddress;
+ propAddress.mScope = kAudioObjectPropertyScopeGlobal;
+ propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ ma_mutex_init(pContext, &g_DeviceTrackingMutex_CoreAudio);
+
+ propAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+ ((ma_AudioObjectAddPropertyListener_proc)pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL);
+
+ propAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ ((ma_AudioObjectAddPropertyListener_proc)pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL);
}
+
+ return MA_SUCCESS;
+}
- /* Endian agnostic. */
- switch (format) {
- case MA_PA_SAMPLE_U8: return ma_format_u8;
- default: return ma_format_unknown;
+static ma_result ma_context__uninit_device_tracking__coreaudio(ma_context* pContext)
+{
+ MA_ASSERT(pContext != NULL);
+
+ if (ma_atomic_decrement_32(&g_DeviceTrackingInitCounter_CoreAudio) == 0) {
+ AudioObjectPropertyAddress propAddress;
+ propAddress.mScope = kAudioObjectPropertyScopeGlobal;
+ propAddress.mElement = kAudioObjectPropertyElementMaster;
+
+ propAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+ ((ma_AudioObjectRemovePropertyListener_proc)pContext->coreaudio.AudioObjectRemovePropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL);
+
+ propAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ ((ma_AudioObjectRemovePropertyListener_proc)pContext->coreaudio.AudioObjectRemovePropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL);
+
+ /* At this point there should be no tracked devices. If so there's an error somewhere. */
+ MA_ASSERT(g_ppTrackedDevices_CoreAudio == NULL);
+ MA_ASSERT(g_TrackedDeviceCount_CoreAudio == 0);
+
+ ma_mutex_uninit(&g_DeviceTrackingMutex_CoreAudio);
}
+
+ return MA_SUCCESS;
}
-ma_channel ma_channel_position_from_pulse(ma_pa_channel_position_t position)
+static ma_result ma_device__track__coreaudio(ma_device* pDevice)
{
- switch (position)
+ ma_result result;
+
+ MA_ASSERT(pDevice != NULL);
+
+ result = ma_context__init_device_tracking__coreaudio(pDevice->pContext);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio);
{
- case MA_PA_CHANNEL_POSITION_INVALID: return MA_CHANNEL_NONE;
- case MA_PA_CHANNEL_POSITION_MONO: return MA_CHANNEL_MONO;
- case MA_PA_CHANNEL_POSITION_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT;
- case MA_PA_CHANNEL_POSITION_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT;
- case MA_PA_CHANNEL_POSITION_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER;
- case MA_PA_CHANNEL_POSITION_REAR_CENTER: return MA_CHANNEL_BACK_CENTER;
- case MA_PA_CHANNEL_POSITION_REAR_LEFT: return MA_CHANNEL_BACK_LEFT;
- case MA_PA_CHANNEL_POSITION_REAR_RIGHT: return MA_CHANNEL_BACK_RIGHT;
- case MA_PA_CHANNEL_POSITION_LFE: return MA_CHANNEL_LFE;
- case MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER;
- case MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER;
- case MA_PA_CHANNEL_POSITION_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT;
- case MA_PA_CHANNEL_POSITION_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT;
- case MA_PA_CHANNEL_POSITION_AUX0: return MA_CHANNEL_AUX_0;
- case MA_PA_CHANNEL_POSITION_AUX1: return MA_CHANNEL_AUX_1;
- case MA_PA_CHANNEL_POSITION_AUX2: return MA_CHANNEL_AUX_2;
- case MA_PA_CHANNEL_POSITION_AUX3: return MA_CHANNEL_AUX_3;
- case MA_PA_CHANNEL_POSITION_AUX4: return MA_CHANNEL_AUX_4;
- case MA_PA_CHANNEL_POSITION_AUX5: return MA_CHANNEL_AUX_5;
- case MA_PA_CHANNEL_POSITION_AUX6: return MA_CHANNEL_AUX_6;
- case MA_PA_CHANNEL_POSITION_AUX7: return MA_CHANNEL_AUX_7;
- case MA_PA_CHANNEL_POSITION_AUX8: return MA_CHANNEL_AUX_8;
- case MA_PA_CHANNEL_POSITION_AUX9: return MA_CHANNEL_AUX_9;
- case MA_PA_CHANNEL_POSITION_AUX10: return MA_CHANNEL_AUX_10;
- case MA_PA_CHANNEL_POSITION_AUX11: return MA_CHANNEL_AUX_11;
- case MA_PA_CHANNEL_POSITION_AUX12: return MA_CHANNEL_AUX_12;
- case MA_PA_CHANNEL_POSITION_AUX13: return MA_CHANNEL_AUX_13;
- case MA_PA_CHANNEL_POSITION_AUX14: return MA_CHANNEL_AUX_14;
- case MA_PA_CHANNEL_POSITION_AUX15: return MA_CHANNEL_AUX_15;
- case MA_PA_CHANNEL_POSITION_AUX16: return MA_CHANNEL_AUX_16;
- case MA_PA_CHANNEL_POSITION_AUX17: return MA_CHANNEL_AUX_17;
- case MA_PA_CHANNEL_POSITION_AUX18: return MA_CHANNEL_AUX_18;
- case MA_PA_CHANNEL_POSITION_AUX19: return MA_CHANNEL_AUX_19;
- case MA_PA_CHANNEL_POSITION_AUX20: return MA_CHANNEL_AUX_20;
- case MA_PA_CHANNEL_POSITION_AUX21: return MA_CHANNEL_AUX_21;
- case MA_PA_CHANNEL_POSITION_AUX22: return MA_CHANNEL_AUX_22;
- case MA_PA_CHANNEL_POSITION_AUX23: return MA_CHANNEL_AUX_23;
- case MA_PA_CHANNEL_POSITION_AUX24: return MA_CHANNEL_AUX_24;
- case MA_PA_CHANNEL_POSITION_AUX25: return MA_CHANNEL_AUX_25;
- case MA_PA_CHANNEL_POSITION_AUX26: return MA_CHANNEL_AUX_26;
- case MA_PA_CHANNEL_POSITION_AUX27: return MA_CHANNEL_AUX_27;
- case MA_PA_CHANNEL_POSITION_AUX28: return MA_CHANNEL_AUX_28;
- case MA_PA_CHANNEL_POSITION_AUX29: return MA_CHANNEL_AUX_29;
- case MA_PA_CHANNEL_POSITION_AUX30: return MA_CHANNEL_AUX_30;
- case MA_PA_CHANNEL_POSITION_AUX31: return MA_CHANNEL_AUX_31;
- case MA_PA_CHANNEL_POSITION_TOP_CENTER: return MA_CHANNEL_TOP_CENTER;
- case MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT;
- case MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT;
- case MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER;
- case MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT: return MA_CHANNEL_TOP_BACK_LEFT;
- case MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT;
- case MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER: return MA_CHANNEL_TOP_BACK_CENTER;
- default: return MA_CHANNEL_NONE;
+ /* Allocate memory if required. */
+ if (g_TrackedDeviceCap_CoreAudio <= g_TrackedDeviceCount_CoreAudio) {
+ ma_uint32 oldCap;
+ ma_uint32 newCap;
+ ma_device** ppNewDevices;
+
+ oldCap = g_TrackedDeviceCap_CoreAudio;
+ newCap = g_TrackedDeviceCap_CoreAudio * 2;
+ if (newCap == 0) {
+ newCap = 1;
+ }
+
+ ppNewDevices = (ma_device**)ma__realloc_from_callbacks(g_ppTrackedDevices_CoreAudio, sizeof(*g_ppTrackedDevices_CoreAudio)*newCap, sizeof(*g_ppTrackedDevices_CoreAudio)*oldCap, &pDevice->pContext->allocationCallbacks);
+ if (ppNewDevices == NULL) {
+ ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio);
+ return MA_OUT_OF_MEMORY;
+ }
+
+ g_ppTrackedDevices_CoreAudio = ppNewDevices;
+ g_TrackedDeviceCap_CoreAudio = newCap;
+ }
+
+ g_ppTrackedDevices_CoreAudio[g_TrackedDeviceCount_CoreAudio] = pDevice;
+ g_TrackedDeviceCount_CoreAudio += 1;
}
+ ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio);
+
+ return MA_SUCCESS;
}
-#if 0
-ma_pa_channel_position_t ma_channel_position_to_pulse(ma_channel position)
+static ma_result ma_device__untrack__coreaudio(ma_device* pDevice)
{
- switch (position)
+ ma_result result;
+
+ MA_ASSERT(pDevice != NULL);
+
+ ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio);
{
- case MA_CHANNEL_NONE: return MA_PA_CHANNEL_POSITION_INVALID;
- case MA_CHANNEL_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_FRONT_LEFT;
- case MA_CHANNEL_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT;
- case MA_CHANNEL_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_CENTER;
- case MA_CHANNEL_LFE: return MA_PA_CHANNEL_POSITION_LFE;
- case MA_CHANNEL_BACK_LEFT: return MA_PA_CHANNEL_POSITION_REAR_LEFT;
- case MA_CHANNEL_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_REAR_RIGHT;
- case MA_CHANNEL_FRONT_LEFT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER;
- case MA_CHANNEL_FRONT_RIGHT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER;
- case MA_CHANNEL_BACK_CENTER: return MA_PA_CHANNEL_POSITION_REAR_CENTER;
- case MA_CHANNEL_SIDE_LEFT: return MA_PA_CHANNEL_POSITION_SIDE_LEFT;
- case MA_CHANNEL_SIDE_RIGHT: return MA_PA_CHANNEL_POSITION_SIDE_RIGHT;
- case MA_CHANNEL_TOP_CENTER: return MA_PA_CHANNEL_POSITION_TOP_CENTER;
- case MA_CHANNEL_TOP_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT;
- case MA_CHANNEL_TOP_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER;
- case MA_CHANNEL_TOP_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT;
- case MA_CHANNEL_TOP_BACK_LEFT: return MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT;
- case MA_CHANNEL_TOP_BACK_CENTER: return MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER;
- case MA_CHANNEL_TOP_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT;
- case MA_CHANNEL_19: return MA_PA_CHANNEL_POSITION_AUX18;
- case MA_CHANNEL_20: return MA_PA_CHANNEL_POSITION_AUX19;
- case MA_CHANNEL_21: return MA_PA_CHANNEL_POSITION_AUX20;
- case MA_CHANNEL_22: return MA_PA_CHANNEL_POSITION_AUX21;
- case MA_CHANNEL_23: return MA_PA_CHANNEL_POSITION_AUX22;
- case MA_CHANNEL_24: return MA_PA_CHANNEL_POSITION_AUX23;
- case MA_CHANNEL_25: return MA_PA_CHANNEL_POSITION_AUX24;
- case MA_CHANNEL_26: return MA_PA_CHANNEL_POSITION_AUX25;
- case MA_CHANNEL_27: return MA_PA_CHANNEL_POSITION_AUX26;
- case MA_CHANNEL_28: return MA_PA_CHANNEL_POSITION_AUX27;
- case MA_CHANNEL_29: return MA_PA_CHANNEL_POSITION_AUX28;
- case MA_CHANNEL_30: return MA_PA_CHANNEL_POSITION_AUX29;
- case MA_CHANNEL_31: return MA_PA_CHANNEL_POSITION_AUX30;
- case MA_CHANNEL_32: return MA_PA_CHANNEL_POSITION_AUX31;
- default: return (ma_pa_channel_position_t)position;
+ ma_uint32 iDevice;
+ for (iDevice = 0; iDevice < g_TrackedDeviceCount_CoreAudio; iDevice += 1) {
+ if (g_ppTrackedDevices_CoreAudio[iDevice] == pDevice) {
+ /* We've found the device. We now need to remove it from the list. */
+ ma_uint32 jDevice;
+ for (jDevice = iDevice; jDevice < g_TrackedDeviceCount_CoreAudio-1; jDevice += 1) {
+ g_ppTrackedDevices_CoreAudio[jDevice] = g_ppTrackedDevices_CoreAudio[jDevice+1];
+ }
+
+ g_TrackedDeviceCount_CoreAudio -= 1;
+
+ /* If there's nothing else in the list we need to free memory. */
+ if (g_TrackedDeviceCount_CoreAudio == 0) {
+ ma__free_from_callbacks(g_ppTrackedDevices_CoreAudio, &pDevice->pContext->allocationCallbacks);
+ g_ppTrackedDevices_CoreAudio = NULL;
+ g_TrackedDeviceCap_CoreAudio = 0;
+ }
+
+ break;
+ }
+ }
+ }
+ ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio);
+
+ result = ma_context__uninit_device_tracking__coreaudio(pDevice->pContext);
+ if (result != MA_SUCCESS) {
+ return result;
}
+
+ return MA_SUCCESS;
}
#endif
-ma_result ma_wait_for_operation__pulse(ma_context* pContext, ma_pa_mainloop* pMainLoop, ma_pa_operation* pOP)
+#if defined(MA_APPLE_MOBILE)
+@interface ma_router_change_handler:NSObject {
+ ma_device* m_pDevice;
+}
+@end
+
+@implementation ma_router_change_handler
+-(id)init:(ma_device*)pDevice
{
- ma_assert(pContext != NULL);
- ma_assert(pMainLoop != NULL);
- ma_assert(pOP != NULL);
+ self = [super init];
+ m_pDevice = pDevice;
- while (((ma_pa_operation_get_state_proc)pContext->pulse.pa_operation_get_state)(pOP) == MA_PA_OPERATION_RUNNING) {
- int error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL);
- if (error < 0) {
- return ma_result_from_pulse(error);
- }
- }
+ [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handle_route_change:) name:AVAudioSessionRouteChangeNotification object:[AVAudioSession sharedInstance]];
- return MA_SUCCESS;
+ return self;
}
-ma_result ma_device__wait_for_operation__pulse(ma_device* pDevice, ma_pa_operation* pOP)
+-(void)dealloc
{
- ma_assert(pDevice != NULL);
- ma_assert(pOP != NULL);
-
- return ma_wait_for_operation__pulse(pDevice->pContext, (ma_pa_mainloop*)pDevice->pulse.pMainLoop, pOP);
+ [self remove_handler];
}
+-(void)remove_handler
+{
+    [[NSNotificationCenter defaultCenter] removeObserver:self name:AVAudioSessionRouteChangeNotification object:nil];
+}
-ma_bool32 ma_context_is_device_id_equal__pulse(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+-(void)handle_route_change:(NSNotification*)pNotification
{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
+ AVAudioSession* pSession = [AVAudioSession sharedInstance];
+
+ NSInteger reason = [[[pNotification userInfo] objectForKey:AVAudioSessionRouteChangeReasonKey] integerValue];
+ switch (reason)
+ {
+ case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
+ {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOldDeviceUnavailable\n");
+ #endif
+ } break;
- return ma_strcmp(pID0->pulse, pID1->pulse) == 0;
-}
+ case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
+ {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNewDeviceAvailable\n");
+ #endif
+ } break;
+ case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
+ {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory\n");
+ #endif
+ } break;
-typedef struct
-{
- ma_context* pContext;
- ma_enum_devices_callback_proc callback;
- void* pUserData;
- ma_bool32 isTerminated;
-} ma_context_enumerate_devices_callback_data__pulse;
+ case AVAudioSessionRouteChangeReasonWakeFromSleep:
+ {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonWakeFromSleep\n");
+ #endif
+ } break;
-void ma_context_enumerate_devices_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pSinkInfo, int endOfList, void* pUserData)
-{
- ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData;
- ma_device_info deviceInfo;
+ case AVAudioSessionRouteChangeReasonOverride:
+ {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOverride\n");
+ #endif
+ } break;
- ma_assert(pData != NULL);
+ case AVAudioSessionRouteChangeReasonCategoryChange:
+ {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonCategoryChange\n");
+ #endif
+ } break;
- if (endOfList || pData->isTerminated) {
- return;
+ case AVAudioSessionRouteChangeReasonUnknown:
+ default:
+ {
+ #if defined(MA_DEBUG_OUTPUT)
+ printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonUnknown\n");
+ #endif
+ } break;
}
- ma_zero_object(&deviceInfo);
+ m_pDevice->sampleRate = (ma_uint32)pSession.sampleRate;
- /* The name from PulseAudio is the ID for miniaudio. */
- if (pSinkInfo->name != NULL) {
- ma_strncpy_s(deviceInfo.id.pulse, sizeof(deviceInfo.id.pulse), pSinkInfo->name, (size_t)-1);
+ if (m_pDevice->type == ma_device_type_capture || m_pDevice->type == ma_device_type_duplex) {
+ m_pDevice->capture.channels = (ma_uint32)pSession.inputNumberOfChannels;
+ ma_device__post_init_setup(m_pDevice, ma_device_type_capture);
}
-
- /* The description from PulseAudio is the name for miniaudio. */
- if (pSinkInfo->description != NULL) {
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), pSinkInfo->description, (size_t)-1);
+ if (m_pDevice->type == ma_device_type_playback || m_pDevice->type == ma_device_type_duplex) {
+ m_pDevice->playback.channels = (ma_uint32)pSession.outputNumberOfChannels;
+ ma_device__post_init_setup(m_pDevice, ma_device_type_playback);
}
+}
+@end
+#endif
- pData->isTerminated = !pData->callback(pData->pContext, ma_device_type_playback, &deviceInfo, pData->pUserData);
+static void ma_device_uninit__coreaudio(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+ MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED);
+
+#if defined(MA_APPLE_DESKTOP)
+ /*
+ Make sure we're no longer tracking the device. It doesn't matter if we call this for a non-default device because it'll
+ just gracefully ignore it.
+ */
+ ma_device__untrack__coreaudio(pDevice);
+#endif
+#if defined(MA_APPLE_MOBILE)
+ if (pDevice->coreaudio.pRouteChangeHandler != NULL) {
+ ma_router_change_handler* pRouteChangeHandler = (__bridge_transfer ma_router_change_handler*)pDevice->coreaudio.pRouteChangeHandler;
+ [pRouteChangeHandler remove_handler];
+ }
+#endif
+
+ if (pDevice->coreaudio.audioUnitCapture != NULL) {
+ ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+ }
+ if (pDevice->coreaudio.audioUnitPlayback != NULL) {
+ ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+ }
+
+ if (pDevice->coreaudio.pAudioBufferList) {
+ ma__free_from_callbacks(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks);
+ }
- (void)pPulseContext; /* Unused. */
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_pcm_rb_uninit(&pDevice->coreaudio.duplexRB);
+ }
}
-void ma_context_enumerate_devices_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pSinkInfo, int endOfList, void* pUserData)
+typedef struct
{
- ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData;
- ma_device_info deviceInfo;
+ /* Input. */
+ ma_format formatIn;
+ ma_uint32 channelsIn;
+ ma_uint32 sampleRateIn;
+ ma_channel channelMapIn[MA_MAX_CHANNELS];
+ ma_uint32 periodSizeInFramesIn;
+ ma_uint32 periodSizeInMillisecondsIn;
+ ma_uint32 periodsIn;
+ ma_bool32 usingDefaultFormat;
+ ma_bool32 usingDefaultChannels;
+ ma_bool32 usingDefaultSampleRate;
+ ma_bool32 usingDefaultChannelMap;
+ ma_share_mode shareMode;
+ ma_bool32 registerStopEvent;
+
+ /* Output. */
+#if defined(MA_APPLE_DESKTOP)
+ AudioObjectID deviceObjectID;
+#endif
+ AudioComponent component;
+ AudioUnit audioUnit;
+ AudioBufferList* pAudioBufferList; /* Only used for input devices. */
+ ma_format formatOut;
+ ma_uint32 channelsOut;
+ ma_uint32 sampleRateOut;
+ ma_channel channelMapOut[MA_MAX_CHANNELS];
+ ma_uint32 periodSizeInFramesOut;
+ ma_uint32 periodsOut;
+ char deviceName[256];
+} ma_device_init_internal_data__coreaudio;
- ma_assert(pData != NULL);
+static ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__coreaudio* pData, void* pDevice_DoNotReference) /* <-- pDevice is typed as void* intentionally so as to avoid accidentally referencing it. */
+{
+ ma_result result;
+ OSStatus status;
+ UInt32 enableIOFlag;
+ AudioStreamBasicDescription bestFormat;
+ ma_uint32 actualPeriodSizeInFrames;
+ AURenderCallbackStruct callbackInfo;
+#if defined(MA_APPLE_DESKTOP)
+ AudioObjectID deviceObjectID;
+#endif
- if (endOfList || pData->isTerminated) {
- return;
+ /* This API should only be used for a single device type: playback or capture. No full-duplex mode. */
+ if (deviceType == ma_device_type_duplex) {
+ return MA_INVALID_ARGS;
}
- ma_zero_object(&deviceInfo);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(deviceType == ma_device_type_playback || deviceType == ma_device_type_capture);
- /* The name from PulseAudio is the ID for miniaudio. */
- if (pSinkInfo->name != NULL) {
- ma_strncpy_s(deviceInfo.id.pulse, sizeof(deviceInfo.id.pulse), pSinkInfo->name, (size_t)-1);
+#if defined(MA_APPLE_DESKTOP)
+ pData->deviceObjectID = 0;
+#endif
+ pData->component = NULL;
+ pData->audioUnit = NULL;
+ pData->pAudioBufferList = NULL;
+
+#if defined(MA_APPLE_DESKTOP)
+ result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID);
+ if (result != MA_SUCCESS) {
+ return result;
}
-
- /* The description from PulseAudio is the name for miniaudio. */
- if (pSinkInfo->description != NULL) {
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), pSinkInfo->description, (size_t)-1);
+
+ pData->deviceObjectID = deviceObjectID;
+#endif
+
+ /* Core audio doesn't really use the notion of a period so we can leave this unmodified, but not too over the top. */
+ pData->periodsOut = pData->periodsIn;
+ if (pData->periodsOut == 0) {
+ pData->periodsOut = MA_DEFAULT_PERIODS;
}
+ if (pData->periodsOut > 16) {
+ pData->periodsOut = 16;
+ }
+
+
+ /* Audio unit. */
+ status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)((AudioComponent)pContext->coreaudio.component, (AudioUnit*)&pData->audioUnit);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+
+
+ /* The input/output buses need to be explicitly enabled and disabled. We set the flag based on the output unit first, then we just swap it for input. */
+ enableIOFlag = 1;
+ if (deviceType == ma_device_type_capture) {
+ enableIOFlag = 0;
+ }
+
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+ enableIOFlag = (enableIOFlag == 0) ? 1 : 0;
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+
+ /* Set the device to use with this audio unit. This is only used on desktop since we are using defaults on mobile. */
+#if defined(MA_APPLE_DESKTOP)
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS, &deviceObjectID, sizeof(AudioDeviceID));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+        return ma_result_from_OSStatus(status);
+ }
+#endif
+
+ /*
+ Format. This is the hardest part of initialization because there's a few variables to take into account.
+ 1) The format must be supported by the device.
+    2) The format must be supported by miniaudio.
+ 3) There's a priority that miniaudio prefers.
+
+ Ideally we would like to use a format that's as close to the hardware as possible so we can get as close to a passthrough as possible. The
+ most important property is the sample rate. miniaudio can do format conversion for any sample rate and channel count, but cannot do the same
+ for the sample data format. If the sample data format is not supported by miniaudio it must be ignored completely.
+
+ On mobile platforms this is a bit different. We just force the use of whatever the audio unit's current format is set to.
+ */
+ {
+ AudioUnitScope formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
+ AudioUnitElement formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;
- pData->isTerminated = !pData->callback(pData->pContext, ma_device_type_capture, &deviceInfo, pData->pUserData);
-
- (void)pPulseContext; /* Unused. */
-}
-
-ma_result ma_context_enumerate_devices__pulse(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
-{
- ma_result result = MA_SUCCESS;
- ma_context_enumerate_devices_callback_data__pulse callbackData;
- ma_pa_operation* pOP = NULL;
- ma_pa_mainloop* pMainLoop;
- ma_pa_mainloop_api* pAPI;
- ma_pa_context* pPulseContext;
- int error;
-
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
+ #if defined(MA_APPLE_DESKTOP)
+ AudioStreamBasicDescription origFormat;
+ UInt32 origFormatSize;
- callbackData.pContext = pContext;
- callbackData.callback = callback;
- callbackData.pUserData = pUserData;
- callbackData.isTerminated = MA_FALSE;
+ result = ma_find_best_format__coreaudio(pContext, deviceObjectID, deviceType, pData->formatIn, pData->channelsIn, pData->sampleRateIn, pData->usingDefaultFormat, pData->usingDefaultChannels, pData->usingDefaultSampleRate, &bestFormat);
+ if (result != MA_SUCCESS) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return result;
+ }
+
+ /* From what I can see, Apple's documentation implies that we should keep the sample rate consistent. */
+ origFormatSize = sizeof(origFormat);
+ if (deviceType == ma_device_type_playback) {
+ status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &origFormat, &origFormatSize);
+ } else {
+ status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &origFormat, &origFormatSize);
+ }
+
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return result;
+ }
+
+ bestFormat.mSampleRate = origFormat.mSampleRate;
+
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat));
+ if (status != noErr) {
+ /* We failed to set the format, so fall back to the current format of the audio unit. */
+ bestFormat = origFormat;
+ }
+ #else
+ UInt32 propSize = sizeof(bestFormat);
+ status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, &propSize);
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+
+ /*
+ Sample rate is a little different here because for some reason kAudioUnitProperty_StreamFormat returns 0... Oh well. We need to instead try
+ setting the sample rate to what the user has requested and then just see the results of it. Need to use some Objective-C here for this since
+ it depends on Apple's AVAudioSession API. To do this we just get the shared AVAudioSession instance and then set it. Note that from what I
+ can tell, it looks like the sample rate is shared between playback and capture for everything.
+ */
+ @autoreleasepool {
+ AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
+ MA_ASSERT(pAudioSession != NULL);
+
+ [pAudioSession setPreferredSampleRate:(double)pData->sampleRateIn error:nil];
+ bestFormat.mSampleRate = pAudioSession.sampleRate;
- pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)();
- if (pMainLoop == NULL) {
- return MA_FAILED_TO_INIT_BACKEND;
+ /*
+ I've had a report that the channel count returned by AudioUnitGetProperty above is inconsistent with
+ AVAudioSession outputNumberOfChannels. I'm going to try using the AVAudioSession values instead.
+ */
+ if (deviceType == ma_device_type_playback) {
+ bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.outputNumberOfChannels;
+ }
+ if (deviceType == ma_device_type_capture) {
+ bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.inputNumberOfChannels;
+ }
+ }
+
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
+ #endif
+
+ result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pData->formatOut);
+ if (result != MA_SUCCESS) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return result;
+ }
+
+ if (pData->formatOut == ma_format_unknown) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return MA_FORMAT_NOT_SUPPORTED;
+ }
+
+ pData->channelsOut = bestFormat.mChannelsPerFrame;
+ pData->sampleRateOut = bestFormat.mSampleRate;
}
-
- pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)(pMainLoop);
- if (pAPI == NULL) {
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
- return MA_FAILED_TO_INIT_BACKEND;
+
+ /*
+ Internal channel map. This is weird in my testing. If I use the AudioObject to get the
+ channel map, the channel descriptions are set to "Unknown" for some reason. To work around
+ this it looks like retrieving it from the AudioUnit will work. However, and this is where
+ it gets weird, it doesn't seem to work with capture devices, nor at all on iOS... Therefore
+ I'm going to fall back to a default assumption in these cases.
+ */
+#if defined(MA_APPLE_DESKTOP)
+ result = ma_get_AudioUnit_channel_map(pContext, pData->audioUnit, deviceType, pData->channelMapOut);
+ if (result != MA_SUCCESS) {
+ #if 0
+ /* Try falling back to the channel map from the AudioObject. */
+ result = ma_get_AudioObject_channel_map(pContext, deviceObjectID, deviceType, pData->channelMapOut);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ #else
+ /* Fall back to default assumptions. */
+ ma_get_standard_channel_map(ma_standard_channel_map_default, pData->channelsOut, pData->channelMapOut);
+ #endif
}
+#else
+ /* TODO: Figure out how to get the channel map using AVAudioSession. */
+ ma_get_standard_channel_map(ma_standard_channel_map_default, pData->channelsOut, pData->channelMapOut);
+#endif
+
- pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(pAPI, pContext->pulse.pApplicationName);
- if (pPulseContext == NULL) {
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
- return MA_FAILED_TO_INIT_BACKEND;
+ /* Buffer size. Not allowing this to be configurable on iOS. */
+ actualPeriodSizeInFrames = pData->periodSizeInFramesIn;
+
+#if defined(MA_APPLE_DESKTOP)
+ if (actualPeriodSizeInFrames == 0) {
+ actualPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->periodSizeInMillisecondsIn, pData->sampleRateOut);
}
-
- error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)(pPulseContext, pContext->pulse.pServerName, (pContext->pulse.tryAutoSpawn) ? 0 : MA_PA_CONTEXT_NOAUTOSPAWN, NULL);
- if (error != MA_PA_OK) {
- ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
- return ma_result_from_pulse(error);
+
+ result = ma_set_AudioObject_buffer_size_in_frames(pContext, deviceObjectID, deviceType, &actualPeriodSizeInFrames);
+ if (result != MA_SUCCESS) {
+ return result;
}
+
+ pData->periodSizeInFramesOut = actualPeriodSizeInFrames;
+#else
+ actualPeriodSizeInFrames = 2048;
+ pData->periodSizeInFramesOut = actualPeriodSizeInFrames;
+#endif
- for (;;) {
- ma_pa_context_state_t state = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)(pPulseContext);
- if (state == MA_PA_CONTEXT_READY) {
- break; /* Success. */
- }
- if (state == MA_PA_CONTEXT_CONNECTING || state == MA_PA_CONTEXT_AUTHORIZING || state == MA_PA_CONTEXT_SETTING_NAME) {
- error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL);
- if (error < 0) {
- result = ma_result_from_pulse(error);
- goto done;
- }
-#ifdef MA_DEBUG_OUTPUT
- printf("[PulseAudio] pa_context_get_state() returned %d. Waiting.\n", state);
-#endif
- continue; /* Keep trying. */
- }
- if (state == MA_PA_CONTEXT_UNCONNECTED || state == MA_PA_CONTEXT_FAILED || state == MA_PA_CONTEXT_TERMINATED) {
-#ifdef MA_DEBUG_OUTPUT
- printf("[PulseAudio] pa_context_get_state() returned %d. Failed.\n", state);
-#endif
- goto done; /* Failed. */
+ /*
+ During testing I discovered that the buffer size can be too big. You'll get an error like this:
+
+ kAudioUnitErr_TooManyFramesToProcess : inFramesToProcess=4096, mMaxFramesPerSlice=512
+
+ Note how inFramesToProcess is smaller than mMaxFramesPerSlice. To fix, we need to set kAudioUnitProperty_MaximumFramesPerSlice to that
+ of the size of our buffer, or do it the other way around and set our buffer size to the kAudioUnitProperty_MaximumFramesPerSlice.
+ */
+ {
+ /*AudioUnitScope propScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
+ AudioUnitElement propBus = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;
+
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, propScope, propBus, &actualBufferSizeInFrames, sizeof(actualBufferSizeInFrames));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }*/
+
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &actualPeriodSizeInFrames, sizeof(actualPeriodSizeInFrames));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
}
}
+
+ /* We need a buffer list if this is an input device. We render into this in the input callback. */
+ if (deviceType == ma_device_type_capture) {
+ ma_bool32 isInterleaved = (bestFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) == 0;
+ size_t allocationSize;
+ AudioBufferList* pBufferList;
-
- /* Playback. */
- if (!callbackData.isTerminated) {
- pOP = ((ma_pa_context_get_sink_info_list_proc)pContext->pulse.pa_context_get_sink_info_list)(pPulseContext, ma_context_enumerate_devices_sink_callback__pulse, &callbackData);
- if (pOP == NULL) {
- result = MA_ERROR;
- goto done;
+ allocationSize = sizeof(AudioBufferList) - sizeof(AudioBuffer); /* Subtract sizeof(AudioBuffer) because that part is dynamically sized. */
+ if (isInterleaved) {
+ /* Interleaved case. This is the simple case because we just have one buffer. */
+ allocationSize += sizeof(AudioBuffer) * 1;
+ allocationSize += actualPeriodSizeInFrames * ma_get_bytes_per_frame(pData->formatOut, pData->channelsOut);
+ } else {
+ /* Non-interleaved case. This is the more complex case because there's more than one buffer. */
+ allocationSize += sizeof(AudioBuffer) * pData->channelsOut;
+ allocationSize += actualPeriodSizeInFrames * ma_get_bytes_per_sample(pData->formatOut) * pData->channelsOut;
}
-
- result = ma_wait_for_operation__pulse(pContext, pMainLoop, pOP);
- ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
- if (result != MA_SUCCESS) {
- goto done;
+
+ pBufferList = (AudioBufferList*)ma__malloc_from_callbacks(allocationSize, &pContext->allocationCallbacks);
+ if (pBufferList == NULL) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return MA_OUT_OF_MEMORY;
+ }
+
+ if (isInterleaved) {
+ pBufferList->mNumberBuffers = 1;
+ pBufferList->mBuffers[0].mNumberChannels = pData->channelsOut;
+ pBufferList->mBuffers[0].mDataByteSize = actualPeriodSizeInFrames * ma_get_bytes_per_frame(pData->formatOut, pData->channelsOut);
+ pBufferList->mBuffers[0].mData = (ma_uint8*)pBufferList + sizeof(AudioBufferList);
+ } else {
+ ma_uint32 iBuffer;
+ pBufferList->mNumberBuffers = pData->channelsOut;
+ for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) {
+ pBufferList->mBuffers[iBuffer].mNumberChannels = 1;
+ pBufferList->mBuffers[iBuffer].mDataByteSize = actualPeriodSizeInFrames * ma_get_bytes_per_sample(pData->formatOut);
+ pBufferList->mBuffers[iBuffer].mData = (ma_uint8*)pBufferList + ((sizeof(AudioBufferList) - sizeof(AudioBuffer)) + (sizeof(AudioBuffer) * pData->channelsOut)) + (actualPeriodSizeInFrames * ma_get_bytes_per_sample(pData->formatOut) * iBuffer);
+ }
}
+
+ pData->pAudioBufferList = pBufferList;
}
-
-
- /* Capture. */
- if (!callbackData.isTerminated) {
- pOP = ((ma_pa_context_get_source_info_list_proc)pContext->pulse.pa_context_get_source_info_list)(pPulseContext, ma_context_enumerate_devices_source_callback__pulse, &callbackData);
- if (pOP == NULL) {
- result = MA_ERROR;
- goto done;
+
+ /* Callbacks. */
+ callbackInfo.inputProcRefCon = pDevice_DoNotReference;
+ if (deviceType == ma_device_type_playback) {
+ callbackInfo.inputProc = ma_on_output__coreaudio;
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, MA_COREAUDIO_OUTPUT_BUS, &callbackInfo, sizeof(callbackInfo));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
}
-
- result = ma_wait_for_operation__pulse(pContext, pMainLoop, pOP);
- ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
- if (result != MA_SUCCESS) {
- goto done;
+ } else {
+ callbackInfo.inputProc = ma_on_input__coreaudio;
+ status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, MA_COREAUDIO_INPUT_BUS, &callbackInfo, sizeof(callbackInfo));
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
}
}
-
-done:
- ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)(pPulseContext);
- ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
- return result;
-}
-
-
-typedef struct
-{
- ma_device_info* pDeviceInfo;
- ma_bool32 foundDevice;
-} ma_context_get_device_info_callback_data__pulse;
-
-void ma_context_get_device_info_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData)
-{
- ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData;
-
- if (endOfList > 0) {
- return;
- }
-
- ma_assert(pData != NULL);
- pData->foundDevice = MA_TRUE;
-
- if (pInfo->name != NULL) {
- ma_strncpy_s(pData->pDeviceInfo->id.pulse, sizeof(pData->pDeviceInfo->id.pulse), pInfo->name, (size_t)-1);
+
+ /* We need to listen for stop events. */
+ if (pData->registerStopEvent) {
+ status = ((ma_AudioUnitAddPropertyListener_proc)pContext->coreaudio.AudioUnitAddPropertyListener)(pData->audioUnit, kAudioOutputUnitProperty_IsRunning, on_start_stop__coreaudio, pDevice_DoNotReference);
+ if (status != noErr) {
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
+ }
}
-
- if (pInfo->description != NULL) {
- ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pInfo->description, (size_t)-1);
+
+ /* Initialize the audio unit. */
+ status = ((ma_AudioUnitInitialize_proc)pContext->coreaudio.AudioUnitInitialize)(pData->audioUnit);
+ if (status != noErr) {
+ ma__free_from_callbacks(pData->pAudioBufferList, &pContext->allocationCallbacks);
+ pData->pAudioBufferList = NULL;
+ ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+ return ma_result_from_OSStatus(status);
}
-
- pData->pDeviceInfo->minChannels = pInfo->sample_spec.channels;
- pData->pDeviceInfo->maxChannels = pInfo->sample_spec.channels;
- pData->pDeviceInfo->minSampleRate = pInfo->sample_spec.rate;
- pData->pDeviceInfo->maxSampleRate = pInfo->sample_spec.rate;
- pData->pDeviceInfo->formatCount = 1;
- pData->pDeviceInfo->formats[0] = ma_format_from_pulse(pInfo->sample_spec.format);
-
- (void)pPulseContext; /* Unused. */
+
+ /* Grab the name. */
+#if defined(MA_APPLE_DESKTOP)
+ ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pData->deviceName), pData->deviceName);
+#else
+ if (deviceType == ma_device_type_playback) {
+ ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_PLAYBACK_DEVICE_NAME);
+ } else {
+ ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_CAPTURE_DEVICE_NAME);
+ }
+#endif
+
+ return result;
}
-void ma_context_get_device_info_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData)
+#if defined(MA_APPLE_DESKTOP)
+static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit)
{
- ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData;
+ ma_device_init_internal_data__coreaudio data;
+ ma_result result;
- if (endOfList > 0) {
- return;
+ /* This should only be called for playback or capture, not duplex. */
+ if (deviceType == ma_device_type_duplex) {
+ return MA_INVALID_ARGS;
}
- ma_assert(pData != NULL);
- pData->foundDevice = MA_TRUE;
-
- if (pInfo->name != NULL) {
- ma_strncpy_s(pData->pDeviceInfo->id.pulse, sizeof(pData->pDeviceInfo->id.pulse), pInfo->name, (size_t)-1);
+ if (deviceType == ma_device_type_capture) {
+ data.formatIn = pDevice->capture.format;
+ data.channelsIn = pDevice->capture.channels;
+ data.sampleRateIn = pDevice->sampleRate;
+ MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap));
+ data.usingDefaultFormat = pDevice->capture.usingDefaultFormat;
+ data.usingDefaultChannels = pDevice->capture.usingDefaultChannels;
+ data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
+ data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap;
+ data.shareMode = pDevice->capture.shareMode;
+ data.registerStopEvent = MA_TRUE;
+
+ if (disposePreviousAudioUnit) {
+ ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+ ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+ }
+ if (pDevice->coreaudio.pAudioBufferList) {
+ ma__free_from_callbacks(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks);
+ }
+ } else if (deviceType == ma_device_type_playback) {
+ data.formatIn = pDevice->playback.format;
+ data.channelsIn = pDevice->playback.channels;
+ data.sampleRateIn = pDevice->sampleRate;
+ MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap));
+ data.usingDefaultFormat = pDevice->playback.usingDefaultFormat;
+ data.usingDefaultChannels = pDevice->playback.usingDefaultChannels;
+ data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
+ data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap;
+ data.shareMode = pDevice->playback.shareMode;
+ data.registerStopEvent = (pDevice->type != ma_device_type_duplex);
+
+ if (disposePreviousAudioUnit) {
+ ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+ ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+ }
}
+ data.periodSizeInFramesIn = pDevice->coreaudio.originalPeriodSizeInFrames;
+ data.periodSizeInMillisecondsIn = pDevice->coreaudio.originalPeriodSizeInMilliseconds;
+ data.periodsIn = pDevice->coreaudio.originalPeriods;
- if (pInfo->description != NULL) {
- ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pInfo->description, (size_t)-1);
+ /* Need at least 3 periods for duplex. */
+ if (data.periodsIn < 3 && pDevice->type == ma_device_type_duplex) {
+ data.periodsIn = 3;
}
- pData->pDeviceInfo->minChannels = pInfo->sample_spec.channels;
- pData->pDeviceInfo->maxChannels = pInfo->sample_spec.channels;
- pData->pDeviceInfo->minSampleRate = pInfo->sample_spec.rate;
- pData->pDeviceInfo->maxSampleRate = pInfo->sample_spec.rate;
- pData->pDeviceInfo->formatCount = 1;
- pData->pDeviceInfo->formats[0] = ma_format_from_pulse(pInfo->sample_spec.format);
-
- (void)pPulseContext; /* Unused. */
+ result = ma_device_init_internal__coreaudio(pDevice->pContext, deviceType, NULL, &data, (void*)pDevice);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ if (deviceType == ma_device_type_capture) {
+ #if defined(MA_APPLE_DESKTOP)
+ pDevice->coreaudio.deviceObjectIDCapture = (ma_uint32)data.deviceObjectID;
+ #endif
+ pDevice->coreaudio.audioUnitCapture = (ma_ptr)data.audioUnit;
+ pDevice->coreaudio.pAudioBufferList = (ma_ptr)data.pAudioBufferList;
+
+ pDevice->capture.internalFormat = data.formatOut;
+ pDevice->capture.internalChannels = data.channelsOut;
+ pDevice->capture.internalSampleRate = data.sampleRateOut;
+ MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+ pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+ pDevice->capture.internalPeriods = data.periodsOut;
+ } else if (deviceType == ma_device_type_playback) {
+ #if defined(MA_APPLE_DESKTOP)
+ pDevice->coreaudio.deviceObjectIDPlayback = (ma_uint32)data.deviceObjectID;
+ #endif
+ pDevice->coreaudio.audioUnitPlayback = (ma_ptr)data.audioUnit;
+
+ pDevice->playback.internalFormat = data.formatOut;
+ pDevice->playback.internalChannels = data.channelsOut;
+ pDevice->playback.internalSampleRate = data.sampleRateOut;
+ MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+ pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+ pDevice->playback.internalPeriods = data.periodsOut;
+ }
+
+ return MA_SUCCESS;
}
+#endif /* MA_APPLE_DESKTOP */
-ma_result ma_context_get_device_info__pulse(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+static ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
{
- ma_result result = MA_SUCCESS;
- ma_context_get_device_info_callback_data__pulse callbackData;
- ma_pa_operation* pOP = NULL;
- ma_pa_mainloop* pMainLoop;
- ma_pa_mainloop_api* pAPI;
- ma_pa_context* pPulseContext;
- int error;
-
- ma_assert(pContext != NULL);
-
- /* No exclusive mode with the PulseAudio backend. */
- if (shareMode == ma_share_mode_exclusive) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
- }
+ ma_result result;
- callbackData.pDeviceInfo = pDeviceInfo;
- callbackData.foundDevice = MA_FALSE;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(pDevice != NULL);
- pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)();
- if (pMainLoop == NULL) {
- return MA_FAILED_TO_INIT_BACKEND;
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
}
- pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)(pMainLoop);
- if (pAPI == NULL) {
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
- return MA_FAILED_TO_INIT_BACKEND;
+ /* No exclusive mode with the Core Audio backend for now. */
+ if (((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive) ||
+ ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive)) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
}
+
+ /* Capture needs to be initialized first. */
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ ma_device_init_internal_data__coreaudio data;
+ data.formatIn = pConfig->capture.format;
+ data.channelsIn = pConfig->capture.channels;
+ data.sampleRateIn = pConfig->sampleRate;
+ MA_COPY_MEMORY(data.channelMapIn, pConfig->capture.channelMap, sizeof(pConfig->capture.channelMap));
+ data.usingDefaultFormat = pDevice->capture.usingDefaultFormat;
+ data.usingDefaultChannels = pDevice->capture.usingDefaultChannels;
+ data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
+ data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap;
+ data.shareMode = pConfig->capture.shareMode;
+ data.periodSizeInFramesIn = pConfig->periodSizeInFrames;
+ data.periodSizeInMillisecondsIn = pConfig->periodSizeInMilliseconds;
+ data.periodsIn = pConfig->periods;
+ data.registerStopEvent = MA_TRUE;
- pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(pAPI, pContext->pulse.pApplicationName);
- if (pPulseContext == NULL) {
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
- return MA_FAILED_TO_INIT_BACKEND;
+ /* Need at least 3 periods for duplex. */
+ if (data.periodsIn < 3 && pConfig->deviceType == ma_device_type_duplex) {
+ data.periodsIn = 3;
+ }
+
+ result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_capture, pConfig->capture.pDeviceID, &data, (void*)pDevice);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ pDevice->coreaudio.isDefaultCaptureDevice = (pConfig->capture.pDeviceID == NULL);
+ #if defined(MA_APPLE_DESKTOP)
+ pDevice->coreaudio.deviceObjectIDCapture = (ma_uint32)data.deviceObjectID;
+ #endif
+ pDevice->coreaudio.audioUnitCapture = (ma_ptr)data.audioUnit;
+ pDevice->coreaudio.pAudioBufferList = (ma_ptr)data.pAudioBufferList;
+
+ pDevice->capture.internalFormat = data.formatOut;
+ pDevice->capture.internalChannels = data.channelsOut;
+ pDevice->capture.internalSampleRate = data.sampleRateOut;
+ MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+ pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+ pDevice->capture.internalPeriods = data.periodsOut;
+
+ #if defined(MA_APPLE_DESKTOP)
+ /*
+ If we are using the default device we'll need to listen for changes to the system's default device so we can seamlessly
+ switch the device in the background.
+ */
+ if (pConfig->capture.pDeviceID == NULL) {
+ ma_device__track__coreaudio(pDevice);
+ }
+ #endif
}
-
- error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)(pPulseContext, pContext->pulse.pServerName, 0, NULL);
- if (error != MA_PA_OK) {
- ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
- return ma_result_from_pulse(error);
+
+ /* Playback. */
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ma_device_init_internal_data__coreaudio data;
+ data.formatIn = pConfig->playback.format;
+ data.channelsIn = pConfig->playback.channels;
+ data.sampleRateIn = pConfig->sampleRate;
+ MA_COPY_MEMORY(data.channelMapIn, pConfig->playback.channelMap, sizeof(pConfig->playback.channelMap));
+ data.usingDefaultFormat = pDevice->playback.usingDefaultFormat;
+ data.usingDefaultChannels = pDevice->playback.usingDefaultChannels;
+ data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
+ data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap;
+ data.shareMode = pConfig->playback.shareMode;
+
+ /* In full-duplex mode we want the playback buffer to be the same size as the capture buffer. */
+ if (pConfig->deviceType == ma_device_type_duplex) {
+ data.periodSizeInFramesIn = pDevice->capture.internalPeriodSizeInFrames;
+ data.periodsIn = pDevice->capture.internalPeriods;
+ data.registerStopEvent = MA_FALSE;
+ } else {
+ data.periodSizeInFramesIn = pConfig->periodSizeInFrames;
+ data.periodSizeInMillisecondsIn = pConfig->periodSizeInMilliseconds;
+ data.periodsIn = pConfig->periods;
+ data.registerStopEvent = MA_TRUE;
+ }
+
+ result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_playback, pConfig->playback.pDeviceID, &data, (void*)pDevice);
+ if (result != MA_SUCCESS) {
+ if (pConfig->deviceType == ma_device_type_duplex) {
+ ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+ if (pDevice->coreaudio.pAudioBufferList) {
+ ma__free_from_callbacks(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks);
+ }
+ }
+ return result;
+ }
+
+ pDevice->coreaudio.isDefaultPlaybackDevice = (pConfig->playback.pDeviceID == NULL);
+ #if defined(MA_APPLE_DESKTOP)
+ pDevice->coreaudio.deviceObjectIDPlayback = (ma_uint32)data.deviceObjectID;
+ #endif
+ pDevice->coreaudio.audioUnitPlayback = (ma_ptr)data.audioUnit;
+
+ pDevice->playback.internalFormat = data.formatOut;
+ pDevice->playback.internalChannels = data.channelsOut;
+ pDevice->playback.internalSampleRate = data.sampleRateOut;
+ MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
+ pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut;
+ pDevice->playback.internalPeriods = data.periodsOut;
+
+ #if defined(MA_APPLE_DESKTOP)
+ /*
+ If we are using the default device we'll need to listen for changes to the system's default device so we can seamlessly
+ switch the device in the background.
+ */
+ if (pConfig->playback.pDeviceID == NULL && (pConfig->deviceType != ma_device_type_duplex || pConfig->capture.pDeviceID != NULL)) {
+ ma_device__track__coreaudio(pDevice);
+ }
+ #endif
}
+
+ pDevice->coreaudio.originalPeriodSizeInFrames = pConfig->periodSizeInFrames;
+ pDevice->coreaudio.originalPeriodSizeInMilliseconds = pConfig->periodSizeInMilliseconds;
+ pDevice->coreaudio.originalPeriods = pConfig->periods;
+
+ /*
+ When stopping the device, a callback is called on another thread. We need to wait for this callback
+ before returning from ma_device_stop(). This event is used for this.
+ */
+ ma_event_init(pContext, &pDevice->coreaudio.stopEvent);
- for (;;) {
- ma_pa_context_state_t state = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)(pPulseContext);
- if (state == MA_PA_CONTEXT_READY) {
- break; /* Success. */
+ /* Need a ring buffer for duplex mode. */
+ if (pConfig->deviceType == ma_device_type_duplex) {
+ ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames * pDevice->capture.internalPeriods);
+ ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->coreaudio.duplexRB);
+ if (result != MA_SUCCESS) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[Core Audio] Failed to initialize ring buffer.", result);
}
- if (state == MA_PA_CONTEXT_CONNECTING || state == MA_PA_CONTEXT_AUTHORIZING || state == MA_PA_CONTEXT_SETTING_NAME) {
- error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL);
- if (error < 0) {
- result = ma_result_from_pulse(error);
- goto done;
+
+ /* We need a period to act as a buffer for cases where the playback and capture device's end up desyncing. */
+ {
+ ma_uint32 bufferSizeInFrames = rbSizeInFrames / pDevice->capture.internalPeriods;
+ void* pBufferData;
+ ma_pcm_rb_acquire_write(&pDevice->coreaudio.duplexRB, &bufferSizeInFrames, &pBufferData);
+ {
+ MA_ZERO_MEMORY(pBufferData, bufferSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels));
}
+ ma_pcm_rb_commit_write(&pDevice->coreaudio.duplexRB, bufferSizeInFrames, pBufferData);
+ }
+ }
-#ifdef MA_DEBUG_OUTPUT
- printf("[PulseAudio] pa_context_get_state() returned %d. Waiting.\n", state);
+ /*
+ We need to detect when a route has changed so we can update the data conversion pipeline accordingly. This is done
+ differently on non-Desktop Apple platforms.
+ */
+#if defined(MA_APPLE_MOBILE)
+ pDevice->coreaudio.pRouteChangeHandler = (__bridge_retained void*)[[ma_router_change_handler alloc] init:pDevice];
#endif
- continue; /* Keep trying. */
+
+ return MA_SUCCESS;
+}
+
+
+static ma_result ma_device_start__coreaudio(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
}
- if (state == MA_PA_CONTEXT_UNCONNECTED || state == MA_PA_CONTEXT_FAILED || state == MA_PA_CONTEXT_TERMINATED) {
-#ifdef MA_DEBUG_OUTPUT
- printf("[PulseAudio] pa_context_get_state() returned %d. Failed.\n", state);
-#endif
- goto done; /* Failed. */
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+ if (status != noErr) {
+ if (pDevice->type == ma_device_type_duplex) {
+ ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+ }
+ return ma_result_from_OSStatus(status);
}
}
+
+ return MA_SUCCESS;
+}
- if (deviceType == ma_device_type_playback) {
- pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)(pPulseContext, pDeviceID->pulse, ma_context_get_device_info_sink_callback__pulse, &callbackData);
- } else {
- pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)(pPulseContext, pDeviceID->pulse, ma_context_get_device_info_source_callback__pulse, &callbackData);
- }
+static ma_result ma_device_stop__coreaudio(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
- if (pOP != NULL) {
- ma_wait_for_operation__pulse(pContext, pMainLoop, pOP);
- ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
- } else {
- result = MA_ERROR;
- goto done;
- }
+    /* It's not clear from the documentation whether AudioOutputUnitStop() actually drains the device. */
- if (!callbackData.foundDevice) {
- result = MA_NO_DEVICE;
- goto done;
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ OSStatus status = ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
}
-
-
-done:
- ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)(pPulseContext);
- ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
- return result;
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ OSStatus status = ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+ if (status != noErr) {
+ return ma_result_from_OSStatus(status);
+ }
+ }
+
+ /* We need to wait for the callback to finish before returning. */
+ ma_event_wait(&pDevice->coreaudio.stopEvent);
+ return MA_SUCCESS;
}
-void ma_pulse_device_state_callback(ma_pa_context* pPulseContext, void* pUserData)
+static ma_result ma_context_uninit__coreaudio(ma_context* pContext)
{
- ma_device* pDevice;
- ma_context* pContext;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_coreaudio);
+
+#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
+ ma_dlclose(pContext, pContext->coreaudio.hAudioUnit);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
+#endif
- pDevice = (ma_device*)pUserData;
- ma_assert(pDevice != NULL);
+ (void)pContext;
+ return MA_SUCCESS;
+}
- pContext = pDevice->pContext;
- ma_assert(pContext != NULL);
+#if defined(MA_APPLE_MOBILE)
+static AVAudioSessionCategory ma_to_AVAudioSessionCategory(ma_ios_session_category category)
+{
+    /* The "default" and "none" categories are treated differently and should not be used as an input into this function. */
+ MA_ASSERT(category != ma_ios_session_category_default);
+ MA_ASSERT(category != ma_ios_session_category_none);
- pDevice->pulse.pulseContextState = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)(pPulseContext);
+ switch (category) {
+ case ma_ios_session_category_ambient: return AVAudioSessionCategoryAmbient;
+ case ma_ios_session_category_solo_ambient: return AVAudioSessionCategorySoloAmbient;
+ case ma_ios_session_category_playback: return AVAudioSessionCategoryPlayback;
+ case ma_ios_session_category_record: return AVAudioSessionCategoryRecord;
+ case ma_ios_session_category_play_and_record: return AVAudioSessionCategoryPlayAndRecord;
+ case ma_ios_session_category_multi_route: return AVAudioSessionCategoryMultiRoute;
+ case ma_ios_session_category_none: return AVAudioSessionCategoryAmbient;
+ case ma_ios_session_category_default: return AVAudioSessionCategoryAmbient;
+ default: return AVAudioSessionCategoryAmbient;
+ }
}
+#endif
-void ma_device_sink_info_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData)
+static ma_result ma_context_init__coreaudio(const ma_context_config* pConfig, ma_context* pContext)
{
- ma_pa_sink_info* pInfoOut;
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(pContext != NULL);
- if (endOfList > 0) {
- return;
+#if defined(MA_APPLE_MOBILE)
+ @autoreleasepool {
+ AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
+ AVAudioSessionCategoryOptions options = pConfig->coreaudio.sessionCategoryOptions;
+
+ MA_ASSERT(pAudioSession != NULL);
+
+ if (pConfig->coreaudio.sessionCategory == ma_ios_session_category_default) {
+ /*
+            I'm going to use trial and error to determine our default session category. First we'll try PlayAndRecord. If that fails
+            we'll try Playback and if that fails we'll try Record. If all of these fail we'll just not set the category.
+ */
+ #if !defined(MA_APPLE_TV) && !defined(MA_APPLE_WATCH)
+ options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
+ #endif
+
+ if ([pAudioSession setCategory: AVAudioSessionCategoryPlayAndRecord withOptions:options error:nil]) {
+ /* Using PlayAndRecord */
+ } else if ([pAudioSession setCategory: AVAudioSessionCategoryPlayback withOptions:options error:nil]) {
+ /* Using Playback */
+ } else if ([pAudioSession setCategory: AVAudioSessionCategoryRecord withOptions:options error:nil]) {
+ /* Using Record */
+ } else {
+ /* Leave as default? */
+ }
+ } else {
+ if (pConfig->coreaudio.sessionCategory != ma_ios_session_category_none) {
+ if (![pAudioSession setCategory: ma_to_AVAudioSessionCategory(pConfig->coreaudio.sessionCategory) withOptions:options error:nil]) {
+ return MA_INVALID_OPERATION; /* Failed to set session category. */
+ }
+ }
+ }
+ }
+#endif
+
+#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
+ pContext->coreaudio.hCoreFoundation = ma_dlopen(pContext, "CoreFoundation.framework/CoreFoundation");
+ if (pContext->coreaudio.hCoreFoundation == NULL) {
+ return MA_API_NOT_FOUND;
+ }
+
+ pContext->coreaudio.CFStringGetCString = ma_dlsym(pContext, pContext->coreaudio.hCoreFoundation, "CFStringGetCString");
+ pContext->coreaudio.CFRelease = ma_dlsym(pContext, pContext->coreaudio.hCoreFoundation, "CFRelease");
+
+
+ pContext->coreaudio.hCoreAudio = ma_dlopen(pContext, "CoreAudio.framework/CoreAudio");
+ if (pContext->coreaudio.hCoreAudio == NULL) {
+ ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
+ return MA_API_NOT_FOUND;
}
+
+ pContext->coreaudio.AudioObjectGetPropertyData = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyData");
+ pContext->coreaudio.AudioObjectGetPropertyDataSize = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyDataSize");
+ pContext->coreaudio.AudioObjectSetPropertyData = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectSetPropertyData");
+ pContext->coreaudio.AudioObjectAddPropertyListener = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectAddPropertyListener");
+ pContext->coreaudio.AudioObjectRemovePropertyListener = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectRemovePropertyListener");
- pInfoOut = (ma_pa_sink_info*)pUserData;
- ma_assert(pInfoOut != NULL);
+ /*
+ It looks like Apple has moved some APIs from AudioUnit into AudioToolbox on more recent versions of macOS. They are still
+ defined in AudioUnit, but just in case they decide to remove them from there entirely I'm going to implement a fallback.
+ The way it'll work is that it'll first try AudioUnit, and if the required symbols are not present there we'll fall back to
+ AudioToolbox.
+ */
+ pContext->coreaudio.hAudioUnit = ma_dlopen(pContext, "AudioUnit.framework/AudioUnit");
+ if (pContext->coreaudio.hAudioUnit == NULL) {
+ ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
+ return MA_API_NOT_FOUND;
+ }
+
+ if (ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentFindNext") == NULL) {
+ /* Couldn't find the required symbols in AudioUnit, so fall back to AudioToolbox. */
+ ma_dlclose(pContext, pContext->coreaudio.hAudioUnit);
+ pContext->coreaudio.hAudioUnit = ma_dlopen(pContext, "AudioToolbox.framework/AudioToolbox");
+ if (pContext->coreaudio.hAudioUnit == NULL) {
+ ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
+ return MA_API_NOT_FOUND;
+ }
+ }
+
+ pContext->coreaudio.AudioComponentFindNext = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentFindNext");
+ pContext->coreaudio.AudioComponentInstanceDispose = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentInstanceDispose");
+ pContext->coreaudio.AudioComponentInstanceNew = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentInstanceNew");
+ pContext->coreaudio.AudioOutputUnitStart = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioOutputUnitStart");
+ pContext->coreaudio.AudioOutputUnitStop = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioOutputUnitStop");
+ pContext->coreaudio.AudioUnitAddPropertyListener = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitAddPropertyListener");
+ pContext->coreaudio.AudioUnitGetPropertyInfo = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitGetPropertyInfo");
+ pContext->coreaudio.AudioUnitGetProperty = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitGetProperty");
+ pContext->coreaudio.AudioUnitSetProperty = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitSetProperty");
+ pContext->coreaudio.AudioUnitInitialize = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitInitialize");
+ pContext->coreaudio.AudioUnitRender = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitRender");
+#else
+ pContext->coreaudio.CFStringGetCString = (ma_proc)CFStringGetCString;
+ pContext->coreaudio.CFRelease = (ma_proc)CFRelease;
+
+ #if defined(MA_APPLE_DESKTOP)
+ pContext->coreaudio.AudioObjectGetPropertyData = (ma_proc)AudioObjectGetPropertyData;
+ pContext->coreaudio.AudioObjectGetPropertyDataSize = (ma_proc)AudioObjectGetPropertyDataSize;
+ pContext->coreaudio.AudioObjectSetPropertyData = (ma_proc)AudioObjectSetPropertyData;
+ pContext->coreaudio.AudioObjectAddPropertyListener = (ma_proc)AudioObjectAddPropertyListener;
+ pContext->coreaudio.AudioObjectRemovePropertyListener = (ma_proc)AudioObjectRemovePropertyListener;
+ #endif
+
+ pContext->coreaudio.AudioComponentFindNext = (ma_proc)AudioComponentFindNext;
+ pContext->coreaudio.AudioComponentInstanceDispose = (ma_proc)AudioComponentInstanceDispose;
+ pContext->coreaudio.AudioComponentInstanceNew = (ma_proc)AudioComponentInstanceNew;
+ pContext->coreaudio.AudioOutputUnitStart = (ma_proc)AudioOutputUnitStart;
+ pContext->coreaudio.AudioOutputUnitStop = (ma_proc)AudioOutputUnitStop;
+ pContext->coreaudio.AudioUnitAddPropertyListener = (ma_proc)AudioUnitAddPropertyListener;
+ pContext->coreaudio.AudioUnitGetPropertyInfo = (ma_proc)AudioUnitGetPropertyInfo;
+ pContext->coreaudio.AudioUnitGetProperty = (ma_proc)AudioUnitGetProperty;
+ pContext->coreaudio.AudioUnitSetProperty = (ma_proc)AudioUnitSetProperty;
+ pContext->coreaudio.AudioUnitInitialize = (ma_proc)AudioUnitInitialize;
+ pContext->coreaudio.AudioUnitRender = (ma_proc)AudioUnitRender;
+#endif
- *pInfoOut = *pInfo;
+ pContext->isBackendAsynchronous = MA_TRUE;
+
+ pContext->onUninit = ma_context_uninit__coreaudio;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__coreaudio;
+ pContext->onEnumDevices = ma_context_enumerate_devices__coreaudio;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__coreaudio;
+ pContext->onDeviceInit = ma_device_init__coreaudio;
+ pContext->onDeviceUninit = ma_device_uninit__coreaudio;
+ pContext->onDeviceStart = ma_device_start__coreaudio;
+ pContext->onDeviceStop = ma_device_stop__coreaudio;
+
+ /* Audio component. */
+ {
+ AudioComponentDescription desc;
+ desc.componentType = kAudioUnitType_Output;
+ #if defined(MA_APPLE_DESKTOP)
+ desc.componentSubType = kAudioUnitSubType_HALOutput;
+ #else
+ desc.componentSubType = kAudioUnitSubType_RemoteIO;
+ #endif
+ desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ desc.componentFlags = 0;
+ desc.componentFlagsMask = 0;
+
+ pContext->coreaudio.component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc);
+ if (pContext->coreaudio.component == NULL) {
+ #if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
+ ma_dlclose(pContext, pContext->coreaudio.hAudioUnit);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
+ ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
+ #endif
+ return MA_FAILED_TO_INIT_BACKEND;
+ }
+ }
- (void)pPulseContext; /* Unused. */
+ return MA_SUCCESS;
}
+#endif /* Core Audio */
-void ma_device_source_info_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData)
-{
- ma_pa_source_info* pInfoOut;
- if (endOfList > 0) {
- return;
- }
- pInfoOut = (ma_pa_source_info*)pUserData;
- ma_assert(pInfoOut != NULL);
+/******************************************************************************
- *pInfoOut = *pInfo;
+sndio Backend
- (void)pPulseContext; /* Unused. */
-}
+******************************************************************************/
+#ifdef MA_HAS_SNDIO
+#include <fcntl.h>
+#include <sys/stat.h>
+
+/*
+Only supporting OpenBSD. This did not work very well at all on FreeBSD when I tried it. Not sure if this is due
+to miniaudio's implementation or if it's some kind of system configuration issue, but basically the default device
+just doesn't emit any sound, or at times you'll hear tiny pieces. I will consider enabling this when there's
+demand for it or if I can get it tested and debugged more thoroughly.
+*/
+#if 0
+#if defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/audioio.h>
+#endif
+#if defined(__FreeBSD__) || defined(__DragonFly__)
+#include <sys/soundcard.h>
+#endif
+#endif
+
+#define MA_SIO_DEVANY "default"
+#define MA_SIO_PLAY 1
+#define MA_SIO_REC 2
+#define MA_SIO_NENC 8
+#define MA_SIO_NCHAN 8
+#define MA_SIO_NRATE 16
+#define MA_SIO_NCONF 4
+
+struct ma_sio_hdl; /* <-- Opaque */
-void ma_device_sink_name_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData)
+struct ma_sio_par
{
- ma_device* pDevice;
+ unsigned int bits;
+ unsigned int bps;
+ unsigned int sig;
+ unsigned int le;
+ unsigned int msb;
+ unsigned int rchan;
+ unsigned int pchan;
+ unsigned int rate;
+ unsigned int bufsz;
+ unsigned int xrun;
+ unsigned int round;
+ unsigned int appbufsz;
+ int __pad[3];
+ unsigned int __magic;
+};
- if (endOfList > 0) {
- return;
- }
+struct ma_sio_enc
+{
+ unsigned int bits;
+ unsigned int bps;
+ unsigned int sig;
+ unsigned int le;
+ unsigned int msb;
+};
- pDevice = (ma_device*)pUserData;
- ma_assert(pDevice != NULL);
+struct ma_sio_conf
+{
+ unsigned int enc;
+ unsigned int rchan;
+ unsigned int pchan;
+ unsigned int rate;
+};
- ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), pInfo->description, (size_t)-1);
+struct ma_sio_cap
+{
+ struct ma_sio_enc enc[MA_SIO_NENC];
+ unsigned int rchan[MA_SIO_NCHAN];
+ unsigned int pchan[MA_SIO_NCHAN];
+ unsigned int rate[MA_SIO_NRATE];
+ int __pad[7];
+ unsigned int nconf;
+ struct ma_sio_conf confs[MA_SIO_NCONF];
+};
- (void)pPulseContext; /* Unused. */
-}
+typedef struct ma_sio_hdl* (* ma_sio_open_proc) (const char*, unsigned int, int);
+typedef void (* ma_sio_close_proc) (struct ma_sio_hdl*);
+typedef int (* ma_sio_setpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*);
+typedef int (* ma_sio_getpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*);
+typedef int (* ma_sio_getcap_proc) (struct ma_sio_hdl*, struct ma_sio_cap*);
+typedef size_t (* ma_sio_write_proc) (struct ma_sio_hdl*, const void*, size_t);
+typedef size_t (* ma_sio_read_proc) (struct ma_sio_hdl*, void*, size_t);
+typedef int (* ma_sio_start_proc) (struct ma_sio_hdl*);
+typedef int (* ma_sio_stop_proc) (struct ma_sio_hdl*);
+typedef int (* ma_sio_initpar_proc)(struct ma_sio_par*);
-void ma_device_source_name_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData)
+static ma_uint32 ma_get_standard_sample_rate_priority_index__sndio(ma_uint32 sampleRate) /* Lower = higher priority */
{
- ma_device* pDevice;
-
- if (endOfList > 0) {
- return;
+ ma_uint32 i;
+ for (i = 0; i < ma_countof(g_maStandardSampleRatePriorities); ++i) {
+ if (g_maStandardSampleRatePriorities[i] == sampleRate) {
+ return i;
+ }
}
- pDevice = (ma_device*)pUserData;
- ma_assert(pDevice != NULL);
-
- ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), pInfo->description, (size_t)-1);
-
- (void)pPulseContext; /* Unused. */
+ return (ma_uint32)-1;
}
-void ma_device_uninit__pulse(ma_device* pDevice)
+static ma_format ma_format_from_sio_enc__sndio(unsigned int bits, unsigned int bps, unsigned int sig, unsigned int le, unsigned int msb)
{
- ma_context* pContext;
-
- ma_assert(pDevice != NULL);
-
- pContext = pDevice->pContext;
- ma_assert(pContext != NULL);
-
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
- ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+ /* We only support native-endian right now. */
+ if ((ma_is_little_endian() && le == 0) || (ma_is_big_endian() && le == 1)) {
+ return ma_format_unknown;
}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
- ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
+
+ if (bits == 8 && bps == 1 && sig == 0) {
+ return ma_format_u8;
}
-
- ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pDevice->pulse.pPulseContext);
- ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pDevice->pulse.pPulseContext);
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pDevice->pulse.pMainLoop);
+ if (bits == 16 && bps == 2 && sig == 1) {
+ return ma_format_s16;
+ }
+ if (bits == 24 && bps == 3 && sig == 1) {
+ return ma_format_s24;
+ }
+ if (bits == 24 && bps == 4 && sig == 1 && msb == 0) {
+ /*return ma_format_s24_32;*/
+ }
+ if (bits == 32 && bps == 4 && sig == 1) {
+ return ma_format_s32;
+ }
+
+ return ma_format_unknown;
}
-ma_pa_buffer_attr ma_device__pa_buffer_attr_new(ma_uint32 bufferSizeInFrames, ma_uint32 periods, const ma_pa_sample_spec* ss)
+static ma_format ma_find_best_format_from_sio_cap__sndio(struct ma_sio_cap* caps)
{
- ma_pa_buffer_attr attr;
- attr.maxlength = bufferSizeInFrames * ma_get_bytes_per_sample(ma_format_from_pulse(ss->format)) * ss->channels;
- attr.tlength = attr.maxlength / periods;
- attr.prebuf = (ma_uint32)-1;
- attr.minreq = attr.maxlength / periods;
- attr.fragsize = attr.maxlength / periods;
-
- return attr;
-}
+ ma_format bestFormat;
+ unsigned int iConfig;
-ma_pa_stream* ma_device__pa_stream_new__pulse(ma_device* pDevice, const char* pStreamName, const ma_pa_sample_spec* ss, const ma_pa_channel_map* cmap)
-{
- static int g_StreamCounter = 0;
- char actualStreamName[256];
+ MA_ASSERT(caps != NULL);
+
+ bestFormat = ma_format_unknown;
+ for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) {
+ unsigned int iEncoding;
+ for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) {
+ unsigned int bits;
+ unsigned int bps;
+ unsigned int sig;
+ unsigned int le;
+ unsigned int msb;
+ ma_format format;
- if (pStreamName != NULL) {
- ma_strncpy_s(actualStreamName, sizeof(actualStreamName), pStreamName, (size_t)-1);
- } else {
- ma_strcpy_s(actualStreamName, sizeof(actualStreamName), "miniaudio:");
- ma_itoa_s(g_StreamCounter, actualStreamName + 8, sizeof(actualStreamName)-8, 10); /* 8 = strlen("miniaudio:") */
+ if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) {
+ continue;
+ }
+
+ bits = caps->enc[iEncoding].bits;
+ bps = caps->enc[iEncoding].bps;
+ sig = caps->enc[iEncoding].sig;
+ le = caps->enc[iEncoding].le;
+ msb = caps->enc[iEncoding].msb;
+ format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb);
+ if (format == ma_format_unknown) {
+ continue; /* Format not supported. */
+ }
+
+ if (bestFormat == ma_format_unknown) {
+ bestFormat = format;
+ } else {
+ if (ma_get_format_priority_index(bestFormat) > ma_get_format_priority_index(format)) { /* <-- Lower = better. */
+ bestFormat = format;
+ }
+ }
+ }
}
- g_StreamCounter += 1;
-
- return ((ma_pa_stream_new_proc)pDevice->pContext->pulse.pa_stream_new)((ma_pa_context*)pDevice->pulse.pPulseContext, actualStreamName, ss, cmap);
+
+ return bestFormat;
}
-ma_result ma_device_init__pulse(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+static ma_uint32 ma_find_best_channels_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat)
{
- ma_result result = MA_SUCCESS;
- int error = 0;
- const char* devPlayback = NULL;
- const char* devCapture = NULL;
- ma_uint32 bufferSizeInMilliseconds;
- ma_pa_sink_info sinkInfo;
- ma_pa_source_info sourceInfo;
- ma_pa_operation* pOP = NULL;
- ma_pa_sample_spec ss;
- ma_pa_channel_map cmap;
- ma_pa_buffer_attr attr;
- const ma_pa_sample_spec* pActualSS = NULL;
- const ma_pa_channel_map* pActualCMap = NULL;
- const ma_pa_buffer_attr* pActualAttr = NULL;
- ma_uint32 iChannel;
- ma_pa_stream_flags_t streamFlags;
+ ma_uint32 maxChannels;
+ unsigned int iConfig;
- ma_assert(pDevice != NULL);
- ma_zero_object(&pDevice->pulse);
+ MA_ASSERT(caps != NULL);
+ MA_ASSERT(requiredFormat != ma_format_unknown);
+
+ /* Just pick whatever configuration has the most channels. */
+ maxChannels = 0;
+ for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) {
+ /* The encoding should be of requiredFormat. */
+ unsigned int iEncoding;
+ for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) {
+ unsigned int iChannel;
+ unsigned int bits;
+ unsigned int bps;
+ unsigned int sig;
+ unsigned int le;
+ unsigned int msb;
+ ma_format format;
- /* No exclusive mode with the PulseAudio backend. */
- if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
- ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
- }
+ if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) {
+ continue;
+ }
+
+ bits = caps->enc[iEncoding].bits;
+ bps = caps->enc[iEncoding].bps;
+ sig = caps->enc[iEncoding].sig;
+ le = caps->enc[iEncoding].le;
+ msb = caps->enc[iEncoding].msb;
+ format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb);
+ if (format != requiredFormat) {
+ continue;
+ }
+
+ /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. */
+ for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) {
+ unsigned int chan = 0;
+ unsigned int channels;
- if ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID != NULL) {
- devPlayback = pConfig->playback.pDeviceID->pulse;
- }
- if ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.pDeviceID != NULL) {
- devCapture = pConfig->capture.pDeviceID->pulse;
+ if (deviceType == ma_device_type_playback) {
+ chan = caps->confs[iConfig].pchan;
+ } else {
+ chan = caps->confs[iConfig].rchan;
+ }
+
+ if ((chan & (1UL << iChannel)) == 0) {
+ continue;
+ }
+
+ if (deviceType == ma_device_type_playback) {
+ channels = caps->pchan[iChannel];
+ } else {
+ channels = caps->rchan[iChannel];
+ }
+
+ if (maxChannels < channels) {
+ maxChannels = channels;
+ }
+ }
+ }
}
+
+ return maxChannels;
+}
- bufferSizeInMilliseconds = pConfig->bufferSizeInMilliseconds;
- if (bufferSizeInMilliseconds == 0) {
- bufferSizeInMilliseconds = ma_calculate_buffer_size_in_milliseconds_from_frames(pConfig->bufferSizeInFrames, pConfig->sampleRate);
- }
+static ma_uint32 ma_find_best_sample_rate_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat, ma_uint32 requiredChannels)
+{
+ ma_uint32 firstSampleRate;
+ ma_uint32 bestSampleRate;
+ unsigned int iConfig;
- pDevice->pulse.pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)();
- if (pDevice->pulse.pMainLoop == NULL) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create main loop for device.", MA_FAILED_TO_INIT_BACKEND);
- goto on_error0;
- }
+ MA_ASSERT(caps != NULL);
+ MA_ASSERT(requiredFormat != ma_format_unknown);
+ MA_ASSERT(requiredChannels > 0);
+ MA_ASSERT(requiredChannels <= MA_MAX_CHANNELS);
+
+ firstSampleRate = 0; /* <-- If the device does not support a standard rate we'll fall back to the first one that's found. */
+ bestSampleRate = 0;
- pDevice->pulse.pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)((ma_pa_mainloop*)pDevice->pulse.pMainLoop);
- if (pDevice->pulse.pAPI == NULL) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve PulseAudio main loop.", MA_FAILED_TO_INIT_BACKEND);
- goto on_error1;
- }
+ for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) {
+ /* The encoding should be of requiredFormat. */
+ unsigned int iEncoding;
+ for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) {
+ unsigned int iChannel;
+ unsigned int bits;
+ unsigned int bps;
+ unsigned int sig;
+ unsigned int le;
+ unsigned int msb;
+ ma_format format;
- pDevice->pulse.pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)((ma_pa_mainloop_api*)pDevice->pulse.pAPI, pContext->pulse.pApplicationName);
- if (pDevice->pulse.pPulseContext == NULL) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio context for device.", MA_FAILED_TO_INIT_BACKEND);
- goto on_error1;
- }
+ if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) {
+ continue;
+ }
+
+ bits = caps->enc[iEncoding].bits;
+ bps = caps->enc[iEncoding].bps;
+ sig = caps->enc[iEncoding].sig;
+ le = caps->enc[iEncoding].le;
+ msb = caps->enc[iEncoding].msb;
+ format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb);
+ if (format != requiredFormat) {
+ continue;
+ }
+
+ /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. */
+ for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) {
+ unsigned int chan = 0;
+ unsigned int channels;
+ unsigned int iRate;
- error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)((ma_pa_context*)pDevice->pulse.pPulseContext, pContext->pulse.pServerName, (pContext->pulse.tryAutoSpawn) ? 0 : MA_PA_CONTEXT_NOAUTOSPAWN, NULL);
- if (error != MA_PA_OK) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio context.", ma_result_from_pulse(error));
- goto on_error2;
+ if (deviceType == ma_device_type_playback) {
+ chan = caps->confs[iConfig].pchan;
+ } else {
+ chan = caps->confs[iConfig].rchan;
+ }
+
+ if ((chan & (1UL << iChannel)) == 0) {
+ continue;
+ }
+
+ if (deviceType == ma_device_type_playback) {
+ channels = caps->pchan[iChannel];
+ } else {
+ channels = caps->rchan[iChannel];
+ }
+
+ if (channels != requiredChannels) {
+ continue;
+ }
+
+ /* Getting here means we have found a compatible encoding/channel pair. */
+ for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) {
+ ma_uint32 rate = (ma_uint32)caps->rate[iRate];
+ ma_uint32 ratePriority;
+
+ if (firstSampleRate == 0) {
+ firstSampleRate = rate;
+ }
+
+ /* Disregard this rate if it's not a standard one. */
+ ratePriority = ma_get_standard_sample_rate_priority_index__sndio(rate);
+ if (ratePriority == (ma_uint32)-1) {
+ continue;
+ }
+
+ if (ma_get_standard_sample_rate_priority_index__sndio(bestSampleRate) > ratePriority) { /* Lower = better. */
+ bestSampleRate = rate;
+ }
+ }
+ }
+ }
+ }
+
+ /* If a standard sample rate was not found just fall back to the first one that was iterated. */
+ if (bestSampleRate == 0) {
+ bestSampleRate = firstSampleRate;
}
+
+ return bestSampleRate;
+}
- pDevice->pulse.pulseContextState = MA_PA_CONTEXT_UNCONNECTED;
- ((ma_pa_context_set_state_callback_proc)pContext->pulse.pa_context_set_state_callback)((ma_pa_context*)pDevice->pulse.pPulseContext, ma_pulse_device_state_callback, pDevice);
+static ma_bool32 ma_context_is_device_id_equal__sndio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext;
- /* Wait for PulseAudio to get itself ready before returning. */
- for (;;) {
- if (pDevice->pulse.pulseContextState == MA_PA_CONTEXT_READY) {
- break;
- }
+ return ma_strcmp(pID0->sndio, pID1->sndio) == 0;
+}
- /* An error may have occurred. */
- if (pDevice->pulse.pulseContextState == MA_PA_CONTEXT_FAILED || pDevice->pulse.pulseContextState == MA_PA_CONTEXT_TERMINATED) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while connecting the PulseAudio context.", MA_ERROR);
- goto on_error3;
- }
+static ma_result ma_context_enumerate_devices__sndio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ ma_bool32 isTerminating = MA_FALSE;
+ struct ma_sio_hdl* handle;
- error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL);
- if (error < 0) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] The PulseAudio main loop returned an error while connecting the PulseAudio context.", ma_result_from_pulse(error));
- goto on_error3;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
+
+ /* sndio doesn't seem to have a good device enumeration API, so I'm therefore only enumerating over default devices for now. */
+
+ /* Playback. */
+ if (!isTerminating) {
+ handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_PLAY, 0);
+ if (handle != NULL) {
+ /* Supports playback. */
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), MA_SIO_DEVANY);
+ ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME);
+
+ isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+
+ ((ma_sio_close_proc)pContext->sndio.sio_close)(handle);
}
}
+
+ /* Capture. */
+ if (!isTerminating) {
+ handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_REC, 0);
+ if (handle != NULL) {
+ /* Supports capture. */
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), "default");
+ ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME);
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devCapture, ma_device_source_info_callback, &sourceInfo);
- if (pOP != NULL) {
- ma_device__wait_for_operation__pulse(pDevice, pOP);
- ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
- } else {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve source info for capture device.", ma_result_from_pulse(error));
- goto on_error3;
+ isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+
+ ((ma_sio_close_proc)pContext->sndio.sio_close)(handle);
}
+ }
+
+ return MA_SUCCESS;
+}
- ss = sourceInfo.sample_spec;
- cmap = sourceInfo.channel_map;
-
- pDevice->capture.internalBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(bufferSizeInMilliseconds, ss.rate);
- pDevice->capture.internalPeriods = pConfig->periods;
-
- attr = ma_device__pa_buffer_attr_new(pDevice->capture.internalBufferSizeInFrames, pConfig->periods, &ss);
- #ifdef MA_DEBUG_OUTPUT
- printf("[PulseAudio] Capture attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalBufferSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->capture.internalBufferSizeInFrames);
- #endif
-
- pDevice->pulse.pStreamCapture = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNameCapture, &ss, &cmap);
- if (pDevice->pulse.pStreamCapture == NULL) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio capture stream.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- goto on_error3;
- }
+static ma_result ma_context_get_device_info__sndio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+{
+ char devid[256];
+ struct ma_sio_hdl* handle;
+ struct ma_sio_cap caps;
+ unsigned int iConfig;
- streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_FIX_FORMAT | MA_PA_STREAM_FIX_RATE | MA_PA_STREAM_FIX_CHANNELS;
- if (devCapture != NULL) {
- streamFlags |= MA_PA_STREAM_DONT_MOVE;
- }
+ MA_ASSERT(pContext != NULL);
+ (void)shareMode;
+
+ /* We need to open the device before we can get information about it. */
+ if (pDeviceID == NULL) {
+ ma_strcpy_s(devid, sizeof(devid), MA_SIO_DEVANY);
+ ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (deviceType == ma_device_type_playback) ? MA_DEFAULT_PLAYBACK_DEVICE_NAME : MA_DEFAULT_CAPTURE_DEVICE_NAME);
+ } else {
+ ma_strcpy_s(devid, sizeof(devid), pDeviceID->sndio);
+ ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), devid);
+ }
+
+ handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(devid, (deviceType == ma_device_type_playback) ? MA_SIO_PLAY : MA_SIO_REC, 0);
+ if (handle == NULL) {
+ return MA_NO_DEVICE;
+ }
+
+ if (((ma_sio_getcap_proc)pContext->sndio.sio_getcap)(handle, &caps) == 0) {
+ return MA_ERROR;
+ }
+
+ for (iConfig = 0; iConfig < caps.nconf; iConfig += 1) {
+ /*
+ The main thing we care about is that the encoding is supported by miniaudio. If it is, we want to give
+ preference to some formats over others.
+ */
+ unsigned int iEncoding;
+ unsigned int iChannel;
+ unsigned int iRate;
- error = ((ma_pa_stream_connect_record_proc)pContext->pulse.pa_stream_connect_record)((ma_pa_stream*)pDevice->pulse.pStreamCapture, devCapture, &attr, streamFlags);
- if (error != MA_PA_OK) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio capture stream.", ma_result_from_pulse(error));
- goto on_error4;
- }
+ for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) {
+ unsigned int bits;
+ unsigned int bps;
+ unsigned int sig;
+ unsigned int le;
+ unsigned int msb;
+ ma_format format;
+ ma_bool32 formatExists = MA_FALSE;
+ ma_uint32 iExistingFormat;
- while (((ma_pa_stream_get_state_proc)pContext->pulse.pa_stream_get_state)((ma_pa_stream*)pDevice->pulse.pStreamCapture) != MA_PA_STREAM_READY) {
- error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL);
- if (error < 0) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] The PulseAudio main loop returned an error while connecting the PulseAudio capture stream.", ma_result_from_pulse(error));
- goto on_error5;
+ if ((caps.confs[iConfig].enc & (1UL << iEncoding)) == 0) {
+ continue;
}
- }
-
- /* Internal format. */
- pActualSS = ((ma_pa_stream_get_sample_spec_proc)pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
- if (pActualSS != NULL) {
- /* If anything has changed between the requested and the actual sample spec, we need to update the buffer. */
- if (ss.format != pActualSS->format || ss.channels != pActualSS->channels || ss.rate != pActualSS->rate) {
- attr = ma_device__pa_buffer_attr_new(pDevice->capture.internalBufferSizeInFrames, pConfig->periods, pActualSS);
-
- pOP = ((ma_pa_stream_set_buffer_attr_proc)pContext->pulse.pa_stream_set_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamCapture, &attr, NULL, NULL);
- if (pOP != NULL) {
- ma_device__wait_for_operation__pulse(pDevice, pOP);
- ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+
+ bits = caps.enc[iEncoding].bits;
+ bps = caps.enc[iEncoding].bps;
+ sig = caps.enc[iEncoding].sig;
+ le = caps.enc[iEncoding].le;
+ msb = caps.enc[iEncoding].msb;
+ format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb);
+ if (format == ma_format_unknown) {
+ continue; /* Format not supported. */
+ }
+
+ /* Add this format if it doesn't already exist. */
+ for (iExistingFormat = 0; iExistingFormat < pDeviceInfo->formatCount; iExistingFormat += 1) {
+ if (pDeviceInfo->formats[iExistingFormat] == format) {
+ formatExists = MA_TRUE;
+ break;
}
}
-
- ss = *pActualSS;
- }
-
- pDevice->capture.internalFormat = ma_format_from_pulse(ss.format);
- pDevice->capture.internalChannels = ss.channels;
- pDevice->capture.internalSampleRate = ss.rate;
-
- /* Internal channel map. */
- pActualCMap = ((ma_pa_stream_get_channel_map_proc)pContext->pulse.pa_stream_get_channel_map)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
- if (pActualCMap != NULL) {
- cmap = *pActualCMap;
- }
- for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) {
- pDevice->capture.internalChannelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]);
+
+ if (!formatExists) {
+ pDeviceInfo->formats[pDeviceInfo->formatCount++] = format;
+ }
}
+
+ /* Channels. */
+ for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) {
+ unsigned int chan = 0;
+ unsigned int channels;
- /* Buffer. */
- pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
- if (pActualAttr != NULL) {
- attr = *pActualAttr;
+ if (deviceType == ma_device_type_playback) {
+ chan = caps.confs[iConfig].pchan;
+ } else {
+ chan = caps.confs[iConfig].rchan;
+ }
+
+ if ((chan & (1UL << iChannel)) == 0) {
+ continue;
+ }
+
+ if (deviceType == ma_device_type_playback) {
+ channels = caps.pchan[iChannel];
+ } else {
+ channels = caps.rchan[iChannel];
+ }
+
+ if (pDeviceInfo->minChannels > channels) {
+ pDeviceInfo->minChannels = channels;
+ }
+ if (pDeviceInfo->maxChannels < channels) {
+ pDeviceInfo->maxChannels = channels;
+ }
}
- pDevice->capture.internalBufferSizeInFrames = attr.maxlength / (ma_get_bytes_per_sample(pDevice->capture.internalFormat) * pDevice->capture.internalChannels);
- pDevice->capture.internalPeriods = attr.maxlength / attr.fragsize;
- #ifdef MA_DEBUG_OUTPUT
- printf("[PulseAudio] Capture actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalBufferSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->capture.internalBufferSizeInFrames);
- #endif
-
- /* Name. */
- devCapture = ((ma_pa_stream_get_device_name_proc)pContext->pulse.pa_stream_get_device_name)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
- if (devCapture != NULL) {
- ma_pa_operation* pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devCapture, ma_device_source_name_callback, pDevice);
- if (pOP != NULL) {
- ma_device__wait_for_operation__pulse(pDevice, pOP);
- ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+
+ /* Sample rates. */
+ for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) {
+ if ((caps.confs[iConfig].rate & (1UL << iRate)) != 0) {
+ unsigned int rate = caps.rate[iRate];
+ if (pDeviceInfo->minSampleRate > rate) {
+ pDeviceInfo->minSampleRate = rate;
+ }
+ if (pDeviceInfo->maxSampleRate < rate) {
+ pDeviceInfo->maxSampleRate = rate;
+ }
}
}
}
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devPlayback, ma_device_sink_info_callback, &sinkInfo);
- if (pOP != NULL) {
- ma_device__wait_for_operation__pulse(pDevice, pOP);
- ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
- } else {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve sink info for playback device.", ma_result_from_pulse(error));
- goto on_error3;
- }
-
- ss = sinkInfo.sample_spec;
- cmap = sinkInfo.channel_map;
-
- pDevice->playback.internalBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(bufferSizeInMilliseconds, ss.rate);
- pDevice->playback.internalPeriods = pConfig->periods;
+ ((ma_sio_close_proc)pContext->sndio.sio_close)(handle);
+ return MA_SUCCESS;
+}
- attr = ma_device__pa_buffer_attr_new(pDevice->playback.internalBufferSizeInFrames, pConfig->periods, &ss);
- #ifdef MA_DEBUG_OUTPUT
- printf("[PulseAudio] Playback attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalBufferSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->playback.internalBufferSizeInFrames);
- #endif
+static void ma_device_uninit__sndio(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
- pDevice->pulse.pStreamPlayback = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNamePlayback, &ss, &cmap);
- if (pDevice->pulse.pStreamPlayback == NULL) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio playback stream.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- goto on_error3;
- }
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handleCapture);
+ }
- streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_FIX_FORMAT | MA_PA_STREAM_FIX_RATE | MA_PA_STREAM_FIX_CHANNELS;
- if (devPlayback != NULL) {
- streamFlags |= MA_PA_STREAM_DONT_MOVE;
- }
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback);
+ }
+}
- error = ((ma_pa_stream_connect_playback_proc)pContext->pulse.pa_stream_connect_playback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, devPlayback, &attr, streamFlags, NULL, NULL);
- if (error != MA_PA_OK) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio playback stream.", ma_result_from_pulse(error));
- goto on_error6;
- }
+static ma_result ma_device_init_handle__sndio(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice)
+{
+ const char* pDeviceName;
+ ma_ptr handle;
+ int openFlags = 0;
+ struct ma_sio_cap caps;
+ struct ma_sio_par par;
+ ma_device_id* pDeviceID;
+ ma_format format;
+ ma_uint32 channels;
+ ma_uint32 sampleRate;
+ ma_format internalFormat;
+ ma_uint32 internalChannels;
+ ma_uint32 internalSampleRate;
+ ma_uint32 internalPeriodSizeInFrames;
+ ma_uint32 internalPeriods;
- while (((ma_pa_stream_get_state_proc)pContext->pulse.pa_stream_get_state)((ma_pa_stream*)pDevice->pulse.pStreamPlayback) != MA_PA_STREAM_READY) {
- error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL);
- if (error < 0) {
- result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] The PulseAudio main loop returned an error while connecting the PulseAudio playback stream.", ma_result_from_pulse(error));
- goto on_error7;
- }
- }
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(deviceType != ma_device_type_duplex);
+ MA_ASSERT(pDevice != NULL);
- /* Internal format. */
- pActualSS = ((ma_pa_stream_get_sample_spec_proc)pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
- if (pActualSS != NULL) {
- /* If anything has changed between the requested and the actual sample spec, we need to update the buffer. */
- if (ss.format != pActualSS->format || ss.channels != pActualSS->channels || ss.rate != pActualSS->rate) {
- attr = ma_device__pa_buffer_attr_new(pDevice->playback.internalBufferSizeInFrames, pConfig->periods, pActualSS);
+ if (deviceType == ma_device_type_capture) {
+ openFlags = MA_SIO_REC;
+ pDeviceID = pConfig->capture.pDeviceID;
+ format = pConfig->capture.format;
+ channels = pConfig->capture.channels;
+ sampleRate = pConfig->sampleRate;
+ } else {
+ openFlags = MA_SIO_PLAY;
+ pDeviceID = pConfig->playback.pDeviceID;
+ format = pConfig->playback.format;
+ channels = pConfig->playback.channels;
+ sampleRate = pConfig->sampleRate;
+ }
- pOP = ((ma_pa_stream_set_buffer_attr_proc)pContext->pulse.pa_stream_set_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, &attr, NULL, NULL);
- if (pOP != NULL) {
- ma_device__wait_for_operation__pulse(pDevice, pOP);
- ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
- }
- }
+ pDeviceName = MA_SIO_DEVANY;
+ if (pDeviceID != NULL) {
+ pDeviceName = pDeviceID->sndio;
+ }
- ss = *pActualSS;
- }
+ handle = (ma_ptr)((ma_sio_open_proc)pContext->sndio.sio_open)(pDeviceName, openFlags, 0);
+ if (handle == NULL) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to open device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ }
- pDevice->playback.internalFormat = ma_format_from_pulse(ss.format);
- pDevice->playback.internalChannels = ss.channels;
- pDevice->playback.internalSampleRate = ss.rate;
+ /* We need to retrieve the device caps to determine the most appropriate format to use. */
+ if (((ma_sio_getcap_proc)pContext->sndio.sio_getcap)((struct ma_sio_hdl*)handle, &caps) == 0) {
+ ((ma_sio_close_proc)pContext->sndio.sio_close)((struct ma_sio_hdl*)handle);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve device caps.", MA_ERROR);
+ }
- /* Internal channel map. */
- pActualCMap = ((ma_pa_stream_get_channel_map_proc)pContext->pulse.pa_stream_get_channel_map)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
- if (pActualCMap != NULL) {
- cmap = *pActualCMap;
+ /*
+ Note: sndio reports a huge range of available channels. This is inconvenient for us because there's no real
+ way, as far as I can tell, to get the _actual_ channel count of the device. I'm therefore restricting this
+ to the requested channels, regardless of whether or not the default channel count is requested.
+
+ For hardware devices, I'm suspecting only a single channel count will be reported and we can safely use the
+ value returned by ma_find_best_channels_from_sio_cap__sndio().
+ */
+ if (deviceType == ma_device_type_capture) {
+ if (pDevice->capture.usingDefaultFormat) {
+ format = ma_find_best_format_from_sio_cap__sndio(&caps);
}
- for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) {
- pDevice->playback.internalChannelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]);
+ if (pDevice->capture.usingDefaultChannels) {
+ if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) {
+ channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format);
+ }
}
-
- /* Buffer. */
- pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
- if (pActualAttr != NULL) {
- attr = *pActualAttr;
+ } else {
+ if (pDevice->playback.usingDefaultFormat) {
+ format = ma_find_best_format_from_sio_cap__sndio(&caps);
}
- pDevice->playback.internalBufferSizeInFrames = attr.maxlength / (ma_get_bytes_per_sample(pDevice->playback.internalFormat) * pDevice->playback.internalChannels);
- pDevice->playback.internalPeriods = /*pConfig->periods;*/attr.maxlength / attr.tlength;
- #ifdef MA_DEBUG_OUTPUT
- printf("[PulseAudio] Playback actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalBufferSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->playback.internalBufferSizeInFrames);
- #endif
-
- /* Name. */
- devPlayback = ((ma_pa_stream_get_device_name_proc)pContext->pulse.pa_stream_get_device_name)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
- if (devPlayback != NULL) {
- ma_pa_operation* pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devPlayback, ma_device_sink_name_callback, pDevice);
- if (pOP != NULL) {
- ma_device__wait_for_operation__pulse(pDevice, pOP);
- ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ if (pDevice->playback.usingDefaultChannels) {
+ if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) {
+ channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format);
}
}
}
-
- return MA_SUCCESS;
-
-
-on_error7:
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
- }
-on_error6:
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
- }
-on_error5:
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
- }
-on_error4:
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
+
+ if (pDevice->usingDefaultSampleRate) {
+ sampleRate = ma_find_best_sample_rate_from_sio_cap__sndio(&caps, pConfig->deviceType, format, channels);
}
-on_error3: ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pDevice->pulse.pPulseContext);
-on_error2: ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pDevice->pulse.pPulseContext);
-on_error1: ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pDevice->pulse.pMainLoop);
-on_error0:
- return result;
-}
-
-
-void ma_pulse_operation_complete_callback(ma_pa_stream* pStream, int success, void* pUserData)
-{
- ma_bool32* pIsSuccessful = (ma_bool32*)pUserData;
- ma_assert(pIsSuccessful != NULL);
-
- *pIsSuccessful = (ma_bool32)success;
- (void)pStream; /* Unused. */
-}
-
-ma_result ma_device__cork_stream__pulse(ma_device* pDevice, ma_device_type deviceType, int cork)
-{
- ma_context* pContext = pDevice->pContext;
- ma_bool32 wasSuccessful;
- ma_pa_stream* pStream;
- ma_pa_operation* pOP;
- ma_result result;
- /* This should not be called with a duplex device type. */
- if (deviceType == ma_device_type_duplex) {
- return MA_INVALID_ARGS;
+ ((ma_sio_initpar_proc)pDevice->pContext->sndio.sio_initpar)(&par);
+ par.msb = 0;
+ par.le = ma_is_little_endian();
+
+ switch (format) {
+ case ma_format_u8:
+ {
+ par.bits = 8;
+ par.bps = 1;
+ par.sig = 0;
+ } break;
+
+ case ma_format_s24:
+ {
+ par.bits = 24;
+ par.bps = 3;
+ par.sig = 1;
+ } break;
+
+ case ma_format_s32:
+ {
+ par.bits = 32;
+ par.bps = 4;
+ par.sig = 1;
+ } break;
+
+ case ma_format_s16:
+ case ma_format_f32:
+ default:
+ {
+ par.bits = 16;
+ par.bps = 2;
+ par.sig = 1;
+ } break;
+ }
+
+ if (deviceType == ma_device_type_capture) {
+ par.rchan = channels;
+ } else {
+ par.pchan = channels;
}
- wasSuccessful = MA_FALSE;
+ par.rate = sampleRate;
- pStream = (ma_pa_stream*)((deviceType == ma_device_type_capture) ? pDevice->pulse.pStreamCapture : pDevice->pulse.pStreamPlayback);
- ma_assert(pStream != NULL);
+ internalPeriodSizeInFrames = pConfig->periodSizeInFrames;
+ if (internalPeriodSizeInFrames == 0) {
+ internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, par.rate);
+ }
- pOP = ((ma_pa_stream_cork_proc)pContext->pulse.pa_stream_cork)(pStream, cork, ma_pulse_operation_complete_callback, &wasSuccessful);
- if (pOP == NULL) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to cork PulseAudio stream.", (cork == 0) ? MA_FAILED_TO_START_BACKEND_DEVICE : MA_FAILED_TO_STOP_BACKEND_DEVICE);
+ par.round = internalPeriodSizeInFrames;
+ par.appbufsz = par.round * pConfig->periods;
+
+ if (((ma_sio_setpar_proc)pContext->sndio.sio_setpar)((struct ma_sio_hdl*)handle, &par) == 0) {
+ ((ma_sio_close_proc)pContext->sndio.sio_close)((struct ma_sio_hdl*)handle);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to set buffer size.", MA_FORMAT_NOT_SUPPORTED);
+ }
+ if (((ma_sio_getpar_proc)pContext->sndio.sio_getpar)((struct ma_sio_hdl*)handle, &par) == 0) {
+ ((ma_sio_close_proc)pContext->sndio.sio_close)((struct ma_sio_hdl*)handle);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve buffer size.", MA_FORMAT_NOT_SUPPORTED);
}
- result = ma_device__wait_for_operation__pulse(pDevice, pOP);
- ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);
+ internalFormat = ma_format_from_sio_enc__sndio(par.bits, par.bps, par.sig, par.le, par.msb);
+ internalChannels = (deviceType == ma_device_type_capture) ? par.rchan : par.pchan;
+ internalSampleRate = par.rate;
+ internalPeriods = par.appbufsz / par.round;
+ internalPeriodSizeInFrames = par.round;
- if (result != MA_SUCCESS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while waiting for the PulseAudio stream to cork.", result);
+ if (deviceType == ma_device_type_capture) {
+ pDevice->sndio.handleCapture = handle;
+ pDevice->capture.internalFormat = internalFormat;
+ pDevice->capture.internalChannels = internalChannels;
+ pDevice->capture.internalSampleRate = internalSampleRate;
+ ma_get_standard_channel_map(ma_standard_channel_map_sndio, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
+ pDevice->capture.internalPeriodSizeInFrames = internalPeriodSizeInFrames;
+ pDevice->capture.internalPeriods = internalPeriods;
+ } else {
+ pDevice->sndio.handlePlayback = handle;
+ pDevice->playback.internalFormat = internalFormat;
+ pDevice->playback.internalChannels = internalChannels;
+ pDevice->playback.internalSampleRate = internalSampleRate;
+ ma_get_standard_channel_map(ma_standard_channel_map_sndio, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
+ pDevice->playback.internalPeriodSizeInFrames = internalPeriodSizeInFrames;
+ pDevice->playback.internalPeriods = internalPeriods;
}
- if (!wasSuccessful) {
- if (cork) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to stop PulseAudio stream.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
- } else {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to start PulseAudio stream.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
- }
+#ifdef MA_DEBUG_OUTPUT
+ printf("DEVICE INFO\n");
+ printf(" Format: %s\n", ma_get_format_name(internalFormat));
+ printf(" Channels: %d\n", internalChannels);
+ printf(" Sample Rate: %d\n", internalSampleRate);
+ printf(" Period Size: %d\n", internalPeriodSizeInFrames);
+ printf(" Periods: %d\n", internalPeriods);
+ printf(" appbufsz: %d\n", par.appbufsz);
+ printf(" round: %d\n", par.round);
+#endif
return MA_SUCCESS;
}
-ma_result ma_device_stop__pulse(ma_device* pDevice)
+static ma_result ma_device_init__sndio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
{
- ma_result result;
- ma_bool32 wasSuccessful;
- ma_pa_operation* pOP;
+ MA_ASSERT(pDevice != NULL);
- ma_assert(pDevice != NULL);
+ MA_ZERO_OBJECT(&pDevice->sndio);
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 1);
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
+ }
+
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ ma_result result = ma_device_init_handle__sndio(pContext, pConfig, ma_device_type_capture, pDevice);
if (result != MA_SUCCESS) {
return result;
}
}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- /* The stream needs to be drained if it's a playback device. */
- pOP = ((ma_pa_stream_drain_proc)pDevice->pContext->pulse.pa_stream_drain)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_pulse_operation_complete_callback, &wasSuccessful);
- if (pOP != NULL) {
- ma_device__wait_for_operation__pulse(pDevice, pOP);
- ((ma_pa_operation_unref_proc)pDevice->pContext->pulse.pa_operation_unref)(pOP);
- }
-
- result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 1);
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ma_result result = ma_device_init_handle__sndio(pContext, pConfig, ma_device_type_playback, pDevice);
if (result != MA_SUCCESS) {
return result;
}
@@ -15321,10880 +24878,10548 @@ ma_result ma_device_stop__pulse(ma_device* pDevice)
return MA_SUCCESS;
}
-ma_result ma_device_write__pulse(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount)
+static ma_result ma_device_stop__sndio(ma_device* pDevice)
{
- ma_uint32 totalFramesWritten;
-
- ma_assert(pDevice != NULL);
- ma_assert(pPCMFrames != NULL);
- ma_assert(frameCount > 0);
-
- /* The stream needs to be uncorked first. */
- if (((ma_pa_stream_is_corked_proc)pDevice->pContext->pulse.pa_stream_is_corked)((ma_pa_stream*)pDevice->pulse.pStreamPlayback)) {
- ma_result result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 0);
- if (result != MA_SUCCESS) {
- return result;
- }
- }
-
- totalFramesWritten = 0;
- while (totalFramesWritten < frameCount) {
- /* Place the data into the mapped buffer if we have one. */
- if (pDevice->pulse.pMappedBufferPlayback != NULL && pDevice->pulse.mappedBufferFramesRemainingPlayback > 0) {
- ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- ma_uint32 mappedBufferFramesConsumed = pDevice->pulse.mappedBufferFramesCapacityPlayback - pDevice->pulse.mappedBufferFramesRemainingPlayback;
-
- void* pDst = (ma_uint8*)pDevice->pulse.pMappedBufferPlayback + (mappedBufferFramesConsumed * bpf);
- const void* pSrc = (const ma_uint8*)pPCMFrames + (totalFramesWritten * bpf);
- ma_uint32 framesToCopy = ma_min(pDevice->pulse.mappedBufferFramesRemainingPlayback, (frameCount - totalFramesWritten));
- ma_copy_memory(pDst, pSrc, framesToCopy * bpf);
+ MA_ASSERT(pDevice != NULL);
- pDevice->pulse.mappedBufferFramesRemainingPlayback -= framesToCopy;
- totalFramesWritten += framesToCopy;
- }
-
- /*
- Getting here means we've run out of data in the currently mapped chunk. We need to write this to the device and then try
- mapping another chunk. If this fails we need to wait for space to become available.
- */
- if (pDevice->pulse.mappedBufferFramesCapacityPlayback > 0 && pDevice->pulse.mappedBufferFramesRemainingPlayback == 0) {
- size_t nbytes = pDevice->pulse.mappedBufferFramesCapacityPlayback * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
-
- int error = ((ma_pa_stream_write_proc)pDevice->pContext->pulse.pa_stream_write)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, pDevice->pulse.pMappedBufferPlayback, nbytes, NULL, 0, MA_PA_SEEK_RELATIVE);
- if (error < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to write data to the PulseAudio stream.", ma_result_from_pulse(error));
- }
-
- pDevice->pulse.pMappedBufferPlayback = NULL;
- pDevice->pulse.mappedBufferFramesRemainingPlayback = 0;
- pDevice->pulse.mappedBufferFramesCapacityPlayback = 0;
- }
-
- ma_assert(totalFramesWritten <= frameCount);
- if (totalFramesWritten == frameCount) {
- break;
- }
-
- /* Getting here means we need to map a new buffer. If we don't have enough space we need to wait for more. */
- for (;;) {
- size_t writableSizeInBytes;
-
- /* If the device has been corked, don't try to continue. */
- if (((ma_pa_stream_is_corked_proc)pDevice->pContext->pulse.pa_stream_is_corked)((ma_pa_stream*)pDevice->pulse.pStreamPlayback)) {
- break;
- }
+ /*
+ From the documentation:
- writableSizeInBytes = ((ma_pa_stream_writable_size_proc)pDevice->pContext->pulse.pa_stream_writable_size)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
- if (writableSizeInBytes != (size_t)-1) {
- /*size_t periodSizeInBytes = (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods) * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);*/
- if (writableSizeInBytes > 0) {
- /* Data is avaialable. */
- size_t bytesToMap = writableSizeInBytes;
- int error = ((ma_pa_stream_begin_write_proc)pDevice->pContext->pulse.pa_stream_begin_write)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, &pDevice->pulse.pMappedBufferPlayback, &bytesToMap);
- if (error < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to map write buffer.", ma_result_from_pulse(error));
- }
+ The sio_stop() function puts the audio subsystem in the same state as before sio_start() is called. It stops recording, drains the play buffer and then
+ stops playback. If samples to play are queued but playback hasn't started yet then playback is forced immediately; playback will actually stop once the
+ buffer is drained. In no case are samples in the play buffer discarded.
- pDevice->pulse.mappedBufferFramesCapacityPlayback = bytesToMap / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- pDevice->pulse.mappedBufferFramesRemainingPlayback = pDevice->pulse.mappedBufferFramesCapacityPlayback;
+ Therefore, sio_stop() performs all of the necessary draining for us.
+ */
- break;
- } else {
- /* No data available. Need to wait for more. */
- int error = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL);
- if (error < 0) {
- return ma_result_from_pulse(error);
- }
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ((ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop)((struct ma_sio_hdl*)pDevice->sndio.handleCapture);
+ }
- continue;
- }
- } else {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to query the stream's writable size.", MA_ERROR);
- }
- }
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ((ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback);
}
return MA_SUCCESS;
}
-ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount)
+static ma_result ma_device_write__sndio(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
{
- ma_uint32 totalFramesRead;
-
- ma_assert(pDevice != NULL);
- ma_assert(pPCMFrames != NULL);
- ma_assert(frameCount > 0);
+ int result;
- /* The stream needs to be uncorked first. */
- if (((ma_pa_stream_is_corked_proc)pDevice->pContext->pulse.pa_stream_is_corked)((ma_pa_stream*)pDevice->pulse.pStreamCapture)) {
- ma_result result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 0);
- if (result != MA_SUCCESS) {
- return result;
- }
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = 0;
}
- totalFramesRead = 0;
- while (totalFramesRead < frameCount) {
- if (ma_device__get_state(pDevice) != MA_STATE_STARTED) {
- break;
- }
-
- /* If a buffer is mapped we need to write to that first. Once it's consumed we reset the event and unmap it. */
- if (pDevice->pulse.pMappedBufferCapture != NULL && pDevice->pulse.mappedBufferFramesRemainingCapture > 0) {
- ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- ma_uint32 mappedBufferFramesConsumed = pDevice->pulse.mappedBufferFramesCapacityCapture - pDevice->pulse.mappedBufferFramesRemainingCapture;
-
- ma_uint32 framesToCopy = ma_min(pDevice->pulse.mappedBufferFramesRemainingCapture, (frameCount - totalFramesRead));
- void* pDst = (ma_uint8*)pPCMFrames + (totalFramesRead * bpf);
-
- /*
- This little bit of logic here is specifically for PulseAudio and it's hole management. The buffer pointer will be set to NULL
- when the current fragment is a hole. For a hole we just output silence.
- */
- if (pDevice->pulse.pMappedBufferCapture != NULL) {
- const void* pSrc = (const ma_uint8*)pDevice->pulse.pMappedBufferCapture + (mappedBufferFramesConsumed * bpf);
- ma_copy_memory(pDst, pSrc, framesToCopy * bpf);
- } else {
- ma_zero_memory(pDst, framesToCopy * bpf);
- }
-
- pDevice->pulse.mappedBufferFramesRemainingCapture -= framesToCopy;
- totalFramesRead += framesToCopy;
- }
-
- /*
- Getting here means we've run out of data in the currently mapped chunk. We need to drop this from the device and then try
- mapping another chunk. If this fails we need to wait for data to become available.
- */
- if (pDevice->pulse.mappedBufferFramesCapacityCapture > 0 && pDevice->pulse.mappedBufferFramesRemainingCapture == 0) {
- int error = ((ma_pa_stream_drop_proc)pDevice->pContext->pulse.pa_stream_drop)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
- if (error != 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to drop fragment.", ma_result_from_pulse(error));
- }
-
- pDevice->pulse.pMappedBufferCapture = NULL;
- pDevice->pulse.mappedBufferFramesRemainingCapture = 0;
- pDevice->pulse.mappedBufferFramesCapacityCapture = 0;
- }
-
- ma_assert(totalFramesRead <= frameCount);
- if (totalFramesRead == frameCount) {
- break;
- }
-
- /* Getting here means we need to map a new buffer. If we don't have enough data we wait for more. */
- for (;;) {
- size_t readableSizeInBytes;
+ result = ((ma_sio_write_proc)pDevice->pContext->sndio.sio_write)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
+ if (result == 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to send data from the client to the device.", MA_IO_ERROR);
+ }
- if (ma_device__get_state(pDevice) != MA_STATE_STARTED) {
- break;
- }
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = frameCount;
+ }
+
+ return MA_SUCCESS;
+}
- /* If the device has been corked, don't try to continue. */
- if (((ma_pa_stream_is_corked_proc)pDevice->pContext->pulse.pa_stream_is_corked)((ma_pa_stream*)pDevice->pulse.pStreamCapture)) {
- break;
- }
+static ma_result ma_device_read__sndio(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead)
+{
+ int result;
- readableSizeInBytes = ((ma_pa_stream_readable_size_proc)pDevice->pContext->pulse.pa_stream_readable_size)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
- if (readableSizeInBytes != (size_t)-1) {
- /*size_t periodSizeInBytes = (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods) * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);*/
- if (readableSizeInBytes > 0) {
- /* Data is avaialable. */
- size_t bytesMapped = (size_t)-1;
- int error = ((ma_pa_stream_peek_proc)pDevice->pContext->pulse.pa_stream_peek)((ma_pa_stream*)pDevice->pulse.pStreamCapture, &pDevice->pulse.pMappedBufferCapture, &bytesMapped);
- if (error < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to peek capture buffer.", ma_result_from_pulse(error));
- }
+ if (pFramesRead != NULL) {
+ *pFramesRead = 0;
+ }
- if (pDevice->pulse.pMappedBufferCapture == NULL && bytesMapped == 0) {
- /* Nothing available. This shouldn't happen because we checked earlier with pa_stream_readable_size(). I'm going to throw an error in this case. */
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Nothing available after peeking capture buffer.", MA_ERROR);
- }
+ result = ((ma_sio_read_proc)pDevice->pContext->sndio.sio_read)((struct ma_sio_hdl*)pDevice->sndio.handleCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+ if (result == 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to read data from the device to be sent to the device.", MA_IO_ERROR);
+ }
- pDevice->pulse.mappedBufferFramesCapacityCapture = bytesMapped / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- pDevice->pulse.mappedBufferFramesRemainingCapture = pDevice->pulse.mappedBufferFramesCapacityCapture;
+ if (pFramesRead != NULL) {
+ *pFramesRead = frameCount;
+ }
+
+ return MA_SUCCESS;
+}
- break;
- } else {
- /* No data available. Need to wait for more. */
+static ma_result ma_device_main_loop__sndio(ma_device* pDevice)
+{
+ ma_result result = MA_SUCCESS;
+ ma_bool32 exitLoop = MA_FALSE;
- /*
- I have had reports of a deadlock in this part of the code. I have reproduced this when using the "Built-in Audio Analogue Stereo" device without
- an actual microphone connected. I'm experimenting here by not blocking in pa_mainloop_iterate() and instead sleep for a bit when there are no
- dispatches.
- */
- int error = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 0, NULL);
- if (error < 0) {
- return ma_result_from_pulse(error);
+ /* Devices need to be started here. */
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ((ma_sio_start_proc)pDevice->pContext->sndio.sio_start)((struct ma_sio_hdl*)pDevice->sndio.handleCapture);
+ }
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ((ma_sio_start_proc)pDevice->pContext->sndio.sio_start)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback); /* <-- Doesn't actually playback until data is written. */
+ }
+
+ while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) {
+ switch (pDevice->type)
+ {
+ case ma_device_type_duplex:
+ {
+ /* The process is: device_read -> convert -> callback -> convert -> device_write */
+ ma_uint32 totalCapturedDeviceFramesProcessed = 0;
+ ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames);
+
+ while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) {
+ ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 capturedDeviceFramesRemaining;
+ ma_uint32 capturedDeviceFramesProcessed;
+ ma_uint32 capturedDeviceFramesToProcess;
+ ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed;
+ if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) {
+ capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames;
}
- /* Sleep for a bit if nothing was dispatched. */
- if (error == 0) {
- ma_sleep(1);
+ result = ma_device_read__sndio(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
}
- continue;
- }
- } else {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to query the stream's readable size.", MA_ERROR);
- }
- }
- }
+ capturedDeviceFramesRemaining = capturedDeviceFramesToProcess;
+ capturedDeviceFramesProcessed = 0;
- return MA_SUCCESS;
-}
+ for (;;) {
+ ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+ ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+ ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames);
+ ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining;
+ ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+
+ /* Convert capture data from device format to client format. */
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+ /*
+                            If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small
+ which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE.
+ */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
-ma_result ma_context_uninit__pulse(ma_context* pContext)
-{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_pulseaudio);
+                        ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast. */
- ma_free(pContext->pulse.pServerName);
- pContext->pulse.pServerName = NULL;
+ capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+ capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
- ma_free(pContext->pulse.pApplicationName);
- pContext->pulse.pApplicationName = NULL;
+ /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */
+ for (;;) {
+ ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration;
+ ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames;
+ result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount);
+ if (result != MA_SUCCESS) {
+ break;
+ }
-#ifndef MA_NO_RUNTIME_LINKING
- ma_dlclose(pContext, pContext->pulse.pulseSO);
-#endif
+ result = ma_device_write__sndio(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
- return MA_SUCCESS;
-}
+ capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
+ }
-ma_result ma_context_init__pulse(const ma_context_config* pConfig, ma_context* pContext)
-{
-#ifndef MA_NO_RUNTIME_LINKING
- const char* libpulseNames[] = {
- "libpulse.so",
- "libpulse.so.0"
- };
- size_t i;
+ /* In case an error happened from ma_device_write__sndio()... */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+ }
- for (i = 0; i < ma_countof(libpulseNames); ++i) {
- pContext->pulse.pulseSO = ma_dlopen(pContext, libpulseNames[i]);
- if (pContext->pulse.pulseSO != NULL) {
- break;
- }
- }
+ totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed;
+ }
+ } break;
- if (pContext->pulse.pulseSO == NULL) {
- return MA_NO_BACKEND;
- }
+ case ma_device_type_capture:
+ {
+ /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+ ma_uint8 intermediaryBuffer[8192];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames;
+ ma_uint32 framesReadThisPeriod = 0;
+ while (framesReadThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToReadThisIteration = framesRemainingInPeriod;
+ if (framesToReadThisIteration > intermediaryBufferSizeInFrames) {
+ framesToReadThisIteration = intermediaryBufferSizeInFrames;
+ }
- pContext->pulse.pa_mainloop_new = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_new");
- pContext->pulse.pa_mainloop_free = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_free");
- pContext->pulse.pa_mainloop_get_api = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_get_api");
- pContext->pulse.pa_mainloop_iterate = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_iterate");
- pContext->pulse.pa_mainloop_wakeup = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_mainloop_wakeup");
- pContext->pulse.pa_context_new = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_new");
- pContext->pulse.pa_context_unref = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_unref");
- pContext->pulse.pa_context_connect = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_connect");
- pContext->pulse.pa_context_disconnect = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_disconnect");
- pContext->pulse.pa_context_set_state_callback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_set_state_callback");
- pContext->pulse.pa_context_get_state = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_state");
- pContext->pulse.pa_context_get_sink_info_list = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_sink_info_list");
- pContext->pulse.pa_context_get_source_info_list = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_source_info_list");
- pContext->pulse.pa_context_get_sink_info_by_name = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_sink_info_by_name");
- pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_context_get_source_info_by_name");
- pContext->pulse.pa_operation_unref = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_operation_unref");
- pContext->pulse.pa_operation_get_state = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_operation_get_state");
- pContext->pulse.pa_channel_map_init_extend = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_init_extend");
- pContext->pulse.pa_channel_map_valid = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_valid");
- pContext->pulse.pa_channel_map_compatible = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_channel_map_compatible");
- pContext->pulse.pa_stream_new = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_new");
- pContext->pulse.pa_stream_unref = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_unref");
- pContext->pulse.pa_stream_connect_playback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_connect_playback");
- pContext->pulse.pa_stream_connect_record = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_connect_record");
- pContext->pulse.pa_stream_disconnect = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_disconnect");
- pContext->pulse.pa_stream_get_state = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_state");
- pContext->pulse.pa_stream_get_sample_spec = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_sample_spec");
- pContext->pulse.pa_stream_get_channel_map = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_channel_map");
- pContext->pulse.pa_stream_get_buffer_attr = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_buffer_attr");
- pContext->pulse.pa_stream_set_buffer_attr = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_buffer_attr");
- pContext->pulse.pa_stream_get_device_name = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_get_device_name");
- pContext->pulse.pa_stream_set_write_callback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_write_callback");
- pContext->pulse.pa_stream_set_read_callback = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_set_read_callback");
- pContext->pulse.pa_stream_flush = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_flush");
- pContext->pulse.pa_stream_drain = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_drain");
- pContext->pulse.pa_stream_is_corked = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_is_corked");
- pContext->pulse.pa_stream_cork = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_cork");
- pContext->pulse.pa_stream_trigger = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_trigger");
- pContext->pulse.pa_stream_begin_write = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_begin_write");
- pContext->pulse.pa_stream_write = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_write");
- pContext->pulse.pa_stream_peek = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_peek");
- pContext->pulse.pa_stream_drop = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_drop");
- pContext->pulse.pa_stream_writable_size = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_writable_size");
- pContext->pulse.pa_stream_readable_size = (ma_proc)ma_dlsym(pContext, pContext->pulse.pulseSO, "pa_stream_readable_size");
-#else
- /* This strange assignment system is just for type safety. */
- ma_pa_mainloop_new_proc _pa_mainloop_new = pa_mainloop_new;
- ma_pa_mainloop_free_proc _pa_mainloop_free = pa_mainloop_free;
- ma_pa_mainloop_get_api_proc _pa_mainloop_get_api = pa_mainloop_get_api;
- ma_pa_mainloop_iterate_proc _pa_mainloop_iterate = pa_mainloop_iterate;
- ma_pa_mainloop_wakeup_proc _pa_mainloop_wakeup = pa_mainloop_wakeup;
- ma_pa_context_new_proc _pa_context_new = pa_context_new;
- ma_pa_context_unref_proc _pa_context_unref = pa_context_unref;
- ma_pa_context_connect_proc _pa_context_connect = pa_context_connect;
- ma_pa_context_disconnect_proc _pa_context_disconnect = pa_context_disconnect;
- ma_pa_context_set_state_callback_proc _pa_context_set_state_callback = pa_context_set_state_callback;
- ma_pa_context_get_state_proc _pa_context_get_state = pa_context_get_state;
- ma_pa_context_get_sink_info_list_proc _pa_context_get_sink_info_list = pa_context_get_sink_info_list;
- ma_pa_context_get_source_info_list_proc _pa_context_get_source_info_list = pa_context_get_source_info_list;
- ma_pa_context_get_sink_info_by_name_proc _pa_context_get_sink_info_by_name = pa_context_get_sink_info_by_name;
- ma_pa_context_get_source_info_by_name_proc _pa_context_get_source_info_by_name= pa_context_get_source_info_by_name;
- ma_pa_operation_unref_proc _pa_operation_unref = pa_operation_unref;
- ma_pa_operation_get_state_proc _pa_operation_get_state = pa_operation_get_state;
- ma_pa_channel_map_init_extend_proc _pa_channel_map_init_extend = pa_channel_map_init_extend;
- ma_pa_channel_map_valid_proc _pa_channel_map_valid = pa_channel_map_valid;
- ma_pa_channel_map_compatible_proc _pa_channel_map_compatible = pa_channel_map_compatible;
- ma_pa_stream_new_proc _pa_stream_new = pa_stream_new;
- ma_pa_stream_unref_proc _pa_stream_unref = pa_stream_unref;
- ma_pa_stream_connect_playback_proc _pa_stream_connect_playback = pa_stream_connect_playback;
- ma_pa_stream_connect_record_proc _pa_stream_connect_record = pa_stream_connect_record;
- ma_pa_stream_disconnect_proc _pa_stream_disconnect = pa_stream_disconnect;
- ma_pa_stream_get_state_proc _pa_stream_get_state = pa_stream_get_state;
- ma_pa_stream_get_sample_spec_proc _pa_stream_get_sample_spec = pa_stream_get_sample_spec;
- ma_pa_stream_get_channel_map_proc _pa_stream_get_channel_map = pa_stream_get_channel_map;
- ma_pa_stream_get_buffer_attr_proc _pa_stream_get_buffer_attr = pa_stream_get_buffer_attr;
- ma_pa_stream_set_buffer_attr_proc _pa_stream_set_buffer_attr = pa_stream_set_buffer_attr;
- ma_pa_stream_get_device_name_proc _pa_stream_get_device_name = pa_stream_get_device_name;
- ma_pa_stream_set_write_callback_proc _pa_stream_set_write_callback = pa_stream_set_write_callback;
- ma_pa_stream_set_read_callback_proc _pa_stream_set_read_callback = pa_stream_set_read_callback;
- ma_pa_stream_flush_proc _pa_stream_flush = pa_stream_flush;
- ma_pa_stream_drain_proc _pa_stream_drain = pa_stream_drain;
- ma_pa_stream_is_corked_proc _pa_stream_is_corked = pa_stream_is_corked;
- ma_pa_stream_cork_proc _pa_stream_cork = pa_stream_cork;
- ma_pa_stream_trigger_proc _pa_stream_trigger = pa_stream_trigger;
- ma_pa_stream_begin_write_proc _pa_stream_begin_write = pa_stream_begin_write;
- ma_pa_stream_write_proc _pa_stream_write = pa_stream_write;
- ma_pa_stream_peek_proc _pa_stream_peek = pa_stream_peek;
- ma_pa_stream_drop_proc _pa_stream_drop = pa_stream_drop;
- ma_pa_stream_writable_size_proc _pa_stream_writable_size = pa_stream_writable_size;
- ma_pa_stream_readable_size_proc _pa_stream_readable_size = pa_stream_readable_size;
+ result = ma_device_read__sndio(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
- pContext->pulse.pa_mainloop_new = (ma_proc)_pa_mainloop_new;
- pContext->pulse.pa_mainloop_free = (ma_proc)_pa_mainloop_free;
- pContext->pulse.pa_mainloop_get_api = (ma_proc)_pa_mainloop_get_api;
- pContext->pulse.pa_mainloop_iterate = (ma_proc)_pa_mainloop_iterate;
- pContext->pulse.pa_mainloop_wakeup = (ma_proc)_pa_mainloop_wakeup;
- pContext->pulse.pa_context_new = (ma_proc)_pa_context_new;
- pContext->pulse.pa_context_unref = (ma_proc)_pa_context_unref;
- pContext->pulse.pa_context_connect = (ma_proc)_pa_context_connect;
- pContext->pulse.pa_context_disconnect = (ma_proc)_pa_context_disconnect;
- pContext->pulse.pa_context_set_state_callback = (ma_proc)_pa_context_set_state_callback;
- pContext->pulse.pa_context_get_state = (ma_proc)_pa_context_get_state;
- pContext->pulse.pa_context_get_sink_info_list = (ma_proc)_pa_context_get_sink_info_list;
- pContext->pulse.pa_context_get_source_info_list = (ma_proc)_pa_context_get_source_info_list;
- pContext->pulse.pa_context_get_sink_info_by_name = (ma_proc)_pa_context_get_sink_info_by_name;
- pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)_pa_context_get_source_info_by_name;
- pContext->pulse.pa_operation_unref = (ma_proc)_pa_operation_unref;
- pContext->pulse.pa_operation_get_state = (ma_proc)_pa_operation_get_state;
- pContext->pulse.pa_channel_map_init_extend = (ma_proc)_pa_channel_map_init_extend;
- pContext->pulse.pa_channel_map_valid = (ma_proc)_pa_channel_map_valid;
- pContext->pulse.pa_channel_map_compatible = (ma_proc)_pa_channel_map_compatible;
- pContext->pulse.pa_stream_new = (ma_proc)_pa_stream_new;
- pContext->pulse.pa_stream_unref = (ma_proc)_pa_stream_unref;
- pContext->pulse.pa_stream_connect_playback = (ma_proc)_pa_stream_connect_playback;
- pContext->pulse.pa_stream_connect_record = (ma_proc)_pa_stream_connect_record;
- pContext->pulse.pa_stream_disconnect = (ma_proc)_pa_stream_disconnect;
- pContext->pulse.pa_stream_get_state = (ma_proc)_pa_stream_get_state;
- pContext->pulse.pa_stream_get_sample_spec = (ma_proc)_pa_stream_get_sample_spec;
- pContext->pulse.pa_stream_get_channel_map = (ma_proc)_pa_stream_get_channel_map;
- pContext->pulse.pa_stream_get_buffer_attr = (ma_proc)_pa_stream_get_buffer_attr;
- pContext->pulse.pa_stream_set_buffer_attr = (ma_proc)_pa_stream_set_buffer_attr;
- pContext->pulse.pa_stream_get_device_name = (ma_proc)_pa_stream_get_device_name;
- pContext->pulse.pa_stream_set_write_callback = (ma_proc)_pa_stream_set_write_callback;
- pContext->pulse.pa_stream_set_read_callback = (ma_proc)_pa_stream_set_read_callback;
- pContext->pulse.pa_stream_flush = (ma_proc)_pa_stream_flush;
- pContext->pulse.pa_stream_drain = (ma_proc)_pa_stream_drain;
- pContext->pulse.pa_stream_is_corked = (ma_proc)_pa_stream_is_corked;
- pContext->pulse.pa_stream_cork = (ma_proc)_pa_stream_cork;
- pContext->pulse.pa_stream_trigger = (ma_proc)_pa_stream_trigger;
- pContext->pulse.pa_stream_begin_write = (ma_proc)_pa_stream_begin_write;
- pContext->pulse.pa_stream_write = (ma_proc)_pa_stream_write;
- pContext->pulse.pa_stream_peek = (ma_proc)_pa_stream_peek;
- pContext->pulse.pa_stream_drop = (ma_proc)_pa_stream_drop;
- pContext->pulse.pa_stream_writable_size = (ma_proc)_pa_stream_writable_size;
- pContext->pulse.pa_stream_readable_size = (ma_proc)_pa_stream_readable_size;
-#endif
+ ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer);
- pContext->onUninit = ma_context_uninit__pulse;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__pulse;
- pContext->onEnumDevices = ma_context_enumerate_devices__pulse;
- pContext->onGetDeviceInfo = ma_context_get_device_info__pulse;
- pContext->onDeviceInit = ma_device_init__pulse;
- pContext->onDeviceUninit = ma_device_uninit__pulse;
- pContext->onDeviceStart = NULL;
- pContext->onDeviceStop = ma_device_stop__pulse;
- pContext->onDeviceWrite = ma_device_write__pulse;
- pContext->onDeviceRead = ma_device_read__pulse;
+ framesReadThisPeriod += framesProcessed;
+ }
+ } break;
- if (pConfig->pulse.pApplicationName) {
- pContext->pulse.pApplicationName = ma_copy_string(pConfig->pulse.pApplicationName);
- }
- if (pConfig->pulse.pServerName) {
- pContext->pulse.pServerName = ma_copy_string(pConfig->pulse.pServerName);
- }
- pContext->pulse.tryAutoSpawn = pConfig->pulse.tryAutoSpawn;
-
- /*
- Although we have found the libpulse library, it doesn't necessarily mean PulseAudio is useable. We need to initialize
- and connect a dummy PulseAudio context to test PulseAudio's usability.
- */
- {
- ma_pa_mainloop* pMainLoop;
- ma_pa_mainloop_api* pAPI;
- ma_pa_context* pPulseContext;
- int error;
+ case ma_device_type_playback:
+ {
+ /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+ ma_uint8 intermediaryBuffer[8192];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
+ ma_uint32 framesWrittenThisPeriod = 0;
+ while (framesWrittenThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod;
+ if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) {
+ framesToWriteThisIteration = intermediaryBufferSizeInFrames;
+ }
- pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)();
- if (pMainLoop == NULL) {
- ma_free(pContext->pulse.pServerName);
- ma_free(pContext->pulse.pApplicationName);
- #ifndef MA_NO_RUNTIME_LINKING
- ma_dlclose(pContext, pContext->pulse.pulseSO);
- #endif
- return MA_NO_BACKEND;
- }
+ ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer);
- pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)(pMainLoop);
- if (pAPI == NULL) {
- ma_free(pContext->pulse.pServerName);
- ma_free(pContext->pulse.pApplicationName);
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
- #ifndef MA_NO_RUNTIME_LINKING
- ma_dlclose(pContext, pContext->pulse.pulseSO);
- #endif
- return MA_NO_BACKEND;
- }
+ result = ma_device_write__sndio(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
- pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(pAPI, pContext->pulse.pApplicationName);
- if (pPulseContext == NULL) {
- ma_free(pContext->pulse.pServerName);
- ma_free(pContext->pulse.pApplicationName);
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
- #ifndef MA_NO_RUNTIME_LINKING
- ma_dlclose(pContext, pContext->pulse.pulseSO);
- #endif
- return MA_NO_BACKEND;
+ framesWrittenThisPeriod += framesProcessed;
+ }
+ } break;
+
+ /* To silence a warning. Will never hit this. */
+ case ma_device_type_loopback:
+ default: break;
}
+ }
- error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)(pPulseContext, pContext->pulse.pServerName, 0, NULL);
- if (error != MA_PA_OK) {
- ma_free(pContext->pulse.pServerName);
- ma_free(pContext->pulse.pApplicationName);
- ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
- #ifndef MA_NO_RUNTIME_LINKING
- ma_dlclose(pContext, pContext->pulse.pulseSO);
- #endif
- return MA_NO_BACKEND;
+
+ /* Here is where the device is stopped. */
+ ma_device_stop__sndio(pDevice);
+
+ return result;
+}
+
+static ma_result ma_context_uninit__sndio(ma_context* pContext) /* Context teardown for the sndio backend. */
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_sndio);
+
+ (void)pContext; /* No per-context state to release; cast silences unused-parameter warnings when MA_ASSERT compiles away. */
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__sndio(const ma_context_config* pConfig, ma_context* pContext)
+{
+#ifndef MA_NO_RUNTIME_LINKING
+ const char* libsndioNames[] = {
+ "libsndio.so"
+ };
+ size_t i;
+
+ for (i = 0; i < ma_countof(libsndioNames); ++i) {
+ pContext->sndio.sndioSO = ma_dlopen(pContext, libsndioNames[i]);
+ if (pContext->sndio.sndioSO != NULL) {
+ break;
}
+ }
- ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)(pPulseContext);
- ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext);
- ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop);
+ if (pContext->sndio.sndioSO == NULL) {
+ return MA_NO_BACKEND;
}
+
+ pContext->sndio.sio_open = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_open");
+ pContext->sndio.sio_close = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_close");
+ pContext->sndio.sio_setpar = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_setpar");
+ pContext->sndio.sio_getpar = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_getpar");
+ pContext->sndio.sio_getcap = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_getcap");
+ pContext->sndio.sio_write = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_write");
+ pContext->sndio.sio_read = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_read");
+ pContext->sndio.sio_start = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_start");
+ pContext->sndio.sio_stop = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_stop");
+ pContext->sndio.sio_initpar = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_initpar");
+#else
+ pContext->sndio.sio_open = sio_open;
+ pContext->sndio.sio_close = sio_close;
+ pContext->sndio.sio_setpar = sio_setpar;
+ pContext->sndio.sio_getpar = sio_getpar;
+ pContext->sndio.sio_getcap = sio_getcap;
+ pContext->sndio.sio_write = sio_write;
+ pContext->sndio.sio_read = sio_read;
+ pContext->sndio.sio_start = sio_start;
+ pContext->sndio.sio_stop = sio_stop;
+ pContext->sndio.sio_initpar = sio_initpar;
+#endif
+
+ pContext->onUninit = ma_context_uninit__sndio;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__sndio;
+ pContext->onEnumDevices = ma_context_enumerate_devices__sndio;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__sndio;
+ pContext->onDeviceInit = ma_device_init__sndio;
+ pContext->onDeviceUninit = ma_device_uninit__sndio;
+ pContext->onDeviceStart = NULL; /* Not required for synchronous backends. */
+ pContext->onDeviceStop = NULL; /* Not required for synchronous backends. */
+ pContext->onDeviceMainLoop = ma_device_main_loop__sndio;
+ (void)pConfig;
return MA_SUCCESS;
}
-#endif
+#endif /* sndio */
+
/******************************************************************************
-JACK Backend
+audio(4) Backend
******************************************************************************/
-#ifdef MA_HAS_JACK
-
-/* It is assumed jack.h is available when compile-time linking is being used. */
-#ifdef MA_NO_RUNTIME_LINKING
-#include
+#ifdef MA_HAS_AUDIO4
+#include
+#include
+#include
+#include
+#include
+#include
+#include
-typedef jack_nframes_t ma_jack_nframes_t;
-typedef jack_options_t ma_jack_options_t;
-typedef jack_status_t ma_jack_status_t;
-typedef jack_client_t ma_jack_client_t;
-typedef jack_port_t ma_jack_port_t;
-typedef JackProcessCallback ma_JackProcessCallback;
-typedef JackBufferSizeCallback ma_JackBufferSizeCallback;
-typedef JackShutdownCallback ma_JackShutdownCallback;
-#define MA_JACK_DEFAULT_AUDIO_TYPE JACK_DEFAULT_AUDIO_TYPE
-#define ma_JackNoStartServer JackNoStartServer
-#define ma_JackPortIsInput JackPortIsInput
-#define ma_JackPortIsOutput JackPortIsOutput
-#define ma_JackPortIsPhysical JackPortIsPhysical
-#else
-typedef ma_uint32 ma_jack_nframes_t;
-typedef int ma_jack_options_t;
-typedef int ma_jack_status_t;
-typedef struct ma_jack_client_t ma_jack_client_t;
-typedef struct ma_jack_port_t ma_jack_port_t;
-typedef int (* ma_JackProcessCallback) (ma_jack_nframes_t nframes, void* arg);
-typedef int (* ma_JackBufferSizeCallback)(ma_jack_nframes_t nframes, void* arg);
-typedef void (* ma_JackShutdownCallback) (void* arg);
-#define MA_JACK_DEFAULT_AUDIO_TYPE "32 bit float mono audio"
-#define ma_JackNoStartServer 1
-#define ma_JackPortIsInput 1
-#define ma_JackPortIsOutput 2
-#define ma_JackPortIsPhysical 4
+#if defined(__OpenBSD__)
+ #include
+ #if defined(OpenBSD) && OpenBSD >= 201709
+ #define MA_AUDIO4_USE_NEW_API
+ #endif
#endif
-typedef ma_jack_client_t* (* ma_jack_client_open_proc) (const char* client_name, ma_jack_options_t options, ma_jack_status_t* status, ...);
-typedef int (* ma_jack_client_close_proc) (ma_jack_client_t* client);
-typedef int (* ma_jack_client_name_size_proc) ();
-typedef int (* ma_jack_set_process_callback_proc) (ma_jack_client_t* client, ma_JackProcessCallback process_callback, void* arg);
-typedef int (* ma_jack_set_buffer_size_callback_proc)(ma_jack_client_t* client, ma_JackBufferSizeCallback bufsize_callback, void* arg);
-typedef void (* ma_jack_on_shutdown_proc) (ma_jack_client_t* client, ma_JackShutdownCallback function, void* arg);
-typedef ma_jack_nframes_t (* ma_jack_get_sample_rate_proc) (ma_jack_client_t* client);
-typedef ma_jack_nframes_t (* ma_jack_get_buffer_size_proc) (ma_jack_client_t* client);
-typedef const char** (* ma_jack_get_ports_proc) (ma_jack_client_t* client, const char* port_name_pattern, const char* type_name_pattern, unsigned long flags);
-typedef int (* ma_jack_activate_proc) (ma_jack_client_t* client);
-typedef int (* ma_jack_deactivate_proc) (ma_jack_client_t* client);
-typedef int (* ma_jack_connect_proc) (ma_jack_client_t* client, const char* source_port, const char* destination_port);
-typedef ma_jack_port_t* (* ma_jack_port_register_proc) (ma_jack_client_t* client, const char* port_name, const char* port_type, unsigned long flags, unsigned long buffer_size);
-typedef const char* (* ma_jack_port_name_proc) (const ma_jack_port_t* port);
-typedef void* (* ma_jack_port_get_buffer_proc) (ma_jack_port_t* port, ma_jack_nframes_t nframes);
-typedef void (* ma_jack_free_proc) (void* ptr);
+static void ma_construct_device_id__audio4(char* id, size_t idSize, const char* base, int deviceIndex) /* Builds a device id such as "/dev/audio0" by appending the decimal index to base. */
+{
+ size_t baseLen;
+
+ MA_ASSERT(id != NULL);
+ MA_ASSERT(idSize > 0);
+ MA_ASSERT(deviceIndex >= 0);
+
+ baseLen = strlen(base);
+ MA_ASSERT(idSize > baseLen); /* Callers must size id to hold base plus digits; only asserted, not checked, in release builds. */
+
+ ma_strcpy_s(id, idSize, base);
+ ma_itoa_s(deviceIndex, id+baseLen, idSize-baseLen, 10); /* Append index in base 10 directly after the copied base string. */
+}
-ma_result ma_context_open_client__jack(ma_context* pContext, ma_jack_client_t** ppClient)
+static ma_result ma_extract_device_index_from_id__audio4(const char* id, const char* base, int* pIndexOut) /* Parses the trailing decimal index from an id like "/dev/audioN"; MA_ERROR if id does not start with base or lacks an index. */
{
- size_t maxClientNameSize;
- char clientName[256];
- ma_jack_status_t status;
- ma_jack_client_t* pClient;
+ size_t idLen;
+ size_t baseLen;
+ const char* deviceIndexStr;
+
+ MA_ASSERT(id != NULL);
+ MA_ASSERT(base != NULL);
+ MA_ASSERT(pIndexOut != NULL);
+
+ idLen = strlen(id);
+ baseLen = strlen(base);
+ if (idLen <= baseLen) {
+ return MA_ERROR; /* Doesn't look like the id starts with the base. */
+ }
+
+ if (strncmp(id, base, baseLen) != 0) {
+ return MA_ERROR; /* ID does not begin with base. */
+ }
+
+ deviceIndexStr = id + baseLen;
+ if (deviceIndexStr[0] == '\0') {
+ return MA_ERROR; /* No index specified in the ID. */
+ }
+
+ if (pIndexOut) { /* NOTE(review): redundant with the MA_ASSERT above, but harmless in release builds where MA_ASSERT is a no-op. */
+ *pIndexOut = atoi(deviceIndexStr); /* atoi gives 0 on malformed digits; presumably ids come from ma_construct_device_id__audio4 — confirm. */
+ }
+
+ return MA_SUCCESS;
+}
- ma_assert(pContext != NULL);
- ma_assert(ppClient != NULL);
+static ma_bool32 ma_context_is_device_id_equal__audio4(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) /* TRUE iff the two audio4 device id strings compare equal. */
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext; /* Context is unused: equality is a pure string comparison on the id fields. */
- if (ppClient) {
- *ppClient = NULL;
+ return ma_strcmp(pID0->audio4, pID1->audio4) == 0;
+}
+
+#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */
+static ma_format ma_format_from_encoding__audio4(unsigned int encoding, unsigned int precision) /* Maps an audio(4) encoding/precision pair to a ma_format; ma_format_unknown if unsupported. */
+{
+ if (precision == 8 && (encoding == AUDIO_ENCODING_ULINEAR || encoding == AUDIO_ENCODING_ULINEAR_LE || encoding == AUDIO_ENCODING_ULINEAR_BE)) { /* Fixed: original tested AUDIO_ENCODING_ULINEAR twice; duplicate OR term removed (behavior unchanged). */
+ return ma_format_u8;
+ } else {
+ if (ma_is_little_endian() && encoding == AUDIO_ENCODING_SLINEAR_LE) { /* Only native-endian signed linear is accepted. */
+ if (precision == 16) {
+ return ma_format_s16;
+ } else if (precision == 24) {
+ return ma_format_s24;
+ } else if (precision == 32) {
+ return ma_format_s32;
+ }
+ } else if (ma_is_big_endian() && encoding == AUDIO_ENCODING_SLINEAR_BE) { /* Big-endian mirror of the branch above. */
+ if (precision == 16) {
+ return ma_format_s16;
+ } else if (precision == 24) {
+ return ma_format_s24;
+ } else if (precision == 32) {
+ return ma_format_s32;
+ }
+ }
 }
- maxClientNameSize = ((ma_jack_client_name_size_proc)pContext->jack.jack_client_name_size)(); /* Includes null terminator. */
- ma_strncpy_s(clientName, ma_min(sizeof(clientName), maxClientNameSize), (pContext->jack.pClientName != NULL) ? pContext->jack.pClientName : "miniaudio", (size_t)-1);
+ return ma_format_unknown; /* Encoding not supported. */
+}
- pClient = ((ma_jack_client_open_proc)pContext->jack.jack_client_open)(clientName, (pContext->jack.tryStartServer) ? 0 : ma_JackNoStartServer, &status, NULL);
- if (pClient == NULL) {
- return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+static void ma_encoding_from_format__audio4(ma_format format, unsigned int* pEncoding, unsigned int* pPrecision)
+{
+ MA_ASSERT(format != ma_format_unknown);
+ MA_ASSERT(pEncoding != NULL);
+ MA_ASSERT(pPrecision != NULL);
+
+ switch (format)
+ {
+ case ma_format_u8:
+ {
+ *pEncoding = AUDIO_ENCODING_ULINEAR;
+ *pPrecision = 8;
+ } break;
+
+ case ma_format_s24:
+ {
+ *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE;
+ *pPrecision = 24;
+ } break;
+
+ case ma_format_s32:
+ {
+ *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE;
+ *pPrecision = 32;
+ } break;
+
+ case ma_format_s16:
+ case ma_format_f32:
+ default:
+ {
+ *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE;
+ *pPrecision = 16;
+ } break;
+ }
+}
+
+static ma_format ma_format_from_prinfo__audio4(struct audio_prinfo* prinfo)
+{
+ return ma_format_from_encoding__audio4(prinfo->encoding, prinfo->precision);
+}
+#else
+static ma_format ma_format_from_swpar__audio4(struct audio_swpar* par)
+{
+ if (par->bits == 8 && par->bps == 1 && par->sig == 0) {
+ return ma_format_u8;
+ }
+ if (par->bits == 16 && par->bps == 2 && par->sig == 1 && par->le == ma_is_little_endian()) {
+ return ma_format_s16;
}
-
- if (ppClient) {
- *ppClient = pClient;
+ if (par->bits == 24 && par->bps == 3 && par->sig == 1 && par->le == ma_is_little_endian()) {
+ return ma_format_s24;
+ }
+ if (par->bits == 32 && par->bps == 4 && par->sig == 1 && par->le == ma_is_little_endian()) {
+ return ma_format_f32;
}
- return MA_SUCCESS;
-}
-
-ma_bool32 ma_context_is_device_id_equal__jack(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
-{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
-
- return pID0->jack == pID1->jack;
+ /* Format not supported. */
+ return ma_format_unknown;
}
+#endif
-ma_result ma_context_enumerate_devices__jack(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+static ma_result ma_context_get_device_info_from_fd__audio4(ma_context* pContext, ma_device_type deviceType, int fd, ma_device_info* pInfoOut)
{
- ma_bool32 cbResult = MA_TRUE;
+ audio_device_t fdDevice;
+#if !defined(MA_AUDIO4_USE_NEW_API)
+ int counter = 0;
+ audio_info_t fdInfo;
+#else
+ struct audio_swpar fdPar;
+ ma_format format;
+#endif
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(fd >= 0);
+ MA_ASSERT(pInfoOut != NULL);
+
+ (void)pContext;
+ (void)deviceType;
- /* Playback. */
- if (cbResult) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ if (ioctl(fd, AUDIO_GETDEV, &fdDevice) < 0) {
+ return MA_ERROR; /* Failed to retrieve device info. */
}
- /* Capture. */
- if (cbResult) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
- cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
- }
+ /* Name. */
+ ma_strcpy_s(pInfoOut->name, sizeof(pInfoOut->name), fdDevice.name);
- return MA_SUCCESS;
-}
+#if !defined(MA_AUDIO4_USE_NEW_API)
+ /* Supported formats. We get this by looking at the encodings. */
+ for (;;) {
+ audio_encoding_t encoding;
+ ma_format format;
-ma_result ma_context_get_device_info__jack(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
-{
- ma_jack_client_t* pClient;
- ma_result result;
- const char** ppPorts;
+ MA_ZERO_OBJECT(&encoding);
+ encoding.index = counter;
+ if (ioctl(fd, AUDIO_GETENC, &encoding) < 0) {
+ break;
+ }
- ma_assert(pContext != NULL);
+ format = ma_format_from_encoding__audio4(encoding.encoding, encoding.precision);
+ if (format != ma_format_unknown) {
+ pInfoOut->formats[pInfoOut->formatCount++] = format;
+ }
- /* No exclusive mode with the JACK backend. */
- if (shareMode == ma_share_mode_exclusive) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
+ counter += 1;
}
- if (pDeviceID != NULL && pDeviceID->jack != 0) {
- return MA_NO_DEVICE; /* Don't know the device. */
+ if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) {
+ return MA_ERROR;
}
- /* Name / Description */
if (deviceType == ma_device_type_playback) {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ pInfoOut->minChannels = fdInfo.play.channels;
+ pInfoOut->maxChannels = fdInfo.play.channels;
+ pInfoOut->minSampleRate = fdInfo.play.sample_rate;
+ pInfoOut->maxSampleRate = fdInfo.play.sample_rate;
} else {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ pInfoOut->minChannels = fdInfo.record.channels;
+ pInfoOut->maxChannels = fdInfo.record.channels;
+ pInfoOut->minSampleRate = fdInfo.record.sample_rate;
+ pInfoOut->maxSampleRate = fdInfo.record.sample_rate;
}
-
- /* Jack only supports f32 and has a specific channel count and sample rate. */
- pDeviceInfo->formatCount = 1;
- pDeviceInfo->formats[0] = ma_format_f32;
-
- /* The channel count and sample rate can only be determined by opening the device. */
- result = ma_context_open_client__jack(pContext, &pClient);
- if (result != MA_SUCCESS) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+#else
+ if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) {
+ return MA_ERROR;
}
-
- pDeviceInfo->minSampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pClient);
- pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate;
-
- pDeviceInfo->minChannels = 0;
- pDeviceInfo->maxChannels = 0;
-
- ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pClient, NULL, NULL, ma_JackPortIsPhysical | ((deviceType == ma_device_type_playback) ? ma_JackPortIsInput : ma_JackPortIsOutput));
- if (ppPorts == NULL) {
- ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient);
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+
+ format = ma_format_from_swpar__audio4(&fdPar);
+ if (format == ma_format_unknown) {
+ return MA_FORMAT_NOT_SUPPORTED;
}
-
- while (ppPorts[pDeviceInfo->minChannels] != NULL) {
- pDeviceInfo->minChannels += 1;
- pDeviceInfo->maxChannels += 1;
+ pInfoOut->formats[pInfoOut->formatCount++] = format;
+
+ if (deviceType == ma_device_type_playback) {
+ pInfoOut->minChannels = fdPar.pchan;
+ pInfoOut->maxChannels = fdPar.pchan;
+ } else {
+ pInfoOut->minChannels = fdPar.rchan;
+ pInfoOut->maxChannels = fdPar.rchan;
}
-
- ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
- ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient);
-
- (void)pContext;
+
+ pInfoOut->minSampleRate = fdPar.rate;
+ pInfoOut->maxSampleRate = fdPar.rate;
+#endif
+
return MA_SUCCESS;
}
-
-void ma_device_uninit__jack(ma_device* pDevice)
+static ma_result ma_context_enumerate_devices__audio4(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
{
- ma_context* pContext;
-
- ma_assert(pDevice != NULL);
-
- pContext = pDevice->pContext;
- ma_assert(pContext != NULL);
-
- if (pDevice->jack.pClient != NULL) {
- ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDevice->jack.pClient);
- }
+ const int maxDevices = 64;
+ char devpath[256];
+ int iDevice;
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ma_free(pDevice->jack.pIntermediaryBufferCapture);
- }
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
+
+ /*
+ Every device will be named "/dev/audioN", with a "/dev/audioctlN" equivalent. We use the "/dev/audioctlN"
+ version here since we can open it even when another process has control of the "/dev/audioN" device.
+ */
+ for (iDevice = 0; iDevice < maxDevices; ++iDevice) {
+ struct stat st;
+ int fd;
+ ma_bool32 isTerminating = MA_FALSE;
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- ma_free(pDevice->jack.pIntermediaryBufferPlayback);
- }
+ ma_strcpy_s(devpath, sizeof(devpath), "/dev/audioctl");
+ ma_itoa_s(iDevice, devpath+strlen(devpath), sizeof(devpath)-strlen(devpath), 10);
+
+ if (stat(devpath, &st) < 0) {
+ break;
+ }
- if (pDevice->type == ma_device_type_duplex) {
- ma_pcm_rb_uninit(&pDevice->jack.duplexRB);
+ /* The device exists, but we need to check if it's usable as playback and/or capture. */
+
+ /* Playback. */
+ if (!isTerminating) {
+ fd = open(devpath, O_RDONLY, 0);
+ if (fd >= 0) {
+ /* Supports playback. */
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice);
+ if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_playback, fd, &deviceInfo) == MA_SUCCESS) {
+ isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ }
+
+ close(fd);
+ }
+ }
+
+ /* Capture. */
+ if (!isTerminating) {
+ fd = open(devpath, O_WRONLY, 0);
+ if (fd >= 0) {
+ /* Supports capture. */
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice);
+ if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_capture, fd, &deviceInfo) == MA_SUCCESS) {
+ isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+ }
+
+ close(fd);
+ }
+ }
+
+ if (isTerminating) {
+ break;
+ }
}
+
+ return MA_SUCCESS;
}
-void ma_device__jack_shutdown_callback(void* pUserData)
-{
- /* JACK died. Stop the device. */
- ma_device* pDevice = (ma_device*)pUserData;
- ma_assert(pDevice != NULL);
-
- ma_device_stop(pDevice);
-}
-
-int ma_device__jack_buffer_size_callback(ma_jack_nframes_t frameCount, void* pUserData)
+static ma_result ma_context_get_device_info__audio4(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
{
- ma_device* pDevice = (ma_device*)pUserData;
- ma_assert(pDevice != NULL);
+ int fd = -1;
+ int deviceIndex = -1;
+ char ctlid[256];
+ ma_result result;
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- float* pNewBuffer = (float*)ma_realloc(pDevice->jack.pIntermediaryBufferCapture, frameCount * (pDevice->capture.internalChannels * ma_get_bytes_per_sample(pDevice->capture.internalFormat)));
- if (pNewBuffer == NULL) {
- return MA_OUT_OF_MEMORY;
+ MA_ASSERT(pContext != NULL);
+ (void)shareMode;
+
+ /*
+ We need to open the "/dev/audioctlN" device to get the info. To do this we need to extract the number
+ from the device ID which will be in "/dev/audioN" format.
+ */
+ if (pDeviceID == NULL) {
+ /* Default device. */
+ ma_strcpy_s(ctlid, sizeof(ctlid), "/dev/audioctl");
+ } else {
+ /* Specific device. We need to convert from "/dev/audioN" to "/dev/audioctlN". */
+ result = ma_extract_device_index_from_id__audio4(pDeviceID->audio4, "/dev/audio", &deviceIndex);
+ if (result != MA_SUCCESS) {
+ return result;
}
-
- pDevice->jack.pIntermediaryBufferCapture = pNewBuffer;
- pDevice->playback.internalBufferSizeInFrames = frameCount * pDevice->capture.internalPeriods;
+
+ ma_construct_device_id__audio4(ctlid, sizeof(ctlid), "/dev/audioctl", deviceIndex);
}
-
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- float* pNewBuffer = (float*)ma_realloc(pDevice->jack.pIntermediaryBufferPlayback, frameCount * (pDevice->playback.internalChannels * ma_get_bytes_per_sample(pDevice->playback.internalFormat)));
- if (pNewBuffer == NULL) {
- return MA_OUT_OF_MEMORY;
- }
-
- pDevice->jack.pIntermediaryBufferPlayback = pNewBuffer;
- pDevice->playback.internalBufferSizeInFrames = frameCount * pDevice->playback.internalPeriods;
+
+ fd = open(ctlid, (deviceType == ma_device_type_playback) ? O_WRONLY : O_RDONLY, 0);
+ if (fd == -1) {
+ return MA_NO_DEVICE;
}
-
- return 0;
+
+ if (deviceIndex == -1) {
+ ma_strcpy_s(pDeviceInfo->id.audio4, sizeof(pDeviceInfo->id.audio4), "/dev/audio");
+ } else {
+ ma_construct_device_id__audio4(pDeviceInfo->id.audio4, sizeof(pDeviceInfo->id.audio4), "/dev/audio", deviceIndex);
+ }
+
+ result = ma_context_get_device_info_from_fd__audio4(pContext, deviceType, fd, pDeviceInfo);
+
+ close(fd);
+ return result;
}
-int ma_device__jack_process_callback(ma_jack_nframes_t frameCount, void* pUserData)
+static void ma_device_uninit__audio4(ma_device* pDevice)
{
- ma_device* pDevice;
- ma_context* pContext;
- ma_uint32 iChannel;
-
- pDevice = (ma_device*)pUserData;
- ma_assert(pDevice != NULL);
-
- pContext = pDevice->pContext;
- ma_assert(pContext != NULL);
+ MA_ASSERT(pDevice != NULL);
if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- /* Channels need to be interleaved. */
- for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) {
- const float* pSrc = (const float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.pPortsCapture[iChannel], frameCount);
- if (pSrc != NULL) {
- float* pDst = pDevice->jack.pIntermediaryBufferCapture + iChannel;
- ma_jack_nframes_t iFrame;
- for (iFrame = 0; iFrame < frameCount; ++iFrame) {
- *pDst = *pSrc;
-
- pDst += pDevice->capture.internalChannels;
- pSrc += 1;
- }
- }
- }
-
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_capture(pDevice, frameCount, pDevice->jack.pIntermediaryBufferCapture, &pDevice->jack.duplexRB);
- } else {
- ma_device__send_frames_to_client(pDevice, frameCount, pDevice->jack.pIntermediaryBufferCapture);
- }
+ close(pDevice->audio4.fdCapture);
}
if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_playback(pDevice, frameCount, pDevice->jack.pIntermediaryBufferPlayback, &pDevice->jack.duplexRB);
- } else {
- ma_device__read_frames_from_client(pDevice, frameCount, pDevice->jack.pIntermediaryBufferPlayback);
- }
-
- /* Channels need to be deinterleaved. */
- for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) {
- float* pDst = (float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.pPortsPlayback[iChannel], frameCount);
- if (pDst != NULL) {
- const float* pSrc = pDevice->jack.pIntermediaryBufferPlayback + iChannel;
- ma_jack_nframes_t iFrame;
- for (iFrame = 0; iFrame < frameCount; ++iFrame) {
- *pDst = *pSrc;
-
- pDst += 1;
- pSrc += pDevice->playback.internalChannels;
- }
- }
- }
+ close(pDevice->audio4.fdPlayback);
}
-
- return 0;
}
-ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+static ma_result ma_device_init_fd__audio4(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice)
{
- ma_result result;
- ma_uint32 periods;
- ma_uint32 bufferSizeInFrames;
+ const char* pDefaultDeviceNames[] = {
+ "/dev/audio",
+ "/dev/audio0"
+ };
+ int fd;
+ int fdFlags = 0;
+#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */
+ audio_info_t fdInfo;
+#else
+ struct audio_swpar fdPar;
+#endif
+ ma_format internalFormat;
+ ma_uint32 internalChannels;
+ ma_uint32 internalSampleRate;
+ ma_uint32 internalPeriodSizeInFrames;
+ ma_uint32 internalPeriods;
- ma_assert(pContext != NULL);
- ma_assert(pConfig != NULL);
- ma_assert(pDevice != NULL);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(deviceType != ma_device_type_duplex);
+ MA_ASSERT(pDevice != NULL);
- /* Only supporting default devices with JACK. */
- if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID != NULL && pConfig->playback.pDeviceID->jack != 0) ||
- ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.pDeviceID != NULL && pConfig->capture.pDeviceID->jack != 0)) {
- return MA_NO_DEVICE;
- }
+ (void)pContext;
- /* No exclusive mode with the JACK backend. */
- if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
- ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
+ /* The first thing to do is open the file. */
+ if (deviceType == ma_device_type_capture) {
+ fdFlags = O_RDONLY;
+ } else {
+ fdFlags = O_WRONLY;
}
+ /*fdFlags |= O_NONBLOCK;*/
- /* Open the client. */
- result = ma_context_open_client__jack(pContext, (ma_jack_client_t**)&pDevice->jack.pClient);
- if (result != MA_SUCCESS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ if ((deviceType == ma_device_type_capture && pConfig->capture.pDeviceID == NULL) || (deviceType == ma_device_type_playback && pConfig->playback.pDeviceID == NULL)) {
+ /* Default device. */
+ size_t iDevice;
+ for (iDevice = 0; iDevice < ma_countof(pDefaultDeviceNames); ++iDevice) {
+ fd = open(pDefaultDeviceNames[iDevice], fdFlags, 0);
+ if (fd != -1) {
+ break;
+ }
+ }
+ } else {
+ /* Specific device. */
+ fd = open((deviceType == ma_device_type_capture) ? pConfig->capture.pDeviceID->audio4 : pConfig->playback.pDeviceID->audio4, fdFlags, 0);
}
- /* Callbacks. */
- if (((ma_jack_set_process_callback_proc)pContext->jack.jack_set_process_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_process_callback, pDevice) != 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to set process callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
- if (((ma_jack_set_buffer_size_callback_proc)pContext->jack.jack_set_buffer_size_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_buffer_size_callback, pDevice) != 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to set buffer size callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ if (fd == -1) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to open device.", ma_result_from_errno(errno));
}
- ((ma_jack_on_shutdown_proc)pContext->jack.jack_on_shutdown)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_shutdown_callback, pDevice);
+#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */
+ AUDIO_INITINFO(&fdInfo);
+ /* We get the driver to do as much of the data conversion as possible. */
+ if (deviceType == ma_device_type_capture) {
+ fdInfo.mode = AUMODE_RECORD;
+ ma_encoding_from_format__audio4(pConfig->capture.format, &fdInfo.record.encoding, &fdInfo.record.precision);
+ fdInfo.record.channels = pConfig->capture.channels;
+ fdInfo.record.sample_rate = pConfig->sampleRate;
+ } else {
+ fdInfo.mode = AUMODE_PLAY;
+ ma_encoding_from_format__audio4(pConfig->playback.format, &fdInfo.play.encoding, &fdInfo.play.precision);
+ fdInfo.play.channels = pConfig->playback.channels;
+ fdInfo.play.sample_rate = pConfig->sampleRate;
+ }
- /* The buffer size in frames can change. */
- periods = 2;
- bufferSizeInFrames = ((ma_jack_get_buffer_size_proc)pContext->jack.jack_get_buffer_size)((ma_jack_client_t*)pDevice->jack.pClient) * periods;
+ if (ioctl(fd, AUDIO_SETINFO, &fdInfo) < 0) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to set device format. AUDIO_SETINFO failed.", MA_FORMAT_NOT_SUPPORTED);
+ }
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- const char** ppPorts;
+ if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] AUDIO_GETINFO failed.", MA_FORMAT_NOT_SUPPORTED);
+ }
- pDevice->capture.internalFormat = ma_format_f32;
- pDevice->capture.internalChannels = 0;
- pDevice->capture.internalSampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient);
- ma_get_standard_channel_map(ma_standard_channel_map_alsa, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
+ if (deviceType == ma_device_type_capture) {
+ internalFormat = ma_format_from_prinfo__audio4(&fdInfo.record);
+ internalChannels = fdInfo.record.channels;
+ internalSampleRate = fdInfo.record.sample_rate;
+ } else {
+ internalFormat = ma_format_from_prinfo__audio4(&fdInfo.play);
+ internalChannels = fdInfo.play.channels;
+ internalSampleRate = fdInfo.play.sample_rate;
+ }
- ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, NULL, ma_JackPortIsPhysical | ma_JackPortIsOutput);
- if (ppPorts == NULL) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ if (internalFormat == ma_format_unknown) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable.", MA_FORMAT_NOT_SUPPORTED);
+ }
+
+ /* Buffer. */
+ {
+ ma_uint32 internalPeriodSizeInBytes;
+
+ internalPeriodSizeInFrames = pConfig->periodSizeInFrames;
+ if (internalPeriodSizeInFrames == 0) {
+ internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, internalSampleRate);
}
- while (ppPorts[pDevice->capture.internalChannels] != NULL) {
- char name[64];
- ma_strcpy_s(name, sizeof(name), "capture");
- ma_itoa_s((int)pDevice->capture.internalChannels, name+7, sizeof(name)-7, 10); /* 7 = length of "capture" */
+ internalPeriodSizeInBytes = internalPeriodSizeInFrames * ma_get_bytes_per_frame(internalFormat, internalChannels);
+ if (internalPeriodSizeInBytes < 16) {
+ internalPeriodSizeInBytes = 16;
+ }
- pDevice->jack.pPortsCapture[pDevice->capture.internalChannels] = ((ma_jack_port_register_proc)pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsInput, 0);
- if (pDevice->jack.pPortsCapture[pDevice->capture.internalChannels] == NULL) {
- ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
- ma_device_uninit__jack(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ internalPeriods = pConfig->periods;
+ if (internalPeriods < 2) {
+ internalPeriods = 2;
+ }
- pDevice->capture.internalChannels += 1;
+ /* What miniaudio calls a period, audio4 calls a block. */
+ AUDIO_INITINFO(&fdInfo);
+ fdInfo.hiwat = internalPeriods;
+ fdInfo.lowat = internalPeriods-1;
+ fdInfo.blocksize = internalPeriodSizeInBytes;
+ if (ioctl(fd, AUDIO_SETINFO, &fdInfo) < 0) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to set internal buffer size. AUDIO_SETINFO failed.", MA_FORMAT_NOT_SUPPORTED);
}
- ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
+ internalPeriods = fdInfo.hiwat;
+ internalPeriodSizeInFrames = fdInfo.blocksize / ma_get_bytes_per_frame(internalFormat, internalChannels);
+ }
+#else
+ /* We need to retrieve the format of the device so we can know the channel count and sample rate. Then we can calculate the buffer size. */
+ if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to retrieve initial device parameters.", MA_FORMAT_NOT_SUPPORTED);
+ }
- pDevice->capture.internalBufferSizeInFrames = bufferSizeInFrames;
- pDevice->capture.internalPeriods = periods;
+ internalFormat = ma_format_from_swpar__audio4(&fdPar);
+ internalChannels = (deviceType == ma_device_type_capture) ? fdPar.rchan : fdPar.pchan;
+ internalSampleRate = fdPar.rate;
- pDevice->jack.pIntermediaryBufferCapture = (float*)ma_malloc((pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods) * (pDevice->capture.internalChannels * ma_get_bytes_per_sample(pDevice->capture.internalFormat)));
- if (pDevice->jack.pIntermediaryBufferCapture == NULL) {
- ma_device_uninit__jack(pDevice);
- return MA_OUT_OF_MEMORY;
- }
+ if (internalFormat == ma_format_unknown) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable.", MA_FORMAT_NOT_SUPPORTED);
}
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- const char** ppPorts;
+ /* Buffer. */
+ {
+ ma_uint32 internalPeriodSizeInBytes;
- pDevice->playback.internalFormat = ma_format_f32;
- pDevice->playback.internalChannels = 0;
- pDevice->playback.internalSampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient);
- ma_get_standard_channel_map(ma_standard_channel_map_alsa, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
+ internalPeriodSizeInFrames = pConfig->periodSizeInFrames;
+ if (internalPeriodSizeInFrames == 0) {
+ internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, internalSampleRate);
+ }
- ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, NULL, ma_JackPortIsPhysical | ma_JackPortIsInput);
- if (ppPorts == NULL) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ /* What miniaudio calls a period, audio4 calls a block. */
+ internalPeriodSizeInBytes = internalPeriodSizeInFrames * ma_get_bytes_per_frame(internalFormat, internalChannels);
+ if (internalPeriodSizeInBytes < 16) {
+ internalPeriodSizeInBytes = 16;
+ }
+
+ fdPar.nblks = pConfig->periods;
+ fdPar.round = internalPeriodSizeInBytes;
+
+ if (ioctl(fd, AUDIO_SETPAR, &fdPar) < 0) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to set device parameters.", MA_FORMAT_NOT_SUPPORTED);
}
- while (ppPorts[pDevice->playback.internalChannels] != NULL) {
- char name[64];
- ma_strcpy_s(name, sizeof(name), "playback");
- ma_itoa_s((int)pDevice->playback.internalChannels, name+8, sizeof(name)-8, 10); /* 8 = length of "playback" */
+ if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to retrieve actual device parameters.", MA_FORMAT_NOT_SUPPORTED);
+ }
+ }
- pDevice->jack.pPortsPlayback[pDevice->playback.internalChannels] = ((ma_jack_port_register_proc)pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsOutput, 0);
- if (pDevice->jack.pPortsPlayback[pDevice->playback.internalChannels] == NULL) {
- ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
- ma_device_uninit__jack(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ internalFormat = ma_format_from_swpar__audio4(&fdPar);
+ internalChannels = (deviceType == ma_device_type_capture) ? fdPar.rchan : fdPar.pchan;
+ internalSampleRate = fdPar.rate;
+ internalPeriods = fdPar.nblks;
+ internalPeriodSizeInFrames = fdPar.round / ma_get_bytes_per_frame(internalFormat, internalChannels);
+#endif
+
+ if (internalFormat == ma_format_unknown) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable.", MA_FORMAT_NOT_SUPPORTED);
+ }
+
+ if (deviceType == ma_device_type_capture) {
+ pDevice->audio4.fdCapture = fd;
+ pDevice->capture.internalFormat = internalFormat;
+ pDevice->capture.internalChannels = internalChannels;
+ pDevice->capture.internalSampleRate = internalSampleRate;
+ ma_get_standard_channel_map(ma_standard_channel_map_sound4, internalChannels, pDevice->capture.internalChannelMap);
+ pDevice->capture.internalPeriodSizeInFrames = internalPeriodSizeInFrames;
+ pDevice->capture.internalPeriods = internalPeriods;
+ } else {
+ pDevice->audio4.fdPlayback = fd;
+ pDevice->playback.internalFormat = internalFormat;
+ pDevice->playback.internalChannels = internalChannels;
+ pDevice->playback.internalSampleRate = internalSampleRate;
+ ma_get_standard_channel_map(ma_standard_channel_map_sound4, internalChannels, pDevice->playback.internalChannelMap);
+ pDevice->playback.internalPeriodSizeInFrames = internalPeriodSizeInFrames;
+ pDevice->playback.internalPeriods = internalPeriods;
+ }
- pDevice->playback.internalChannels += 1;
- }
+ return MA_SUCCESS;
+}
- ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
+static ma_result ma_device_init__audio4(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
- pDevice->playback.internalBufferSizeInFrames = bufferSizeInFrames;
- pDevice->playback.internalPeriods = periods;
+ MA_ZERO_OBJECT(&pDevice->audio4);
- pDevice->jack.pIntermediaryBufferPlayback = (float*)ma_malloc((pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods) * (pDevice->playback.internalChannels * ma_get_bytes_per_sample(pDevice->playback.internalFormat)));
- if (pDevice->jack.pIntermediaryBufferPlayback == NULL) {
- ma_device_uninit__jack(pDevice);
- return MA_OUT_OF_MEMORY;
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
+ }
+
+ pDevice->audio4.fdCapture = -1;
+ pDevice->audio4.fdPlayback = -1;
+
+ /*
+ The version of the operating system dictates whether or not the device is exclusive or shared. NetBSD
+ introduced in-kernel mixing which means it's shared. All other BSD flavours are exclusive as far as
+ I'm aware.
+ */
+#if defined(__NetBSD_Version__) && __NetBSD_Version__ >= 800000000
+ /* NetBSD 8.0+ */
+ if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
+ ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
+ }
+#else
+ /* All other flavors. */
+#endif
+
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ ma_result result = ma_device_init_fd__audio4(pContext, pConfig, ma_device_type_capture, pDevice);
+ if (result != MA_SUCCESS) {
+ return result;
}
}
- if (pDevice->type == ma_device_type_duplex) {
- ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames);
- result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->jack.duplexRB);
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ma_result result = ma_device_init_fd__audio4(pContext, pConfig, ma_device_type_playback, pDevice);
if (result != MA_SUCCESS) {
- ma_device_uninit__jack(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to initialize ring buffer.", result);
+ if (pConfig->deviceType == ma_device_type_duplex) {
+ close(pDevice->audio4.fdCapture);
+ }
+ return result;
}
}
return MA_SUCCESS;
}
-
-ma_result ma_device_start__jack(ma_device* pDevice)
+#if 0
+static ma_result ma_device_start__audio4(ma_device* pDevice)
{
- ma_context* pContext = pDevice->pContext;
- int resultJACK;
- size_t i;
-
- resultJACK = ((ma_jack_activate_proc)pContext->jack.jack_activate)((ma_jack_client_t*)pDevice->jack.pClient);
- if (resultJACK != 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to activate the JACK client.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
+ MA_ASSERT(pDevice != NULL);
if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, NULL, ma_JackPortIsPhysical | ma_JackPortIsOutput);
- if (ppServerPorts == NULL) {
- ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports.", MA_ERROR);
- }
-
- for (i = 0; ppServerPorts[i] != NULL; ++i) {
- const char* pServerPort = ppServerPorts[i];
- const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.pPortsCapture[i]);
-
- resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pServerPort, pClientPort);
- if (resultJACK != 0) {
- ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
- ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports.", MA_ERROR);
- }
+ if (pDevice->audio4.fdCapture == -1) {
+ return MA_INVALID_ARGS;
}
-
- ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
}
-
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, NULL, ma_JackPortIsPhysical | ma_JackPortIsInput);
- if (ppServerPorts == NULL) {
- ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports.", MA_ERROR);
- }
-
- for (i = 0; ppServerPorts[i] != NULL; ++i) {
- const char* pServerPort = ppServerPorts[i];
- const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.pPortsPlayback[i]);
- resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pClientPort, pServerPort);
- if (resultJACK != 0) {
- ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
- ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports.", MA_ERROR);
- }
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ if (pDevice->audio4.fdPlayback == -1) {
+ return MA_INVALID_ARGS;
}
-
- ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts);
}
return MA_SUCCESS;
}
+#endif
-ma_result ma_device_stop__jack(ma_device* pDevice)
+static ma_result ma_device_stop_fd__audio4(ma_device* pDevice, int fd)
{
- ma_context* pContext = pDevice->pContext;
- ma_stop_proc onStop;
+ if (fd == -1) {
+ return MA_INVALID_ARGS;
+ }
- if (((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient) != 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] An error occurred when deactivating the JACK client.", MA_ERROR);
+#if !defined(MA_AUDIO4_USE_NEW_API)
+ if (ioctl(fd, AUDIO_FLUSH, 0) < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to stop device. AUDIO_FLUSH failed.", ma_result_from_errno(errno));
}
-
- onStop = pDevice->onStop;
- if (onStop) {
- onStop(pDevice);
+#else
+ if (ioctl(fd, AUDIO_STOP, 0) < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to stop device. AUDIO_STOP failed.", ma_result_from_errno(errno));
}
+#endif
return MA_SUCCESS;
}
-
-ma_result ma_context_uninit__jack(ma_context* pContext)
+static ma_result ma_device_stop__audio4(ma_device* pDevice)
{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_jack);
+ MA_ASSERT(pDevice != NULL);
- ma_free(pContext->jack.pClientName);
- pContext->jack.pClientName = NULL;
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ma_result result;
-#ifndef MA_NO_RUNTIME_LINKING
- ma_dlclose(pContext, pContext->jack.jackSO);
-#endif
+ result = ma_device_stop_fd__audio4(pDevice, pDevice->audio4.fdCapture);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_result result;
+
+ /* Drain the device first. If this fails we'll just need to flush without draining. Unfortunately draining isn't available on newer version of OpenBSD. */
+ #if !defined(MA_AUDIO4_USE_NEW_API)
+ ioctl(pDevice->audio4.fdPlayback, AUDIO_DRAIN, 0);
+ #endif
+
+ /* Here is where the device is stopped immediately. */
+ result = ma_device_stop_fd__audio4(pDevice, pDevice->audio4.fdPlayback);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
return MA_SUCCESS;
}
-ma_result ma_context_init__jack(const ma_context_config* pConfig, ma_context* pContext)
+static ma_result ma_device_write__audio4(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
{
-#ifndef MA_NO_RUNTIME_LINKING
- const char* libjackNames[] = {
-#ifdef MA_WIN32
- "libjack.dll"
-#else
- "libjack.so",
- "libjack.so.0"
-#endif
- };
- size_t i;
+ int result;
- for (i = 0; i < ma_countof(libjackNames); ++i) {
- pContext->jack.jackSO = ma_dlopen(pContext, libjackNames[i]);
- if (pContext->jack.jackSO != NULL) {
- break;
- }
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = 0;
}
- if (pContext->jack.jackSO == NULL) {
- return MA_NO_BACKEND;
+ result = write(pDevice->audio4.fdPlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
+ if (result < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to write data to the device.", ma_result_from_errno(errno));
}
- pContext->jack.jack_client_open = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_open");
- pContext->jack.jack_client_close = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_close");
- pContext->jack.jack_client_name_size = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_client_name_size");
- pContext->jack.jack_set_process_callback = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_set_process_callback");
- pContext->jack.jack_set_buffer_size_callback = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_set_buffer_size_callback");
- pContext->jack.jack_on_shutdown = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_on_shutdown");
- pContext->jack.jack_get_sample_rate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_sample_rate");
- pContext->jack.jack_get_buffer_size = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_buffer_size");
- pContext->jack.jack_get_ports = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_get_ports");
- pContext->jack.jack_activate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_activate");
- pContext->jack.jack_deactivate = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_deactivate");
- pContext->jack.jack_connect = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_connect");
- pContext->jack.jack_port_register = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_register");
- pContext->jack.jack_port_name = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_name");
- pContext->jack.jack_port_get_buffer = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_port_get_buffer");
- pContext->jack.jack_free = (ma_proc)ma_dlsym(pContext, pContext->jack.jackSO, "jack_free");
-#else
- /*
- This strange assignment system is here just to ensure type safety of miniaudio's function pointer
- types. If anything differs slightly the compiler should throw a warning.
- */
- ma_jack_client_open_proc _jack_client_open = jack_client_open;
- ma_jack_client_close_proc _jack_client_close = jack_client_close;
- ma_jack_client_name_size_proc _jack_client_name_size = jack_client_name_size;
- ma_jack_set_process_callback_proc _jack_set_process_callback = jack_set_process_callback;
- ma_jack_set_buffer_size_callback_proc _jack_set_buffer_size_callback = jack_set_buffer_size_callback;
- ma_jack_on_shutdown_proc _jack_on_shutdown = jack_on_shutdown;
- ma_jack_get_sample_rate_proc _jack_get_sample_rate = jack_get_sample_rate;
- ma_jack_get_buffer_size_proc _jack_get_buffer_size = jack_get_buffer_size;
- ma_jack_get_ports_proc _jack_get_ports = jack_get_ports;
- ma_jack_activate_proc _jack_activate = jack_activate;
- ma_jack_deactivate_proc _jack_deactivate = jack_deactivate;
- ma_jack_connect_proc _jack_connect = jack_connect;
- ma_jack_port_register_proc _jack_port_register = jack_port_register;
- ma_jack_port_name_proc _jack_port_name = jack_port_name;
- ma_jack_port_get_buffer_proc _jack_port_get_buffer = jack_port_get_buffer;
- ma_jack_free_proc _jack_free = jack_free;
-
- pContext->jack.jack_client_open = (ma_proc)_jack_client_open;
- pContext->jack.jack_client_close = (ma_proc)_jack_client_close;
- pContext->jack.jack_client_name_size = (ma_proc)_jack_client_name_size;
- pContext->jack.jack_set_process_callback = (ma_proc)_jack_set_process_callback;
- pContext->jack.jack_set_buffer_size_callback = (ma_proc)_jack_set_buffer_size_callback;
- pContext->jack.jack_on_shutdown = (ma_proc)_jack_on_shutdown;
- pContext->jack.jack_get_sample_rate = (ma_proc)_jack_get_sample_rate;
- pContext->jack.jack_get_buffer_size = (ma_proc)_jack_get_buffer_size;
- pContext->jack.jack_get_ports = (ma_proc)_jack_get_ports;
- pContext->jack.jack_activate = (ma_proc)_jack_activate;
- pContext->jack.jack_deactivate = (ma_proc)_jack_deactivate;
- pContext->jack.jack_connect = (ma_proc)_jack_connect;
- pContext->jack.jack_port_register = (ma_proc)_jack_port_register;
- pContext->jack.jack_port_name = (ma_proc)_jack_port_name;
- pContext->jack.jack_port_get_buffer = (ma_proc)_jack_port_get_buffer;
- pContext->jack.jack_free = (ma_proc)_jack_free;
-#endif
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = (ma_uint32)result / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ }
- pContext->isBackendAsynchronous = MA_TRUE;
+ return MA_SUCCESS;
+}
- pContext->onUninit = ma_context_uninit__jack;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__jack;
- pContext->onEnumDevices = ma_context_enumerate_devices__jack;
- pContext->onGetDeviceInfo = ma_context_get_device_info__jack;
- pContext->onDeviceInit = ma_device_init__jack;
- pContext->onDeviceUninit = ma_device_uninit__jack;
- pContext->onDeviceStart = ma_device_start__jack;
- pContext->onDeviceStop = ma_device_stop__jack;
+static ma_result ma_device_read__audio4(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead)
+{
+ int result;
- if (pConfig->jack.pClientName != NULL) {
- pContext->jack.pClientName = ma_copy_string(pConfig->jack.pClientName);
+ if (pFramesRead != NULL) {
+ *pFramesRead = 0;
}
- pContext->jack.tryStartServer = pConfig->jack.tryStartServer;
- /*
- Getting here means the JACK library is installed, but it doesn't necessarily mean it's usable. We need to quickly test this by connecting
- a temporary client.
- */
- {
- ma_jack_client_t* pDummyClient;
- ma_result result = ma_context_open_client__jack(pContext, &pDummyClient);
- if (result != MA_SUCCESS) {
- ma_free(pContext->jack.pClientName);
- #ifndef MA_NO_RUNTIME_LINKING
- ma_dlclose(pContext, pContext->jack.jackSO);
- #endif
- return MA_NO_BACKEND;
- }
+ result = read(pDevice->audio4.fdCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+ if (result < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to read data from the device.", ma_result_from_errno(errno));
+ }
- ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDummyClient);
+ if (pFramesRead != NULL) {
+ *pFramesRead = (ma_uint32)result / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
}
return MA_SUCCESS;
}
-#endif /* JACK */
+static ma_result ma_device_main_loop__audio4(ma_device* pDevice)
+{
+ ma_result result = MA_SUCCESS;
+ ma_bool32 exitLoop = MA_FALSE;
+
+ /* No need to explicitly start the device like the other backends. */
+
+ while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) {
+ switch (pDevice->type)
+ {
+ case ma_device_type_duplex:
+ {
+ /* The process is: device_read -> convert -> callback -> convert -> device_write */
+ ma_uint32 totalCapturedDeviceFramesProcessed = 0;
+ ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames);
+
+ while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) {
+ ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 capturedDeviceFramesRemaining;
+ ma_uint32 capturedDeviceFramesProcessed;
+ ma_uint32 capturedDeviceFramesToProcess;
+ ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed;
+ if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) {
+ capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames;
+ }
+
+ result = ma_device_read__audio4(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ capturedDeviceFramesRemaining = capturedDeviceFramesToProcess;
+ capturedDeviceFramesProcessed = 0;
+ for (;;) {
+ ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+ ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+ ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames);
+ ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining;
+ ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+
+ /* Convert capture data from device format to client format. */
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration);
+ if (result != MA_SUCCESS) {
+ break;
+ }
-/******************************************************************************
+ /*
+ If we weren't able to generate any output frames it must mean we've exhaused all of our input. The only time this would not be the case is if capturedClientData was too small
+ which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE.
+ */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
-Core Audio Backend
+ ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast .*/
-******************************************************************************/
-#ifdef MA_HAS_COREAUDIO
-#include <TargetConditionals.h>
+ capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+ capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
-#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE == 1
- #define MA_APPLE_MOBILE
-#else
- #define MA_APPLE_DESKTOP
-#endif
+ /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */
+ for (;;) {
+ ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration;
+ ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames;
+ result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount);
+ if (result != MA_SUCCESS) {
+ break;
+ }
-#if defined(MA_APPLE_DESKTOP)
-#include <CoreAudio/CoreAudio.h>
-#else
-#include <AVFoundation/AVFoundation.h>
-#endif
+ result = ma_device_write__audio4(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
-#include <AudioToolbox/AudioToolbox.h>
+ capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
+ }
-/* CoreFoundation */
-typedef Boolean (* ma_CFStringGetCString_proc)(CFStringRef theString, char* buffer, CFIndex bufferSize, CFStringEncoding encoding);
+ /* In case an error happened from ma_device_write__audio4()... */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+ }
-/* CoreAudio */
-#if defined(MA_APPLE_DESKTOP)
-typedef OSStatus (* ma_AudioObjectGetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* ioDataSize, void* outData);
-typedef OSStatus (* ma_AudioObjectGetPropertyDataSize_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* outDataSize);
-typedef OSStatus (* ma_AudioObjectSetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32 inDataSize, const void* inData);
-typedef OSStatus (* ma_AudioObjectAddPropertyListener_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, AudioObjectPropertyListenerProc inListener, void* inClientData);
-#endif
+ totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed;
+ }
+ } break;
-/* AudioToolbox */
-typedef AudioComponent (* ma_AudioComponentFindNext_proc)(AudioComponent inComponent, const AudioComponentDescription* inDesc);
-typedef OSStatus (* ma_AudioComponentInstanceDispose_proc)(AudioComponentInstance inInstance);
-typedef OSStatus (* ma_AudioComponentInstanceNew_proc)(AudioComponent inComponent, AudioComponentInstance* outInstance);
-typedef OSStatus (* ma_AudioOutputUnitStart_proc)(AudioUnit inUnit);
-typedef OSStatus (* ma_AudioOutputUnitStop_proc)(AudioUnit inUnit);
-typedef OSStatus (* ma_AudioUnitAddPropertyListener_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitPropertyListenerProc inProc, void* inProcUserData);
-typedef OSStatus (* ma_AudioUnitGetPropertyInfo_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, UInt32* outDataSize, Boolean* outWriteable);
-typedef OSStatus (* ma_AudioUnitGetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, void* outData, UInt32* ioDataSize);
-typedef OSStatus (* ma_AudioUnitSetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, const void* inData, UInt32 inDataSize);
-typedef OSStatus (* ma_AudioUnitInitialize_proc)(AudioUnit inUnit);
-typedef OSStatus (* ma_AudioUnitRender_proc)(AudioUnit inUnit, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inOutputBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData);
+ case ma_device_type_capture:
+ {
+ /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+ ma_uint8 intermediaryBuffer[8192];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames;
+ ma_uint32 framesReadThisPeriod = 0;
+ while (framesReadThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToReadThisIteration = framesRemainingInPeriod;
+ if (framesToReadThisIteration > intermediaryBufferSizeInFrames) {
+ framesToReadThisIteration = intermediaryBufferSizeInFrames;
+ }
+ result = ma_device_read__audio4(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
-#define MA_COREAUDIO_OUTPUT_BUS 0
-#define MA_COREAUDIO_INPUT_BUS 1
+ ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer);
-ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit);
+ framesReadThisPeriod += framesProcessed;
+ }
+ } break;
-/*
-Core Audio
+ case ma_device_type_playback:
+ {
+ /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+ ma_uint8 intermediaryBuffer[8192];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
+ ma_uint32 framesWrittenThisPeriod = 0;
+ while (framesWrittenThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod;
+ if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) {
+ framesToWriteThisIteration = intermediaryBufferSizeInFrames;
+ }
-So far, Core Audio has been the worst backend to work with due to being both unintuitive and having almost no documentation
-apart from comments in the headers (which admittedly are quite good). For my own purposes, and for anybody out there whose
-needing to figure out how this darn thing works, I'm going to outline a few things here.
+ ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer);
-Since miniaudio is a fairly low-level API, one of the things it needs is control over specific devices, and it needs to be
-able to identify whether or not it can be used as playback and/or capture. The AudioObject API is the only one I've seen
-that supports this level of detail. There was some public domain sample code I stumbled across that used the AudioComponent
-and AudioUnit APIs, but I couldn't see anything that gave low-level control over device selection and capabilities (the
-distinction between playback and capture in particular). Therefore, miniaudio is using the AudioObject API.
+ result = ma_device_write__audio4(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
-Most (all?) functions in the AudioObject API take a AudioObjectID as it's input. This is the device identifier. When
-retrieving global information, such as the device list, you use kAudioObjectSystemObject. When retrieving device-specific
-data, you pass in the ID for that device. In order to retrieve device-specific IDs you need to enumerate over each of the
-devices. This is done using the AudioObjectGetPropertyDataSize() and AudioObjectGetPropertyData() APIs which seem to be
-the central APIs for retrieving information about the system and specific devices.
+ framesWrittenThisPeriod += framesProcessed;
+ }
+ } break;
-To use the AudioObjectGetPropertyData() API you need to use the notion of a property address. A property address is a
-structure with three variables and is used to identify which property you are getting or setting. The first is the "selector"
-which is basically the specific property that you're wanting to retrieve or set. The second is the "scope", which is
-typically set to kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyScopeInput for input-specific properties and
-kAudioObjectPropertyScopeOutput for output-specific properties. The last is the "element" which is always set to
-kAudioObjectPropertyElementMaster in miniaudio's case. I don't know of any cases where this would be set to anything different.
+ /* To silence a warning. Will never hit this. */
+ case ma_device_type_loopback:
+ default: break;
+ }
+ }
-Back to the earlier issue of device retrieval, you first use the AudioObjectGetPropertyDataSize() API to retrieve the size
-of the raw data which is just a list of AudioDeviceID's. You use the kAudioObjectSystemObject AudioObjectID, and a property
-address with the kAudioHardwarePropertyDevices selector and the kAudioObjectPropertyScopeGlobal scope. Once you have the
-size, allocate a block of memory of that size and then call AudioObjectGetPropertyData(). The data is just a list of
-AudioDeviceID's so just do "dataSize/sizeof(AudioDeviceID)" to know the device count.
-*/
-ma_result ma_result_from_OSStatus(OSStatus status)
-{
- switch (status)
- {
- case noErr: return MA_SUCCESS;
- #if defined(MA_APPLE_DESKTOP)
- case kAudioHardwareNotRunningError: return MA_DEVICE_NOT_STARTED;
- case kAudioHardwareUnspecifiedError: return MA_ERROR;
- case kAudioHardwareUnknownPropertyError: return MA_INVALID_ARGS;
- case kAudioHardwareBadPropertySizeError: return MA_INVALID_OPERATION;
- case kAudioHardwareIllegalOperationError: return MA_INVALID_OPERATION;
- case kAudioHardwareBadObjectError: return MA_INVALID_ARGS;
- case kAudioHardwareBadDeviceError: return MA_INVALID_ARGS;
- case kAudioHardwareBadStreamError: return MA_INVALID_ARGS;
- case kAudioHardwareUnsupportedOperationError: return MA_INVALID_OPERATION;
- case kAudioDeviceUnsupportedFormatError: return MA_FORMAT_NOT_SUPPORTED;
- case kAudioDevicePermissionsError: return MA_ACCESS_DENIED;
- #endif
- default: return MA_ERROR;
- }
+ /* Here is where the device is stopped. */
+ ma_device_stop__audio4(pDevice);
+
+ return result;
}
-#if 0
-ma_channel ma_channel_from_AudioChannelBitmap(AudioChannelBitmap bit)
+static ma_result ma_context_uninit__audio4(ma_context* pContext)
{
- switch (bit)
- {
- case kAudioChannelBit_Left: return MA_CHANNEL_LEFT;
- case kAudioChannelBit_Right: return MA_CHANNEL_RIGHT;
- case kAudioChannelBit_Center: return MA_CHANNEL_FRONT_CENTER;
- case kAudioChannelBit_LFEScreen: return MA_CHANNEL_LFE;
- case kAudioChannelBit_LeftSurround: return MA_CHANNEL_BACK_LEFT;
- case kAudioChannelBit_RightSurround: return MA_CHANNEL_BACK_RIGHT;
- case kAudioChannelBit_LeftCenter: return MA_CHANNEL_FRONT_LEFT_CENTER;
- case kAudioChannelBit_RightCenter: return MA_CHANNEL_FRONT_RIGHT_CENTER;
- case kAudioChannelBit_CenterSurround: return MA_CHANNEL_BACK_CENTER;
- case kAudioChannelBit_LeftSurroundDirect: return MA_CHANNEL_SIDE_LEFT;
- case kAudioChannelBit_RightSurroundDirect: return MA_CHANNEL_SIDE_RIGHT;
- case kAudioChannelBit_TopCenterSurround: return MA_CHANNEL_TOP_CENTER;
- case kAudioChannelBit_VerticalHeightLeft: return MA_CHANNEL_TOP_FRONT_LEFT;
- case kAudioChannelBit_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER;
- case kAudioChannelBit_VerticalHeightRight: return MA_CHANNEL_TOP_FRONT_RIGHT;
- case kAudioChannelBit_TopBackLeft: return MA_CHANNEL_TOP_BACK_LEFT;
- case kAudioChannelBit_TopBackCenter: return MA_CHANNEL_TOP_BACK_CENTER;
- case kAudioChannelBit_TopBackRight: return MA_CHANNEL_TOP_BACK_RIGHT;
- default: return MA_CHANNEL_NONE;
- }
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_audio4);
+
+ (void)pContext;
+ return MA_SUCCESS;
}
-#endif
-ma_channel ma_channel_from_AudioChannelLabel(AudioChannelLabel label)
+static ma_result ma_context_init__audio4(const ma_context_config* pConfig, ma_context* pContext)
{
- switch (label)
- {
- case kAudioChannelLabel_Unknown: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_Unused: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_UseCoordinates: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_Left: return MA_CHANNEL_LEFT;
- case kAudioChannelLabel_Right: return MA_CHANNEL_RIGHT;
- case kAudioChannelLabel_Center: return MA_CHANNEL_FRONT_CENTER;
- case kAudioChannelLabel_LFEScreen: return MA_CHANNEL_LFE;
- case kAudioChannelLabel_LeftSurround: return MA_CHANNEL_BACK_LEFT;
- case kAudioChannelLabel_RightSurround: return MA_CHANNEL_BACK_RIGHT;
- case kAudioChannelLabel_LeftCenter: return MA_CHANNEL_FRONT_LEFT_CENTER;
- case kAudioChannelLabel_RightCenter: return MA_CHANNEL_FRONT_RIGHT_CENTER;
- case kAudioChannelLabel_CenterSurround: return MA_CHANNEL_BACK_CENTER;
- case kAudioChannelLabel_LeftSurroundDirect: return MA_CHANNEL_SIDE_LEFT;
- case kAudioChannelLabel_RightSurroundDirect: return MA_CHANNEL_SIDE_RIGHT;
- case kAudioChannelLabel_TopCenterSurround: return MA_CHANNEL_TOP_CENTER;
- case kAudioChannelLabel_VerticalHeightLeft: return MA_CHANNEL_TOP_FRONT_LEFT;
- case kAudioChannelLabel_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER;
- case kAudioChannelLabel_VerticalHeightRight: return MA_CHANNEL_TOP_FRONT_RIGHT;
- case kAudioChannelLabel_TopBackLeft: return MA_CHANNEL_TOP_BACK_LEFT;
- case kAudioChannelLabel_TopBackCenter: return MA_CHANNEL_TOP_BACK_CENTER;
- case kAudioChannelLabel_TopBackRight: return MA_CHANNEL_TOP_BACK_RIGHT;
- case kAudioChannelLabel_RearSurroundLeft: return MA_CHANNEL_BACK_LEFT;
- case kAudioChannelLabel_RearSurroundRight: return MA_CHANNEL_BACK_RIGHT;
- case kAudioChannelLabel_LeftWide: return MA_CHANNEL_SIDE_LEFT;
- case kAudioChannelLabel_RightWide: return MA_CHANNEL_SIDE_RIGHT;
- case kAudioChannelLabel_LFE2: return MA_CHANNEL_LFE;
- case kAudioChannelLabel_LeftTotal: return MA_CHANNEL_LEFT;
- case kAudioChannelLabel_RightTotal: return MA_CHANNEL_RIGHT;
- case kAudioChannelLabel_HearingImpaired: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_Narration: return MA_CHANNEL_MONO;
- case kAudioChannelLabel_Mono: return MA_CHANNEL_MONO;
- case kAudioChannelLabel_DialogCentricMix: return MA_CHANNEL_MONO;
- case kAudioChannelLabel_CenterSurroundDirect: return MA_CHANNEL_BACK_CENTER;
- case kAudioChannelLabel_Haptic: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_Ambisonic_W: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_Ambisonic_X: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_Ambisonic_Y: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_Ambisonic_Z: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_MS_Mid: return MA_CHANNEL_LEFT;
- case kAudioChannelLabel_MS_Side: return MA_CHANNEL_RIGHT;
- case kAudioChannelLabel_XY_X: return MA_CHANNEL_LEFT;
- case kAudioChannelLabel_XY_Y: return MA_CHANNEL_RIGHT;
- case kAudioChannelLabel_HeadphonesLeft: return MA_CHANNEL_LEFT;
- case kAudioChannelLabel_HeadphonesRight: return MA_CHANNEL_RIGHT;
- case kAudioChannelLabel_ClickTrack: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_ForeignLanguage: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_Discrete: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_Discrete_0: return MA_CHANNEL_AUX_0;
- case kAudioChannelLabel_Discrete_1: return MA_CHANNEL_AUX_1;
- case kAudioChannelLabel_Discrete_2: return MA_CHANNEL_AUX_2;
- case kAudioChannelLabel_Discrete_3: return MA_CHANNEL_AUX_3;
- case kAudioChannelLabel_Discrete_4: return MA_CHANNEL_AUX_4;
- case kAudioChannelLabel_Discrete_5: return MA_CHANNEL_AUX_5;
- case kAudioChannelLabel_Discrete_6: return MA_CHANNEL_AUX_6;
- case kAudioChannelLabel_Discrete_7: return MA_CHANNEL_AUX_7;
- case kAudioChannelLabel_Discrete_8: return MA_CHANNEL_AUX_8;
- case kAudioChannelLabel_Discrete_9: return MA_CHANNEL_AUX_9;
- case kAudioChannelLabel_Discrete_10: return MA_CHANNEL_AUX_10;
- case kAudioChannelLabel_Discrete_11: return MA_CHANNEL_AUX_11;
- case kAudioChannelLabel_Discrete_12: return MA_CHANNEL_AUX_12;
- case kAudioChannelLabel_Discrete_13: return MA_CHANNEL_AUX_13;
- case kAudioChannelLabel_Discrete_14: return MA_CHANNEL_AUX_14;
- case kAudioChannelLabel_Discrete_15: return MA_CHANNEL_AUX_15;
- case kAudioChannelLabel_Discrete_65535: return MA_CHANNEL_NONE;
-
- #if 0 /* Introduced in a later version of macOS. */
- case kAudioChannelLabel_HOA_ACN: return MA_CHANNEL_NONE;
- case kAudioChannelLabel_HOA_ACN_0: return MA_CHANNEL_AUX_0;
- case kAudioChannelLabel_HOA_ACN_1: return MA_CHANNEL_AUX_1;
- case kAudioChannelLabel_HOA_ACN_2: return MA_CHANNEL_AUX_2;
- case kAudioChannelLabel_HOA_ACN_3: return MA_CHANNEL_AUX_3;
- case kAudioChannelLabel_HOA_ACN_4: return MA_CHANNEL_AUX_4;
- case kAudioChannelLabel_HOA_ACN_5: return MA_CHANNEL_AUX_5;
- case kAudioChannelLabel_HOA_ACN_6: return MA_CHANNEL_AUX_6;
- case kAudioChannelLabel_HOA_ACN_7: return MA_CHANNEL_AUX_7;
- case kAudioChannelLabel_HOA_ACN_8: return MA_CHANNEL_AUX_8;
- case kAudioChannelLabel_HOA_ACN_9: return MA_CHANNEL_AUX_9;
- case kAudioChannelLabel_HOA_ACN_10: return MA_CHANNEL_AUX_10;
- case kAudioChannelLabel_HOA_ACN_11: return MA_CHANNEL_AUX_11;
- case kAudioChannelLabel_HOA_ACN_12: return MA_CHANNEL_AUX_12;
- case kAudioChannelLabel_HOA_ACN_13: return MA_CHANNEL_AUX_13;
- case kAudioChannelLabel_HOA_ACN_14: return MA_CHANNEL_AUX_14;
- case kAudioChannelLabel_HOA_ACN_15: return MA_CHANNEL_AUX_15;
- case kAudioChannelLabel_HOA_ACN_65024: return MA_CHANNEL_NONE;
- #endif
-
- default: return MA_CHANNEL_NONE;
+ MA_ASSERT(pContext != NULL);
+
+ (void)pConfig;
+
+ pContext->onUninit = ma_context_uninit__audio4;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__audio4;
+ pContext->onEnumDevices = ma_context_enumerate_devices__audio4;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__audio4;
+ pContext->onDeviceInit = ma_device_init__audio4;
+ pContext->onDeviceUninit = ma_device_uninit__audio4;
+ pContext->onDeviceStart = NULL; /* Not required for synchronous backends. */
+ pContext->onDeviceStop = NULL; /* Not required for synchronous backends. */
+ pContext->onDeviceMainLoop = ma_device_main_loop__audio4;
+
+ return MA_SUCCESS;
+}
+#endif /* audio4 */
+
+
+/******************************************************************************
+
+OSS Backend
+
+******************************************************************************/
+#ifdef MA_HAS_OSS
+#include
+#include
+#include
+#include
+
+#ifndef SNDCTL_DSP_HALT
+#define SNDCTL_DSP_HALT SNDCTL_DSP_RESET
+#endif
+
+static int ma_open_temp_device__oss()
+{
+ /* The OSS sample code uses "/dev/mixer" as the device for getting system properties so I'm going to do the same. */
+ int fd = open("/dev/mixer", O_RDONLY, 0);
+ if (fd >= 0) {
+ return fd;
}
+
+ return -1;
}
-ma_result ma_format_from_AudioStreamBasicDescription(const AudioStreamBasicDescription* pDescription, ma_format* pFormatOut)
+static ma_result ma_context_open_device__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, int* pfd)
{
- ma_assert(pDescription != NULL);
- ma_assert(pFormatOut != NULL);
-
- *pFormatOut = ma_format_unknown; /* Safety. */
-
- /* There's a few things miniaudio doesn't support. */
- if (pDescription->mFormatID != kAudioFormatLinearPCM) {
- return MA_FORMAT_NOT_SUPPORTED;
+ const char* deviceName;
+ int flags;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pfd != NULL);
+ (void)pContext;
+
+ *pfd = -1;
+
+ /* This function should only be called for playback or capture, not duplex. */
+ if (deviceType == ma_device_type_duplex) {
+ return MA_INVALID_ARGS;
}
-
- /* We don't support any non-packed formats that are aligned high. */
- if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsAlignedHigh) != 0) {
- return MA_FORMAT_NOT_SUPPORTED;
+
+ deviceName = "/dev/dsp";
+ if (pDeviceID != NULL) {
+ deviceName = pDeviceID->oss;
}
- /* Only supporting native-endian. */
- if ((ma_is_little_endian() && (pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) != 0) || (ma_is_big_endian() && (pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) == 0)) {
- return MA_FORMAT_NOT_SUPPORTED;
+ flags = (deviceType == ma_device_type_playback) ? O_WRONLY : O_RDONLY;
+ if (shareMode == ma_share_mode_exclusive) {
+ flags |= O_EXCL;
}
-
- /* We are not currently supporting non-interleaved formats (this will be added in a future version of miniaudio). */
- /*if ((pDescription->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0) {
- return MA_FORMAT_NOT_SUPPORTED;
- }*/
- if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsFloat) != 0) {
- if (pDescription->mBitsPerChannel == 32) {
- *pFormatOut = ma_format_f32;
- return MA_SUCCESS;
- }
- } else {
- if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsSignedInteger) != 0) {
- if (pDescription->mBitsPerChannel == 16) {
- *pFormatOut = ma_format_s16;
- return MA_SUCCESS;
- } else if (pDescription->mBitsPerChannel == 24) {
- if (pDescription->mBytesPerFrame == (pDescription->mBitsPerChannel/8 * pDescription->mChannelsPerFrame)) {
- *pFormatOut = ma_format_s24;
- return MA_SUCCESS;
- } else {
- if (pDescription->mBytesPerFrame/pDescription->mChannelsPerFrame == sizeof(ma_int32)) {
- /* TODO: Implement ma_format_s24_32. */
- /**pFormatOut = ma_format_s24_32;*/
- /*return MA_SUCCESS;*/
- return MA_FORMAT_NOT_SUPPORTED;
+ *pfd = open(deviceName, flags, 0);
+ if (*pfd == -1) {
+ return ma_result_from_errno(errno);
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_bool32 ma_context_is_device_id_equal__oss(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext;
+
+ return ma_strcmp(pID0->oss, pID1->oss) == 0;
+}
+
+static ma_result ma_context_enumerate_devices__oss(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ int fd;
+ oss_sysinfo si;
+ int result;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
+
+ fd = ma_open_temp_device__oss();
+ if (fd == -1) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open a temporary device for retrieving system information used for device enumeration.", MA_NO_BACKEND);
+ }
+
+ result = ioctl(fd, SNDCTL_SYSINFO, &si);
+ if (result != -1) {
+ int iAudioDevice;
+ for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) {
+ oss_audioinfo ai;
+ ai.dev = iAudioDevice;
+ result = ioctl(fd, SNDCTL_AUDIOINFO, &ai);
+ if (result != -1) {
+ if (ai.devnode[0] != '\0') { /* <-- Can be blank, according to documentation. */
+ ma_device_info deviceInfo;
+ ma_bool32 isTerminating = MA_FALSE;
+
+ MA_ZERO_OBJECT(&deviceInfo);
+
+ /* ID */
+ ma_strncpy_s(deviceInfo.id.oss, sizeof(deviceInfo.id.oss), ai.devnode, (size_t)-1);
+
+ /*
+ The human readable device name should be in the "ai.handle" variable, but it can
+ sometimes be empty in which case we just fall back to "ai.name" which is less user
+ friendly, but usually has a value.
+ */
+ if (ai.handle[0] != '\0') {
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.handle, (size_t)-1);
+ } else {
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.name, (size_t)-1);
+ }
+
+ /* The device can be both playback and capture. */
+ if (!isTerminating && (ai.caps & PCM_CAP_OUTPUT) != 0) {
+ isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ }
+ if (!isTerminating && (ai.caps & PCM_CAP_INPUT) != 0) {
+ isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+ }
+
+ if (isTerminating) {
+ break;
}
}
- } else if (pDescription->mBitsPerChannel == 32) {
- *pFormatOut = ma_format_s32;
- return MA_SUCCESS;
- }
- } else {
- if (pDescription->mBitsPerChannel == 8) {
- *pFormatOut = ma_format_u8;
- return MA_SUCCESS;
}
}
+ } else {
+ close(fd);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve system information for device enumeration.", MA_NO_BACKEND);
}
-
- /* Getting here means the format is not supported. */
- return MA_FORMAT_NOT_SUPPORTED;
+
+ close(fd);
+ return MA_SUCCESS;
}
-ma_result ma_get_channel_map_from_AudioChannelLayout(AudioChannelLayout* pChannelLayout, ma_channel channelMap[MA_MAX_CHANNELS])
+static ma_result ma_context_get_device_info__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
{
- ma_assert(pChannelLayout != NULL);
-
- if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) {
- UInt32 iChannel;
- for (iChannel = 0; iChannel < pChannelLayout->mNumberChannelDescriptions; ++iChannel) {
- channelMap[iChannel] = ma_channel_from_AudioChannelLabel(pChannelLayout->mChannelDescriptions[iChannel].mChannelLabel);
- }
- } else
-#if 0
- if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) {
- /* This is the same kind of system that's used by Windows audio APIs. */
- UInt32 iChannel = 0;
- UInt32 iBit;
- AudioChannelBitmap bitmap = pChannelLayout->mChannelBitmap;
- for (iBit = 0; iBit < 32; ++iBit) {
- AudioChannelBitmap bit = bitmap & (1 << iBit);
- if (bit != 0) {
- channelMap[iChannel++] = ma_channel_from_AudioChannelBit(bit);
- }
- }
- } else
-#endif
- {
- /*
- Need to use the tag to determine the channel map. For now I'm just assuming a default channel map, but later on this should
- be updated to determine the mapping based on the tag.
- */
- UInt32 channelCount = AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag);
- switch (pChannelLayout->mChannelLayoutTag)
- {
- case kAudioChannelLayoutTag_Mono:
- case kAudioChannelLayoutTag_Stereo:
- case kAudioChannelLayoutTag_StereoHeadphones:
- case kAudioChannelLayoutTag_MatrixStereo:
- case kAudioChannelLayoutTag_MidSide:
- case kAudioChannelLayoutTag_XY:
- case kAudioChannelLayoutTag_Binaural:
- case kAudioChannelLayoutTag_Ambisonic_B_Format:
- {
- ma_get_standard_channel_map(ma_standard_channel_map_default, channelCount, channelMap);
- } break;
-
- case kAudioChannelLayoutTag_Octagonal:
- {
- channelMap[7] = MA_CHANNEL_SIDE_RIGHT;
- channelMap[6] = MA_CHANNEL_SIDE_LEFT;
- } /* Intentional fallthrough. */
- case kAudioChannelLayoutTag_Hexagonal:
- {
- channelMap[5] = MA_CHANNEL_BACK_CENTER;
- } /* Intentional fallthrough. */
- case kAudioChannelLayoutTag_Pentagonal:
- {
- channelMap[4] = MA_CHANNEL_FRONT_CENTER;
- } /* Intentional fallghrough. */
- case kAudioChannelLayoutTag_Quadraphonic:
- {
- channelMap[3] = MA_CHANNEL_BACK_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_LEFT;
- channelMap[1] = MA_CHANNEL_RIGHT;
- channelMap[0] = MA_CHANNEL_LEFT;
- } break;
-
- /* TODO: Add support for more tags here. */
-
- default:
- {
- ma_get_standard_channel_map(ma_standard_channel_map_default, channelCount, channelMap);
- } break;
+ ma_bool32 foundDevice;
+ int fdTemp;
+ oss_sysinfo si;
+ int result;
+
+ MA_ASSERT(pContext != NULL);
+ (void)shareMode;
+
+ /* Handle the default device a little differently. */
+ if (pDeviceID == NULL) {
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
}
+
+ return MA_SUCCESS;
}
-
- return MA_SUCCESS;
-}
-#if defined(MA_APPLE_DESKTOP)
-ma_result ma_get_device_object_ids__coreaudio(ma_context* pContext, UInt32* pDeviceCount, AudioObjectID** ppDeviceObjectIDs) /* NOTE: Free the returned buffer with ma_free(). */
-{
- AudioObjectPropertyAddress propAddressDevices;
- UInt32 deviceObjectsDataSize;
- OSStatus status;
- AudioObjectID* pDeviceObjectIDs;
+ /* If we get here it means we are _not_ using the default device. */
+ foundDevice = MA_FALSE;
+
+ fdTemp = ma_open_temp_device__oss();
+ if (fdTemp == -1) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open a temporary device for retrieving system information used for device enumeration.", MA_NO_BACKEND);
+ }
+
+ result = ioctl(fdTemp, SNDCTL_SYSINFO, &si);
+ if (result != -1) {
+ int iAudioDevice;
+ for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) {
+ oss_audioinfo ai;
+ ai.dev = iAudioDevice;
+ result = ioctl(fdTemp, SNDCTL_AUDIOINFO, &ai);
+ if (result != -1) {
+ if (ma_strcmp(ai.devnode, pDeviceID->oss) == 0) {
+ /* It has the same name, so now just confirm the type. */
+ if ((deviceType == ma_device_type_playback && ((ai.caps & PCM_CAP_OUTPUT) != 0)) ||
+ (deviceType == ma_device_type_capture && ((ai.caps & PCM_CAP_INPUT) != 0))) {
+ unsigned int formatMask;
+
+ /* ID */
+ ma_strncpy_s(pDeviceInfo->id.oss, sizeof(pDeviceInfo->id.oss), ai.devnode, (size_t)-1);
+
+ /*
+ The human readable device name should be in the "ai.handle" variable, but it can
+ sometimes be empty in which case we just fall back to "ai.name" which is less user
+ friendly, but usually has a value.
+ */
+ if (ai.handle[0] != '\0') {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ai.handle, (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ai.name, (size_t)-1);
+ }
- ma_assert(pContext != NULL);
- ma_assert(pDeviceCount != NULL);
- ma_assert(ppDeviceObjectIDs != NULL);
+ pDeviceInfo->minChannels = ai.min_channels;
+ pDeviceInfo->maxChannels = ai.max_channels;
+ pDeviceInfo->minSampleRate = ai.min_rate;
+ pDeviceInfo->maxSampleRate = ai.max_rate;
+ pDeviceInfo->formatCount = 0;
- /* Safety. */
- *pDeviceCount = 0;
- *ppDeviceObjectIDs = NULL;
-
- propAddressDevices.mSelector = kAudioHardwarePropertyDevices;
- propAddressDevices.mScope = kAudioObjectPropertyScopeGlobal;
- propAddressDevices.mElement = kAudioObjectPropertyElementMaster;
+ if (deviceType == ma_device_type_playback) {
+ formatMask = ai.oformats;
+ } else {
+ formatMask = ai.iformats;
+ }
- status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(kAudioObjectSystemObject, &propAddressDevices, 0, NULL, &deviceObjectsDataSize);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
- }
-
- pDeviceObjectIDs = (AudioObjectID*)ma_malloc(deviceObjectsDataSize);
- if (pDeviceObjectIDs == NULL) {
- return MA_OUT_OF_MEMORY;
+ if ((formatMask & AFMT_U8) != 0) {
+ pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_u8;
+ }
+ if (((formatMask & AFMT_S16_LE) != 0 && ma_is_little_endian()) || ((formatMask & AFMT_S16_BE) != 0 && ma_is_big_endian())) {
+ pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s16;
+ }
+ if (((formatMask & AFMT_S32_LE) != 0 && ma_is_little_endian()) || ((formatMask & AFMT_S32_BE) != 0 && ma_is_big_endian())) {
+ pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s32;
+ }
+
+ foundDevice = MA_TRUE;
+ break;
+ }
+ }
+ }
+ }
+ } else {
+ close(fdTemp);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve system information for device enumeration.", MA_NO_BACKEND);
}
-
- status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDevices, 0, NULL, &deviceObjectsDataSize, pDeviceObjectIDs);
- if (status != noErr) {
- ma_free(pDeviceObjectIDs);
- return ma_result_from_OSStatus(status);
+
+
+ close(fdTemp);
+
+ if (!foundDevice) {
+ return MA_NO_DEVICE;
}
-
- *pDeviceCount = deviceObjectsDataSize / sizeof(AudioObjectID);
- *ppDeviceObjectIDs = pDeviceObjectIDs;
- (void)pContext; /* Unused. */
return MA_SUCCESS;
}
-ma_result ma_get_AudioObject_uid_as_CFStringRef(ma_context* pContext, AudioObjectID objectID, CFStringRef* pUID)
+static void ma_device_uninit__oss(ma_device* pDevice)
{
- AudioObjectPropertyAddress propAddress;
- UInt32 dataSize;
- OSStatus status;
+ MA_ASSERT(pDevice != NULL);
- ma_assert(pContext != NULL);
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ close(pDevice->oss.fdCapture);
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ close(pDevice->oss.fdPlayback);
+ }
+}
- propAddress.mSelector = kAudioDevicePropertyDeviceUID;
- propAddress.mScope = kAudioObjectPropertyScopeGlobal;
- propAddress.mElement = kAudioObjectPropertyElementMaster;
+static int ma_format_to_oss(ma_format format)
+{
+ int ossFormat = AFMT_U8;
+ switch (format) {
+ case ma_format_s16: ossFormat = (ma_is_little_endian()) ? AFMT_S16_LE : AFMT_S16_BE; break;
+ case ma_format_s24: ossFormat = (ma_is_little_endian()) ? AFMT_S32_LE : AFMT_S32_BE; break;
+ case ma_format_s32: ossFormat = (ma_is_little_endian()) ? AFMT_S32_LE : AFMT_S32_BE; break;
+ case ma_format_f32: ossFormat = (ma_is_little_endian()) ? AFMT_S16_LE : AFMT_S16_BE; break;
+ case ma_format_u8:
+ default: ossFormat = AFMT_U8; break;
+ }
- dataSize = sizeof(*pUID);
- status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, pUID);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
+ return ossFormat;
+}
+
+static ma_format ma_format_from_oss(int ossFormat)
+{
+ if (ossFormat == AFMT_U8) {
+ return ma_format_u8;
+ } else {
+ if (ma_is_little_endian()) {
+ switch (ossFormat) {
+ case AFMT_S16_LE: return ma_format_s16;
+ case AFMT_S32_LE: return ma_format_s32;
+ default: return ma_format_unknown;
+ }
+ } else {
+ switch (ossFormat) {
+ case AFMT_S16_BE: return ma_format_s16;
+ case AFMT_S32_BE: return ma_format_s32;
+ default: return ma_format_unknown;
+ }
+ }
}
-
- return MA_SUCCESS;
+
+ return ma_format_unknown;
}
-ma_result ma_get_AudioObject_uid(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut)
+static ma_result ma_device_init_fd__oss(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice)
{
- CFStringRef uid;
ma_result result;
+ int ossResult;
+ int fd;
+ const ma_device_id* pDeviceID = NULL;
+ ma_share_mode shareMode;
+ int ossFormat;
+ int ossChannels;
+ int ossSampleRate;
+ int ossFragment;
- ma_assert(pContext != NULL);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(deviceType != ma_device_type_duplex);
+ MA_ASSERT(pDevice != NULL);
- result = ma_get_AudioObject_uid_as_CFStringRef(pContext, objectID, &uid);
- if (result != MA_SUCCESS) {
- return result;
+ (void)pContext;
+
+ if (deviceType == ma_device_type_capture) {
+ pDeviceID = pConfig->capture.pDeviceID;
+ shareMode = pConfig->capture.shareMode;
+ ossFormat = ma_format_to_oss(pConfig->capture.format);
+ ossChannels = (int)pConfig->capture.channels;
+ ossSampleRate = (int)pConfig->sampleRate;
+ } else {
+ pDeviceID = pConfig->playback.pDeviceID;
+ shareMode = pConfig->playback.shareMode;
+ ossFormat = ma_format_to_oss(pConfig->playback.format);
+ ossChannels = (int)pConfig->playback.channels;
+ ossSampleRate = (int)pConfig->sampleRate;
}
-
- if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(uid, bufferOut, bufferSize, kCFStringEncodingUTF8)) {
- return MA_ERROR;
+
+ result = ma_context_open_device__oss(pContext, deviceType, pDeviceID, shareMode, &fd);
+ if (result != MA_SUCCESS) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device.", result);
}
-
- return MA_SUCCESS;
-}
-ma_result ma_get_AudioObject_name(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut)
-{
- AudioObjectPropertyAddress propAddress;
- CFStringRef deviceName = NULL;
- UInt32 dataSize;
- OSStatus status;
+ /*
+    The OSS documentation is very clear about the order we should be initializing the device's properties:
+ 1) Format
+ 2) Channels
+ 3) Sample rate.
+ */
- ma_assert(pContext != NULL);
+ /* Format. */
+ ossResult = ioctl(fd, SNDCTL_DSP_SETFMT, &ossFormat);
+ if (ossResult == -1) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set format.", MA_FORMAT_NOT_SUPPORTED);
+ }
- propAddress.mSelector = kAudioDevicePropertyDeviceNameCFString;
- propAddress.mScope = kAudioObjectPropertyScopeGlobal;
- propAddress.mElement = kAudioObjectPropertyElementMaster;
+ /* Channels. */
+ ossResult = ioctl(fd, SNDCTL_DSP_CHANNELS, &ossChannels);
+ if (ossResult == -1) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set channel count.", MA_FORMAT_NOT_SUPPORTED);
+ }
- dataSize = sizeof(deviceName);
- status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, &deviceName);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
+ /* Sample Rate. */
+ ossResult = ioctl(fd, SNDCTL_DSP_SPEED, &ossSampleRate);
+ if (ossResult == -1) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set sample rate.", MA_FORMAT_NOT_SUPPORTED);
}
+
+ /*
+ Buffer.
+
+ The documentation says that the fragment settings should be set as soon as possible, but I'm not sure if
+ it should be done before or after format/channels/rate.
- if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(deviceName, bufferOut, bufferSize, kCFStringEncodingUTF8)) {
- return MA_ERROR;
+ OSS wants the fragment size in bytes and a power of 2. When setting, we specify the power, not the actual
+ value.
+ */
+ {
+ ma_uint32 periodSizeInFrames;
+ ma_uint32 periodSizeInBytes;
+ ma_uint32 ossFragmentSizePower;
+
+ periodSizeInFrames = pConfig->periodSizeInFrames;
+ if (periodSizeInFrames == 0) {
+ periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, (ma_uint32)ossSampleRate);
+ }
+
+ periodSizeInBytes = ma_round_to_power_of_2(periodSizeInFrames * ma_get_bytes_per_frame(ma_format_from_oss(ossFormat), ossChannels));
+ if (periodSizeInBytes < 16) {
+ periodSizeInBytes = 16;
+ }
+
+ ossFragmentSizePower = 4;
+ periodSizeInBytes >>= 4;
+ while (periodSizeInBytes >>= 1) {
+ ossFragmentSizePower += 1;
+ }
+
+ ossFragment = (int)((pConfig->periods << 16) | ossFragmentSizePower);
+ ossResult = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &ossFragment);
+ if (ossResult == -1) {
+ close(fd);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set fragment size and period count.", MA_FORMAT_NOT_SUPPORTED);
+ }
+ }
+
+ /* Internal settings. */
+ if (deviceType == ma_device_type_capture) {
+ pDevice->oss.fdCapture = fd;
+ pDevice->capture.internalFormat = ma_format_from_oss(ossFormat);
+ pDevice->capture.internalChannels = ossChannels;
+ pDevice->capture.internalSampleRate = ossSampleRate;
+ ma_get_standard_channel_map(ma_standard_channel_map_sound4, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
+ pDevice->capture.internalPeriods = (ma_uint32)(ossFragment >> 16);
+ pDevice->capture.internalPeriodSizeInFrames = (ma_uint32)(1 << (ossFragment & 0xFFFF)) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+
+ if (pDevice->capture.internalFormat == ma_format_unknown) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] The device's internal format is not supported by miniaudio.", MA_FORMAT_NOT_SUPPORTED);
+ }
+ } else {
+ pDevice->oss.fdPlayback = fd;
+ pDevice->playback.internalFormat = ma_format_from_oss(ossFormat);
+ pDevice->playback.internalChannels = ossChannels;
+ pDevice->playback.internalSampleRate = ossSampleRate;
+ ma_get_standard_channel_map(ma_standard_channel_map_sound4, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
+ pDevice->playback.internalPeriods = (ma_uint32)(ossFragment >> 16);
+ pDevice->playback.internalPeriodSizeInFrames = (ma_uint32)(1 << (ossFragment & 0xFFFF)) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+
+ if (pDevice->playback.internalFormat == ma_format_unknown) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] The device's internal format is not supported by miniaudio.", MA_FORMAT_NOT_SUPPORTED);
+ }
}
-
+
return MA_SUCCESS;
}
-ma_bool32 ma_does_AudioObject_support_scope(ma_context* pContext, AudioObjectID deviceObjectID, AudioObjectPropertyScope scope)
+static ma_result ma_device_init__oss(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
{
- AudioObjectPropertyAddress propAddress;
- UInt32 dataSize;
- OSStatus status;
- AudioBufferList* pBufferList;
- ma_bool32 isSupported;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(pDevice != NULL);
- ma_assert(pContext != NULL);
+ MA_ZERO_OBJECT(&pDevice->oss);
- /* To know whether or not a device is an input device we need ot look at the stream configuration. If it has an output channel it's a playback device. */
- propAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
- propAddress.mScope = scope;
- propAddress.mElement = kAudioObjectPropertyElementMaster;
-
- status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
- if (status != noErr) {
- return MA_FALSE;
- }
-
- pBufferList = (AudioBufferList*)ma_malloc(dataSize);
- if (pBufferList == NULL) {
- return MA_FALSE; /* Out of memory. */
- }
-
- status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pBufferList);
- if (status != noErr) {
- ma_free(pBufferList);
- return MA_FALSE;
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
}
- isSupported = MA_FALSE;
- if (pBufferList->mNumberBuffers > 0) {
- isSupported = MA_TRUE;
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ ma_result result = ma_device_init_fd__oss(pContext, pConfig, ma_device_type_capture, pDevice);
+ if (result != MA_SUCCESS) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device.", result);
+ }
}
-
- ma_free(pBufferList);
- return isSupported;
-}
-ma_bool32 ma_does_AudioObject_support_playback(ma_context* pContext, AudioObjectID deviceObjectID)
-{
- return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeOutput);
-}
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ ma_result result = ma_device_init_fd__oss(pContext, pConfig, ma_device_type_playback, pDevice);
+ if (result != MA_SUCCESS) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device.", result);
+ }
+ }
-ma_bool32 ma_does_AudioObject_support_capture(ma_context* pContext, AudioObjectID deviceObjectID)
-{
- return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeInput);
+ return MA_SUCCESS;
}
-
-ma_result ma_get_AudioObject_stream_descriptions(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pDescriptionCount, AudioStreamRangedDescription** ppDescriptions) /* NOTE: Free the returned pointer with ma_free(). */
+static ma_result ma_device_stop__oss(ma_device* pDevice)
{
- AudioObjectPropertyAddress propAddress;
- UInt32 dataSize;
- OSStatus status;
- AudioStreamRangedDescription* pDescriptions;
+ MA_ASSERT(pDevice != NULL);
- ma_assert(pContext != NULL);
- ma_assert(pDescriptionCount != NULL);
- ma_assert(ppDescriptions != NULL);
-
/*
- TODO: Experiment with kAudioStreamPropertyAvailablePhysicalFormats instead of (or in addition to) kAudioStreamPropertyAvailableVirtualFormats. My
- MacBook Pro uses s24/32 format, however, which miniaudio does not currently support.
- */
- propAddress.mSelector = kAudioStreamPropertyAvailableVirtualFormats; /*kAudioStreamPropertyAvailablePhysicalFormats;*/
- propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
- propAddress.mElement = kAudioObjectPropertyElementMaster;
+ We want to use SNDCTL_DSP_HALT. From the documentation:
- status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
- }
+ In multithreaded applications SNDCTL_DSP_HALT (SNDCTL_DSP_RESET) must only be called by the thread
+ that actually reads/writes the audio device. It must not be called by some master thread to kill the
+ audio thread. The audio thread will not stop or get any kind of notification that the device was
+ stopped by the master thread. The device gets stopped but the next read or write call will silently
+ restart the device.
- pDescriptions = (AudioStreamRangedDescription*)ma_malloc(dataSize);
- if (pDescriptions == NULL) {
- return MA_OUT_OF_MEMORY;
+ This is actually safe in our case, because this function is only ever called from within our worker
+ thread anyway. Just keep this in mind, though...
+ */
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ int result = ioctl(pDevice->oss.fdCapture, SNDCTL_DSP_HALT, 0);
+ if (result == -1) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to stop device. SNDCTL_DSP_HALT failed.", ma_result_from_errno(errno));
+ }
}
-
- status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pDescriptions);
- if (status != noErr) {
- ma_free(pDescriptions);
- return ma_result_from_OSStatus(status);
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ int result = ioctl(pDevice->oss.fdPlayback, SNDCTL_DSP_HALT, 0);
+ if (result == -1) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to stop device. SNDCTL_DSP_HALT failed.", ma_result_from_errno(errno));
+ }
}
-
- *pDescriptionCount = dataSize / sizeof(*pDescriptions);
- *ppDescriptions = pDescriptions;
+
return MA_SUCCESS;
}
-
-ma_result ma_get_AudioObject_channel_layout(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, AudioChannelLayout** ppChannelLayout) /* NOTE: Free the returned pointer with ma_free(). */
+static ma_result ma_device_write__oss(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
{
- AudioObjectPropertyAddress propAddress;
- UInt32 dataSize;
- OSStatus status;
- AudioChannelLayout* pChannelLayout;
+ int resultOSS;
- ma_assert(pContext != NULL);
- ma_assert(ppChannelLayout != NULL);
-
- *ppChannelLayout = NULL; /* Safety. */
-
- propAddress.mSelector = kAudioDevicePropertyPreferredChannelLayout;
- propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
- propAddress.mElement = kAudioObjectPropertyElementMaster;
-
- status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = 0;
}
-
- pChannelLayout = (AudioChannelLayout*)ma_malloc(dataSize);
- if (pChannelLayout == NULL) {
- return MA_OUT_OF_MEMORY;
+
+ resultOSS = write(pDevice->oss.fdPlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
+ if (resultOSS < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to send data from the client to the device.", ma_result_from_errno(errno));
}
-
- status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pChannelLayout);
- if (status != noErr) {
- ma_free(pChannelLayout);
- return ma_result_from_OSStatus(status);
+
+ if (pFramesWritten != NULL) {
+ *pFramesWritten = (ma_uint32)resultOSS / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
}
- *ppChannelLayout = pChannelLayout;
return MA_SUCCESS;
}
-ma_result ma_get_AudioObject_channel_count(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pChannelCount)
+static ma_result ma_device_read__oss(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead)
{
- AudioChannelLayout* pChannelLayout;
- ma_result result;
+ int resultOSS;
- ma_assert(pContext != NULL);
- ma_assert(pChannelCount != NULL);
-
- *pChannelCount = 0; /* Safety. */
+ if (pFramesRead != NULL) {
+ *pFramesRead = 0;
+ }
- result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout);
- if (result != MA_SUCCESS) {
- return result;
+ resultOSS = read(pDevice->oss.fdCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+ if (resultOSS < 0) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to read data from the device to be sent to the client.", ma_result_from_errno(errno));
}
- if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) {
- *pChannelCount = pChannelLayout->mNumberChannelDescriptions;
- } else if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) {
- *pChannelCount = ma_count_set_bits(pChannelLayout->mChannelBitmap);
- } else {
- *pChannelCount = AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag);
+ if (pFramesRead != NULL) {
+ *pFramesRead = (ma_uint32)resultOSS / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
}
-
- ma_free(pChannelLayout);
+
return MA_SUCCESS;
}
-ma_result ma_get_AudioObject_channel_map(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_channel channelMap[MA_MAX_CHANNELS])
+static ma_result ma_device_main_loop__oss(ma_device* pDevice)
{
- AudioChannelLayout* pChannelLayout;
- ma_result result;
+ ma_result result = MA_SUCCESS;
+ ma_bool32 exitLoop = MA_FALSE;
- ma_assert(pContext != NULL);
-
- result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout);
- if (result != MA_SUCCESS) {
- return result; /* Rather than always failing here, would it be more robust to simply assume a default? */
- }
-
- result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, channelMap);
- if (result != MA_SUCCESS) {
- ma_free(pChannelLayout);
- return result;
- }
-
- ma_free(pChannelLayout);
- return result;
-}
+ /* No need to explicitly start the device like the other backends. */
-ma_result ma_get_AudioObject_sample_rates(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pSampleRateRangesCount, AudioValueRange** ppSampleRateRanges) /* NOTE: Free the returned pointer with ma_free(). */
-{
- AudioObjectPropertyAddress propAddress;
- UInt32 dataSize;
- OSStatus status;
- AudioValueRange* pSampleRateRanges;
+ while (ma_device__get_state(pDevice) == MA_STATE_STARTED && !exitLoop) {
+ switch (pDevice->type)
+ {
+ case ma_device_type_duplex:
+ {
+ /* The process is: device_read -> convert -> callback -> convert -> device_write */
+ ma_uint32 totalCapturedDeviceFramesProcessed = 0;
+ ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames);
+
+ while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) {
+ ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 capturedDeviceFramesRemaining;
+ ma_uint32 capturedDeviceFramesProcessed;
+ ma_uint32 capturedDeviceFramesToProcess;
+ ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed;
+ if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) {
+ capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames;
+ }
- ma_assert(pContext != NULL);
- ma_assert(pSampleRateRangesCount != NULL);
- ma_assert(ppSampleRateRanges != NULL);
-
- /* Safety. */
- *pSampleRateRangesCount = 0;
- *ppSampleRateRanges = NULL;
-
- propAddress.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
- propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
- propAddress.mElement = kAudioObjectPropertyElementMaster;
-
- status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
- }
-
- pSampleRateRanges = (AudioValueRange*)ma_malloc(dataSize);
- if (pSampleRateRanges == NULL) {
- return MA_OUT_OF_MEMORY;
- }
-
- status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pSampleRateRanges);
- if (status != noErr) {
- ma_free(pSampleRateRanges);
- return ma_result_from_OSStatus(status);
- }
-
- *pSampleRateRangesCount = dataSize / sizeof(*pSampleRateRanges);
- *ppSampleRateRanges = pSampleRateRanges;
- return MA_SUCCESS;
-}
+ result = ma_device_read__oss(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
-ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 sampleRateIn, ma_uint32* pSampleRateOut)
-{
- UInt32 sampleRateRangeCount;
- AudioValueRange* pSampleRateRanges;
- ma_result result;
+ capturedDeviceFramesRemaining = capturedDeviceFramesToProcess;
+ capturedDeviceFramesProcessed = 0;
- ma_assert(pContext != NULL);
- ma_assert(pSampleRateOut != NULL);
-
- *pSampleRateOut = 0; /* Safety. */
-
- result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges);
- if (result != MA_SUCCESS) {
- return result;
- }
-
- if (sampleRateRangeCount == 0) {
- ma_free(pSampleRateRanges);
- return MA_ERROR; /* Should never hit this case should we? */
- }
-
- if (sampleRateIn == 0) {
- /* Search in order of miniaudio's preferred priority. */
- UInt32 iMALSampleRate;
- for (iMALSampleRate = 0; iMALSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iMALSampleRate) {
- ma_uint32 malSampleRate = g_maStandardSampleRatePriorities[iMALSampleRate];
- UInt32 iCASampleRate;
- for (iCASampleRate = 0; iCASampleRate < sampleRateRangeCount; ++iCASampleRate) {
- AudioValueRange caSampleRate = pSampleRateRanges[iCASampleRate];
- if (caSampleRate.mMinimum <= malSampleRate && caSampleRate.mMaximum >= malSampleRate) {
- *pSampleRateOut = malSampleRate;
- ma_free(pSampleRateRanges);
- return MA_SUCCESS;
- }
- }
- }
-
- /*
- If we get here it means none of miniaudio's standard sample rates matched any of the supported sample rates from the device. In this
- case we just fall back to the first one reported by Core Audio.
- */
- ma_assert(sampleRateRangeCount > 0);
-
- *pSampleRateOut = pSampleRateRanges[0].mMinimum;
- ma_free(pSampleRateRanges);
- return MA_SUCCESS;
- } else {
- /* Find the closest match to this sample rate. */
- UInt32 currentAbsoluteDifference = INT32_MAX;
- UInt32 iCurrentClosestRange = (UInt32)-1;
- UInt32 iRange;
- for (iRange = 0; iRange < sampleRateRangeCount; ++iRange) {
- if (pSampleRateRanges[iRange].mMinimum <= sampleRateIn && pSampleRateRanges[iRange].mMaximum >= sampleRateIn) {
- *pSampleRateOut = sampleRateIn;
- ma_free(pSampleRateRanges);
- return MA_SUCCESS;
- } else {
- UInt32 absoluteDifference;
- if (pSampleRateRanges[iRange].mMinimum > sampleRateIn) {
- absoluteDifference = pSampleRateRanges[iRange].mMinimum - sampleRateIn;
- } else {
- absoluteDifference = sampleRateIn - pSampleRateRanges[iRange].mMaximum;
+ for (;;) {
+ ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+ ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+ ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames);
+ ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining;
+ ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+
+ /* Convert capture data from device format to client format. */
+ result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ /*
+                            If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small
+ which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE.
+ */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
+
+                            ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast. */
+
+ capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+ capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+
+ /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */
+ for (;;) {
+ ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration;
+ ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames;
+ result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+
+ result = ma_device_write__oss(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */
+ if (capturedClientFramesToProcessThisIteration == 0) {
+ break;
+ }
+ }
+
+ /* In case an error happened from ma_device_write__oss()... */
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+ }
+
+ totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed;
}
-
- if (currentAbsoluteDifference > absoluteDifference) {
- currentAbsoluteDifference = absoluteDifference;
- iCurrentClosestRange = iRange;
+ } break;
+
+ case ma_device_type_capture:
+ {
+ /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+ ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames;
+ ma_uint32 framesReadThisPeriod = 0;
+ while (framesReadThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToReadThisIteration = framesRemainingInPeriod;
+ if (framesToReadThisIteration > intermediaryBufferSizeInFrames) {
+ framesToReadThisIteration = intermediaryBufferSizeInFrames;
+ }
+
+ result = ma_device_read__oss(pDevice, intermediaryBuffer, framesToReadThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ ma_device__send_frames_to_client(pDevice, framesProcessed, intermediaryBuffer);
+
+ framesReadThisPeriod += framesProcessed;
}
- }
+ } break;
+
+ case ma_device_type_playback:
+ {
+ /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */
+ ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
+ ma_uint32 framesWrittenThisPeriod = 0;
+ while (framesWrittenThisPeriod < periodSizeInFrames) {
+ ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod;
+ ma_uint32 framesProcessed;
+ ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod;
+ if (framesToWriteThisIteration > intermediaryBufferSizeInFrames) {
+ framesToWriteThisIteration = intermediaryBufferSizeInFrames;
+ }
+
+ ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, intermediaryBuffer);
+
+ result = ma_device_write__oss(pDevice, intermediaryBuffer, framesToWriteThisIteration, &framesProcessed);
+ if (result != MA_SUCCESS) {
+ exitLoop = MA_TRUE;
+ break;
+ }
+
+ framesWrittenThisPeriod += framesProcessed;
+ }
+ } break;
+
+ /* To silence a warning. Will never hit this. */
+ case ma_device_type_loopback:
+ default: break;
}
-
- ma_assert(iCurrentClosestRange != (UInt32)-1);
-
- *pSampleRateOut = pSampleRateRanges[iCurrentClosestRange].mMinimum;
- ma_free(pSampleRateRanges);
- return MA_SUCCESS;
}
-
- /* Should never get here, but it would mean we weren't able to find any suitable sample rates. */
- /*ma_free(pSampleRateRanges);*/
- /*return MA_ERROR;*/
-}
-ma_result ma_get_AudioObject_closest_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 bufferSizeInFramesIn, ma_uint32* pBufferSizeInFramesOut)
-{
- AudioObjectPropertyAddress propAddress;
- AudioValueRange bufferSizeRange;
- UInt32 dataSize;
- OSStatus status;
+ /* Here is where the device is stopped. */
+ ma_device_stop__oss(pDevice);
- ma_assert(pContext != NULL);
- ma_assert(pBufferSizeInFramesOut != NULL);
-
- *pBufferSizeInFramesOut = 0; /* Safety. */
-
- propAddress.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
- propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
- propAddress.mElement = kAudioObjectPropertyElementMaster;
+ return result;
+}
- dataSize = sizeof(bufferSizeRange);
- status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &bufferSizeRange);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
- }
-
- /* This is just a clamp. */
- if (bufferSizeInFramesIn < bufferSizeRange.mMinimum) {
- *pBufferSizeInFramesOut = (ma_uint32)bufferSizeRange.mMinimum;
- } else if (bufferSizeInFramesIn > bufferSizeRange.mMaximum) {
- *pBufferSizeInFramesOut = (ma_uint32)bufferSizeRange.mMaximum;
- } else {
- *pBufferSizeInFramesOut = bufferSizeInFramesIn;
- }
+static ma_result ma_context_uninit__oss(ma_context* pContext)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_oss);
+ (void)pContext;
return MA_SUCCESS;
}
-ma_result ma_set_AudioObject_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pBufferSizeInOut)
+static ma_result ma_context_init__oss(const ma_context_config* pConfig, ma_context* pContext)
{
- ma_result result;
- ma_uint32 chosenBufferSizeInFrames;
- AudioObjectPropertyAddress propAddress;
- UInt32 dataSize;
- OSStatus status;
+ int fd;
+ int ossVersion;
+ int result;
- ma_assert(pContext != NULL);
+ MA_ASSERT(pContext != NULL);
- result = ma_get_AudioObject_closest_buffer_size_in_frames(pContext, deviceObjectID, deviceType, *pBufferSizeInOut, &chosenBufferSizeInFrames);
- if (result != MA_SUCCESS) {
- return result;
+ (void)pConfig;
+
+ /* Try opening a temporary device first so we can get version information. This is closed at the end. */
+ fd = ma_open_temp_device__oss();
+ if (fd == -1) {
+        return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open temporary device for retrieving system properties.", MA_NO_BACKEND); /* Looks like OSS isn't installed, or there are no available devices. */
}
- /* Try setting the size of the buffer... If this fails we just use whatever is currently set. */
- propAddress.mSelector = kAudioDevicePropertyBufferFrameSize;
- propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
- propAddress.mElement = kAudioObjectPropertyElementMaster;
-
- ((ma_AudioObjectSetPropertyData_proc)pContext->coreaudio.AudioObjectSetPropertyData)(deviceObjectID, &propAddress, 0, NULL, sizeof(chosenBufferSizeInFrames), &chosenBufferSizeInFrames);
-
- /* Get the actual size of the buffer. */
- dataSize = sizeof(*pBufferSizeInOut);
- status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &chosenBufferSizeInFrames);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
+ /* Grab the OSS version. */
+ ossVersion = 0;
+ result = ioctl(fd, OSS_GETVERSION, &ossVersion);
+ if (result == -1) {
+ close(fd);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve OSS version.", MA_NO_BACKEND);
}
-
- *pBufferSizeInOut = chosenBufferSizeInFrames;
+
+ pContext->oss.versionMajor = ((ossVersion & 0xFF0000) >> 16);
+ pContext->oss.versionMinor = ((ossVersion & 0x00FF00) >> 8);
+
+ pContext->onUninit = ma_context_uninit__oss;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__oss;
+ pContext->onEnumDevices = ma_context_enumerate_devices__oss;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__oss;
+ pContext->onDeviceInit = ma_device_init__oss;
+ pContext->onDeviceUninit = ma_device_uninit__oss;
+ pContext->onDeviceStart = NULL; /* Not required for synchronous backends. */
+ pContext->onDeviceStop = NULL; /* Not required for synchronous backends. */
+ pContext->onDeviceMainLoop = ma_device_main_loop__oss;
+
+ close(fd);
return MA_SUCCESS;
}
+#endif /* OSS */
-ma_result ma_find_AudioObjectID(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, AudioObjectID* pDeviceObjectID)
-{
- ma_assert(pContext != NULL);
- ma_assert(pDeviceObjectID != NULL);
+/******************************************************************************
- /* Safety. */
- *pDeviceObjectID = 0;
-
- if (pDeviceID == NULL) {
- /* Default device. */
- AudioObjectPropertyAddress propAddressDefaultDevice;
- UInt32 defaultDeviceObjectIDSize = sizeof(AudioObjectID);
- AudioObjectID defaultDeviceObjectID;
- OSStatus status;
+AAudio Backend
- propAddressDefaultDevice.mScope = kAudioObjectPropertyScopeGlobal;
- propAddressDefaultDevice.mElement = kAudioObjectPropertyElementMaster;
- if (deviceType == ma_device_type_playback) {
- propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
- } else {
- propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultInputDevice;
- }
-
- defaultDeviceObjectIDSize = sizeof(AudioObjectID);
- status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDefaultDevice, 0, NULL, &defaultDeviceObjectIDSize, &defaultDeviceObjectID);
- if (status == noErr) {
- *pDeviceObjectID = defaultDeviceObjectID;
- return MA_SUCCESS;
- }
- } else {
- /* Explicit device. */
- UInt32 deviceCount;
- AudioObjectID* pDeviceObjectIDs;
- ma_result result;
- UInt32 iDevice;
+******************************************************************************/
+#ifdef MA_HAS_AAUDIO
+/*#include <aaudio/AAudio.h>*/
- result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs);
- if (result != MA_SUCCESS) {
- return result;
- }
-
- for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
- AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice];
-
- char uid[256];
- if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(uid), uid) != MA_SUCCESS) {
- continue;
- }
-
- if (deviceType == ma_device_type_playback) {
- if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) {
- if (strcmp(uid, pDeviceID->coreaudio) == 0) {
- *pDeviceObjectID = deviceObjectID;
- return MA_SUCCESS;
- }
- }
- } else {
- if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) {
- if (strcmp(uid, pDeviceID->coreaudio) == 0) {
- *pDeviceObjectID = deviceObjectID;
- return MA_SUCCESS;
- }
- }
- }
- }
+#define MA_AAUDIO_UNSPECIFIED 0
+
+typedef int32_t ma_aaudio_result_t;
+typedef int32_t ma_aaudio_direction_t;
+typedef int32_t ma_aaudio_sharing_mode_t;
+typedef int32_t ma_aaudio_format_t;
+typedef int32_t ma_aaudio_stream_state_t;
+typedef int32_t ma_aaudio_performance_mode_t;
+typedef int32_t ma_aaudio_data_callback_result_t;
+
+/* Result codes. miniaudio only cares about the success code. */
+#define MA_AAUDIO_OK 0
+
+/* Directions. */
+#define MA_AAUDIO_DIRECTION_OUTPUT 0
+#define MA_AAUDIO_DIRECTION_INPUT 1
+
+/* Sharing modes. */
+#define MA_AAUDIO_SHARING_MODE_EXCLUSIVE 0
+#define MA_AAUDIO_SHARING_MODE_SHARED 1
+
+/* Formats. */
+#define MA_AAUDIO_FORMAT_PCM_I16 1
+#define MA_AAUDIO_FORMAT_PCM_FLOAT 2
+
+/* Stream states. */
+#define MA_AAUDIO_STREAM_STATE_UNINITIALIZED 0
+#define MA_AAUDIO_STREAM_STATE_UNKNOWN 1
+#define MA_AAUDIO_STREAM_STATE_OPEN 2
+#define MA_AAUDIO_STREAM_STATE_STARTING 3
+#define MA_AAUDIO_STREAM_STATE_STARTED 4
+#define MA_AAUDIO_STREAM_STATE_PAUSING 5
+#define MA_AAUDIO_STREAM_STATE_PAUSED 6
+#define MA_AAUDIO_STREAM_STATE_FLUSHING 7
+#define MA_AAUDIO_STREAM_STATE_FLUSHED 8
+#define MA_AAUDIO_STREAM_STATE_STOPPING 9
+#define MA_AAUDIO_STREAM_STATE_STOPPED 10
+#define MA_AAUDIO_STREAM_STATE_CLOSING 11
+#define MA_AAUDIO_STREAM_STATE_CLOSED 12
+#define MA_AAUDIO_STREAM_STATE_DISCONNECTED 13
+
+/* Performance modes. */
+#define MA_AAUDIO_PERFORMANCE_MODE_NONE 10
+#define MA_AAUDIO_PERFORMANCE_MODE_POWER_SAVING 11
+#define MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY 12
+
+/* Callback results. */
+#define MA_AAUDIO_CALLBACK_RESULT_CONTINUE 0
+#define MA_AAUDIO_CALLBACK_RESULT_STOP 1
+
+/* Objects. */
+typedef struct ma_AAudioStreamBuilder_t* ma_AAudioStreamBuilder;
+typedef struct ma_AAudioStream_t* ma_AAudioStream;
+
+typedef ma_aaudio_data_callback_result_t (* ma_AAudioStream_dataCallback) (ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t numFrames);
+typedef void (* ma_AAudioStream_errorCallback)(ma_AAudioStream *pStream, void *pUserData, ma_aaudio_result_t error);
+
+typedef ma_aaudio_result_t (* MA_PFN_AAudio_createStreamBuilder) (ma_AAudioStreamBuilder** ppBuilder);
+typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_delete) (ma_AAudioStreamBuilder* pBuilder);
+typedef void (* MA_PFN_AAudioStreamBuilder_setDeviceId) (ma_AAudioStreamBuilder* pBuilder, int32_t deviceId);
+typedef void (* MA_PFN_AAudioStreamBuilder_setDirection) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_direction_t direction);
+typedef void (* MA_PFN_AAudioStreamBuilder_setSharingMode) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_sharing_mode_t sharingMode);
+typedef void (* MA_PFN_AAudioStreamBuilder_setFormat) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_format_t format);
+typedef void (* MA_PFN_AAudioStreamBuilder_setChannelCount) (ma_AAudioStreamBuilder* pBuilder, int32_t channelCount);
+typedef void (* MA_PFN_AAudioStreamBuilder_setSampleRate) (ma_AAudioStreamBuilder* pBuilder, int32_t sampleRate);
+typedef void (* MA_PFN_AAudioStreamBuilder_setBufferCapacityInFrames)(ma_AAudioStreamBuilder* pBuilder, int32_t numFrames);
+typedef void (* MA_PFN_AAudioStreamBuilder_setFramesPerDataCallback) (ma_AAudioStreamBuilder* pBuilder, int32_t numFrames);
+typedef void (* MA_PFN_AAudioStreamBuilder_setDataCallback) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream_dataCallback callback, void* pUserData);
+typedef void (* MA_PFN_AAudioStreamBuilder_setErrorCallback) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream_errorCallback callback, void* pUserData);
+typedef void (* MA_PFN_AAudioStreamBuilder_setPerformanceMode) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_performance_mode_t mode);
+typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_openStream) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream** ppStream);
+typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_close) (ma_AAudioStream* pStream);
+typedef ma_aaudio_stream_state_t (* MA_PFN_AAudioStream_getState) (ma_AAudioStream* pStream);
+typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_waitForStateChange) (ma_AAudioStream* pStream, ma_aaudio_stream_state_t inputState, ma_aaudio_stream_state_t* pNextState, int64_t timeoutInNanoseconds);
+typedef ma_aaudio_format_t (* MA_PFN_AAudioStream_getFormat) (ma_AAudioStream* pStream);
+typedef int32_t (* MA_PFN_AAudioStream_getChannelCount) (ma_AAudioStream* pStream);
+typedef int32_t (* MA_PFN_AAudioStream_getSampleRate) (ma_AAudioStream* pStream);
+typedef int32_t (* MA_PFN_AAudioStream_getBufferCapacityInFrames) (ma_AAudioStream* pStream);
+typedef int32_t (* MA_PFN_AAudioStream_getFramesPerDataCallback) (ma_AAudioStream* pStream);
+typedef int32_t (* MA_PFN_AAudioStream_getFramesPerBurst) (ma_AAudioStream* pStream);
+typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStart) (ma_AAudioStream* pStream);
+typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStop) (ma_AAudioStream* pStream);
+
+static ma_result ma_result_from_aaudio(ma_aaudio_result_t resultAA)
+{
+ switch (resultAA)
+ {
+ case MA_AAUDIO_OK: return MA_SUCCESS;
+ default: break;
}
-
- /* If we get here it means we couldn't find the device. */
- return MA_NO_DEVICE;
-}
+ return MA_ERROR;
+}
-ma_result ma_find_best_format__coreaudio(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_bool32 usingDefaultFormat, ma_bool32 usingDefaultChannels, ma_bool32 usingDefaultSampleRate, AudioStreamBasicDescription* pFormat)
+static void ma_stream_error_callback__aaudio(ma_AAudioStream* pStream, void* pUserData, ma_aaudio_result_t error)
{
- UInt32 deviceFormatDescriptionCount;
- AudioStreamRangedDescription* pDeviceFormatDescriptions;
- ma_result result;
- ma_uint32 desiredSampleRate;
- ma_uint32 desiredChannelCount;
- ma_format desiredFormat;
- AudioStreamBasicDescription bestDeviceFormatSoFar;
- ma_bool32 hasSupportedFormat;
- UInt32 iFormat;
+ ma_device* pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
- result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &deviceFormatDescriptionCount, &pDeviceFormatDescriptions);
- if (result != MA_SUCCESS) {
- return result;
- }
-
- desiredSampleRate = sampleRate;
- if (usingDefaultSampleRate) {
- /*
- When using the device's default sample rate, we get the highest priority standard rate supported by the device. Otherwise
- we just use the pre-set rate.
- */
- ma_uint32 iStandardRate;
- for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) {
- ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate];
- ma_bool32 foundRate = MA_FALSE;
- UInt32 iDeviceRate;
+ (void)error;
- for (iDeviceRate = 0; iDeviceRate < deviceFormatDescriptionCount; ++iDeviceRate) {
- ma_uint32 deviceRate = (ma_uint32)pDeviceFormatDescriptions[iDeviceRate].mFormat.mSampleRate;
-
- if (deviceRate == standardRate) {
- desiredSampleRate = standardRate;
- foundRate = MA_TRUE;
- break;
- }
- }
-
- if (foundRate) {
- break;
- }
- }
+#if defined(MA_DEBUG_OUTPUT)
+ printf("[AAudio] ERROR CALLBACK: error=%d, AAudioStream_getState()=%d\n", error, ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream));
+#endif
+
+ /*
+ From the documentation for AAudio, when a device is disconnected all we can do is stop it. However, we cannot stop it from the callback - we need
+ to do it from another thread. Therefore we are going to use an event thread for the AAudio backend to do this cleanly and safely.
+ */
+ if (((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream) == MA_AAUDIO_STREAM_STATE_DISCONNECTED) {
+#if defined(MA_DEBUG_OUTPUT)
+ printf("[AAudio] Device Disconnected.\n");
+#endif
}
-
- desiredChannelCount = channels;
- if (usingDefaultChannels) {
- ma_get_AudioObject_channel_count(pContext, deviceObjectID, deviceType, &desiredChannelCount); /* <-- Not critical if this fails. */
+}
+
+static ma_aaudio_data_callback_result_t ma_stream_data_callback_capture__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount)
+{
+ ma_device* pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_device__handle_duplex_callback_capture(pDevice, frameCount, pAudioData, &pDevice->aaudio.duplexRB);
+ } else {
+ ma_device__send_frames_to_client(pDevice, frameCount, pAudioData); /* Send directly to the client. */
}
-
- desiredFormat = format;
- if (usingDefaultFormat) {
- desiredFormat = g_maFormatPriorities[0];
+
+ (void)pStream;
+ return MA_AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+static ma_aaudio_data_callback_result_t ma_stream_data_callback_playback__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount)
+{
+ ma_device* pDevice = (ma_device*)pUserData;
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_device__handle_duplex_callback_playback(pDevice, frameCount, pAudioData, &pDevice->aaudio.duplexRB);
+ } else {
+ ma_device__read_frames_from_client(pDevice, frameCount, pAudioData); /* Read directly from the client. */
}
-
- /*
- If we get here it means we don't have an exact match to what the client is asking for. We'll need to find the closest one. The next
- loop will check for formats that have the same sample rate to what we're asking for. If there is, we prefer that one in all cases.
- */
- ma_zero_object(&bestDeviceFormatSoFar);
-
- hasSupportedFormat = MA_FALSE;
- for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) {
- ma_format format;
- ma_result formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &format);
- if (formatResult == MA_SUCCESS && format != ma_format_unknown) {
- hasSupportedFormat = MA_TRUE;
- bestDeviceFormatSoFar = pDeviceFormatDescriptions[iFormat].mFormat;
- break;
- }
+
+ (void)pStream;
+ return MA_AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+static ma_result ma_open_stream__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, const ma_device_config* pConfig, const ma_device* pDevice, ma_AAudioStream** ppStream)
+{
+ ma_AAudioStreamBuilder* pBuilder;
+ ma_aaudio_result_t resultAA;
+
+ MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should not be called for a full-duplex device type. */
+
+ *ppStream = NULL;
+
+ resultAA = ((MA_PFN_AAudio_createStreamBuilder)pContext->aaudio.AAudio_createStreamBuilder)(&pBuilder);
+ if (resultAA != MA_AAUDIO_OK) {
+ return ma_result_from_aaudio(resultAA);
}
-
- if (!hasSupportedFormat) {
- return MA_FORMAT_NOT_SUPPORTED;
+
+ if (pDeviceID != NULL) {
+ ((MA_PFN_AAudioStreamBuilder_setDeviceId)pContext->aaudio.AAudioStreamBuilder_setDeviceId)(pBuilder, pDeviceID->aaudio);
}
-
-
- for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) {
- AudioStreamBasicDescription thisDeviceFormat = pDeviceFormatDescriptions[iFormat].mFormat;
- ma_format thisSampleFormat;
- ma_result formatResult;
- ma_format bestSampleFormatSoFar;
- /* If the format is not supported by miniaudio we need to skip this one entirely. */
- formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &thisSampleFormat);
- if (formatResult != MA_SUCCESS || thisSampleFormat == ma_format_unknown) {
- continue; /* The format is not supported by miniaudio. Skip. */
- }
-
- ma_format_from_AudioStreamBasicDescription(&bestDeviceFormatSoFar, &bestSampleFormatSoFar);
-
- /* Getting here means the format is supported by miniaudio which makes this format a candidate. */
- if (thisDeviceFormat.mSampleRate != desiredSampleRate) {
- /*
- The sample rate does not match, but this format could still be usable, although it's a very low priority. If the best format
- so far has an equal sample rate we can just ignore this one.
- */
- if (bestDeviceFormatSoFar.mSampleRate == desiredSampleRate) {
- continue; /* The best sample rate so far has the same sample rate as what we requested which means it's still the best so far. Skip this format. */
- } else {
- /* In this case, neither the best format so far nor this one have the same sample rate. Check the channel count next. */
- if (thisDeviceFormat.mChannelsPerFrame != desiredChannelCount) {
- /* This format has a different sample rate _and_ a different channel count. */
- if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) {
- continue; /* No change to the best format. */
- } else {
- /*
- Both this format and the best so far have different sample rates and different channel counts. Whichever has the
- best format is the new best.
- */
- if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
- bestDeviceFormatSoFar = thisDeviceFormat;
- continue;
- } else {
- continue; /* No change to the best format. */
- }
- }
- } else {
- /* This format has a different sample rate but the desired channel count. */
- if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) {
- /* Both this format and the best so far have the desired channel count. Whichever has the best format is the new best. */
- if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
- bestDeviceFormatSoFar = thisDeviceFormat;
- continue;
- } else {
- continue; /* No change to the best format for now. */
- }
- } else {
- /* This format has the desired channel count, but the best so far does not. We have a new best. */
- bestDeviceFormatSoFar = thisDeviceFormat;
- continue;
- }
- }
- }
- } else {
- /*
- The sample rates match which makes this format a very high priority contender. If the best format so far has a different
- sample rate it needs to be replaced with this one.
- */
- if (bestDeviceFormatSoFar.mSampleRate != desiredSampleRate) {
- bestDeviceFormatSoFar = thisDeviceFormat;
- continue;
- } else {
- /* In this case both this format and the best format so far have the same sample rate. Check the channel count next. */
- if (thisDeviceFormat.mChannelsPerFrame == desiredChannelCount) {
- /*
- In this case this format has the same channel count as what the client is requesting. If the best format so far has
- a different count, this one becomes the new best.
- */
- if (bestDeviceFormatSoFar.mChannelsPerFrame != desiredChannelCount) {
- bestDeviceFormatSoFar = thisDeviceFormat;
- continue;
- } else {
- /* In this case both this format and the best so far have the ideal sample rate and channel count. Check the format. */
- if (thisSampleFormat == desiredFormat) {
- bestDeviceFormatSoFar = thisDeviceFormat;
- break; /* Found the exact match. */
- } else {
- /* The formats are different. The new best format is the one with the highest priority format according to miniaudio. */
- if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
- bestDeviceFormatSoFar = thisDeviceFormat;
- continue;
- } else {
- continue; /* No change to the best format for now. */
- }
- }
- }
- } else {
- /*
- In this case the channel count is different to what the client has requested. If the best so far has the same channel
- count as the requested count then it remains the best.
- */
- if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) {
- continue;
- } else {
- /*
- This is the case where both have the same sample rate (good) but different channel counts. Right now both have about
- the same priority, but we need to compare the format now.
- */
- if (thisSampleFormat == bestSampleFormatSoFar) {
- if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) {
- bestDeviceFormatSoFar = thisDeviceFormat;
- continue;
- } else {
- continue; /* No change to the best format for now. */
- }
- }
- }
- }
+ ((MA_PFN_AAudioStreamBuilder_setDirection)pContext->aaudio.AAudioStreamBuilder_setDirection)(pBuilder, (deviceType == ma_device_type_playback) ? MA_AAUDIO_DIRECTION_OUTPUT : MA_AAUDIO_DIRECTION_INPUT);
+ ((MA_PFN_AAudioStreamBuilder_setSharingMode)pContext->aaudio.AAudioStreamBuilder_setSharingMode)(pBuilder, (shareMode == ma_share_mode_shared) ? MA_AAUDIO_SHARING_MODE_SHARED : MA_AAUDIO_SHARING_MODE_EXCLUSIVE);
+
+ if (pConfig != NULL) {
+ ma_uint32 bufferCapacityInFrames;
+
+ if (pDevice == NULL || !pDevice->usingDefaultSampleRate) {
+ ((MA_PFN_AAudioStreamBuilder_setSampleRate)pContext->aaudio.AAudioStreamBuilder_setSampleRate)(pBuilder, pConfig->sampleRate);
+ }
+
+ if (deviceType == ma_device_type_capture) {
+ if (pDevice == NULL || !pDevice->capture.usingDefaultChannels) {
+ ((MA_PFN_AAudioStreamBuilder_setChannelCount)pContext->aaudio.AAudioStreamBuilder_setChannelCount)(pBuilder, pConfig->capture.channels);
+ }
+ if (pDevice == NULL || !pDevice->capture.usingDefaultFormat) {
+ ((MA_PFN_AAudioStreamBuilder_setFormat)pContext->aaudio.AAudioStreamBuilder_setFormat)(pBuilder, (pConfig->capture.format == ma_format_s16) ? MA_AAUDIO_FORMAT_PCM_I16 : MA_AAUDIO_FORMAT_PCM_FLOAT);
+ }
+ } else {
+ if (pDevice == NULL || !pDevice->playback.usingDefaultChannels) {
+ ((MA_PFN_AAudioStreamBuilder_setChannelCount)pContext->aaudio.AAudioStreamBuilder_setChannelCount)(pBuilder, pConfig->playback.channels);
}
+ if (pDevice == NULL || !pDevice->playback.usingDefaultFormat) {
+ ((MA_PFN_AAudioStreamBuilder_setFormat)pContext->aaudio.AAudioStreamBuilder_setFormat)(pBuilder, (pConfig->playback.format == ma_format_s16) ? MA_AAUDIO_FORMAT_PCM_I16 : MA_AAUDIO_FORMAT_PCM_FLOAT);
+ }
+ }
+
+ bufferCapacityInFrames = pConfig->periodSizeInFrames * pConfig->periods;
+ if (bufferCapacityInFrames == 0) {
+ bufferCapacityInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pConfig->sampleRate) * pConfig->periods;
+ }
+
+ ((MA_PFN_AAudioStreamBuilder_setBufferCapacityInFrames)pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames)(pBuilder, bufferCapacityInFrames);
+ ((MA_PFN_AAudioStreamBuilder_setFramesPerDataCallback)pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback)(pBuilder, bufferCapacityInFrames / pConfig->periods);
+
+ if (deviceType == ma_device_type_capture) {
+ ((MA_PFN_AAudioStreamBuilder_setDataCallback)pContext->aaudio.AAudioStreamBuilder_setDataCallback)(pBuilder, ma_stream_data_callback_capture__aaudio, (void*)pDevice);
+ } else {
+ ((MA_PFN_AAudioStreamBuilder_setDataCallback)pContext->aaudio.AAudioStreamBuilder_setDataCallback)(pBuilder, ma_stream_data_callback_playback__aaudio, (void*)pDevice);
}
+
+ /* Not sure how this affects things, but since there's a mapping between miniaudio's performance profiles and AAudio's performance modes, let's go ahead and set it. */
+ ((MA_PFN_AAudioStreamBuilder_setPerformanceMode)pContext->aaudio.AAudioStreamBuilder_setPerformanceMode)(pBuilder, (pConfig->performanceProfile == ma_performance_profile_low_latency) ? MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY : MA_AAUDIO_PERFORMANCE_MODE_NONE);
}
-
- *pFormat = bestDeviceFormatSoFar;
+
+ ((MA_PFN_AAudioStreamBuilder_setErrorCallback)pContext->aaudio.AAudioStreamBuilder_setErrorCallback)(pBuilder, ma_stream_error_callback__aaudio, (void*)pDevice);
+
+ resultAA = ((MA_PFN_AAudioStreamBuilder_openStream)pContext->aaudio.AAudioStreamBuilder_openStream)(pBuilder, ppStream);
+ if (resultAA != MA_AAUDIO_OK) {
+ *ppStream = NULL;
+ ((MA_PFN_AAudioStreamBuilder_delete)pContext->aaudio.AAudioStreamBuilder_delete)(pBuilder);
+ return ma_result_from_aaudio(resultAA);
+ }
+
+ ((MA_PFN_AAudioStreamBuilder_delete)pContext->aaudio.AAudioStreamBuilder_delete)(pBuilder);
return MA_SUCCESS;
}
-#endif
-ma_result ma_get_AudioUnit_channel_map(ma_context* pContext, AudioUnit audioUnit, ma_device_type deviceType, ma_channel channelMap[MA_MAX_CHANNELS])
+static ma_result ma_close_stream__aaudio(ma_context* pContext, ma_AAudioStream* pStream)
{
- AudioUnitScope deviceScope;
- AudioUnitElement deviceBus;
- UInt32 channelLayoutSize;
- OSStatus status;
- AudioChannelLayout* pChannelLayout;
+ return ma_result_from_aaudio(((MA_PFN_AAudioStream_close)pContext->aaudio.AAudioStream_close)(pStream));
+}
+
+static ma_bool32 ma_has_default_device__aaudio(ma_context* pContext, ma_device_type deviceType)
+{
+ /* The only way to know this is to try creating a stream. */
+ ma_AAudioStream* pStream;
+ ma_result result = ma_open_stream__aaudio(pContext, deviceType, NULL, ma_share_mode_shared, NULL, NULL, &pStream);
+ if (result != MA_SUCCESS) {
+ return MA_FALSE;
+ }
+
+ ma_close_stream__aaudio(pContext, pStream);
+ return MA_TRUE;
+}
+
+static ma_result ma_wait_for_simple_state_transition__aaudio(ma_context* pContext, ma_AAudioStream* pStream, ma_aaudio_stream_state_t oldState, ma_aaudio_stream_state_t newState)
+{
+ ma_aaudio_stream_state_t actualNewState;
+ ma_aaudio_result_t resultAA = ((MA_PFN_AAudioStream_waitForStateChange)pContext->aaudio.AAudioStream_waitForStateChange)(pStream, oldState, &actualNewState, 5000000000); /* 5 second timeout. */
+ if (resultAA != MA_AAUDIO_OK) {
+ return ma_result_from_aaudio(resultAA);
+ }
+
+ if (newState != actualNewState) {
+ return MA_ERROR; /* Failed to transition into the expected state. */
+ }
+
+ return MA_SUCCESS;
+}
+
+
+static ma_bool32 ma_context_is_device_id_equal__aaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext;
+
+ return pID0->aaudio == pID1->aaudio;
+}
+
+static ma_result ma_context_enumerate_devices__aaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ ma_bool32 cbResult = MA_TRUE;
+
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
+
+ /* Unfortunately AAudio does not have an enumeration API. Therefore I'm only going to report default devices, but only if it can instantiate a stream. */
+
+ /* Playback. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED;
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+
+ if (ma_has_default_device__aaudio(pContext, ma_device_type_playback)) {
+ cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ }
+ }
+
+ /* Capture. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED;
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+
+ if (ma_has_default_device__aaudio(pContext, ma_device_type_capture)) {
+ cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+ }
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_get_device_info__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+{
+ ma_AAudioStream* pStream;
ma_result result;
- ma_assert(pContext != NULL);
+ MA_ASSERT(pContext != NULL);
+
+ /* No exclusive mode with AAudio. */
+ if (shareMode == ma_share_mode_exclusive) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
+ }
+
+ /* ID */
+ if (pDeviceID != NULL) {
+ pDeviceInfo->id.aaudio = pDeviceID->aaudio;
+ } else {
+ pDeviceInfo->id.aaudio = MA_AAUDIO_UNSPECIFIED;
+ }
+ /* Name */
if (deviceType == ma_device_type_playback) {
- deviceScope = kAudioUnitScope_Output;
- deviceBus = MA_COREAUDIO_OUTPUT_BUS;
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
} else {
- deviceScope = kAudioUnitScope_Input;
- deviceBus = MA_COREAUDIO_INPUT_BUS;
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
}
-
- status = ((ma_AudioUnitGetPropertyInfo_proc)pContext->coreaudio.AudioUnitGetPropertyInfo)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, &channelLayoutSize, NULL);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
+
+
+ /* We'll need to open the device to get accurate sample rate and channel count information. */
+ result = ma_open_stream__aaudio(pContext, deviceType, pDeviceID, shareMode, NULL, NULL, &pStream);
+ if (result != MA_SUCCESS) {
+ return result;
}
-
- pChannelLayout = (AudioChannelLayout*)ma_malloc(channelLayoutSize);
- if (pChannelLayout == NULL) {
- return MA_OUT_OF_MEMORY;
+
+ pDeviceInfo->minChannels = ((MA_PFN_AAudioStream_getChannelCount)pContext->aaudio.AAudioStream_getChannelCount)(pStream);
+ pDeviceInfo->maxChannels = pDeviceInfo->minChannels;
+ pDeviceInfo->minSampleRate = ((MA_PFN_AAudioStream_getSampleRate)pContext->aaudio.AAudioStream_getSampleRate)(pStream);
+ pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate;
+
+ ma_close_stream__aaudio(pContext, pStream);
+ pStream = NULL;
+
+
+ /* AAudio supports s16 and f32. */
+ pDeviceInfo->formatCount = 2;
+ pDeviceInfo->formats[0] = ma_format_s16;
+ pDeviceInfo->formats[1] = ma_format_f32;
+
+ return MA_SUCCESS;
+}
+
+
+static void ma_device_uninit__aaudio(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+ pDevice->aaudio.pStreamCapture = NULL;
}
-
- status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, pChannelLayout, &channelLayoutSize);
- if (status != noErr) {
- ma_free(pChannelLayout);
- return ma_result_from_OSStatus(status);
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+ pDevice->aaudio.pStreamPlayback = NULL;
}
-
- result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, channelMap);
- if (result != MA_SUCCESS) {
- ma_free(pChannelLayout);
- return result;
+
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_pcm_rb_uninit(&pDevice->aaudio.duplexRB);
+ }
+}
+
+static ma_result ma_device_init__aaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+{
+ ma_result result;
+
+ MA_ASSERT(pDevice != NULL);
+
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
+ }
+
+ /* No exclusive mode with AAudio. */
+ if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
+ ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
+ }
+
+ /* We first need to try opening the stream. */
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ int32_t bufferCapacityInFrames;
+ int32_t framesPerDataCallback;
+
+ result = ma_open_stream__aaudio(pContext, ma_device_type_capture, pConfig->capture.pDeviceID, pConfig->capture.shareMode, pConfig, pDevice, (ma_AAudioStream**)&pDevice->aaudio.pStreamCapture);
+ if (result != MA_SUCCESS) {
+ return result; /* Failed to open the AAudio stream. */
+ }
+
+ pDevice->capture.internalFormat = (((MA_PFN_AAudioStream_getFormat)pContext->aaudio.AAudioStream_getFormat)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture) == MA_AAUDIO_FORMAT_PCM_I16) ? ma_format_s16 : ma_format_f32;
+ pDevice->capture.internalChannels = ((MA_PFN_AAudioStream_getChannelCount)pContext->aaudio.AAudioStream_getChannelCount)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+ pDevice->capture.internalSampleRate = ((MA_PFN_AAudioStream_getSampleRate)pContext->aaudio.AAudioStream_getSampleRate)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+ ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); /* <-- Cannot find info on channel order, so assuming a default. */
+
+ bufferCapacityInFrames = ((MA_PFN_AAudioStream_getBufferCapacityInFrames)pContext->aaudio.AAudioStream_getBufferCapacityInFrames)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+ framesPerDataCallback = ((MA_PFN_AAudioStream_getFramesPerDataCallback)pContext->aaudio.AAudioStream_getFramesPerDataCallback)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+
+ if (framesPerDataCallback > 0) {
+ pDevice->capture.internalPeriodSizeInFrames = framesPerDataCallback;
+ pDevice->capture.internalPeriods = bufferCapacityInFrames / framesPerDataCallback;
+ } else {
+ pDevice->capture.internalPeriodSizeInFrames = bufferCapacityInFrames;
+ pDevice->capture.internalPeriods = 1;
+ }
+ }
+
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ int32_t bufferCapacityInFrames;
+ int32_t framesPerDataCallback;
+
+ result = ma_open_stream__aaudio(pContext, ma_device_type_playback, pConfig->playback.pDeviceID, pConfig->playback.shareMode, pConfig, pDevice, (ma_AAudioStream**)&pDevice->aaudio.pStreamPlayback);
+ if (result != MA_SUCCESS) {
+ return result; /* Failed to open the AAudio stream. */
+ }
+
+ pDevice->playback.internalFormat = (((MA_PFN_AAudioStream_getFormat)pContext->aaudio.AAudioStream_getFormat)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback) == MA_AAUDIO_FORMAT_PCM_I16) ? ma_format_s16 : ma_format_f32;
+ pDevice->playback.internalChannels = ((MA_PFN_AAudioStream_getChannelCount)pContext->aaudio.AAudioStream_getChannelCount)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+ pDevice->playback.internalSampleRate = ((MA_PFN_AAudioStream_getSampleRate)pContext->aaudio.AAudioStream_getSampleRate)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+ ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); /* <-- Cannot find info on channel order, so assuming a default. */
+
+ bufferCapacityInFrames = ((MA_PFN_AAudioStream_getBufferCapacityInFrames)pContext->aaudio.AAudioStream_getBufferCapacityInFrames)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+ framesPerDataCallback = ((MA_PFN_AAudioStream_getFramesPerDataCallback)pContext->aaudio.AAudioStream_getFramesPerDataCallback)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+
+ if (framesPerDataCallback > 0) {
+ pDevice->playback.internalPeriodSizeInFrames = framesPerDataCallback;
+ pDevice->playback.internalPeriods = bufferCapacityInFrames / framesPerDataCallback;
+ } else {
+ pDevice->playback.internalPeriodSizeInFrames = bufferCapacityInFrames;
+ pDevice->playback.internalPeriods = 1;
+ }
+ }
+
+ if (pConfig->deviceType == ma_device_type_duplex) {
+ ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames) * pDevice->capture.internalPeriods;
+ ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->aaudio.duplexRB);
+ if (result != MA_SUCCESS) {
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+ }
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+ }
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[AAudio] Failed to initialize ring buffer.", result);
+ }
+
+ /* We need a period to act as a buffer for cases where the playback and capture device's end up desyncing. */
+ {
+ ma_uint32 marginSizeInFrames = rbSizeInFrames / pDevice->capture.internalPeriods;
+ void* pMarginData;
+ ma_pcm_rb_acquire_write(&pDevice->aaudio.duplexRB, &marginSizeInFrames, &pMarginData);
+ {
+ MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels));
+ }
+ ma_pcm_rb_commit_write(&pDevice->aaudio.duplexRB, marginSizeInFrames, pMarginData);
+ }
}
- ma_free(pChannelLayout);
return MA_SUCCESS;
}
-ma_bool32 ma_context_is_device_id_equal__coreaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+static ma_result ma_device_start_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream)
{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
-
- return strcmp(pID0->coreaudio, pID1->coreaudio) == 0;
-}
+ ma_aaudio_result_t resultAA;
+ ma_aaudio_stream_state_t currentState;
-ma_result ma_context_enumerate_devices__coreaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
-{
-#if defined(MA_APPLE_DESKTOP)
- UInt32 deviceCount;
- AudioObjectID* pDeviceObjectIDs;
- ma_result result;
- UInt32 iDevice;
+ MA_ASSERT(pDevice != NULL);
- result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs);
- if (result != MA_SUCCESS) {
- return result;
+ resultAA = ((MA_PFN_AAudioStream_requestStart)pDevice->pContext->aaudio.AAudioStream_requestStart)(pStream);
+ if (resultAA != MA_AAUDIO_OK) {
+ return ma_result_from_aaudio(resultAA);
}
-
- for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
- AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice];
- ma_device_info info;
- ma_zero_object(&info);
- if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(info.id.coreaudio), info.id.coreaudio) != MA_SUCCESS) {
- continue;
- }
- if (ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(info.name), info.name) != MA_SUCCESS) {
- continue;
- }
+    /* Do we actually need to wait for the device to transition into its started state? */
- if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) {
- if (!callback(pContext, ma_device_type_playback, &info, pUserData)) {
- break;
- }
+ /* The device should be in either a starting or started state. If it's not set to started we need to wait for it to transition. It should go from starting to started. */
+ currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream);
+ if (currentState != MA_AAUDIO_STREAM_STATE_STARTED) {
+ ma_result result;
+
+ if (currentState != MA_AAUDIO_STREAM_STATE_STARTING) {
+ return MA_ERROR; /* Expecting the stream to be a starting or started state. */
}
- if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) {
- if (!callback(pContext, ma_device_type_capture, &info, pUserData)) {
- break;
- }
+
+ result = ma_wait_for_simple_state_transition__aaudio(pDevice->pContext, pStream, currentState, MA_AAUDIO_STREAM_STATE_STARTED);
+ if (result != MA_SUCCESS) {
+ return result;
}
}
-
- ma_free(pDeviceObjectIDs);
-#else
- /* Only supporting default devices on non-Desktop platforms. */
- ma_device_info info;
-
- ma_zero_object(&info);
- ma_strncpy_s(info.name, sizeof(info.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- if (!callback(pContext, ma_device_type_playback, &info, pUserData)) {
- return MA_SUCCESS;
- }
-
- ma_zero_object(&info);
- ma_strncpy_s(info.name, sizeof(info.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
- if (!callback(pContext, ma_device_type_capture, &info, pUserData)) {
- return MA_SUCCESS;
- }
-#endif
-
+
return MA_SUCCESS;
}
-ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+static ma_result ma_device_stop_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream)
{
- ma_result result;
+ ma_aaudio_result_t resultAA;
+ ma_aaudio_stream_state_t currentState;
- ma_assert(pContext != NULL);
+ MA_ASSERT(pDevice != NULL);
- /* No exclusive mode with the Core Audio backend for now. */
- if (shareMode == ma_share_mode_exclusive) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
+ /*
+ From the AAudio documentation:
+
+ The stream will stop after all of the data currently buffered has been played.
+
+    This maps with miniaudio's requirement that devices be drained which means we don't need to implement any draining logic.
+ */
+
+ resultAA = ((MA_PFN_AAudioStream_requestStop)pDevice->pContext->aaudio.AAudioStream_requestStop)(pStream);
+ if (resultAA != MA_AAUDIO_OK) {
+ return ma_result_from_aaudio(resultAA);
}
-
-#if defined(MA_APPLE_DESKTOP)
- /* Desktop */
- {
- AudioObjectID deviceObjectID;
- UInt32 streamDescriptionCount;
- AudioStreamRangedDescription* pStreamDescriptions;
- UInt32 iStreamDescription;
- UInt32 sampleRateRangeCount;
- AudioValueRange* pSampleRateRanges;
- result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID);
- if (result != MA_SUCCESS) {
- return result;
- }
-
- result = ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(pDeviceInfo->id.coreaudio), pDeviceInfo->id.coreaudio);
- if (result != MA_SUCCESS) {
- return result;
+    /* The device should be in either a stopping or stopped state. If it's not set to stopped we need to wait for it to transition. It should go from stopping to stopped. */
+ currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream);
+ if (currentState != MA_AAUDIO_STREAM_STATE_STOPPED) {
+ ma_result result;
+
+ if (currentState != MA_AAUDIO_STREAM_STATE_STOPPING) {
+ return MA_ERROR; /* Expecting the stream to be a stopping or stopped state. */
}
-
- result = ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pDeviceInfo->name), pDeviceInfo->name);
+
+ result = ma_wait_for_simple_state_transition__aaudio(pDevice->pContext, pStream, currentState, MA_AAUDIO_STREAM_STATE_STOPPED);
if (result != MA_SUCCESS) {
return result;
}
-
- /* Formats. */
- result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &streamDescriptionCount, &pStreamDescriptions);
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_device_start__aaudio(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ma_result result = ma_device_start_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
if (result != MA_SUCCESS) {
return result;
}
-
- for (iStreamDescription = 0; iStreamDescription < streamDescriptionCount; ++iStreamDescription) {
- ma_format format;
- ma_bool32 formatExists = MA_FALSE;
- ma_uint32 iOutputFormat;
+ }
- result = ma_format_from_AudioStreamBasicDescription(&pStreamDescriptions[iStreamDescription].mFormat, &format);
- if (result != MA_SUCCESS) {
- continue;
- }
-
- ma_assert(format != ma_format_unknown);
-
- /* Make sure the format isn't already in the output list. */
- for (iOutputFormat = 0; iOutputFormat < pDeviceInfo->formatCount; ++iOutputFormat) {
- if (pDeviceInfo->formats[iOutputFormat] == format) {
- formatExists = MA_TRUE;
- break;
- }
- }
-
- if (!formatExists) {
- pDeviceInfo->formats[pDeviceInfo->formatCount++] = format;
- }
- }
-
- ma_free(pStreamDescriptions);
-
-
- /* Channels. */
- result = ma_get_AudioObject_channel_count(pContext, deviceObjectID, deviceType, &pDeviceInfo->minChannels);
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_result result = ma_device_start_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
if (result != MA_SUCCESS) {
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+ }
return result;
}
- pDeviceInfo->maxChannels = pDeviceInfo->minChannels;
-
-
- /* Sample rates. */
- result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges);
+ }
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__aaudio(ma_device* pDevice)
+{
+ ma_stop_proc onStop;
+
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ma_result result = ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
if (result != MA_SUCCESS) {
return result;
}
-
- if (sampleRateRangeCount > 0) {
- UInt32 iSampleRate;
- pDeviceInfo->minSampleRate = UINT32_MAX;
- pDeviceInfo->maxSampleRate = 0;
- for (iSampleRate = 0; iSampleRate < sampleRateRangeCount; ++iSampleRate) {
- if (pDeviceInfo->minSampleRate > pSampleRateRanges[iSampleRate].mMinimum) {
- pDeviceInfo->minSampleRate = pSampleRateRanges[iSampleRate].mMinimum;
- }
- if (pDeviceInfo->maxSampleRate < pSampleRateRanges[iSampleRate].mMaximum) {
- pDeviceInfo->maxSampleRate = pSampleRateRanges[iSampleRate].mMaximum;
- }
- }
- }
}
-#else
- /* Mobile */
- {
- AudioComponentDescription desc;
- AudioComponent component;
- AudioUnit audioUnit;
- OSStatus status;
- AudioUnitScope formatScope;
- AudioUnitElement formatElement;
- AudioStreamBasicDescription bestFormat;
- UInt32 propSize;
- if (deviceType == ma_device_type_playback) {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- } else {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
- }
-
- /*
- Retrieving device information is more annoying on mobile than desktop. For simplicity I'm locking this down to whatever format is
- reported on a temporary I/O unit. The problem, however, is that this doesn't return a value for the sample rate which we need to
- retrieve from the AVAudioSession shared instance.
- */
- desc.componentType = kAudioUnitType_Output;
- desc.componentSubType = kAudioUnitSubType_RemoteIO;
- desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- desc.componentFlags = 0;
- desc.componentFlagsMask = 0;
-
- component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc);
- if (component == NULL) {
- return MA_FAILED_TO_INIT_BACKEND;
- }
-
- status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)(component, &audioUnit);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
- }
-
- formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
- formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;
-
- propSize = sizeof(bestFormat);
- status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, &propSize);
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit);
- return ma_result_from_OSStatus(status);
- }
-
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit);
- audioUnit = NULL;
-
-
- pDeviceInfo->minChannels = bestFormat.mChannelsPerFrame;
- pDeviceInfo->maxChannels = bestFormat.mChannelsPerFrame;
-
- pDeviceInfo->formatCount = 1;
- result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pDeviceInfo->formats[0]);
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_result result = ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
if (result != MA_SUCCESS) {
return result;
}
-
- /*
- It looks like Apple are wanting to push the whole AVAudioSession thing. Thus, we need to use that to determine device settings. To do
- this we just get the shared instance and inspect.
- */
- @autoreleasepool {
- AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
- ma_assert(pAudioSession != NULL);
-
- pDeviceInfo->minSampleRate = (ma_uint32)pAudioSession.sampleRate;
- pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate;
- }
}
-#endif
-
- (void)pDeviceInfo; /* Unused. */
+
+ onStop = pDevice->onStop;
+ if (onStop) {
+ onStop(pDevice);
+ }
+
return MA_SUCCESS;
}
-void ma_device_uninit__coreaudio(ma_device* pDevice)
+static ma_result ma_context_uninit__aaudio(ma_context* pContext)
{
- ma_assert(pDevice != NULL);
- ma_assert(ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_aaudio);
- if (pDevice->coreaudio.audioUnitCapture != NULL) {
- ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
- }
- if (pDevice->coreaudio.audioUnitPlayback != NULL) {
- ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
- }
-
- if (pDevice->coreaudio.pAudioBufferList) {
- ma_free(pDevice->coreaudio.pAudioBufferList);
- }
+ ma_dlclose(pContext, pContext->aaudio.hAAudio);
+ pContext->aaudio.hAAudio = NULL;
- if (pDevice->type == ma_device_type_duplex) {
- ma_pcm_rb_uninit(&pDevice->coreaudio.duplexRB);
- }
+ return MA_SUCCESS;
}
-
-OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pBufferList)
+static ma_result ma_context_init__aaudio(const ma_context_config* pConfig, ma_context* pContext)
{
- ma_device* pDevice = (ma_device*)pUserData;
- ma_stream_layout layout;
-
- ma_assert(pDevice != NULL);
+ const char* libNames[] = {
+ "libaaudio.so"
+ };
+ size_t i;
-#if defined(MA_DEBUG_OUTPUT)
- printf("INFO: Output Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", busNumber, frameCount, pBufferList->mNumberBuffers);
-#endif
+ for (i = 0; i < ma_countof(libNames); ++i) {
+ pContext->aaudio.hAAudio = ma_dlopen(pContext, libNames[i]);
+ if (pContext->aaudio.hAAudio != NULL) {
+ break;
+ }
+ }
- /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */
- layout = ma_stream_layout_interleaved;
- if (pBufferList->mBuffers[0].mNumberChannels != pDevice->playback.internalChannels) {
- layout = ma_stream_layout_deinterleaved;
+ if (pContext->aaudio.hAAudio == NULL) {
+ return MA_FAILED_TO_INIT_BACKEND;
}
-
- if (layout == ma_stream_layout_interleaved) {
- /* For now we can assume everything is interleaved. */
- UInt32 iBuffer;
- for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) {
- if (pBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->playback.internalChannels) {
- ma_uint32 frameCountForThisBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- if (frameCountForThisBuffer > 0) {
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_playback(pDevice, frameCountForThisBuffer, pBufferList->mBuffers[iBuffer].mData, &pDevice->coreaudio.duplexRB);
- } else {
- ma_device__read_frames_from_client(pDevice, frameCountForThisBuffer, pBufferList->mBuffers[iBuffer].mData);
- }
- }
-
- #if defined(MA_DEBUG_OUTPUT)
- printf(" frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize);
- #endif
- } else {
- /*
- This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's
- not interleaved, in which case we can't handle right now since miniaudio does not yet support non-interleaved streams. We just
- output silence here.
- */
- ma_zero_memory(pBufferList->mBuffers[iBuffer].mData, pBufferList->mBuffers[iBuffer].mDataByteSize);
- #if defined(MA_DEBUG_OUTPUT)
- printf(" WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize);
- #endif
- }
- }
- } else {
- /* This is the deinterleaved case. We need to update each buffer in groups of internalChannels. This assumes each buffer is the same size. */
- ma_uint8 tempBuffer[4096];
- UInt32 iBuffer;
- for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; iBuffer += pDevice->playback.internalChannels) {
- ma_uint32 frameCountPerBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_sample(pDevice->playback.internalFormat);
- ma_uint32 framesRemaining = frameCountPerBuffer;
+ pContext->aaudio.AAudio_createStreamBuilder = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudio_createStreamBuilder");
+ pContext->aaudio.AAudioStreamBuilder_delete = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_delete");
+ pContext->aaudio.AAudioStreamBuilder_setDeviceId = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDeviceId");
+ pContext->aaudio.AAudioStreamBuilder_setDirection = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDirection");
+ pContext->aaudio.AAudioStreamBuilder_setSharingMode = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setSharingMode");
+ pContext->aaudio.AAudioStreamBuilder_setFormat = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setFormat");
+ pContext->aaudio.AAudioStreamBuilder_setChannelCount = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setChannelCount");
+ pContext->aaudio.AAudioStreamBuilder_setSampleRate = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setSampleRate");
+ pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setBufferCapacityInFrames");
+ pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setFramesPerDataCallback");
+ pContext->aaudio.AAudioStreamBuilder_setDataCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDataCallback");
+ pContext->aaudio.AAudioStreamBuilder_setErrorCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setErrorCallback");
+ pContext->aaudio.AAudioStreamBuilder_setPerformanceMode = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setPerformanceMode");
+ pContext->aaudio.AAudioStreamBuilder_openStream = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_openStream");
+ pContext->aaudio.AAudioStream_close = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_close");
+ pContext->aaudio.AAudioStream_getState = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getState");
+ pContext->aaudio.AAudioStream_waitForStateChange = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_waitForStateChange");
+ pContext->aaudio.AAudioStream_getFormat = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getFormat");
+ pContext->aaudio.AAudioStream_getChannelCount = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getChannelCount");
+ pContext->aaudio.AAudioStream_getSampleRate = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getSampleRate");
+ pContext->aaudio.AAudioStream_getBufferCapacityInFrames = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getBufferCapacityInFrames");
+ pContext->aaudio.AAudioStream_getFramesPerDataCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getFramesPerDataCallback");
+ pContext->aaudio.AAudioStream_getFramesPerBurst = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getFramesPerBurst");
+ pContext->aaudio.AAudioStream_requestStart = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_requestStart");
+ pContext->aaudio.AAudioStream_requestStop = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_requestStop");
- while (framesRemaining > 0) {
- void* ppDeinterleavedBuffers[MA_MAX_CHANNELS];
- ma_uint32 iChannel;
- ma_uint32 framesToRead = sizeof(tempBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- if (framesToRead > framesRemaining) {
- framesToRead = framesRemaining;
- }
-
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_playback(pDevice, framesToRead, tempBuffer, &pDevice->coreaudio.duplexRB);
- } else {
- ma_device__read_frames_from_client(pDevice, framesToRead, tempBuffer);
- }
-
- for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) {
- ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pBufferList->mBuffers[iBuffer].mData, (frameCountPerBuffer - framesRemaining) * ma_get_bytes_per_sample(pDevice->playback.internalFormat));
- }
-
- ma_deinterleave_pcm_frames(pDevice->playback.internalFormat, pDevice->playback.internalChannels, framesToRead, tempBuffer, ppDeinterleavedBuffers);
-
- framesRemaining -= framesToRead;
- }
- }
- }
-
- (void)pActionFlags;
- (void)pTimeStamp;
- (void)busNumber;
+ pContext->isBackendAsynchronous = MA_TRUE;
- return noErr;
+ pContext->onUninit = ma_context_uninit__aaudio;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__aaudio;
+ pContext->onEnumDevices = ma_context_enumerate_devices__aaudio;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__aaudio;
+ pContext->onDeviceInit = ma_device_init__aaudio;
+ pContext->onDeviceUninit = ma_device_uninit__aaudio;
+ pContext->onDeviceStart = ma_device_start__aaudio;
+ pContext->onDeviceStop = ma_device_stop__aaudio;
+
+ (void)pConfig;
+ return MA_SUCCESS;
}
+#endif /* AAudio */
-OSStatus ma_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pUnusedBufferList)
-{
- ma_device* pDevice = (ma_device*)pUserData;
- AudioBufferList* pRenderedBufferList;
- ma_stream_layout layout;
- OSStatus status;
- ma_assert(pDevice != NULL);
-
- pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList;
- ma_assert(pRenderedBufferList);
-
- /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */
- layout = ma_stream_layout_interleaved;
- if (pRenderedBufferList->mBuffers[0].mNumberChannels != pDevice->capture.internalChannels) {
- layout = ma_stream_layout_deinterleaved;
- }
-
-#if defined(MA_DEBUG_OUTPUT)
- printf("INFO: Input Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", busNumber, frameCount, pRenderedBufferList->mNumberBuffers);
+/******************************************************************************
+
+OpenSL|ES Backend
+
+******************************************************************************/
+#ifdef MA_HAS_OPENSL
+#include <SLES/OpenSLES.h>
+#ifdef MA_ANDROID
+#include <SLES/OpenSLES_Android.h>
#endif
-
- status = ((ma_AudioUnitRender_proc)pDevice->pContext->coreaudio.AudioUnitRender)((AudioUnit)pDevice->coreaudio.audioUnitCapture, pActionFlags, pTimeStamp, busNumber, frameCount, pRenderedBufferList);
- if (status != noErr) {
- #if defined(MA_DEBUG_OUTPUT)
- printf(" ERROR: AudioUnitRender() failed with %d\n", status);
- #endif
- return status;
+
+/* OpenSL|ES has one-per-application objects :( */
+SLObjectItf g_maEngineObjectSL = NULL;
+SLEngineItf g_maEngineSL = NULL;
+ma_uint32 g_maOpenSLInitCounter = 0;
+
+#define MA_OPENSL_OBJ(p) (*((SLObjectItf)(p)))
+#define MA_OPENSL_OUTPUTMIX(p) (*((SLOutputMixItf)(p)))
+#define MA_OPENSL_PLAY(p) (*((SLPlayItf)(p)))
+#define MA_OPENSL_RECORD(p) (*((SLRecordItf)(p)))
+
+#ifdef MA_ANDROID
+#define MA_OPENSL_BUFFERQUEUE(p) (*((SLAndroidSimpleBufferQueueItf)(p)))
+#else
+#define MA_OPENSL_BUFFERQUEUE(p) (*((SLBufferQueueItf)(p)))
+#endif
+
+/*
+Maps an OpenSL|ES result code (SL_RESULT_*) onto the closest miniaudio result
+code. Codes with no specific equivalent collapse to the generic MA_ERROR.
+*/
+static ma_result ma_result_from_OpenSL(SLuint32 result)
+{
+    switch (result)
+    {
+        case SL_RESULT_SUCCESS: return MA_SUCCESS;
+        case SL_RESULT_PRECONDITIONS_VIOLATED: return MA_ERROR;
+        case SL_RESULT_PARAMETER_INVALID: return MA_INVALID_ARGS;
+        case SL_RESULT_MEMORY_FAILURE: return MA_OUT_OF_MEMORY;
+        case SL_RESULT_RESOURCE_ERROR: return MA_INVALID_DATA;
+        case SL_RESULT_RESOURCE_LOST: return MA_ERROR;
+        case SL_RESULT_IO_ERROR: return MA_IO_ERROR;
+        case SL_RESULT_BUFFER_INSUFFICIENT: return MA_NO_SPACE;
+        case SL_RESULT_CONTENT_CORRUPTED: return MA_INVALID_DATA;
+        case SL_RESULT_CONTENT_UNSUPPORTED: return MA_FORMAT_NOT_SUPPORTED;
+        case SL_RESULT_CONTENT_NOT_FOUND: return MA_ERROR;
+        case SL_RESULT_PERMISSION_DENIED: return MA_ACCESS_DENIED;
+        case SL_RESULT_FEATURE_UNSUPPORTED: return MA_NOT_IMPLEMENTED;
+        case SL_RESULT_INTERNAL_ERROR: return MA_ERROR;
+        case SL_RESULT_UNKNOWN_ERROR: return MA_ERROR;
+        case SL_RESULT_OPERATION_ABORTED: return MA_ERROR;
+        case SL_RESULT_CONTROL_LOST: return MA_ERROR;
+        default: return MA_ERROR;
    }
-
- if (layout == ma_stream_layout_interleaved) {
- UInt32 iBuffer;
- for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) {
- if (pRenderedBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->capture.internalChannels) {
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_capture(pDevice, frameCount, pRenderedBufferList->mBuffers[iBuffer].mData, &pDevice->coreaudio.duplexRB);
- } else {
- ma_device__send_frames_to_client(pDevice, frameCount, pRenderedBufferList->mBuffers[iBuffer].mData);
- }
- #if defined(MA_DEBUG_OUTPUT)
- printf(" mDataByteSize=%d\n", pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);
- #endif
- } else {
- /*
- This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's
- not interleaved, in which case we can't handle right now since miniaudio does not yet support non-interleaved streams.
- */
- ma_uint8 silentBuffer[4096];
- ma_uint32 framesRemaining;
-
- ma_zero_memory(silentBuffer, sizeof(silentBuffer));
-
- framesRemaining = frameCount;
- while (framesRemaining > 0) {
- ma_uint32 framesToSend = sizeof(silentBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- if (framesToSend > framesRemaining) {
- framesToSend = framesRemaining;
- }
-
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_capture(pDevice, framesToSend, silentBuffer, &pDevice->coreaudio.duplexRB);
- } else {
- ma_device__send_frames_to_client(pDevice, framesToSend, silentBuffer);
- }
-
- framesRemaining -= framesToSend;
- }
-
- #if defined(MA_DEBUG_OUTPUT)
- printf(" WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pRenderedBufferList->mBuffers[iBuffer].mNumberChannels, pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);
- #endif
- }
- }
- } else {
- /* This is the deinterleaved case. We need to interleave the audio data before sending it to the client. This assumes each buffer is the same size. */
- ma_uint8 tempBuffer[4096];
- UInt32 iBuffer;
- for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; iBuffer += pDevice->capture.internalChannels) {
- ma_uint32 framesRemaining = frameCount;
- while (framesRemaining > 0) {
- void* ppDeinterleavedBuffers[MA_MAX_CHANNELS];
- ma_uint32 iChannel;
- ma_uint32 framesToSend = sizeof(tempBuffer) / ma_get_bytes_per_sample(pDevice->capture.internalFormat);
- if (framesToSend > framesRemaining) {
- framesToSend = framesRemaining;
- }
-
- for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) {
- ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pRenderedBufferList->mBuffers[iBuffer].mData, (frameCount - framesRemaining) * ma_get_bytes_per_sample(pDevice->capture.internalFormat));
- }
-
- ma_interleave_pcm_frames(pDevice->capture.internalFormat, pDevice->capture.internalChannels, framesToSend, (const void**)ppDeinterleavedBuffers, tempBuffer);
+}
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_capture(pDevice, framesToSend, tempBuffer, &pDevice->coreaudio.duplexRB);
- } else {
- ma_device__send_frames_to_client(pDevice, framesToSend, tempBuffer);
- }
+/* Converts an individual OpenSL-style channel identifier (SL_SPEAKER_FRONT_LEFT, etc.) to miniaudio. */
+static ma_uint8 ma_channel_id_to_ma__opensl(SLuint32 id)
+{
+    switch (id)
+    {
+        case SL_SPEAKER_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT;
+        case SL_SPEAKER_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT;
+        case SL_SPEAKER_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER;
+        case SL_SPEAKER_LOW_FREQUENCY: return MA_CHANNEL_LFE;
+        case SL_SPEAKER_BACK_LEFT: return MA_CHANNEL_BACK_LEFT;
+        case SL_SPEAKER_BACK_RIGHT: return MA_CHANNEL_BACK_RIGHT;
+        case SL_SPEAKER_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER;
+        case SL_SPEAKER_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER;
+        case SL_SPEAKER_BACK_CENTER: return MA_CHANNEL_BACK_CENTER;
+        case SL_SPEAKER_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT;
+        case SL_SPEAKER_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT;
+        case SL_SPEAKER_TOP_CENTER: return MA_CHANNEL_TOP_CENTER;
+        case SL_SPEAKER_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT;
+        case SL_SPEAKER_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER;
+        case SL_SPEAKER_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT;
+        case SL_SPEAKER_TOP_BACK_LEFT: return MA_CHANNEL_TOP_BACK_LEFT;
+        case SL_SPEAKER_TOP_BACK_CENTER: return MA_CHANNEL_TOP_BACK_CENTER;
+        case SL_SPEAKER_TOP_BACK_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT;
+        /* 0 is miniaudio's "no channel" value for unrecognized speaker bits. */
+        default: return 0;
+    }
+}
- framesRemaining -= framesToSend;
- }
- }
+/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to OpenSL-style. */
+static SLuint32 ma_channel_id_to_opensl(ma_uint8 id)
+{
+    switch (id)
+    {
+        /* OpenSL has no dedicated mono speaker position; front-center is the closest match. */
+        case MA_CHANNEL_MONO: return SL_SPEAKER_FRONT_CENTER;
+        case MA_CHANNEL_FRONT_LEFT: return SL_SPEAKER_FRONT_LEFT;
+        case MA_CHANNEL_FRONT_RIGHT: return SL_SPEAKER_FRONT_RIGHT;
+        case MA_CHANNEL_FRONT_CENTER: return SL_SPEAKER_FRONT_CENTER;
+        case MA_CHANNEL_LFE: return SL_SPEAKER_LOW_FREQUENCY;
+        case MA_CHANNEL_BACK_LEFT: return SL_SPEAKER_BACK_LEFT;
+        case MA_CHANNEL_BACK_RIGHT: return SL_SPEAKER_BACK_RIGHT;
+        case MA_CHANNEL_FRONT_LEFT_CENTER: return SL_SPEAKER_FRONT_LEFT_OF_CENTER;
+        case MA_CHANNEL_FRONT_RIGHT_CENTER: return SL_SPEAKER_FRONT_RIGHT_OF_CENTER;
+        case MA_CHANNEL_BACK_CENTER: return SL_SPEAKER_BACK_CENTER;
+        case MA_CHANNEL_SIDE_LEFT: return SL_SPEAKER_SIDE_LEFT;
+        case MA_CHANNEL_SIDE_RIGHT: return SL_SPEAKER_SIDE_RIGHT;
+        case MA_CHANNEL_TOP_CENTER: return SL_SPEAKER_TOP_CENTER;
+        case MA_CHANNEL_TOP_FRONT_LEFT: return SL_SPEAKER_TOP_FRONT_LEFT;
+        case MA_CHANNEL_TOP_FRONT_CENTER: return SL_SPEAKER_TOP_FRONT_CENTER;
+        case MA_CHANNEL_TOP_FRONT_RIGHT: return SL_SPEAKER_TOP_FRONT_RIGHT;
+        case MA_CHANNEL_TOP_BACK_LEFT: return SL_SPEAKER_TOP_BACK_LEFT;
+        case MA_CHANNEL_TOP_BACK_CENTER: return SL_SPEAKER_TOP_BACK_CENTER;
+        case MA_CHANNEL_TOP_BACK_RIGHT: return SL_SPEAKER_TOP_BACK_RIGHT;
+        /* Unmapped channels contribute no bit to the mask. */
+        default: return 0;
    }
+}
- (void)pActionFlags;
- (void)pTimeStamp;
- (void)busNumber;
- (void)frameCount;
- (void)pUnusedBufferList;
+/* Converts a channel mapping to an OpenSL-style channel mask. Unknown channels map to 0 and are simply absent from the mask. */
+static SLuint32 ma_channel_map_to_channel_mask__opensl(const ma_channel channelMap[MA_MAX_CHANNELS], ma_uint32 channels)
+{
+    SLuint32 channelMask = 0;
+    ma_uint32 iChannel;
+    for (iChannel = 0; iChannel < channels; ++iChannel) {
+        channelMask |= ma_channel_id_to_opensl(channelMap[iChannel]);
+    }
- return noErr;
+    return channelMask;
}
-void on_start_stop__coreaudio(void* pUserData, AudioUnit audioUnit, AudioUnitPropertyID propertyID, AudioUnitScope scope, AudioUnitElement element)
+/*
+Converts an OpenSL-style channel mask to a miniaudio channel map. A zero mask
+gets a sensible default for mono/stereo; otherwise set bits are walked from
+LSB to MSB and translated one channel at a time.
+*/
+static void ma_channel_mask_to_channel_map__opensl(SLuint32 channelMask, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
{
- ma_device* pDevice = (ma_device*)pUserData;
- ma_assert(pDevice != NULL);
-
- /*
- There's been a report of a deadlock here when triggered by ma_device_uninit(). It looks like
- AudioUnitGetProprty (called below) and AudioComponentInstanceDispose (called in ma_device_uninit)
- can try waiting on the same lock. I'm going to try working around this by not calling any Core
- Audio APIs in the callback when the device has been stopped or uninitialized.
- */
- if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED || ma_device__get_state(pDevice) == MA_STATE_STOPPING || ma_device__get_state(pDevice) == MA_STATE_STOPPED) {
- ma_stop_proc onStop = pDevice->onStop;
- if (onStop) {
- onStop(pDevice);
- }
-
- ma_event_signal(&pDevice->coreaudio.stopEvent);
+    if (channels == 1 && channelMask == 0) {
+        channelMap[0] = MA_CHANNEL_MONO;
+    } else if (channels == 2 && channelMask == 0) {
+        channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+        channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
    } else {
- UInt32 isRunning;
- UInt32 isRunningSize = sizeof(isRunning);
- OSStatus status = ((ma_AudioUnitGetProperty_proc)pDevice->pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioOutputUnitProperty_IsRunning, scope, element, &isRunning, &isRunningSize);
- if (status != noErr) {
- return; /* Don't really know what to do in this case... just ignore it, I suppose... */
- }
-
- if (!isRunning) {
- ma_stop_proc onStop;
-
- /*
- The stop event is a bit annoying in Core Audio because it will be called when we automatically switch the default device. Some scenarios to consider:
-
- 1) When the device is unplugged, this will be called _before_ the default device change notification.
- 2) When the device is changed via the default device change notification, this will be called _after_ the switch.
-
- For case #1, we just check if there's a new default device available. If so, we just ignore the stop event. For case #2 we check a flag.
- */
- if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isDefaultPlaybackDevice) ||
- ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isDefaultCaptureDevice)) {
- /*
- It looks like the device is switching through an external event, such as the user unplugging the device or changing the default device
- via the operating system's sound settings. If we're re-initializing the device, we just terminate because we want the stopping of the
- device to be seamless to the client (we don't want them receiving the onStop event and thinking that the device has stopped when it
- hasn't!).
- */
- if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isSwitchingPlaybackDevice) ||
- ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isSwitchingCaptureDevice)) {
- return;
+        if (channels == 1 && (channelMask & SL_SPEAKER_FRONT_CENTER) != 0) {
+            channelMap[0] = MA_CHANNEL_MONO;
+        } else {
+            /* Just iterate over each bit. */
+            ma_uint32 iChannel = 0;
+            ma_uint32 iBit;
+            for (iBit = 0; iBit < 32; ++iBit) {
+                /* bitValue is the full SL_SPEAKER_* flag, not the bit index, so it can be translated directly. */
+                SLuint32 bitValue = (channelMask & (1UL << iBit));
+                if (bitValue != 0) {
+                    /* The bit is set. */
+                    channelMap[iChannel] = ma_channel_id_to_ma__opensl(bitValue);
+                    iChannel += 1;
                }
-
- /*
- Getting here means the device is not reinitializing which means it may have been unplugged. From what I can see, it looks like Core Audio
- will try switching to the new default device seamlessly. We need to somehow find a way to determine whether or not Core Audio will most
- likely be successful in switching to the new device.
-
- TODO: Try to predict if Core Audio will switch devices. If not, the onStop callback needs to be posted.
- */
- return;
- }
-
- /* Getting here means we need to stop the device. */
- onStop = pDevice->onStop;
- if (onStop) {
- onStop(pDevice);
- }
            }
        }
    }
+}
- (void)propertyID; /* Unused. */
+/*
+Rounds a requested rate (note: expressed in OpenSL's milliHertz SL_SAMPLINGRATE_*
+units) up to the nearest standard OpenSL sample rate constant.
+*/
+static SLuint32 ma_round_to_standard_sample_rate__opensl(SLuint32 samplesPerSec)
+{
+    if (samplesPerSec <= SL_SAMPLINGRATE_8) {
+        return SL_SAMPLINGRATE_8;
+    }
+    if (samplesPerSec <= SL_SAMPLINGRATE_11_025) {
+        return SL_SAMPLINGRATE_11_025;
+    }
+    if (samplesPerSec <= SL_SAMPLINGRATE_12) {
+        return SL_SAMPLINGRATE_12;
+    }
+    if (samplesPerSec <= SL_SAMPLINGRATE_16) {
+        return SL_SAMPLINGRATE_16;
+    }
+    if (samplesPerSec <= SL_SAMPLINGRATE_22_05) {
+        return SL_SAMPLINGRATE_22_05;
+    }
+    if (samplesPerSec <= SL_SAMPLINGRATE_24) {
+        return SL_SAMPLINGRATE_24;
+    }
+    if (samplesPerSec <= SL_SAMPLINGRATE_32) {
+        return SL_SAMPLINGRATE_32;
+    }
+    if (samplesPerSec <= SL_SAMPLINGRATE_44_1) {
+        return SL_SAMPLINGRATE_44_1;
+    }
+    if (samplesPerSec <= SL_SAMPLINGRATE_48) {
+        return SL_SAMPLINGRATE_48;
+    }
+
+    /* Android doesn't support more than 48000. */
+#ifndef MA_ANDROID
+    if (samplesPerSec <= SL_SAMPLINGRATE_64) {
+        return SL_SAMPLINGRATE_64;
+    }
+    if (samplesPerSec <= SL_SAMPLINGRATE_88_2) {
+        return SL_SAMPLINGRATE_88_2;
+    }
+    if (samplesPerSec <= SL_SAMPLINGRATE_96) {
+        return SL_SAMPLINGRATE_96;
+    }
+    if (samplesPerSec <= SL_SAMPLINGRATE_192) {
+        return SL_SAMPLINGRATE_192;
+    }
+#endif
+
+    /* NOTE(review): rates above every supported constant fall back to 16 kHz, not the maximum — looks deliberate upstream but worth confirming. */
+    return SL_SAMPLINGRATE_16;
}
-#if defined(MA_APPLE_DESKTOP)
-OSStatus ma_default_device_changed__coreaudio(AudioObjectID objectID, UInt32 addressCount, const AudioObjectPropertyAddress* pAddresses, void* pUserData)
+
+/* Compares two OpenSL device IDs for equality. The context is unused but kept for the backend vtable signature. */
+static ma_bool32 ma_context_is_device_id_equal__opensl(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
{
- ma_device* pDevice = (ma_device*)pUserData;
- ma_assert(pDevice != NULL);
-
- /* Not sure if I really need to check this, but it makes me feel better. */
- if (addressCount == 0) {
- return noErr;
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pID0 != NULL);
+    MA_ASSERT(pID1 != NULL);
+    (void)pContext;
+
+    return pID0->opensl == pID1->opensl;
+}
+
+/*
+Enumerates OpenSL playback and capture devices, invoking `callback` once per
+device until it returns MA_FALSE. The real enumeration path via
+SLAudioIODeviceCapabilitiesItf is compiled out (#if 0) as untested, so in
+practice only a single default playback and default capture device are
+reported. Requires the global OpenSL engine to have been initialized.
+*/
+static ma_result ma_context_enumerate_devices__opensl(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    ma_bool32 cbResult;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(callback != NULL);
+
+    MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to enumerate devices. */
+    if (g_maOpenSLInitCounter == 0) {
+        return MA_INVALID_OPERATION;
    }
+
+    /*
+    TODO: Test Me.
- if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultOutputDevice) {
- ma_result reinitResult;
+    This is currently untested, so for now we are just returning default devices.
+    */
+#if 0 && !defined(MA_ANDROID)
+    ma_bool32 isTerminated = MA_FALSE;
- pDevice->coreaudio.isSwitchingPlaybackDevice = MA_TRUE;
- reinitResult = ma_device_reinit_internal__coreaudio(pDevice, ma_device_type_playback, MA_TRUE);
- pDevice->coreaudio.isSwitchingPlaybackDevice = MA_FALSE;
-
- if (reinitResult == MA_SUCCESS) {
- ma_device__post_init_setup(pDevice, ma_device_type_playback);
-
- /* Restart the device if required. If this fails we need to stop the device entirely. */
- if (ma_device__get_state(pDevice) == MA_STATE_STARTED) {
- OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
- if (status != noErr) {
- if (pDevice->type == ma_device_type_duplex) {
- ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
- }
- ma_device__set_state(pDevice, MA_STATE_STOPPED);
+    SLuint32 pDeviceIDs[128];
+    SLint32 deviceCount = sizeof(pDeviceIDs) / sizeof(pDeviceIDs[0]);
+
+    SLAudioIODeviceCapabilitiesItf deviceCaps;
+    SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps);
+    if (resultSL != SL_RESULT_SUCCESS) {
+        /* The interface may not be supported so just report a default device. */
+        goto return_default_device;
+    }
+
+    /* Playback */
+    if (!isTerminated) {
+        resultSL = (*deviceCaps)->GetAvailableAudioOutputs(deviceCaps, &deviceCount, pDeviceIDs);
+        if (resultSL != SL_RESULT_SUCCESS) {
+            return ma_result_from_OpenSL(resultSL);
+        }
+
+        for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) {
+            ma_device_info deviceInfo;
+            MA_ZERO_OBJECT(&deviceInfo);
+            deviceInfo.id.opensl = pDeviceIDs[iDevice];
+
+            SLAudioOutputDescriptor desc;
+            resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc);
+            if (resultSL == SL_RESULT_SUCCESS) {
+                ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.pDeviceName, (size_t)-1);
+
+                ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+                if (cbResult == MA_FALSE) {
+                    isTerminated = MA_TRUE;
+                    break;
                }
            }
        }
    }
-
- if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultInputDevice) {
- ma_result reinitResult;
- pDevice->coreaudio.isSwitchingPlaybackDevice = MA_TRUE;
- reinitResult = ma_device_reinit_internal__coreaudio(pDevice, ma_device_type_capture, MA_TRUE);
- pDevice->coreaudio.isSwitchingPlaybackDevice = MA_FALSE;
-
- if (reinitResult == MA_SUCCESS) {
- ma_device__post_init_setup(pDevice, ma_device_type_capture);
-
- /* Restart the device if required. If this fails we need to stop the device entirely. */
- if (ma_device__get_state(pDevice) == MA_STATE_STARTED) {
- OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
- if (status != noErr) {
- if (pDevice->type == ma_device_type_duplex) {
- ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
- }
- ma_device__set_state(pDevice, MA_STATE_STOPPED);
+    /* Capture */
+    if (!isTerminated) {
+        resultSL = (*deviceCaps)->GetAvailableAudioInputs(deviceCaps, &deviceCount, pDeviceIDs);
+        if (resultSL != SL_RESULT_SUCCESS) {
+            return ma_result_from_OpenSL(resultSL);
+        }
+
+        for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) {
+            ma_device_info deviceInfo;
+            MA_ZERO_OBJECT(&deviceInfo);
+            deviceInfo.id.opensl = pDeviceIDs[iDevice];
+
+            SLAudioInputDescriptor desc;
+            resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc);
+            if (resultSL == SL_RESULT_SUCCESS) {
+                ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.deviceName, (size_t)-1);
+
+                ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+                if (cbResult == MA_FALSE) {
+                    isTerminated = MA_TRUE;
+                    break;
                }
            }
        }
    }
-
- (void)objectID; /* Unused. */
- return noErr;
-}
-#endif
-
-typedef struct
-{
- /* Input. */
- ma_format formatIn;
- ma_uint32 channelsIn;
- ma_uint32 sampleRateIn;
- ma_channel channelMapIn[MA_MAX_CHANNELS];
- ma_uint32 bufferSizeInFramesIn;
- ma_uint32 bufferSizeInMillisecondsIn;
- ma_uint32 periodsIn;
- ma_bool32 usingDefaultFormat;
- ma_bool32 usingDefaultChannels;
- ma_bool32 usingDefaultSampleRate;
- ma_bool32 usingDefaultChannelMap;
- ma_share_mode shareMode;
- ma_bool32 registerStopEvent;
- /* Output. */
-#if defined(MA_APPLE_DESKTOP)
- AudioObjectID deviceObjectID;
+    return MA_SUCCESS;
+#else
+    goto return_default_device;
#endif
- AudioComponent component;
- AudioUnit audioUnit;
- AudioBufferList* pAudioBufferList; /* Only used for input devices. */
- ma_format formatOut;
- ma_uint32 channelsOut;
- ma_uint32 sampleRateOut;
- ma_channel channelMapOut[MA_MAX_CHANNELS];
- ma_uint32 bufferSizeInFramesOut;
- ma_uint32 periodsOut;
- char deviceName[256];
-} ma_device_init_internal_data__coreaudio;
-ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__coreaudio* pData, void* pDevice_DoNotReference) /* <-- pDevice is typed as void* intentionally so as to avoid accidentally referencing it. */
-{
- ma_result result;
- OSStatus status;
- UInt32 enableIOFlag;
- AudioStreamBasicDescription bestFormat;
- ma_uint32 actualBufferSizeInFrames;
- AURenderCallbackStruct callbackInfo;
-#if defined(MA_APPLE_DESKTOP)
- AudioObjectID deviceObjectID;
-#endif
+/* The trailing ';' after the label is a deliberate empty statement: C89 does not allow a declaration directly after a label. */
+return_default_device:;
+    cbResult = MA_TRUE;
- /* This API should only be used for a single device type: playback or capture. No full-duplex mode. */
- if (deviceType == ma_device_type_duplex) {
- return MA_INVALID_ARGS;
+    /* Playback. */
+    if (cbResult) {
+        ma_device_info deviceInfo;
+        MA_ZERO_OBJECT(&deviceInfo);
+        ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+        cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
    }
- ma_assert(pContext != NULL);
- ma_assert(deviceType == ma_device_type_playback || deviceType == ma_device_type_capture);
-
-#if defined(MA_APPLE_DESKTOP)
- pData->deviceObjectID = 0;
-#endif
- pData->component = NULL;
- pData->audioUnit = NULL;
- pData->pAudioBufferList = NULL;
-
-#if defined(MA_APPLE_DESKTOP)
- result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID);
- if (result != MA_SUCCESS) {
- return result;
- }
-
- pData->deviceObjectID = deviceObjectID;
-#endif
-
- /* Core audio doesn't really use the notion of a period so we can leave this unmodified, but not too over the top. */
- pData->periodsOut = pData->periodsIn;
- if (pData->periodsOut < 1) {
- pData->periodsOut = 1;
- }
- if (pData->periodsOut > 16) {
- pData->periodsOut = 16;
- }
-
-
- /* Audio unit. */
- status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)((AudioComponent)pContext->coreaudio.component, (AudioUnit*)&pData->audioUnit);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
- }
-
-
- /* The input/output buses need to be explicitly enabled and disabled. We set the flag based on the output unit first, then we just swap it for input. */
- enableIOFlag = 1;
- if (deviceType == ma_device_type_capture) {
- enableIOFlag = 0;
- }
-
- status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return ma_result_from_OSStatus(status);
+    /* Capture. */
+    if (cbResult) {
+        ma_device_info deviceInfo;
+        MA_ZERO_OBJECT(&deviceInfo);
+        ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+        cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
    }
-
- enableIOFlag = (enableIOFlag == 0) ? 1 : 0;
- status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return ma_result_from_OSStatus(status);
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_get_device_info__opensl(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+{
+ MA_ASSERT(pContext != NULL);
+
+ MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to get device info. */
+ if (g_maOpenSLInitCounter == 0) {
+ return MA_INVALID_OPERATION;
}
-
-
- /* Set the device to use with this audio unit. This is only used on desktop since we are using defaults on mobile. */
-#if defined(MA_APPLE_DESKTOP)
- status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS, &deviceObjectID, sizeof(AudioDeviceID));
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return ma_result_from_OSStatus(result);
+
+ /* No exclusive mode with OpenSL|ES. */
+ if (shareMode == ma_share_mode_exclusive) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
}
-#endif
-
+
/*
- Format. This is the hardest part of initialization because there's a few variables to take into account.
- 1) The format must be supported by the device.
- 2) The format must be supported miniaudio.
- 3) There's a priority that miniaudio prefers.
-
- Ideally we would like to use a format that's as close to the hardware as possible so we can get as close to a passthrough as possible. The
- most important property is the sample rate. miniaudio can do format conversion for any sample rate and channel count, but cannot do the same
- for the sample data format. If the sample data format is not supported by miniaudio it must be ignored completely.
+ TODO: Test Me.
- On mobile platforms this is a bit different. We just force the use of whatever the audio unit's current format is set to.
+ This is currently untested, so for now we are just returning default devices.
*/
- {
- AudioUnitScope formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
- AudioUnitElement formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;
-
- #if defined(MA_APPLE_DESKTOP)
- AudioStreamBasicDescription origFormat;
- UInt32 origFormatSize;
+#if 0 && !defined(MA_ANDROID)
+ SLAudioIODeviceCapabilitiesItf deviceCaps;
+ SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ /* The interface may not be supported so just report a default device. */
+ goto return_default_device;
+ }
- result = ma_find_best_format__coreaudio(pContext, deviceObjectID, deviceType, pData->formatIn, pData->channelsIn, pData->sampleRateIn, pData->usingDefaultFormat, pData->usingDefaultChannels, pData->usingDefaultSampleRate, &bestFormat);
- if (result != MA_SUCCESS) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return result;
- }
-
- /* From what I can see, Apple's documentation implies that we should keep the sample rate consistent. */
- origFormatSize = sizeof(origFormat);
- if (deviceType == ma_device_type_playback) {
- status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &origFormat, &origFormatSize);
- } else {
- status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &origFormat, &origFormatSize);
- }
-
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return result;
- }
-
- bestFormat.mSampleRate = origFormat.mSampleRate;
-
- status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat));
- if (status != noErr) {
- /* We failed to set the format, so fall back to the current format of the audio unit. */
- bestFormat = origFormat;
- }
- #else
- UInt32 propSize = sizeof(bestFormat);
- status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, &propSize);
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return ma_result_from_OSStatus(status);
- }
-
- /*
- Sample rate is a little different here because for some reason kAudioUnitProperty_StreamFormat returns 0... Oh well. We need to instead try
- setting the sample rate to what the user has requested and then just see the results of it. Need to use some Objective-C here for this since
- it depends on Apple's AVAudioSession API. To do this we just get the shared AVAudioSession instance and then set it. Note that from what I
- can tell, it looks like the sample rate is shared between playback and capture for everything.
- */
- @autoreleasepool {
- AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
- ma_assert(pAudioSession != NULL);
-
- [pAudioSession setPreferredSampleRate:(double)pData->sampleRateIn error:nil];
- bestFormat.mSampleRate = pAudioSession.sampleRate;
- }
-
- status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat));
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return ma_result_from_OSStatus(status);
- }
- #endif
-
- result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pData->formatOut);
- if (result != MA_SUCCESS) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return result;
- }
-
- if (pData->formatOut == ma_format_unknown) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return MA_FORMAT_NOT_SUPPORTED;
+ if (deviceType == ma_device_type_playback) {
+ SLAudioOutputDescriptor desc;
+ resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, pDeviceID->opensl, &desc);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ return ma_result_from_OpenSL(resultSL);
}
-
- pData->channelsOut = bestFormat.mChannelsPerFrame;
- pData->sampleRateOut = bestFormat.mSampleRate;
- }
-
- /*
- Internal channel map. This is weird in my testing. If I use the AudioObject to get the
- channel map, the channel descriptions are set to "Unknown" for some reason. To work around
- this it looks like retrieving it from the AudioUnit will work. However, and this is where
- it gets weird, it doesn't seem to work with capture devices, nor at all on iOS... Therefore
- I'm going to fall back to a default assumption in these cases.
- */
-#if defined(MA_APPLE_DESKTOP)
- result = ma_get_AudioUnit_channel_map(pContext, pData->audioUnit, deviceType, pData->channelMapOut);
- if (result != MA_SUCCESS) {
- #if 0
- /* Try falling back to the channel map from the AudioObject. */
- result = ma_get_AudioObject_channel_map(pContext, deviceObjectID, deviceType, pData->channelMapOut);
- if (result != MA_SUCCESS) {
- return result;
+
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.pDeviceName, (size_t)-1);
+ } else {
+ SLAudioInputDescriptor desc;
+ resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, pDeviceID->opensl, &desc);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ return ma_result_from_OpenSL(resultSL);
}
- #else
- /* Fall back to default assumptions. */
- ma_get_standard_channel_map(ma_standard_channel_map_default, pData->channelsOut, pData->channelMapOut);
- #endif
+
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.deviceName, (size_t)-1);
}
+
+ goto return_detailed_info;
#else
- /* TODO: Figure out how to get the channel map using AVAudioSession. */
- ma_get_standard_channel_map(ma_standard_channel_map_default, pData->channelsOut, pData->channelMapOut);
+ goto return_default_device;
#endif
-
- /* Buffer size. Not allowing this to be configurable on iOS. */
- actualBufferSizeInFrames = pData->bufferSizeInFramesIn;
-
-#if defined(MA_APPLE_DESKTOP)
- if (actualBufferSizeInFrames == 0) {
- actualBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->bufferSizeInMillisecondsIn, pData->sampleRateOut);
+return_default_device:
+ if (pDeviceID != NULL) {
+ if ((deviceType == ma_device_type_playback && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOOUTPUT) ||
+ (deviceType == ma_device_type_capture && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOINPUT)) {
+ return MA_NO_DEVICE; /* Don't know the device. */
+ }
}
-
- actualBufferSizeInFrames = actualBufferSizeInFrames / pData->periodsOut;
- result = ma_set_AudioObject_buffer_size_in_frames(pContext, deviceObjectID, deviceType, &actualBufferSizeInFrames);
- if (result != MA_SUCCESS) {
- return result;
+
+ /* Name / Description */
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
}
-
- pData->bufferSizeInFramesOut = actualBufferSizeInFrames * pData->periodsOut;
-#else
- actualBufferSizeInFrames = 4096;
- pData->bufferSizeInFramesOut = actualBufferSizeInFrames;
+
+ goto return_detailed_info;
+
+
+return_detailed_info:
+
+ /*
+ For now we're just outputting a set of values that are supported by the API but not necessarily supported
+ by the device natively. Later on we should work on this so that it more closely reflects the device's
+ actual native format.
+ */
+ pDeviceInfo->minChannels = 1;
+ pDeviceInfo->maxChannels = 2;
+ pDeviceInfo->minSampleRate = 8000;
+ pDeviceInfo->maxSampleRate = 48000;
+ pDeviceInfo->formatCount = 2;
+ pDeviceInfo->formats[0] = ma_format_u8;
+ pDeviceInfo->formats[1] = ma_format_s16;
+#if defined(MA_ANDROID) && __ANDROID_API__ >= 21
+ pDeviceInfo->formats[pDeviceInfo->formatCount] = ma_format_f32;
+ pDeviceInfo->formatCount += 1;
#endif
+ return MA_SUCCESS;
+}
+
+
+#ifdef MA_ANDROID
+/*void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, SLuint32 eventFlags, const void* pBuffer, SLuint32 bufferSize, SLuint32 dataUsed, void* pContext)*/
+static void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData)
+{
+ ma_device* pDevice = (ma_device*)pUserData;
+ size_t periodSizeInBytes;
+ ma_uint8* pBuffer;
+ SLresult resultSL;
+
+ MA_ASSERT(pDevice != NULL);
+
+ (void)pBufferQueue;
/*
- During testing I discovered that the buffer size can be too big. You'll get an error like this:
-
- kAudioUnitErr_TooManyFramesToProcess : inFramesToProcess=4096, mMaxFramesPerSlice=512
-
- Note how inFramesToProcess is smaller than mMaxFramesPerSlice. To fix, we need to set kAudioUnitProperty_MaximumFramesPerSlice to that
- of the size of our buffer, or do it the other way around and set our buffer size to the kAudioUnitProperty_MaximumFramesPerSlice.
+ For now, don't do anything unless the buffer was fully processed. From what I can tell, it looks like
+ OpenSL|ES 1.1 improves on buffer queues to the point that we could much more intelligently handle this,
+ but unfortunately it looks like Android is only supporting OpenSL|ES 1.0.1 for now :(
*/
- {
- /*AudioUnitScope propScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
- AudioUnitElement propBus = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;
-
- status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, propScope, propBus, &actualBufferSizeInFrames, sizeof(actualBufferSizeInFrames));
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return ma_result_from_OSStatus(status);
- }*/
-
- status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &actualBufferSizeInFrames, sizeof(actualBufferSizeInFrames));
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return ma_result_from_OSStatus(status);
- }
+
+ /* Don't do anything if the device is not started. */
+ if (pDevice->state != MA_STATE_STARTED) {
+ return;
}
-
- /* We need a buffer list if this is an input device. We render into this in the input callback. */
- if (deviceType == ma_device_type_capture) {
- ma_bool32 isInterleaved = (bestFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) == 0;
- size_t allocationSize;
- AudioBufferList* pBufferList;
- allocationSize = sizeof(AudioBufferList) - sizeof(AudioBuffer); /* Subtract sizeof(AudioBuffer) because that part is dynamically sized. */
- if (isInterleaved) {
- /* Interleaved case. This is the simple case because we just have one buffer. */
- allocationSize += sizeof(AudioBuffer) * 1;
- allocationSize += actualBufferSizeInFrames * ma_get_bytes_per_frame(pData->formatOut, pData->channelsOut);
- } else {
- /* Non-interleaved case. This is the more complex case because there's more than one buffer. */
- allocationSize += sizeof(AudioBuffer) * pData->channelsOut;
- allocationSize += actualBufferSizeInFrames * ma_get_bytes_per_sample(pData->formatOut) * pData->channelsOut;
+ /* Don't do anything if the device is being drained. */
+ if (pDevice->opensl.isDrainingCapture) {
+ return;
+ }
+
+ periodSizeInBytes = pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ pBuffer = pDevice->opensl.pBufferCapture + (pDevice->opensl.currentBufferIndexCapture * periodSizeInBytes);
+
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_device__handle_duplex_callback_capture(pDevice, pDevice->capture.internalPeriodSizeInFrames, pBuffer, &pDevice->opensl.duplexRB);
+ } else {
+ ma_device__send_frames_to_client(pDevice, pDevice->capture.internalPeriodSizeInFrames, pBuffer);
+ }
+
+ resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pBuffer, periodSizeInBytes);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ return;
+ }
+
+ pDevice->opensl.currentBufferIndexCapture = (pDevice->opensl.currentBufferIndexCapture + 1) % pDevice->capture.internalPeriods;
+}
+
+static void ma_buffer_queue_callback_playback__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData)
+{
+ ma_device* pDevice = (ma_device*)pUserData;
+ size_t periodSizeInBytes;
+ ma_uint8* pBuffer;
+ SLresult resultSL;
+
+ MA_ASSERT(pDevice != NULL);
+
+ (void)pBufferQueue;
+
+ /* Don't do anything if the device is not started. */
+ if (pDevice->state != MA_STATE_STARTED) {
+ return;
+ }
+
+ /* Don't do anything if the device is being drained. */
+ if (pDevice->opensl.isDrainingPlayback) {
+ return;
+ }
+
+ periodSizeInBytes = pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ pBuffer = pDevice->opensl.pBufferPlayback + (pDevice->opensl.currentBufferIndexPlayback * periodSizeInBytes);
+
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_device__handle_duplex_callback_playback(pDevice, pDevice->playback.internalPeriodSizeInFrames, pBuffer, &pDevice->opensl.duplexRB);
+ } else {
+ ma_device__read_frames_from_client(pDevice, pDevice->playback.internalPeriodSizeInFrames, pBuffer);
+ }
+
+ resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pBuffer, periodSizeInBytes);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ return;
+ }
+
+ pDevice->opensl.currentBufferIndexPlayback = (pDevice->opensl.currentBufferIndexPlayback + 1) % pDevice->playback.internalPeriods;
+}
+#endif
+
+static void ma_device_uninit__opensl(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before uninitializing the device. */
+ if (g_maOpenSLInitCounter == 0) {
+ return;
+ }
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ if (pDevice->opensl.pAudioRecorderObj) {
+ MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioRecorderObj);
}
-
- pBufferList = (AudioBufferList*)ma_malloc(allocationSize);
- if (pBufferList == NULL) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return MA_OUT_OF_MEMORY;
+
+ ma__free_from_callbacks(pDevice->opensl.pBufferCapture, &pDevice->pContext->allocationCallbacks);
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ if (pDevice->opensl.pAudioPlayerObj) {
+ MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioPlayerObj);
}
-
- if (isInterleaved) {
- pBufferList->mNumberBuffers = 1;
- pBufferList->mBuffers[0].mNumberChannels = pData->channelsOut;
- pBufferList->mBuffers[0].mDataByteSize = actualBufferSizeInFrames * ma_get_bytes_per_frame(pData->formatOut, pData->channelsOut);
- pBufferList->mBuffers[0].mData = (ma_uint8*)pBufferList + sizeof(AudioBufferList);
- } else {
- ma_uint32 iBuffer;
- pBufferList->mNumberBuffers = pData->channelsOut;
- for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) {
- pBufferList->mBuffers[iBuffer].mNumberChannels = 1;
- pBufferList->mBuffers[iBuffer].mDataByteSize = actualBufferSizeInFrames * ma_get_bytes_per_sample(pData->formatOut);
- pBufferList->mBuffers[iBuffer].mData = (ma_uint8*)pBufferList + ((sizeof(AudioBufferList) - sizeof(AudioBuffer)) + (sizeof(AudioBuffer) * pData->channelsOut)) + (actualBufferSizeInFrames * ma_get_bytes_per_sample(pData->formatOut) * iBuffer);
- }
+ if (pDevice->opensl.pOutputMixObj) {
+ MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Destroy((SLObjectItf)pDevice->opensl.pOutputMixObj);
}
-
- pData->pAudioBufferList = pBufferList;
+
+ ma__free_from_callbacks(pDevice->opensl.pBufferPlayback, &pDevice->pContext->allocationCallbacks);
}
-
- /* Callbacks. */
- callbackInfo.inputProcRefCon = pDevice_DoNotReference;
- if (deviceType == ma_device_type_playback) {
- callbackInfo.inputProc = ma_on_output__coreaudio;
- status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, MA_COREAUDIO_OUTPUT_BUS, &callbackInfo, sizeof(callbackInfo));
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return ma_result_from_OSStatus(status);
- }
+
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_pcm_rb_uninit(&pDevice->opensl.duplexRB);
+ }
+}
+
+#if defined(MA_ANDROID) && __ANDROID_API__ >= 21
+typedef SLAndroidDataFormat_PCM_EX ma_SLDataFormat_PCM;
+#else
+typedef SLDataFormat_PCM ma_SLDataFormat_PCM;
+#endif
+
+static ma_result ma_SLDataFormat_PCM_init__opensl(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* channelMap, ma_SLDataFormat_PCM* pDataFormat)
+{
+#if defined(MA_ANDROID) && __ANDROID_API__ >= 21
+ if (format == ma_format_f32) {
+ pDataFormat->formatType = SL_ANDROID_DATAFORMAT_PCM_EX;
+ pDataFormat->representation = SL_ANDROID_PCM_REPRESENTATION_FLOAT;
} else {
- callbackInfo.inputProc = ma_on_input__coreaudio;
- status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, MA_COREAUDIO_INPUT_BUS, &callbackInfo, sizeof(callbackInfo));
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return ma_result_from_OSStatus(status);
- }
+ pDataFormat->formatType = SL_DATAFORMAT_PCM;
}
-
- /* We need to listen for stop events. */
- if (pData->registerStopEvent) {
- status = ((ma_AudioUnitAddPropertyListener_proc)pContext->coreaudio.AudioUnitAddPropertyListener)(pData->audioUnit, kAudioOutputUnitProperty_IsRunning, on_start_stop__coreaudio, pDevice_DoNotReference);
- if (status != noErr) {
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return ma_result_from_OSStatus(status);
- }
+#else
+ pDataFormat->formatType = SL_DATAFORMAT_PCM;
+#endif
+
+ pDataFormat->numChannels = channels;
+ ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = ma_round_to_standard_sample_rate__opensl(sampleRate * 1000); /* In millihertz. Annoyingly, the sample rate variable is named differently between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM */
+ pDataFormat->bitsPerSample = ma_get_bytes_per_sample(format)*8;
+ pDataFormat->channelMask = ma_channel_map_to_channel_mask__opensl(channelMap, channels);
+ pDataFormat->endianness = (ma_is_little_endian()) ? SL_BYTEORDER_LITTLEENDIAN : SL_BYTEORDER_BIGENDIAN;
+
+ /*
+ Android has a few restrictions on the format as documented here: https://developer.android.com/ndk/guides/audio/opensl-for-android.html
+ - Only mono and stereo is supported.
+ - Only u8 and s16 formats are supported.
+ - Maximum sample rate of 48000.
+ */
+#ifdef MA_ANDROID
+ if (pDataFormat->numChannels > 2) {
+ pDataFormat->numChannels = 2;
}
-
- /* Initialize the audio unit. */
- status = ((ma_AudioUnitInitialize_proc)pContext->coreaudio.AudioUnitInitialize)(pData->audioUnit);
- if (status != noErr) {
- ma_free(pData->pAudioBufferList);
- pData->pAudioBufferList = NULL;
- ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
- return ma_result_from_OSStatus(status);
+#if __ANDROID_API__ >= 21
+ if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) {
+ /* It's floating point. */
+ MA_ASSERT(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT);
+ if (pDataFormat->bitsPerSample > 32) {
+ pDataFormat->bitsPerSample = 32;
+ }
+ } else {
+ if (pDataFormat->bitsPerSample > 16) {
+ pDataFormat->bitsPerSample = 16;
+ }
}
-
- /* Grab the name. */
-#if defined(MA_APPLE_DESKTOP)
- ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pData->deviceName), pData->deviceName);
#else
- if (deviceType == ma_device_type_playback) {
- ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_PLAYBACK_DEVICE_NAME);
- } else {
- ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_CAPTURE_DEVICE_NAME);
+ if (pDataFormat->bitsPerSample > 16) {
+ pDataFormat->bitsPerSample = 16;
}
#endif
-
- return result;
+ if (((SLDataFormat_PCM*)pDataFormat)->samplesPerSec > SL_SAMPLINGRATE_48) {
+ ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = SL_SAMPLINGRATE_48;
+ }
+#endif
+
+ pDataFormat->containerSize = pDataFormat->bitsPerSample; /* Always tightly packed for now. */
+
+ return MA_SUCCESS;
}
-ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit)
+static ma_result ma_deconstruct_SLDataFormat_PCM__opensl(ma_SLDataFormat_PCM* pDataFormat, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap)
{
- ma_device_init_internal_data__coreaudio data;
- ma_result result;
-
- /* This should only be called for playback or capture, not duplex. */
- if (deviceType == ma_device_type_duplex) {
- return MA_INVALID_ARGS;
+ ma_bool32 isFloatingPoint = MA_FALSE;
+#if defined(MA_ANDROID) && __ANDROID_API__ >= 21
+ if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) {
+ MA_ASSERT(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT);
+ isFloatingPoint = MA_TRUE;
}
-
- if (deviceType == ma_device_type_capture) {
- data.formatIn = pDevice->capture.format;
- data.channelsIn = pDevice->capture.channels;
- data.sampleRateIn = pDevice->sampleRate;
- ma_copy_memory(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap));
- data.usingDefaultFormat = pDevice->capture.usingDefaultFormat;
- data.usingDefaultChannels = pDevice->capture.usingDefaultChannels;
- data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
- data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap;
- data.shareMode = pDevice->capture.shareMode;
- data.registerStopEvent = MA_TRUE;
-
- if (disposePreviousAudioUnit) {
- ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
- ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
- }
- if (pDevice->coreaudio.pAudioBufferList) {
- ma_free(pDevice->coreaudio.pAudioBufferList);
+#endif
+ if (isFloatingPoint) {
+ if (pDataFormat->bitsPerSample == 32) {
+ *pFormat = ma_format_f32;
}
-
- #if defined(MA_APPLE_DESKTOP)
- pDevice->coreaudio.deviceObjectIDCapture = (ma_uint32)data.deviceObjectID;
- #endif
- pDevice->coreaudio.audioUnitCapture = (ma_ptr)data.audioUnit;
- pDevice->coreaudio.pAudioBufferList = (ma_ptr)data.pAudioBufferList;
- }
- if (deviceType == ma_device_type_playback) {
- data.formatIn = pDevice->playback.format;
- data.channelsIn = pDevice->playback.channels;
- data.sampleRateIn = pDevice->sampleRate;
- ma_copy_memory(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap));
- data.usingDefaultFormat = pDevice->playback.usingDefaultFormat;
- data.usingDefaultChannels = pDevice->playback.usingDefaultChannels;
- data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
- data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap;
- data.shareMode = pDevice->playback.shareMode;
- data.registerStopEvent = (pDevice->type != ma_device_type_duplex);
-
- if (disposePreviousAudioUnit) {
- ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
- ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+ } else {
+ if (pDataFormat->bitsPerSample == 8) {
+ *pFormat = ma_format_u8;
+ } else if (pDataFormat->bitsPerSample == 16) {
+ *pFormat = ma_format_s16;
+ } else if (pDataFormat->bitsPerSample == 24) {
+ *pFormat = ma_format_s24;
+ } else if (pDataFormat->bitsPerSample == 32) {
+ *pFormat = ma_format_s32;
}
-
- #if defined(MA_APPLE_DESKTOP)
- pDevice->coreaudio.deviceObjectIDPlayback = (ma_uint32)data.deviceObjectID;
- #endif
- pDevice->coreaudio.audioUnitPlayback = (ma_ptr)data.audioUnit;
}
- data.bufferSizeInFramesIn = pDevice->coreaudio.originalBufferSizeInFrames;
- data.bufferSizeInMillisecondsIn = pDevice->coreaudio.originalBufferSizeInMilliseconds;
- data.periodsIn = pDevice->coreaudio.originalPeriods;
- result = ma_device_init_internal__coreaudio(pDevice->pContext, deviceType, NULL, &data, (void*)pDevice);
- if (result != MA_SUCCESS) {
- return result;
- }
-
+ *pChannels = pDataFormat->numChannels;
+ *pSampleRate = ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec / 1000;
+ ma_channel_mask_to_channel_map__opensl(pDataFormat->channelMask, pDataFormat->numChannels, pChannelMap);
+
return MA_SUCCESS;
}
-
-ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+static ma_result ma_device_init__opensl(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
{
- ma_result result;
+#ifdef MA_ANDROID
+ SLDataLocator_AndroidSimpleBufferQueue queue;
+ SLresult resultSL;
+ ma_uint32 periodSizeInFrames;
+ size_t bufferSizeInBytes;
+ const SLInterfaceID itfIDs1[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
+ const SLboolean itfIDsRequired1[] = {SL_BOOLEAN_TRUE};
+#endif
- ma_assert(pContext != NULL);
- ma_assert(pConfig != NULL);
- ma_assert(pDevice != NULL);
+ (void)pContext;
- /* No exclusive mode with the Core Audio backend for now. */
- if (((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive) ||
- ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive)) {
+ MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to initialize a new device. */
+ if (g_maOpenSLInitCounter == 0) {
+ return MA_INVALID_OPERATION;
+ }
+
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
+ }
+
+ /*
+ For now, only supporting Android implementations of OpenSL|ES since that's the only one I've
+ been able to test with and I currently depend on Android-specific extensions (simple buffer
+ queues).
+ */
+#ifdef MA_ANDROID
+ /* No exclusive mode with OpenSL|ES. */
+ if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
+ ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
return MA_SHARE_MODE_NOT_SUPPORTED;
}
-
- /* Capture needs to be initialized first. */
+
+ /* Now we can start initializing the device properly. */
+ MA_ASSERT(pDevice != NULL);
+ MA_ZERO_OBJECT(&pDevice->opensl);
+
+ queue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
+ queue.numBuffers = pConfig->periods;
+
+
if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- ma_device_init_internal_data__coreaudio data;
- data.formatIn = pConfig->capture.format;
- data.channelsIn = pConfig->capture.channels;
- data.sampleRateIn = pConfig->sampleRate;
- ma_copy_memory(data.channelMapIn, pConfig->capture.channelMap, sizeof(pConfig->capture.channelMap));
- data.usingDefaultFormat = pDevice->capture.usingDefaultFormat;
- data.usingDefaultChannels = pDevice->capture.usingDefaultChannels;
- data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
- data.usingDefaultChannelMap = pDevice->capture.usingDefaultChannelMap;
- data.shareMode = pConfig->capture.shareMode;
- data.bufferSizeInFramesIn = pConfig->bufferSizeInFrames;
- data.bufferSizeInMillisecondsIn = pConfig->bufferSizeInMilliseconds;
- data.registerStopEvent = MA_TRUE;
-
- result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_capture, pConfig->capture.pDeviceID, &data, (void*)pDevice);
- if (result != MA_SUCCESS) {
- return result;
+ ma_SLDataFormat_PCM pcm;
+ SLDataLocator_IODevice locatorDevice;
+ SLDataSource source;
+ SLDataSink sink;
+
+ ma_SLDataFormat_PCM_init__opensl(pConfig->capture.format, pConfig->capture.channels, pConfig->sampleRate, pConfig->capture.channelMap, &pcm);
+
+ locatorDevice.locatorType = SL_DATALOCATOR_IODEVICE;
+ locatorDevice.deviceType = SL_IODEVICE_AUDIOINPUT;
+ locatorDevice.deviceID = (pConfig->capture.pDeviceID == NULL) ? SL_DEFAULTDEVICEID_AUDIOINPUT : pConfig->capture.pDeviceID->opensl;
+ locatorDevice.device = NULL;
+
+ source.pLocator = &locatorDevice;
+ source.pFormat = NULL;
+
+ sink.pLocator = &queue;
+ sink.pFormat = (SLDataFormat_PCM*)&pcm;
+
+ resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, 1, itfIDs1, itfIDsRequired1);
+ if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) {
+ /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */
+ pcm.formatType = SL_DATAFORMAT_PCM;
+ pcm.numChannels = 1;
+ ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16; /* The name of the sample rate variable is different between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM. */
+ pcm.bitsPerSample = 16;
+ pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. */
+ pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+ resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, 1, itfIDs1, itfIDsRequired1);
}
-
- pDevice->coreaudio.isDefaultCaptureDevice = (pConfig->capture.pDeviceID == NULL);
- #if defined(MA_APPLE_DESKTOP)
- pDevice->coreaudio.deviceObjectIDCapture = (ma_uint32)data.deviceObjectID;
- #endif
- pDevice->coreaudio.audioUnitCapture = (ma_ptr)data.audioUnit;
- pDevice->coreaudio.pAudioBufferList = (ma_ptr)data.pAudioBufferList;
-
- pDevice->capture.internalFormat = data.formatOut;
- pDevice->capture.internalChannels = data.channelsOut;
- pDevice->capture.internalSampleRate = data.sampleRateOut;
- ma_copy_memory(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
- pDevice->capture.internalBufferSizeInFrames = data.bufferSizeInFramesOut;
- pDevice->capture.internalPeriods = data.periodsOut;
-
- /* TODO: This needs to be made global. */
- #if defined(MA_APPLE_DESKTOP)
- /*
- If we are using the default device we'll need to listen for changes to the system's default device so we can seemlessly
- switch the device in the background.
- */
- if (pConfig->capture.pDeviceID == NULL) {
- AudioObjectPropertyAddress propAddress;
- propAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;
- propAddress.mScope = kAudioObjectPropertyScopeGlobal;
- propAddress.mElement = kAudioObjectPropertyElementMaster;
- ((ma_AudioObjectAddPropertyListener_proc)pDevice->pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, pDevice);
+
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio recorder.", ma_result_from_OpenSL(resultSL));
}
- #endif
+
+ resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Realize((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_BOOLEAN_FALSE);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio recorder.", ma_result_from_OpenSL(resultSL));
+ }
+
+ resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_IID_RECORD, &pDevice->opensl.pAudioRecorder);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_RECORD interface.", ma_result_from_OpenSL(resultSL));
+ }
+
+ resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueueCapture);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface.", ma_result_from_OpenSL(resultSL));
+ }
+
+ resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, ma_buffer_queue_callback_capture__opensl_android, pDevice);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback.", ma_result_from_OpenSL(resultSL));
+ }
+
+ /* The internal format is determined by the "pcm" object. */
+ ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDevice->capture.internalFormat, &pDevice->capture.internalChannels, &pDevice->capture.internalSampleRate, pDevice->capture.internalChannelMap);
+
+ /* Buffer. */
+ periodSizeInFrames = pConfig->periodSizeInFrames;
+ if (periodSizeInFrames == 0) {
+ periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pDevice->capture.internalSampleRate);
+ }
+ pDevice->capture.internalPeriods = pConfig->periods;
+ pDevice->capture.internalPeriodSizeInFrames = periodSizeInFrames;
+ pDevice->opensl.currentBufferIndexCapture = 0;
+
+ bufferSizeInBytes = pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels) * pDevice->capture.internalPeriods;
+ pDevice->opensl.pBufferCapture = (ma_uint8*)ma__calloc_from_callbacks(bufferSizeInBytes, &pContext->allocationCallbacks);
+ if (pDevice->opensl.pBufferCapture == NULL) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer.", MA_OUT_OF_MEMORY);
+ }
+ MA_ZERO_MEMORY(pDevice->opensl.pBufferCapture, bufferSizeInBytes);
}
-
- /* Playback. */
+
if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- ma_device_init_internal_data__coreaudio data;
- data.formatIn = pConfig->playback.format;
- data.channelsIn = pConfig->playback.channels;
- data.sampleRateIn = pConfig->sampleRate;
- ma_copy_memory(data.channelMapIn, pConfig->playback.channelMap, sizeof(pConfig->playback.channelMap));
- data.usingDefaultFormat = pDevice->playback.usingDefaultFormat;
- data.usingDefaultChannels = pDevice->playback.usingDefaultChannels;
- data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate;
- data.usingDefaultChannelMap = pDevice->playback.usingDefaultChannelMap;
- data.shareMode = pConfig->playback.shareMode;
-
- /* In full-duplex mode we want the playback buffer to be the same size as the capture buffer. */
- if (pConfig->deviceType == ma_device_type_duplex) {
- data.bufferSizeInFramesIn = pDevice->capture.internalBufferSizeInFrames;
- data.periodsIn = pDevice->capture.internalPeriods;
- data.registerStopEvent = MA_FALSE;
- } else {
- data.bufferSizeInFramesIn = pConfig->bufferSizeInFrames;
- data.bufferSizeInMillisecondsIn = pConfig->bufferSizeInMilliseconds;
- data.periodsIn = pConfig->periods;
- data.registerStopEvent = MA_TRUE;
+ ma_SLDataFormat_PCM pcm;
+ SLDataSource source;
+ SLDataLocator_OutputMix outmixLocator;
+ SLDataSink sink;
+
+ ma_SLDataFormat_PCM_init__opensl(pConfig->playback.format, pConfig->playback.channels, pConfig->sampleRate, pConfig->playback.channelMap, &pcm);
+
+ resultSL = (*g_maEngineSL)->CreateOutputMix(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pOutputMixObj, 0, NULL, NULL);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create output mix.", ma_result_from_OpenSL(resultSL));
}
-
- result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_playback, pConfig->playback.pDeviceID, &data, (void*)pDevice);
- if (result != MA_SUCCESS) {
- if (pConfig->deviceType == ma_device_type_duplex) {
- ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
- if (pDevice->coreaudio.pAudioBufferList) {
- ma_free(pDevice->coreaudio.pAudioBufferList);
- }
- }
- return result;
+
+ resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Realize((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_BOOLEAN_FALSE);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize output mix object.", ma_result_from_OpenSL(resultSL));
+ }
+
+ resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->GetInterface((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_IID_OUTPUTMIX, &pDevice->opensl.pOutputMix);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_OUTPUTMIX interface.", ma_result_from_OpenSL(resultSL));
+ }
+
+ /* Set the output device. */
+ if (pConfig->playback.pDeviceID != NULL) {
+ SLuint32 deviceID_OpenSL = pConfig->playback.pDeviceID->opensl;
+ MA_OPENSL_OUTPUTMIX(pDevice->opensl.pOutputMix)->ReRoute((SLOutputMixItf)pDevice->opensl.pOutputMix, 1, &deviceID_OpenSL);
}
- pDevice->coreaudio.isDefaultPlaybackDevice = (pConfig->playback.pDeviceID == NULL);
- #if defined(MA_APPLE_DESKTOP)
- pDevice->coreaudio.deviceObjectIDPlayback = (ma_uint32)data.deviceObjectID;
- #endif
- pDevice->coreaudio.audioUnitPlayback = (ma_ptr)data.audioUnit;
-
- pDevice->playback.internalFormat = data.formatOut;
- pDevice->playback.internalChannels = data.channelsOut;
- pDevice->playback.internalSampleRate = data.sampleRateOut;
- ma_copy_memory(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut));
- pDevice->playback.internalBufferSizeInFrames = data.bufferSizeInFramesOut;
- pDevice->playback.internalPeriods = data.periodsOut;
-
- /* TODO: This needs to be made global. */
- #if defined(MA_APPLE_DESKTOP)
- /*
- If we are using the default device we'll need to listen for changes to the system's default device so we can seemlessly
- switch the device in the background.
- */
- if (pConfig->playback.pDeviceID == NULL) {
- AudioObjectPropertyAddress propAddress;
- propAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
- propAddress.mScope = kAudioObjectPropertyScopeGlobal;
- propAddress.mElement = kAudioObjectPropertyElementMaster;
- ((ma_AudioObjectAddPropertyListener_proc)pDevice->pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, pDevice);
+ source.pLocator = &queue;
+ source.pFormat = (SLDataFormat_PCM*)&pcm;
+
+ outmixLocator.locatorType = SL_DATALOCATOR_OUTPUTMIX;
+ outmixLocator.outputMix = (SLObjectItf)pDevice->opensl.pOutputMixObj;
+
+ sink.pLocator = &outmixLocator;
+ sink.pFormat = NULL;
+
+ resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, 1, itfIDs1, itfIDsRequired1);
+ if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) {
+ /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */
+ pcm.formatType = SL_DATAFORMAT_PCM;
+ pcm.numChannels = 2;
+ ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16;
+ pcm.bitsPerSample = 16;
+ pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. */
+ pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+ resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, 1, itfIDs1, itfIDsRequired1);
}
- #endif
- }
-
- pDevice->coreaudio.originalBufferSizeInFrames = pConfig->bufferSizeInFrames;
- pDevice->coreaudio.originalBufferSizeInMilliseconds = pConfig->bufferSizeInMilliseconds;
- pDevice->coreaudio.originalPeriods = pConfig->periods;
-
- /*
- When stopping the device, a callback is called on another thread. We need to wait for this callback
- before returning from ma_device_stop(). This event is used for this.
- */
- ma_event_init(pContext, &pDevice->coreaudio.stopEvent);
- /* Need a ring buffer for duplex mode. */
- if (pConfig->deviceType == ma_device_type_duplex) {
- ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames);
- ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->coreaudio.duplexRB);
- if (result != MA_SUCCESS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[Core Audio] Failed to initialize ring buffer.", result);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio player.", ma_result_from_OpenSL(resultSL));
}
- }
- return MA_SUCCESS;
-}
+ resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Realize((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_BOOLEAN_FALSE);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio player.", ma_result_from_OpenSL(resultSL));
+ }
+ resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_IID_PLAY, &pDevice->opensl.pAudioPlayer);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_PLAY interface.", ma_result_from_OpenSL(resultSL));
+ }
-ma_result ma_device_start__coreaudio(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
-
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
+ resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueuePlayback);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface.", ma_result_from_OpenSL(resultSL));
}
- }
-
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
- if (status != noErr) {
- if (pDevice->type == ma_device_type_duplex) {
- ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
- }
- return ma_result_from_OSStatus(status);
+
+ resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, ma_buffer_queue_callback_playback__opensl_android, pDevice);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback.", ma_result_from_OpenSL(resultSL));
}
- }
-
- return MA_SUCCESS;
-}
-ma_result ma_device_stop__coreaudio(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
-
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- OSStatus status = ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
+ /* The internal format is determined by the "pcm" object. */
+ ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDevice->playback.internalFormat, &pDevice->playback.internalChannels, &pDevice->playback.internalSampleRate, pDevice->playback.internalChannelMap);
+
+ /* Buffer. */
+ periodSizeInFrames = pConfig->periodSizeInFrames;
+ if (periodSizeInFrames == 0) {
+ periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pDevice->playback.internalSampleRate);
}
- }
-
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- OSStatus status = ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
- if (status != noErr) {
- return ma_result_from_OSStatus(status);
+ pDevice->playback.internalPeriods = pConfig->periods;
+ pDevice->playback.internalPeriodSizeInFrames = periodSizeInFrames;
+ pDevice->opensl.currentBufferIndexPlayback = 0;
+
+ bufferSizeInBytes = pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels) * pDevice->playback.internalPeriods;
+ pDevice->opensl.pBufferPlayback = (ma_uint8*)ma__calloc_from_callbacks(bufferSizeInBytes, &pContext->allocationCallbacks);
+ if (pDevice->opensl.pBufferPlayback == NULL) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer.", MA_OUT_OF_MEMORY);
}
+ MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, bufferSizeInBytes);
}
-
- /* We need to wait for the callback to finish before returning. */
- ma_event_wait(&pDevice->coreaudio.stopEvent);
- return MA_SUCCESS;
-}
+ if (pConfig->deviceType == ma_device_type_duplex) {
+ ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames) * pDevice->capture.internalPeriods;
+ ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->opensl.duplexRB);
+ if (result != MA_SUCCESS) {
+ ma_device_uninit__opensl(pDevice);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to initialize ring buffer.", result);
+ }
-ma_result ma_context_uninit__coreaudio(ma_context* pContext)
-{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_coreaudio);
-
-#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
- ma_dlclose(pContext, pContext->coreaudio.hAudioUnit);
- ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
- ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
-#endif
+        /* We need a period to act as a buffer for cases where the playback and capture devices end up desyncing. */
+ {
+ ma_uint32 marginSizeInFrames = rbSizeInFrames / pDevice->capture.internalPeriods;
+ void* pMarginData;
+ ma_pcm_rb_acquire_write(&pDevice->opensl.duplexRB, &marginSizeInFrames, &pMarginData);
+ {
+ MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels));
+ }
+ ma_pcm_rb_commit_write(&pDevice->opensl.duplexRB, marginSizeInFrames, pMarginData);
+ }
+ }
- (void)pContext;
return MA_SUCCESS;
+#else
+ return MA_NO_BACKEND; /* Non-Android implementations are not supported. */
+#endif
}
-ma_result ma_context_init__coreaudio(const ma_context_config* pConfig, ma_context* pContext)
+static ma_result ma_device_start__opensl(ma_device* pDevice)
{
- ma_assert(pContext != NULL);
-
- (void)pConfig;
+ SLresult resultSL;
+ size_t periodSizeInBytes;
+ ma_uint32 iPeriod;
-#if defined(MA_APPLE_MOBILE)
- @autoreleasepool {
- AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
- ma_assert(pAudioSession != NULL);
+ MA_ASSERT(pDevice != NULL);
- [pAudioSession setCategory: AVAudioSessionCategoryPlayAndRecord error:nil];
-
- /* By default we want miniaudio to use the speakers instead of the receiver. In the future this may be customizable. */
- ma_bool32 useSpeakers = MA_TRUE;
- if (useSpeakers) {
- [pAudioSession overrideOutputAudioPort:AVAudioSessionPortOverrideSpeaker error:nil];
- }
- }
-#endif
-
-#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
- pContext->coreaudio.hCoreFoundation = ma_dlopen(pContext, "CoreFoundation.framework/CoreFoundation");
- if (pContext->coreaudio.hCoreFoundation == NULL) {
- return MA_API_NOT_FOUND;
- }
-
- pContext->coreaudio.CFStringGetCString = ma_dlsym(pContext, pContext->coreaudio.hCoreFoundation, "CFStringGetCString");
-
-
- pContext->coreaudio.hCoreAudio = ma_dlopen(pContext, "CoreAudio.framework/CoreAudio");
- if (pContext->coreaudio.hCoreAudio == NULL) {
- ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
- return MA_API_NOT_FOUND;
+ MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to start the device. */
+ if (g_maOpenSLInitCounter == 0) {
+ return MA_INVALID_OPERATION;
}
-
- pContext->coreaudio.AudioObjectGetPropertyData = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyData");
- pContext->coreaudio.AudioObjectGetPropertyDataSize = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyDataSize");
- pContext->coreaudio.AudioObjectSetPropertyData = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectSetPropertyData");
- pContext->coreaudio.AudioObjectAddPropertyListener = ma_dlsym(pContext, pContext->coreaudio.hCoreAudio, "AudioObjectAddPropertyListener");
- /*
- It looks like Apple has moved some APIs from AudioUnit into AudioToolbox on more recent versions of macOS. They are still
- defined in AudioUnit, but just in case they decide to remove them from there entirely I'm going to implement a fallback.
- The way it'll work is that it'll first try AudioUnit, and if the required symbols are not present there we'll fall back to
- AudioToolbox.
- */
- pContext->coreaudio.hAudioUnit = ma_dlopen(pContext, "AudioUnit.framework/AudioUnit");
- if (pContext->coreaudio.hAudioUnit == NULL) {
- ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
- ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
- return MA_API_NOT_FOUND;
- }
-
- if (ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentFindNext") == NULL) {
- /* Couldn't find the required symbols in AudioUnit, so fall back to AudioToolbox. */
- ma_dlclose(pContext, pContext->coreaudio.hAudioUnit);
- pContext->coreaudio.hAudioUnit = ma_dlopen(pContext, "AudioToolbox.framework/AudioToolbox");
- if (pContext->coreaudio.hAudioUnit == NULL) {
- ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
- ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
- return MA_API_NOT_FOUND;
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_RECORDING);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal capture device.", ma_result_from_OpenSL(resultSL));
}
- }
-
- pContext->coreaudio.AudioComponentFindNext = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentFindNext");
- pContext->coreaudio.AudioComponentInstanceDispose = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentInstanceDispose");
- pContext->coreaudio.AudioComponentInstanceNew = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioComponentInstanceNew");
- pContext->coreaudio.AudioOutputUnitStart = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioOutputUnitStart");
- pContext->coreaudio.AudioOutputUnitStop = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioOutputUnitStop");
- pContext->coreaudio.AudioUnitAddPropertyListener = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitAddPropertyListener");
- pContext->coreaudio.AudioUnitGetPropertyInfo = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitGetPropertyInfo");
- pContext->coreaudio.AudioUnitGetProperty = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitGetProperty");
- pContext->coreaudio.AudioUnitSetProperty = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitSetProperty");
- pContext->coreaudio.AudioUnitInitialize = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitInitialize");
- pContext->coreaudio.AudioUnitRender = ma_dlsym(pContext, pContext->coreaudio.hAudioUnit, "AudioUnitRender");
-#else
- pContext->coreaudio.CFStringGetCString = (ma_proc)CFStringGetCString;
-
- #if defined(MA_APPLE_DESKTOP)
- pContext->coreaudio.AudioObjectGetPropertyData = (ma_proc)AudioObjectGetPropertyData;
- pContext->coreaudio.AudioObjectGetPropertyDataSize = (ma_proc)AudioObjectGetPropertyDataSize;
- pContext->coreaudio.AudioObjectSetPropertyData = (ma_proc)AudioObjectSetPropertyData;
- pContext->coreaudio.AudioObjectAddPropertyListener = (ma_proc)AudioObjectAddPropertyListener;
- #endif
-
- pContext->coreaudio.AudioComponentFindNext = (ma_proc)AudioComponentFindNext;
- pContext->coreaudio.AudioComponentInstanceDispose = (ma_proc)AudioComponentInstanceDispose;
- pContext->coreaudio.AudioComponentInstanceNew = (ma_proc)AudioComponentInstanceNew;
- pContext->coreaudio.AudioOutputUnitStart = (ma_proc)AudioOutputUnitStart;
- pContext->coreaudio.AudioOutputUnitStop = (ma_proc)AudioOutputUnitStop;
- pContext->coreaudio.AudioUnitAddPropertyListener = (ma_proc)AudioUnitAddPropertyListener;
- pContext->coreaudio.AudioUnitGetPropertyInfo = (ma_proc)AudioUnitGetPropertyInfo;
- pContext->coreaudio.AudioUnitGetProperty = (ma_proc)AudioUnitGetProperty;
- pContext->coreaudio.AudioUnitSetProperty = (ma_proc)AudioUnitSetProperty;
- pContext->coreaudio.AudioUnitInitialize = (ma_proc)AudioUnitInitialize;
- pContext->coreaudio.AudioUnitRender = (ma_proc)AudioUnitRender;
-#endif
- pContext->isBackendAsynchronous = MA_TRUE;
-
- pContext->onUninit = ma_context_uninit__coreaudio;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__coreaudio;
- pContext->onEnumDevices = ma_context_enumerate_devices__coreaudio;
- pContext->onGetDeviceInfo = ma_context_get_device_info__coreaudio;
- pContext->onDeviceInit = ma_device_init__coreaudio;
- pContext->onDeviceUninit = ma_device_uninit__coreaudio;
- pContext->onDeviceStart = ma_device_start__coreaudio;
- pContext->onDeviceStop = ma_device_stop__coreaudio;
-
- /* Audio component. */
- {
- AudioComponentDescription desc;
- desc.componentType = kAudioUnitType_Output;
- #if defined(MA_APPLE_DESKTOP)
- desc.componentSubType = kAudioUnitSubType_HALOutput;
- #else
- desc.componentSubType = kAudioUnitSubType_RemoteIO;
- #endif
- desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- desc.componentFlags = 0;
- desc.componentFlagsMask = 0;
-
- pContext->coreaudio.component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc);
- if (pContext->coreaudio.component == NULL) {
- #if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
- ma_dlclose(pContext, pContext->coreaudio.hAudioUnit);
- ma_dlclose(pContext, pContext->coreaudio.hCoreAudio);
- ma_dlclose(pContext, pContext->coreaudio.hCoreFoundation);
- #endif
- return MA_FAILED_TO_INIT_BACKEND;
+ periodSizeInBytes = pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) {
+ resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pDevice->opensl.pBufferCapture + (periodSizeInBytes * iPeriod), periodSizeInBytes);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for capture device.", ma_result_from_OpenSL(resultSL));
+ }
+ }
+ }
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_PLAYING);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal playback device.", ma_result_from_OpenSL(resultSL));
+ }
+
+        /* In playback mode (no duplex) we need to load some initial buffers. In duplex mode we need to enqueue silent buffers. */
+ if (pDevice->type == ma_device_type_duplex) {
+ MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
+ } else {
+ ma_device__read_frames_from_client(pDevice, pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods, pDevice->opensl.pBufferPlayback);
+ }
+
+ periodSizeInBytes = pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) {
+ resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pDevice->opensl.pBufferPlayback + (periodSizeInBytes * iPeriod), periodSizeInBytes);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED);
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for playback device.", ma_result_from_OpenSL(resultSL));
+ }
}
}
return MA_SUCCESS;
}
-#endif /* Core Audio */
+static ma_result ma_device_drain__opensl(ma_device* pDevice, ma_device_type deviceType)
+{
+ SLAndroidSimpleBufferQueueItf pBufferQueue;
+ MA_ASSERT(deviceType == ma_device_type_capture || deviceType == ma_device_type_playback);
-/******************************************************************************
+    if (deviceType == ma_device_type_capture) {
+ pBufferQueue = (SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture;
+ pDevice->opensl.isDrainingCapture = MA_TRUE;
+ } else {
+ pBufferQueue = (SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback;
+ pDevice->opensl.isDrainingPlayback = MA_TRUE;
+ }
-sndio Backend
+ for (;;) {
+ SLAndroidSimpleBufferQueueState state;
-******************************************************************************/
-#ifdef MA_HAS_SNDIO
-#include
-#include
+ MA_OPENSL_BUFFERQUEUE(pBufferQueue)->GetState(pBufferQueue, &state);
+ if (state.count == 0) {
+ break;
+ }
-/*
-Only supporting OpenBSD. This did not work very well at all on FreeBSD when I tried it. Not sure if this is due
-to miniaudio's implementation or if it's some kind of system configuration issue, but basically the default device
-just doesn't emit any sound, or at times you'll hear tiny pieces. I will consider enabling this when there's
-demand for it or if I can get it tested and debugged more thoroughly.
-*/
-#if 0
-#if defined(__NetBSD__) || defined(__OpenBSD__)
-#include
-#endif
-#if defined(__FreeBSD__) || defined(__DragonFly__)
-#include
-#endif
-#endif
+ ma_sleep(10);
+ }
-#define MA_SIO_DEVANY "default"
-#define MA_SIO_PLAY 1
-#define MA_SIO_REC 2
-#define MA_SIO_NENC 8
-#define MA_SIO_NCHAN 8
-#define MA_SIO_NRATE 16
-#define MA_SIO_NCONF 4
+    if (deviceType == ma_device_type_capture) {
+ pDevice->opensl.isDrainingCapture = MA_FALSE;
+ } else {
+ pDevice->opensl.isDrainingPlayback = MA_FALSE;
+ }
-struct ma_sio_hdl; /* <-- Opaque */
+ return MA_SUCCESS;
+}
-struct ma_sio_par
+static ma_result ma_device_stop__opensl(ma_device* pDevice)
{
- unsigned int bits;
- unsigned int bps;
- unsigned int sig;
- unsigned int le;
- unsigned int msb;
- unsigned int rchan;
- unsigned int pchan;
- unsigned int rate;
- unsigned int bufsz;
- unsigned int xrun;
- unsigned int round;
- unsigned int appbufsz;
- int __pad[3];
- unsigned int __magic;
-};
+ SLresult resultSL;
+ ma_stop_proc onStop;
-struct ma_sio_enc
-{
- unsigned int bits;
- unsigned int bps;
- unsigned int sig;
- unsigned int le;
- unsigned int msb;
-};
+ MA_ASSERT(pDevice != NULL);
-struct ma_sio_conf
-{
- unsigned int enc;
- unsigned int rchan;
- unsigned int pchan;
- unsigned int rate;
-};
+ MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before stopping/uninitializing the device. */
+ if (g_maOpenSLInitCounter == 0) {
+ return MA_INVALID_OPERATION;
+ }
-struct ma_sio_cap
-{
- struct ma_sio_enc enc[MA_SIO_NENC];
- unsigned int rchan[MA_SIO_NCHAN];
- unsigned int pchan[MA_SIO_NCHAN];
- unsigned int rate[MA_SIO_NRATE];
- int __pad[7];
- unsigned int nconf;
- struct ma_sio_conf confs[MA_SIO_NCONF];
-};
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ma_device_drain__opensl(pDevice, ma_device_type_capture);
-typedef struct ma_sio_hdl* (* ma_sio_open_proc) (const char*, unsigned int, int);
-typedef void (* ma_sio_close_proc) (struct ma_sio_hdl*);
-typedef int (* ma_sio_setpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*);
-typedef int (* ma_sio_getpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*);
-typedef int (* ma_sio_getcap_proc) (struct ma_sio_hdl*, struct ma_sio_cap*);
-typedef size_t (* ma_sio_write_proc) (struct ma_sio_hdl*, const void*, size_t);
-typedef size_t (* ma_sio_read_proc) (struct ma_sio_hdl*, void*, size_t);
-typedef int (* ma_sio_start_proc) (struct ma_sio_hdl*);
-typedef int (* ma_sio_stop_proc) (struct ma_sio_hdl*);
-typedef int (* ma_sio_initpar_proc)(struct ma_sio_par*);
+ resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal capture device.", ma_result_from_OpenSL(resultSL));
+ }
-ma_format ma_format_from_sio_enc__sndio(unsigned int bits, unsigned int bps, unsigned int sig, unsigned int le, unsigned int msb)
-{
- /* We only support native-endian right now. */
- if ((ma_is_little_endian() && le == 0) || (ma_is_big_endian() && le == 1)) {
- return ma_format_unknown;
- }
-
- if (bits == 8 && bps == 1 && sig == 0) {
- return ma_format_u8;
- }
- if (bits == 16 && bps == 2 && sig == 1) {
- return ma_format_s16;
+ MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture);
}
- if (bits == 24 && bps == 3 && sig == 1) {
- return ma_format_s24;
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_device_drain__opensl(pDevice, ma_device_type_playback);
+
+ resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal playback device.", ma_result_from_OpenSL(resultSL));
+ }
+
+ MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback);
}
- if (bits == 24 && bps == 4 && sig == 1 && msb == 0) {
- /*return ma_format_s24_32;*/
+
+ /* Make sure the client is aware that the device has stopped. There may be an OpenSL|ES callback for this, but I haven't found it. */
+ onStop = pDevice->onStop;
+ if (onStop) {
+ onStop(pDevice);
}
- if (bits == 32 && bps == 4 && sig == 1) {
- return ma_format_s32;
+
+ return MA_SUCCESS;
+}
+
+
+static ma_result ma_context_uninit__opensl(ma_context* pContext)
+{
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_opensl);
+ (void)pContext;
+
+ /* Uninit global data. */
+ if (g_maOpenSLInitCounter > 0) {
+ if (ma_atomic_decrement_32(&g_maOpenSLInitCounter) == 0) {
+ (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL);
+ }
}
-
- return ma_format_unknown;
+
+ return MA_SUCCESS;
}
-ma_format ma_find_best_format_from_sio_cap__sndio(struct ma_sio_cap* caps)
+static ma_result ma_context_init__opensl(const ma_context_config* pConfig, ma_context* pContext)
{
- ma_format bestFormat;
- unsigned int iConfig;
+ MA_ASSERT(pContext != NULL);
- ma_assert(caps != NULL);
-
- bestFormat = ma_format_unknown;
- for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) {
- unsigned int iEncoding;
- for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) {
- unsigned int bits;
- unsigned int bps;
- unsigned int sig;
- unsigned int le;
- unsigned int msb;
- ma_format format;
+ (void)pConfig;
- if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) {
- continue;
- }
-
- bits = caps->enc[iEncoding].bits;
- bps = caps->enc[iEncoding].bps;
- sig = caps->enc[iEncoding].sig;
- le = caps->enc[iEncoding].le;
- msb = caps->enc[iEncoding].msb;
- format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb);
- if (format == ma_format_unknown) {
- continue; /* Format not supported. */
- }
-
- if (bestFormat == ma_format_unknown) {
- bestFormat = format;
- } else {
- if (ma_get_format_priority_index(bestFormat) > ma_get_format_priority_index(format)) { /* <-- Lower = better. */
- bestFormat = format;
- }
- }
+ /* Initialize global data first if applicable. */
+ if (ma_atomic_increment_32(&g_maOpenSLInitCounter) == 1) {
+ SLresult resultSL = slCreateEngine(&g_maEngineObjectSL, 0, NULL, 0, NULL, NULL);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ ma_atomic_decrement_32(&g_maOpenSLInitCounter);
+ return ma_result_from_OpenSL(resultSL);
+ }
+
+ (*g_maEngineObjectSL)->Realize(g_maEngineObjectSL, SL_BOOLEAN_FALSE);
+
+ resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_ENGINE, &g_maEngineSL);
+ if (resultSL != SL_RESULT_SUCCESS) {
+ (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL);
+ ma_atomic_decrement_32(&g_maOpenSLInitCounter);
+ return ma_result_from_OpenSL(resultSL);
}
}
-
- return ma_format_unknown;
+
+ pContext->isBackendAsynchronous = MA_TRUE;
+
+ pContext->onUninit = ma_context_uninit__opensl;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__opensl;
+ pContext->onEnumDevices = ma_context_enumerate_devices__opensl;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__opensl;
+ pContext->onDeviceInit = ma_device_init__opensl;
+ pContext->onDeviceUninit = ma_device_uninit__opensl;
+ pContext->onDeviceStart = ma_device_start__opensl;
+ pContext->onDeviceStop = ma_device_stop__opensl;
+
+ return MA_SUCCESS;
+}
+#endif /* OpenSL|ES */
+
+
+/******************************************************************************
+
+Web Audio Backend
+
+******************************************************************************/
+#ifdef MA_HAS_WEBAUDIO
+#include
+
+static ma_bool32 ma_is_capture_supported__webaudio()
+{
+ return EM_ASM_INT({
+ return (navigator.mediaDevices !== undefined && navigator.mediaDevices.getUserMedia !== undefined);
+ }, 0) != 0; /* Must pass in a dummy argument for C99 compatibility. */
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+void EMSCRIPTEN_KEEPALIVE ma_device_process_pcm_frames_capture__webaudio(ma_device* pDevice, int frameCount, float* pFrames)
+{
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_device__handle_duplex_callback_capture(pDevice, (ma_uint32)frameCount, pFrames, &pDevice->webaudio.duplexRB);
+ } else {
+ ma_device__send_frames_to_client(pDevice, (ma_uint32)frameCount, pFrames); /* Send directly to the client. */
+ }
+}
+
+void EMSCRIPTEN_KEEPALIVE ma_device_process_pcm_frames_playback__webaudio(ma_device* pDevice, int frameCount, float* pFrames)
+{
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_device__handle_duplex_callback_playback(pDevice, (ma_uint32)frameCount, pFrames, &pDevice->webaudio.duplexRB);
+ } else {
+ ma_device__read_frames_from_client(pDevice, (ma_uint32)frameCount, pFrames); /* Read directly from the device. */
+ }
+}
+#ifdef __cplusplus
}
+#endif
-ma_uint32 ma_find_best_channels_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat)
+static ma_bool32 ma_context_is_device_id_equal__webaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
{
- ma_uint32 maxChannels;
- unsigned int iConfig;
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pID0 != NULL);
+ MA_ASSERT(pID1 != NULL);
+ (void)pContext;
- ma_assert(caps != NULL);
- ma_assert(requiredFormat != ma_format_unknown);
-
- /* Just pick whatever configuration has the most channels. */
- maxChannels = 0;
- for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) {
- /* The encoding should be of requiredFormat. */
- unsigned int iEncoding;
- for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) {
- unsigned int iChannel;
- unsigned int bits;
- unsigned int bps;
- unsigned int sig;
- unsigned int le;
- unsigned int msb;
- ma_format format;
+ return ma_strcmp(pID0->webaudio, pID1->webaudio) == 0;
+}
- if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) {
- continue;
- }
-
- bits = caps->enc[iEncoding].bits;
- bps = caps->enc[iEncoding].bps;
- sig = caps->enc[iEncoding].sig;
- le = caps->enc[iEncoding].le;
- msb = caps->enc[iEncoding].msb;
- format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb);
- if (format != requiredFormat) {
- continue;
- }
-
- /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. */
- for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) {
- unsigned int chan = 0;
- unsigned int channels;
+static ma_result ma_context_enumerate_devices__webaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ ma_bool32 cbResult = MA_TRUE;
- if (deviceType == ma_device_type_playback) {
- chan = caps->confs[iConfig].pchan;
- } else {
- chan = caps->confs[iConfig].rchan;
- }
-
- if ((chan & (1UL << iChannel)) == 0) {
- continue;
- }
-
- if (deviceType == ma_device_type_playback) {
- channels = caps->pchan[iChannel];
- } else {
- channels = caps->rchan[iChannel];
- }
-
- if (maxChannels < channels) {
- maxChannels = channels;
- }
- }
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(callback != NULL);
+
+ /* Only supporting default devices for now. */
+
+ /* Playback. */
+ if (cbResult) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ }
+
+ /* Capture. */
+ if (cbResult) {
+ if (ma_is_capture_supported__webaudio()) {
+ ma_device_info deviceInfo;
+ MA_ZERO_OBJECT(&deviceInfo);
+ ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
}
}
-
- return maxChannels;
+
+ return MA_SUCCESS;
}
-ma_uint32 ma_find_best_sample_rate_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat, ma_uint32 requiredChannels)
+static ma_result ma_context_get_device_info__webaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
{
- ma_uint32 firstSampleRate;
- ma_uint32 bestSampleRate;
- unsigned int iConfig;
+ MA_ASSERT(pContext != NULL);
- ma_assert(caps != NULL);
- ma_assert(requiredFormat != ma_format_unknown);
- ma_assert(requiredChannels > 0);
- ma_assert(requiredChannels <= MA_MAX_CHANNELS);
-
- firstSampleRate = 0; /* <-- If the device does not support a standard rate we'll fall back to the first one that's found. */
- bestSampleRate = 0;
+ /* No exclusive mode with Web Audio. */
+ if (shareMode == ma_share_mode_exclusive) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
+ }
- for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) {
- /* The encoding should be of requiredFormat. */
- unsigned int iEncoding;
- for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) {
- unsigned int iChannel;
- unsigned int bits;
- unsigned int bps;
- unsigned int sig;
- unsigned int le;
- unsigned int msb;
- ma_format format;
+ if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) {
+ return MA_NO_DEVICE;
+ }
- if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) {
- continue;
- }
-
- bits = caps->enc[iEncoding].bits;
- bps = caps->enc[iEncoding].bps;
- sig = caps->enc[iEncoding].sig;
- le = caps->enc[iEncoding].le;
- msb = caps->enc[iEncoding].msb;
- format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb);
- if (format != requiredFormat) {
- continue;
- }
-
- /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. */
- for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) {
- unsigned int chan = 0;
- unsigned int channels;
- unsigned int iRate;
- if (deviceType == ma_device_type_playback) {
- chan = caps->confs[iConfig].pchan;
- } else {
- chan = caps->confs[iConfig].rchan;
- }
-
- if ((chan & (1UL << iChannel)) == 0) {
- continue;
- }
-
- if (deviceType == ma_device_type_playback) {
- channels = caps->pchan[iChannel];
- } else {
- channels = caps->rchan[iChannel];
- }
-
- if (channels != requiredChannels) {
- continue;
- }
-
- /* Getting here means we have found a compatible encoding/channel pair. */
- for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) {
- ma_uint32 rate = (ma_uint32)caps->rate[iRate];
- ma_uint32 ratePriority;
-
- if (firstSampleRate == 0) {
- firstSampleRate = rate;
- }
-
- /* Disregard this rate if it's not a standard one. */
- ratePriority = ma_get_standard_sample_rate_priority_index(rate);
- if (ratePriority == (ma_uint32)-1) {
- continue;
- }
-
- if (ma_get_standard_sample_rate_priority_index(bestSampleRate) > ratePriority) { /* Lower = better. */
- bestSampleRate = rate;
- }
- }
- }
- }
+ MA_ZERO_MEMORY(pDeviceInfo->id.webaudio, sizeof(pDeviceInfo->id.webaudio));
+
+ /* Only supporting default devices for now. */
+ (void)pDeviceID;
+ if (deviceType == ma_device_type_playback) {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ } else {
+ ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
}
-
- /* If a standard sample rate was not found just fall back to the first one that was iterated. */
- if (bestSampleRate == 0) {
- bestSampleRate = firstSampleRate;
+
+ /* Web Audio can support any number of channels and sample rates. It only supports f32 formats, however. */
+ pDeviceInfo->minChannels = 1;
+ pDeviceInfo->maxChannels = MA_MAX_CHANNELS;
+ if (pDeviceInfo->maxChannels > 32) {
+ pDeviceInfo->maxChannels = 32; /* Maximum output channel count is 32 for createScriptProcessor() (JavaScript). */
}
-
- return bestSampleRate;
-}
+ /* We can query the sample rate by just using a temporary audio context. */
+ pDeviceInfo->minSampleRate = EM_ASM_INT({
+ try {
+ var temp = new (window.AudioContext || window.webkitAudioContext)();
+ var sampleRate = temp.sampleRate;
+ temp.close();
+ return sampleRate;
+ } catch(e) {
+ return 0;
+ }
+ }, 0); /* Must pass in a dummy argument for C99 compatibility. */
+ pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate;
+ if (pDeviceInfo->minSampleRate == 0) {
+ return MA_NO_DEVICE;
+ }
-ma_bool32 ma_context_is_device_id_equal__sndio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
-{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
+ /* Web Audio only supports f32. */
+ pDeviceInfo->formatCount = 1;
+ pDeviceInfo->formats[0] = ma_format_f32;
- return ma_strcmp(pID0->sndio, pID1->sndio) == 0;
+ return MA_SUCCESS;
}
-ma_result ma_context_enumerate_devices__sndio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+
+static void ma_device_uninit_by_index__webaudio(ma_device* pDevice, ma_device_type deviceType, int deviceIndex)
{
- ma_bool32 isTerminating = MA_FALSE;
- struct ma_sio_hdl* handle;
+ MA_ASSERT(pDevice != NULL);
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
-
- /* sndio doesn't seem to have a good device enumeration API, so I'm therefore only enumerating over default devices for now. */
-
- /* Playback. */
- if (!isTerminating) {
- handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_PLAY, 0);
- if (handle != NULL) {
- /* Supports playback. */
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), MA_SIO_DEVANY);
- ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME);
-
- isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
-
- ((ma_sio_close_proc)pContext->sndio.sio_close)(handle);
+ EM_ASM({
+ var device = miniaudio.get_device_by_index($0);
+
+ /* Make sure all nodes are disconnected and marked for collection. */
+ if (device.scriptNode !== undefined) {
+ device.scriptNode.onaudioprocess = function(e) {}; /* We want to reset the callback to ensure it doesn't get called after AudioContext.close() has returned. Shouldn't happen since we're disconnecting, but just to be safe... */
+ device.scriptNode.disconnect();
+ device.scriptNode = undefined;
+ }
+ if (device.streamNode !== undefined) {
+ device.streamNode.disconnect();
+ device.streamNode = undefined;
+ }
+
+ /*
+ Stop the device. I think there is a chance the callback could get fired after calling this, hence why we want
+ to clear the callback before closing.
+ */
+ device.webaudio.close();
+ device.webaudio = undefined;
+
+ /* Can't forget to free the intermediary buffer. This is the buffer that's shared between JavaScript and C. */
+ if (device.intermediaryBuffer !== undefined) {
+ Module._free(device.intermediaryBuffer);
+ device.intermediaryBuffer = undefined;
+ device.intermediaryBufferView = undefined;
+ device.intermediaryBufferSizeInBytes = undefined;
}
+
+ /* Make sure the device is untracked so the slot can be reused later. */
+ miniaudio.untrack_device_by_index($0);
+ }, deviceIndex, deviceType);
+}
+
+static void ma_device_uninit__webaudio(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture);
}
-
- /* Capture. */
- if (!isTerminating) {
- handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_REC, 0);
- if (handle != NULL) {
- /* Supports capture. */
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), "default");
- ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME);
- isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
-
- ((ma_sio_close_proc)pContext->sndio.sio_close)(handle);
- }
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_playback, pDevice->webaudio.indexPlayback);
+ }
+
+ if (pDevice->type == ma_device_type_duplex) {
+ ma_pcm_rb_uninit(&pDevice->webaudio.duplexRB);
}
-
- return MA_SUCCESS;
}
-ma_result ma_context_get_device_info__sndio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+static ma_result ma_device_init_by_type__webaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice)
{
- char devid[256];
- struct ma_sio_hdl* handle;
- struct ma_sio_cap caps;
- unsigned int iConfig;
+ int deviceIndex;
+ ma_uint32 internalPeriodSizeInFrames;
- ma_assert(pContext != NULL);
- (void)shareMode;
-
- /* We need to open the device before we can get information about it. */
- if (pDeviceID == NULL) {
- ma_strcpy_s(devid, sizeof(devid), MA_SIO_DEVANY);
- ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (deviceType == ma_device_type_playback) ? MA_DEFAULT_PLAYBACK_DEVICE_NAME : MA_DEFAULT_CAPTURE_DEVICE_NAME);
- } else {
- ma_strcpy_s(devid, sizeof(devid), pDeviceID->sndio);
- ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), devid);
- }
-
- handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(devid, (deviceType == ma_device_type_playback) ? MA_SIO_PLAY : MA_SIO_REC, 0);
- if (handle == NULL) {
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(deviceType != ma_device_type_duplex);
+ MA_ASSERT(pDevice != NULL);
+
+ if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) {
return MA_NO_DEVICE;
}
-
- if (((ma_sio_getcap_proc)pContext->sndio.sio_getcap)(handle, &caps) == 0) {
- return MA_ERROR;
+
+ /* Try calculating an appropriate buffer size. */
+ internalPeriodSizeInFrames = pConfig->periodSizeInFrames;
+ if (internalPeriodSizeInFrames == 0) {
+ internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pConfig->sampleRate);
}
-
- for (iConfig = 0; iConfig < caps.nconf; iConfig += 1) {
+
+ /* The size of the buffer must be a power of 2 and between 256 and 16384. */
+ if (internalPeriodSizeInFrames < 256) {
+ internalPeriodSizeInFrames = 256;
+ } else if (internalPeriodSizeInFrames > 16384) {
+ internalPeriodSizeInFrames = 16384;
+ } else {
+ internalPeriodSizeInFrames = ma_next_power_of_2(internalPeriodSizeInFrames);
+ }
+
+ /* We create the device on the JavaScript side and reference it using an index. We use this to make it possible to reference the device between JavaScript and C. */
+ deviceIndex = EM_ASM_INT({
+ var channels = $0;
+ var sampleRate = $1;
+ var bufferSize = $2; /* In PCM frames. */
+ var isCapture = $3;
+ var pDevice = $4;
+
+ if (typeof(miniaudio) === 'undefined') {
+ return -1; /* Context not initialized. */
+ }
+
+ var device = {};
+
+ /* The AudioContext must be created in a suspended state. */
+ device.webaudio = new (window.AudioContext || window.webkitAudioContext)({sampleRate:sampleRate});
+ device.webaudio.suspend();
+
/*
- The main thing we care about is that the encoding is supported by miniaudio. If it is, we want to give
- preference to some formats over others.
+ We need an intermediary buffer which we use for JavaScript and C interop. This buffer stores interleaved f32 PCM data. Because it's passed between
+ JavaScript and C it needs to be allocated and freed using Module._malloc() and Module._free().
*/
- unsigned int iEncoding;
- unsigned int iChannel;
- unsigned int iRate;
+ device.intermediaryBufferSizeInBytes = channels * bufferSize * 4;
+ device.intermediaryBuffer = Module._malloc(device.intermediaryBufferSizeInBytes);
+ device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, device.intermediaryBuffer, device.intermediaryBufferSizeInBytes);
- for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) {
- unsigned int bits;
- unsigned int bps;
- unsigned int sig;
- unsigned int le;
- unsigned int msb;
- ma_format format;
- ma_bool32 formatExists = MA_FALSE;
- ma_uint32 iExistingFormat;
+ /*
+ Both playback and capture devices use a ScriptProcessorNode for performing per-sample operations.
- if ((caps.confs[iConfig].enc & (1UL << iEncoding)) == 0) {
- continue;
- }
-
- bits = caps.enc[iEncoding].bits;
- bps = caps.enc[iEncoding].bps;
- sig = caps.enc[iEncoding].sig;
- le = caps.enc[iEncoding].le;
- msb = caps.enc[iEncoding].msb;
- format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb);
- if (format == ma_format_unknown) {
- continue; /* Format not supported. */
- }
-
- /* Add this format if it doesn't already exist. */
- for (iExistingFormat = 0; iExistingFormat < pDeviceInfo->formatCount; iExistingFormat += 1) {
- if (pDeviceInfo->formats[iExistingFormat] == format) {
- formatExists = MA_TRUE;
- break;
+ ScriptProcessorNode is actually deprecated so this is likely to be temporary. The way this works for playback is very simple. You just set a callback
+ that's periodically fired, just like a normal audio callback function. But apparently this design is "flawed" and is now deprecated in favour of
+ something called AudioWorklets which _forces_ you to load a _separate_ .js file at run time... nice... Hopefully ScriptProcessorNode will continue to
+    work for years to come, but this may need to change to use AudioSourceBufferNode instead, which I think is what Emscripten uses for its built-in SDL
+ implementation. I'll be avoiding that insane AudioWorklet API like the plague...
+
+ For capture it is a bit unintuitive. We use the ScriptProccessorNode _only_ to get the raw PCM data. It is connected to an AudioContext just like the
+ playback case, however we just output silence to the AudioContext instead of passing any real data. It would make more sense to me to use the
+ MediaRecorder API, but unfortunately you need to specify a MIME time (Opus, Vorbis, etc.) for the binary blob that's returned to the client, but I've
+ been unable to figure out how to get this as raw PCM. The closest I can think is to use the MIME type for WAV files and just parse it, but I don't know
+ how well this would work. Although ScriptProccessorNode is deprecated, in practice it seems to have pretty good browser support so I'm leaving it like
+ this for now. If anyone knows how I could get raw PCM data using the MediaRecorder API please let me know!
+ */
+ device.scriptNode = device.webaudio.createScriptProcessor(bufferSize, channels, channels);
+
+ if (isCapture) {
+ device.scriptNode.onaudioprocess = function(e) {
+ if (device.intermediaryBuffer === undefined) {
+ return; /* This means the device has been uninitialized. */
}
- }
-
- if (!formatExists) {
- pDeviceInfo->formats[pDeviceInfo->formatCount++] = format;
- }
- }
-
- /* Channels. */
- for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) {
- unsigned int chan = 0;
- unsigned int channels;
- if (deviceType == ma_device_type_playback) {
- chan = caps.confs[iConfig].pchan;
- } else {
- chan = caps.confs[iConfig].rchan;
- }
-
- if ((chan & (1UL << iChannel)) == 0) {
- continue;
- }
-
- if (deviceType == ma_device_type_playback) {
- channels = caps.pchan[iChannel];
- } else {
- channels = caps.rchan[iChannel];
- }
-
- if (pDeviceInfo->minChannels > channels) {
- pDeviceInfo->minChannels = channels;
- }
- if (pDeviceInfo->maxChannels < channels) {
- pDeviceInfo->maxChannels = channels;
- }
- }
-
- /* Sample rates. */
- for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) {
- if ((caps.confs[iConfig].rate & (1UL << iRate)) != 0) {
- unsigned int rate = caps.rate[iRate];
- if (pDeviceInfo->minSampleRate > rate) {
- pDeviceInfo->minSampleRate = rate;
+                /* Make sure silence is output to the AudioContext destination. Not doing this will cause sound to come out of the speakers! */
+ for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
+ e.outputBuffer.getChannelData(iChannel).fill(0.0);
}
- if (pDeviceInfo->maxSampleRate < rate) {
- pDeviceInfo->maxSampleRate = rate;
+
+ /* There are some situations where we may want to send silence to the client. */
+ var sendSilence = false;
+ if (device.streamNode === undefined) {
+ sendSilence = true;
+ }
+
+ /* Sanity check. This will never happen, right? */
+ if (e.inputBuffer.numberOfChannels != channels) {
+                    console.log("Capture: Channel count mismatch. " + e.inputBuffer.numberOfChannels + " != " + channels + ". Sending silence.");
+ sendSilence = true;
+ }
+
+ /* This looped design guards against the situation where e.inputBuffer is a different size to the original buffer size. Should never happen in practice. */
+ var totalFramesProcessed = 0;
+ while (totalFramesProcessed < e.inputBuffer.length) {
+ var framesRemaining = e.inputBuffer.length - totalFramesProcessed;
+ var framesToProcess = framesRemaining;
+ if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) {
+ framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4);
+ }
+
+ /* We need to do the reverse of the playback case. We need to interleave the input data and copy it into the intermediary buffer. Then we send it to the client. */
+ if (sendSilence) {
+ device.intermediaryBufferView.fill(0.0);
+ } else {
+ for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) {
+ for (var iChannel = 0; iChannel < e.inputBuffer.numberOfChannels; ++iChannel) {
+ device.intermediaryBufferView[iFrame*channels + iChannel] = e.inputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame];
+ }
+ }
+ }
+
+ /* Send data to the client from our intermediary buffer. */
+ ccall("ma_device_process_pcm_frames_capture__webaudio", "undefined", ["number", "number", "number"], [pDevice, framesToProcess, device.intermediaryBuffer]);
+
+ totalFramesProcessed += framesToProcess;
+ }
+ };
+
+ navigator.mediaDevices.getUserMedia({audio:true, video:false})
+ .then(function(stream) {
+ device.streamNode = device.webaudio.createMediaStreamSource(stream);
+ device.streamNode.connect(device.scriptNode);
+ device.scriptNode.connect(device.webaudio.destination);
+ })
+ .catch(function(error) {
+ /* I think this should output silence... */
+ device.scriptNode.connect(device.webaudio.destination);
+ });
+ } else {
+ device.scriptNode.onaudioprocess = function(e) {
+ if (device.intermediaryBuffer === undefined) {
+ return; /* This means the device has been uninitialized. */
+ }
+
+ var outputSilence = false;
+
+ /* Sanity check. This will never happen, right? */
+ if (e.outputBuffer.numberOfChannels != channels) {
+                    console.log("Playback: Channel count mismatch. " + e.outputBuffer.numberOfChannels + " != " + channels + ". Outputting silence.");
+ outputSilence = true;
+ return;
+ }
+
+ /* This looped design guards against the situation where e.outputBuffer is a different size to the original buffer size. Should never happen in practice. */
+ var totalFramesProcessed = 0;
+ while (totalFramesProcessed < e.outputBuffer.length) {
+ var framesRemaining = e.outputBuffer.length - totalFramesProcessed;
+ var framesToProcess = framesRemaining;
+ if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) {
+ framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4);
+ }
+
+ /* Read data from the client into our intermediary buffer. */
+ ccall("ma_device_process_pcm_frames_playback__webaudio", "undefined", ["number", "number", "number"], [pDevice, framesToProcess, device.intermediaryBuffer]);
+
+ /* At this point we'll have data in our intermediary buffer which we now need to deinterleave and copy over to the output buffers. */
+ if (outputSilence) {
+ for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
+ e.outputBuffer.getChannelData(iChannel).fill(0.0);
+ }
+ } else {
+ for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
+ for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) {
+ e.outputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame] = device.intermediaryBufferView[iFrame*channels + iChannel];
+ }
+ }
+ }
+
+ totalFramesProcessed += framesToProcess;
}
- }
- }
- }
+ };
- ((ma_sio_close_proc)pContext->sndio.sio_close)(handle);
- return MA_SUCCESS;
-}
+ device.scriptNode.connect(device.webaudio.destination);
+ }
-void ma_device_uninit__sndio(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
+ return miniaudio.track_device(device);
+ }, (deviceType == ma_device_type_capture) ? pConfig->capture.channels : pConfig->playback.channels, pConfig->sampleRate, internalPeriodSizeInFrames, deviceType == ma_device_type_capture, pDevice);
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handleCapture);
+ if (deviceIndex < 0) {
+ return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
}
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback);
+ if (deviceType == ma_device_type_capture) {
+ pDevice->webaudio.indexCapture = deviceIndex;
+ pDevice->capture.internalFormat = ma_format_f32;
+ pDevice->capture.internalChannels = pConfig->capture.channels;
+ ma_get_standard_channel_map(ma_standard_channel_map_webaudio, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
+ pDevice->capture.internalSampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);
+ pDevice->capture.internalPeriodSizeInFrames = internalPeriodSizeInFrames;
+ pDevice->capture.internalPeriods = 1;
+ } else {
+ pDevice->webaudio.indexPlayback = deviceIndex;
+ pDevice->playback.internalFormat = ma_format_f32;
+ pDevice->playback.internalChannels = pConfig->playback.channels;
+ ma_get_standard_channel_map(ma_standard_channel_map_webaudio, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
+ pDevice->playback.internalSampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);
+ pDevice->playback.internalPeriodSizeInFrames = internalPeriodSizeInFrames;
+ pDevice->playback.internalPeriods = 1;
}
+
+ return MA_SUCCESS;
}
-ma_result ma_device_init_handle__sndio(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice)
+static ma_result ma_device_init__webaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
{
- const char* pDeviceName;
- ma_ptr handle;
- int openFlags = 0;
- struct ma_sio_cap caps;
- struct ma_sio_par par;
- ma_device_id* pDeviceID;
- ma_format format;
- ma_uint32 channels;
- ma_uint32 sampleRate;
- ma_format internalFormat;
- ma_uint32 internalChannels;
- ma_uint32 internalSampleRate;
- ma_uint32 internalBufferSizeInFrames;
- ma_uint32 internalPeriods;
-
- ma_assert(pContext != NULL);
- ma_assert(pConfig != NULL);
- ma_assert(deviceType != ma_device_type_duplex);
- ma_assert(pDevice != NULL);
+ ma_result result;
- if (deviceType == ma_device_type_capture) {
- openFlags = MA_SIO_REC;
- pDeviceID = pConfig->capture.pDeviceID;
- format = pConfig->capture.format;
- channels = pConfig->capture.channels;
- sampleRate = pConfig->sampleRate;
- } else {
- openFlags = MA_SIO_PLAY;
- pDeviceID = pConfig->playback.pDeviceID;
- format = pConfig->playback.format;
- channels = pConfig->playback.channels;
- sampleRate = pConfig->sampleRate;
+ if (pConfig->deviceType == ma_device_type_loopback) {
+ return MA_DEVICE_TYPE_NOT_SUPPORTED;
}
- pDeviceName = MA_SIO_DEVANY;
- if (pDeviceID != NULL) {
- pDeviceName = pDeviceID->sndio;
+ /* No exclusive mode with Web Audio. */
+ if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
+ ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
+ return MA_SHARE_MODE_NOT_SUPPORTED;
}
- handle = (ma_ptr)((ma_sio_open_proc)pContext->sndio.sio_open)(pDeviceName, openFlags, 0);
- if (handle == NULL) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to open device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+ result = ma_device_init_by_type__webaudio(pContext, pConfig, ma_device_type_capture, pDevice);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
}
- /* We need to retrieve the device caps to determine the most appropriate format to use. */
- if (((ma_sio_getcap_proc)pContext->sndio.sio_getcap)((struct ma_sio_hdl*)handle, &caps) == 0) {
- ((ma_sio_close_proc)pContext->sndio.sio_close)((struct ma_sio_hdl*)handle);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve device caps.", MA_ERROR);
+ if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+ result = ma_device_init_by_type__webaudio(pContext, pConfig, ma_device_type_playback, pDevice);
+ if (result != MA_SUCCESS) {
+ if (pConfig->deviceType == ma_device_type_duplex) {
+ ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture);
+ }
+ return result;
+ }
}
/*
- Note: sndio reports a huge range of available channels. This is inconvenient for us because there's no real
- way, as far as I can tell, to get the _actual_ channel count of the device. I'm therefore restricting this
- to the requested channels, regardless of whether or not the default channel count is requested.
-
- For hardware devices, I'm suspecting only a single channel count will be reported and we can safely use the
- value returned by ma_find_best_channels_from_sio_cap__sndio().
+ We need a ring buffer for moving data from the capture device to the playback device. The capture callback is the producer
+ and the playback callback is the consumer. The buffer needs to be large enough to hold internalPeriodSizeInFrames based on
+ the external sample rate.
*/
- if (deviceType == ma_device_type_capture) {
- if (pDevice->capture.usingDefaultFormat) {
- format = ma_find_best_format_from_sio_cap__sndio(&caps);
- }
- if (pDevice->capture.usingDefaultChannels) {
- if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) {
- channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format);
+ if (pConfig->deviceType == ma_device_type_duplex) {
+ ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames) * 2;
+ result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->webaudio.duplexRB);
+ if (result != MA_SUCCESS) {
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture);
}
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_playback, pDevice->webaudio.indexPlayback);
+ }
+ return result;
}
- } else {
- if (pDevice->playback.usingDefaultFormat) {
- format = ma_find_best_format_from_sio_cap__sndio(&caps);
- }
- if (pDevice->playback.usingDefaultChannels) {
- if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) {
- channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format);
+
+ /* We need a period to act as a buffer for cases where the playback and capture device's end up desyncing. */
+ {
+ ma_uint32 marginSizeInFrames = rbSizeInFrames / 3; /* <-- Dividing by 3 because internalPeriods is always set to 1 for WebAudio. */
+ void* pMarginData;
+ ma_pcm_rb_acquire_write(&pDevice->webaudio.duplexRB, &marginSizeInFrames, &pMarginData);
+ {
+ MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels));
}
+ ma_pcm_rb_commit_write(&pDevice->webaudio.duplexRB, marginSizeInFrames, pMarginData);
}
}
-
- if (pDevice->usingDefaultSampleRate) {
- sampleRate = ma_find_best_sample_rate_from_sio_cap__sndio(&caps, pConfig->deviceType, format, channels);
- }
+ return MA_SUCCESS;
+}
- ((ma_sio_initpar_proc)pDevice->pContext->sndio.sio_initpar)(&par);
- par.msb = 0;
- par.le = ma_is_little_endian();
-
- switch (format) {
- case ma_format_u8:
- {
- par.bits = 8;
- par.bps = 1;
- par.sig = 0;
- } break;
-
- case ma_format_s24:
- {
- par.bits = 24;
- par.bps = 3;
- par.sig = 1;
- } break;
-
- case ma_format_s32:
- {
- par.bits = 32;
- par.bps = 4;
- par.sig = 1;
- } break;
-
- case ma_format_s16:
- case ma_format_f32:
- default:
- {
- par.bits = 16;
- par.bps = 2;
- par.sig = 1;
- } break;
+static ma_result ma_device_start__webaudio(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ EM_ASM({
+ miniaudio.get_device_by_index($0).webaudio.resume();
+ }, pDevice->webaudio.indexCapture);
}
-
- if (deviceType == ma_device_type_capture) {
- par.rchan = channels;
- } else {
- par.pchan = channels;
+
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ EM_ASM({
+ miniaudio.get_device_by_index($0).webaudio.resume();
+ }, pDevice->webaudio.indexPlayback);
}
- par.rate = sampleRate;
+ return MA_SUCCESS;
+}
- internalBufferSizeInFrames = pConfig->bufferSizeInFrames;
- if (internalBufferSizeInFrames == 0) {
- internalBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, par.rate);
- }
+static ma_result ma_device_stop__webaudio(ma_device* pDevice)
+{
+ MA_ASSERT(pDevice != NULL);
- par.round = internalBufferSizeInFrames / pConfig->periods;
- par.appbufsz = par.round * pConfig->periods;
-
- if (((ma_sio_setpar_proc)pContext->sndio.sio_setpar)((struct ma_sio_hdl*)handle, &par) == 0) {
- ((ma_sio_close_proc)pContext->sndio.sio_close)((struct ma_sio_hdl*)handle);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to set buffer size.", MA_FORMAT_NOT_SUPPORTED);
- }
- if (((ma_sio_getpar_proc)pContext->sndio.sio_getpar)((struct ma_sio_hdl*)handle, &par) == 0) {
- ((ma_sio_close_proc)pContext->sndio.sio_close)((struct ma_sio_hdl*)handle);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve buffer size.", MA_FORMAT_NOT_SUPPORTED);
- }
+ /*
+ From the WebAudio API documentation for AudioContext.suspend():
- internalFormat = ma_format_from_sio_enc__sndio(par.bits, par.bps, par.sig, par.le, par.msb);
- internalChannels = (deviceType == ma_device_type_capture) ? par.rchan : par.pchan;
- internalSampleRate = par.rate;
- internalPeriods = par.appbufsz / par.round;
- internalBufferSizeInFrames = par.appbufsz;
+ Suspends the progression of AudioContext's currentTime, allows any current context processing blocks that are already processed to be played to the
+ destination, and then allows the system to release its claim on audio hardware.
- if (deviceType == ma_device_type_capture) {
- pDevice->sndio.handleCapture = handle;
- pDevice->capture.internalFormat = internalFormat;
- pDevice->capture.internalChannels = internalChannels;
- pDevice->capture.internalSampleRate = internalSampleRate;
- ma_get_standard_channel_map(ma_standard_channel_map_sndio, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
- pDevice->capture.internalBufferSizeInFrames = internalBufferSizeInFrames;
- pDevice->capture.internalPeriods = internalPeriods;
- } else {
- pDevice->sndio.handlePlayback = handle;
- pDevice->playback.internalFormat = internalFormat;
- pDevice->playback.internalChannels = internalChannels;
- pDevice->playback.internalSampleRate = internalSampleRate;
- ma_get_standard_channel_map(ma_standard_channel_map_sndio, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
- pDevice->playback.internalBufferSizeInFrames = internalBufferSizeInFrames;
- pDevice->playback.internalPeriods = internalPeriods;
+    I read this to mean that "any current context processing blocks" are processed by suspend() - i.e. they are drained. We therefore shouldn't need to
+ do any kind of explicit draining.
+ */
+
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ EM_ASM({
+ miniaudio.get_device_by_index($0).webaudio.suspend();
+ }, pDevice->webaudio.indexCapture);
}
-#ifdef MA_DEBUG_OUTPUT
- printf("DEVICE INFO\n");
- printf(" Format: %s\n", ma_get_format_name(internalFormat));
- printf(" Channels: %d\n", internalChannels);
- printf(" Sample Rate: %d\n", internalSampleRate);
- printf(" Buffer Size: %d\n", internalBufferSizeInFrames);
- printf(" Periods: %d\n", internalPeriods);
- printf(" appbufsz: %d\n", par.appbufsz);
- printf(" round: %d\n", par.round);
-#endif
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ EM_ASM({
+ miniaudio.get_device_by_index($0).webaudio.suspend();
+ }, pDevice->webaudio.indexPlayback);
+ }
+
+ ma_stop_proc onStop = pDevice->onStop;
+ if (onStop) {
+ onStop(pDevice);
+ }
return MA_SUCCESS;
}
-ma_result ma_device_init__sndio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+static ma_result ma_context_uninit__webaudio(ma_context* pContext)
{
- ma_assert(pDevice != NULL);
+ MA_ASSERT(pContext != NULL);
+ MA_ASSERT(pContext->backend == ma_backend_webaudio);
- ma_zero_object(&pDevice->sndio);
+ /* Nothing needs to be done here. */
+ (void)pContext;
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- ma_result result = ma_device_init_handle__sndio(pContext, pConfig, ma_device_type_capture, pDevice);
- if (result != MA_SUCCESS) {
- return result;
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__webaudio(const ma_context_config* pConfig, ma_context* pContext)
+{
+ int resultFromJS;
+
+ MA_ASSERT(pContext != NULL);
+
+ /* Here is where our global JavaScript object is initialized. */
+ resultFromJS = EM_ASM_INT({
+ if ((window.AudioContext || window.webkitAudioContext) === undefined) {
+ return 0; /* Web Audio not supported. */
}
- }
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- ma_result result = ma_device_init_handle__sndio(pContext, pConfig, ma_device_type_playback, pDevice);
- if (result != MA_SUCCESS) {
- return result;
+ if (typeof(miniaudio) === 'undefined') {
+ miniaudio = {};
+ miniaudio.devices = []; /* Device cache for mapping devices to indexes for JavaScript/C interop. */
+
+ miniaudio.track_device = function(device) {
+ /* Try inserting into a free slot first. */
+ for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) {
+ if (miniaudio.devices[iDevice] == null) {
+ miniaudio.devices[iDevice] = device;
+ return iDevice;
+ }
+ }
+
+                    /* Getting here means there are no empty slots in the array so we just push to the end. */
+ miniaudio.devices.push(device);
+ return miniaudio.devices.length - 1;
+ };
+
+ miniaudio.untrack_device_by_index = function(deviceIndex) {
+                /* We just set the device's slot to null. The slot will get reused in the next call to miniaudio.track_device(). */
+ miniaudio.devices[deviceIndex] = null;
+
+ /* Trim the array if possible. */
+ while (miniaudio.devices.length > 0) {
+ if (miniaudio.devices[miniaudio.devices.length-1] == null) {
+ miniaudio.devices.pop();
+ } else {
+ break;
+ }
+ }
+ };
+
+ miniaudio.untrack_device = function(device) {
+ for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) {
+ if (miniaudio.devices[iDevice] == device) {
+ return miniaudio.untrack_device_by_index(iDevice);
+ }
+ }
+ };
+
+ miniaudio.get_device_by_index = function(deviceIndex) {
+ return miniaudio.devices[deviceIndex];
+ };
}
+
+ return 1;
+ }, 0); /* Must pass in a dummy argument for C99 compatibility. */
+
+ if (resultFromJS != 1) {
+ return MA_FAILED_TO_INIT_BACKEND;
}
+
+ pContext->isBackendAsynchronous = MA_TRUE;
+
+ pContext->onUninit = ma_context_uninit__webaudio;
+ pContext->onDeviceIDEqual = ma_context_is_device_id_equal__webaudio;
+ pContext->onEnumDevices = ma_context_enumerate_devices__webaudio;
+ pContext->onGetDeviceInfo = ma_context_get_device_info__webaudio;
+ pContext->onDeviceInit = ma_device_init__webaudio;
+ pContext->onDeviceUninit = ma_device_uninit__webaudio;
+ pContext->onDeviceStart = ma_device_start__webaudio;
+ pContext->onDeviceStop = ma_device_stop__webaudio;
+
+ (void)pConfig; /* Unused. */
return MA_SUCCESS;
}
+#endif /* Web Audio */
+
-ma_result ma_device_stop__sndio(ma_device* pDevice)
+
+static ma_bool32 ma__is_channel_map_valid(const ma_channel* channelMap, ma_uint32 channels)
{
- ma_assert(pDevice != NULL);
+ /* A blank channel map should be allowed, in which case it should use an appropriate default which will depend on context. */
+ if (channelMap[0] != MA_CHANNEL_NONE) {
+ ma_uint32 iChannel;
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ((ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop)((struct ma_sio_hdl*)pDevice->sndio.handleCapture);
- ma_atomic_exchange_32(&pDevice->sndio.isStartedCapture, MA_FALSE);
- }
+ if (channels == 0) {
+ return MA_FALSE; /* No channels. */
+ }
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- ((ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback);
- ma_atomic_exchange_32(&pDevice->sndio.isStartedPlayback, MA_FALSE);
+ /* A channel cannot be present in the channel map more than once. */
+ for (iChannel = 0; iChannel < channels; ++iChannel) {
+ ma_uint32 jChannel;
+ for (jChannel = iChannel + 1; jChannel < channels; ++jChannel) {
+ if (channelMap[iChannel] == channelMap[jChannel]) {
+ return MA_FALSE;
+ }
+ }
+ }
}
- return MA_SUCCESS;
+ return MA_TRUE;
}
-ma_result ma_device_write__sndio(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount)
+
+static ma_result ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType)
{
- int result;
+ ma_result result;
- if (!pDevice->sndio.isStartedPlayback) {
- ((ma_sio_start_proc)pDevice->pContext->sndio.sio_start)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback); /* <-- Doesn't actually playback until data is written. */
- ma_atomic_exchange_32(&pDevice->sndio.isStartedPlayback, MA_TRUE);
+ MA_ASSERT(pDevice != NULL);
+
+ if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) {
+ if (pDevice->capture.usingDefaultFormat) {
+ pDevice->capture.format = pDevice->capture.internalFormat;
+ }
+ if (pDevice->capture.usingDefaultChannels) {
+ pDevice->capture.channels = pDevice->capture.internalChannels;
+ }
+ if (pDevice->capture.usingDefaultChannelMap) {
+ if (pDevice->capture.internalChannels == pDevice->capture.channels) {
+ ma_channel_map_copy(pDevice->capture.channelMap, pDevice->capture.internalChannelMap, pDevice->capture.channels);
+ } else {
+ ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->capture.channels, pDevice->capture.channelMap);
+ }
+ }
}
- result = ((ma_sio_write_proc)pDevice->pContext->sndio.sio_write)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
- if (result == 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to send data from the client to the device.", MA_FAILED_TO_SEND_DATA_TO_DEVICE);
+ if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
+ if (pDevice->playback.usingDefaultFormat) {
+ pDevice->playback.format = pDevice->playback.internalFormat;
+ }
+ if (pDevice->playback.usingDefaultChannels) {
+ pDevice->playback.channels = pDevice->playback.internalChannels;
+ }
+ if (pDevice->playback.usingDefaultChannelMap) {
+ if (pDevice->playback.internalChannels == pDevice->playback.channels) {
+ ma_channel_map_copy(pDevice->playback.channelMap, pDevice->playback.internalChannelMap, pDevice->playback.channels);
+ } else {
+ ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->playback.channels, pDevice->playback.channelMap);
+ }
+ }
}
-
- return MA_SUCCESS;
-}
-ma_result ma_device_read__sndio(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount)
-{
- int result;
+ if (pDevice->usingDefaultSampleRate) {
+ if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) {
+ pDevice->sampleRate = pDevice->capture.internalSampleRate;
+ } else {
+ pDevice->sampleRate = pDevice->playback.internalSampleRate;
+ }
+ }
+
+ /* PCM converters. */
+ if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) {
+ /* Converting from internal device format to client format. */
+ ma_data_converter_config converterConfig = ma_data_converter_config_init_default();
+ converterConfig.formatIn = pDevice->capture.internalFormat;
+ converterConfig.channelsIn = pDevice->capture.internalChannels;
+ converterConfig.sampleRateIn = pDevice->capture.internalSampleRate;
+ ma_channel_map_copy(converterConfig.channelMapIn, pDevice->capture.internalChannelMap, pDevice->capture.internalChannels);
+ converterConfig.formatOut = pDevice->capture.format;
+ converterConfig.channelsOut = pDevice->capture.channels;
+ converterConfig.sampleRateOut = pDevice->sampleRate;
+ ma_channel_map_copy(converterConfig.channelMapOut, pDevice->capture.channelMap, pDevice->capture.channels);
+ converterConfig.resampling.allowDynamicSampleRate = MA_FALSE;
+ converterConfig.resampling.algorithm = pDevice->resampling.algorithm;
+ converterConfig.resampling.linear.lpfOrder = pDevice->resampling.linear.lpfOrder;
+ converterConfig.resampling.speex.quality = pDevice->resampling.speex.quality;
- if (!pDevice->sndio.isStartedCapture) {
- ((ma_sio_start_proc)pDevice->pContext->sndio.sio_start)((struct ma_sio_hdl*)pDevice->sndio.handleCapture); /* <-- Doesn't actually playback until data is written. */
- ma_atomic_exchange_32(&pDevice->sndio.isStartedCapture, MA_TRUE);
+ result = ma_data_converter_init(&converterConfig, &pDevice->capture.converter);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
}
-
- result = ((ma_sio_read_proc)pDevice->pContext->sndio.sio_read)((struct ma_sio_hdl*)pDevice->sndio.handleCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
- if (result == 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[sndio] Failed to read data from the device to be sent to the device.", MA_FAILED_TO_SEND_DATA_TO_DEVICE);
+
+ if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
+ /* Converting from client format to device format. */
+ ma_data_converter_config converterConfig = ma_data_converter_config_init_default();
+ converterConfig.formatIn = pDevice->playback.format;
+ converterConfig.channelsIn = pDevice->playback.channels;
+ converterConfig.sampleRateIn = pDevice->sampleRate;
+ ma_channel_map_copy(converterConfig.channelMapIn, pDevice->playback.channelMap, pDevice->playback.channels);
+ converterConfig.formatOut = pDevice->playback.internalFormat;
+ converterConfig.channelsOut = pDevice->playback.internalChannels;
+ converterConfig.sampleRateOut = pDevice->playback.internalSampleRate;
+ ma_channel_map_copy(converterConfig.channelMapOut, pDevice->playback.internalChannelMap, pDevice->playback.internalChannels);
+ converterConfig.resampling.allowDynamicSampleRate = MA_FALSE;
+ converterConfig.resampling.algorithm = pDevice->resampling.algorithm;
+ converterConfig.resampling.linear.lpfOrder = pDevice->resampling.linear.lpfOrder;
+ converterConfig.resampling.speex.quality = pDevice->resampling.speex.quality;
+
+ result = ma_data_converter_init(&converterConfig, &pDevice->playback.converter);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
}
-
+
return MA_SUCCESS;
}
-ma_result ma_context_uninit__sndio(ma_context* pContext)
+
+static ma_thread_result MA_THREADCALL ma_worker_thread(void* pData)
{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_sndio);
+ ma_device* pDevice = (ma_device*)pData;
+ MA_ASSERT(pDevice != NULL);
- (void)pContext;
- return MA_SUCCESS;
-}
+#ifdef MA_WIN32
+ ma_CoInitializeEx(pDevice->pContext, NULL, MA_COINIT_VALUE);
+#endif
-ma_result ma_context_init__sndio(const ma_context_config* pConfig, ma_context* pContext)
-{
-#ifndef MA_NO_RUNTIME_LINKING
- const char* libsndioNames[] = {
- "libsndio.so"
- };
- size_t i;
+ /*
+    When the device is being initialized its initial state is set to MA_STATE_UNINITIALIZED. Before returning from
+ ma_device_init(), the state needs to be set to something valid. In miniaudio the device's default state immediately
+ after initialization is stopped, so therefore we need to mark the device as such. miniaudio will wait on the worker
+ thread to signal an event to know when the worker thread is ready for action.
+ */
+ ma_device__set_state(pDevice, MA_STATE_STOPPED);
+ ma_event_signal(&pDevice->stopEvent);
- for (i = 0; i < ma_countof(libsndioNames); ++i) {
- pContext->sndio.sndioSO = ma_dlopen(pContext, libsndioNames[i]);
- if (pContext->sndio.sndioSO != NULL) {
+ for (;;) { /* <-- This loop just keeps the thread alive. The main audio loop is inside. */
+ ma_stop_proc onStop;
+
+ /* We wait on an event to know when something has requested that the device be started and the main loop entered. */
+ ma_event_wait(&pDevice->wakeupEvent);
+
+ /* Default result code. */
+ pDevice->workResult = MA_SUCCESS;
+
+ /* If the reason for the wake up is that we are terminating, just break from the loop. */
+ if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) {
break;
}
- }
-
- if (pContext->sndio.sndioSO == NULL) {
- return MA_NO_BACKEND;
- }
-
- pContext->sndio.sio_open = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_open");
- pContext->sndio.sio_close = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_close");
- pContext->sndio.sio_setpar = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_setpar");
- pContext->sndio.sio_getpar = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_getpar");
- pContext->sndio.sio_getcap = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_getcap");
- pContext->sndio.sio_write = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_write");
- pContext->sndio.sio_read = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_read");
- pContext->sndio.sio_start = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_start");
- pContext->sndio.sio_stop = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_stop");
- pContext->sndio.sio_initpar = (ma_proc)ma_dlsym(pContext, pContext->sndio.sndioSO, "sio_initpar");
-#else
- pContext->sndio.sio_open = sio_open;
- pContext->sndio.sio_close = sio_close;
- pContext->sndio.sio_setpar = sio_setpar;
- pContext->sndio.sio_getpar = sio_getpar;
- pContext->sndio.sio_getcap = sio_getcap;
- pContext->sndio.sio_write = sio_write;
- pContext->sndio.sio_read = sio_read;
- pContext->sndio.sio_start = sio_start;
- pContext->sndio.sio_stop = sio_stop;
- pContext->sndio.sio_initpar = sio_initpar;
-#endif
- pContext->onUninit = ma_context_uninit__sndio;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__sndio;
- pContext->onEnumDevices = ma_context_enumerate_devices__sndio;
- pContext->onGetDeviceInfo = ma_context_get_device_info__sndio;
- pContext->onDeviceInit = ma_device_init__sndio;
- pContext->onDeviceUninit = ma_device_uninit__sndio;
- pContext->onDeviceStart = NULL; /* Not required for synchronous backends. */
- pContext->onDeviceStop = ma_device_stop__sndio;
- pContext->onDeviceWrite = ma_device_write__sndio;
- pContext->onDeviceRead = ma_device_read__sndio;
+ /*
+ Getting to this point means the device is wanting to get started. The function that has requested that the device
+ be started will be waiting on an event (pDevice->startEvent) which means we need to make sure we signal the event
+ in both the success and error case. It's important that the state of the device is set _before_ signaling the event.
+ */
+ MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STARTING);
- (void)pConfig;
- return MA_SUCCESS;
-}
-#endif /* sndio */
+ /* Make sure the state is set appropriately. */
+ ma_device__set_state(pDevice, MA_STATE_STARTED);
+ ma_event_signal(&pDevice->startEvent);
+ if (pDevice->pContext->onDeviceMainLoop != NULL) {
+ pDevice->pContext->onDeviceMainLoop(pDevice);
+ } else {
+ ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "No main loop implementation.", MA_API_NOT_FOUND);
+ }
+ /*
+        Getting here means we have broken from the main loop which happens when the application has requested that the device be stopped. Note that this
+ may have actually already happened above if the device was lost and miniaudio has attempted to re-initialize the device. In this case we
+ don't want to be doing this a second time.
+ */
+ if (ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED) {
+ if (pDevice->pContext->onDeviceStop) {
+ pDevice->pContext->onDeviceStop(pDevice);
+ }
+ }
-/******************************************************************************
+ /* After the device has stopped, make sure an event is posted. */
+ onStop = pDevice->onStop;
+ if (onStop) {
+ onStop(pDevice);
+ }
-audio(4) Backend
+ /*
+ A function somewhere is waiting for the device to have stopped for real so we need to signal an event to allow it to continue. Note that
+ it's possible that the device has been uninitialized which means we need to _not_ change the status to stopped. We cannot go from an
+ uninitialized state to stopped state.
+ */
+ if (ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED) {
+ ma_device__set_state(pDevice, MA_STATE_STOPPED);
+ ma_event_signal(&pDevice->stopEvent);
+ }
+ }
-******************************************************************************/
-#ifdef MA_HAS_AUDIO4
-#include
-#include
-#include
-#include
-#include
-#include
-#include
+ /* Make sure we aren't continuously waiting on a stop event. */
+ ma_event_signal(&pDevice->stopEvent); /* <-- Is this still needed? */
-#if defined(__OpenBSD__)
- #include
- #if defined(OpenBSD) && OpenBSD >= 201709
- #define MA_AUDIO4_USE_NEW_API
- #endif
+#ifdef MA_WIN32
+ ma_CoUninitialize(pDevice->pContext);
#endif
-void ma_construct_device_id__audio4(char* id, size_t idSize, const char* base, int deviceIndex)
-{
- size_t baseLen;
-
- ma_assert(id != NULL);
- ma_assert(idSize > 0);
- ma_assert(deviceIndex >= 0);
-
- baseLen = strlen(base);
- ma_assert(idSize > baseLen);
-
- ma_strcpy_s(id, idSize, base);
- ma_itoa_s(deviceIndex, id+baseLen, idSize-baseLen, 10);
+ return (ma_thread_result)0;
}
-ma_result ma_extract_device_index_from_id__audio4(const char* id, const char* base, int* pIndexOut)
-{
- size_t idLen;
- size_t baseLen;
- const char* deviceIndexStr;
- ma_assert(id != NULL);
- ma_assert(base != NULL);
- ma_assert(pIndexOut != NULL);
-
- idLen = strlen(id);
- baseLen = strlen(base);
- if (idLen <= baseLen) {
- return MA_ERROR; /* Doesn't look like the id starts with the base. */
- }
-
- if (strncmp(id, base, baseLen) != 0) {
- return MA_ERROR; /* ID does not begin with base. */
- }
-
- deviceIndexStr = id + baseLen;
- if (deviceIndexStr[0] == '\0') {
- return MA_ERROR; /* No index specified in the ID. */
- }
-
- if (pIndexOut) {
- *pIndexOut = atoi(deviceIndexStr);
+/* Helper for determining whether or not the given device is initialized. */
+static ma_bool32 ma_device__is_initialized(ma_device* pDevice)
+{
+ if (pDevice == NULL) {
+ return MA_FALSE;
}
-
- return MA_SUCCESS;
+
+ return ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED;
}
-ma_bool32 ma_context_is_device_id_equal__audio4(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+
+#ifdef MA_WIN32
+static ma_result ma_context_uninit_backend_apis__win32(ma_context* pContext)
{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
+ ma_CoUninitialize(pContext);
+ ma_dlclose(pContext, pContext->win32.hUser32DLL);
+ ma_dlclose(pContext, pContext->win32.hOle32DLL);
+ ma_dlclose(pContext, pContext->win32.hAdvapi32DLL);
- return ma_strcmp(pID0->audio4, pID1->audio4) == 0;
+ return MA_SUCCESS;
}
-#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */
-ma_format ma_format_from_encoding__audio4(unsigned int encoding, unsigned int precision)
+static ma_result ma_context_init_backend_apis__win32(ma_context* pContext)
{
- if (precision == 8 && (encoding == AUDIO_ENCODING_ULINEAR || encoding == AUDIO_ENCODING_ULINEAR || encoding == AUDIO_ENCODING_ULINEAR_LE || encoding == AUDIO_ENCODING_ULINEAR_BE)) {
- return ma_format_u8;
- } else {
- if (ma_is_little_endian() && encoding == AUDIO_ENCODING_SLINEAR_LE) {
- if (precision == 16) {
- return ma_format_s16;
- } else if (precision == 24) {
- return ma_format_s24;
- } else if (precision == 32) {
- return ma_format_s32;
- }
- } else if (ma_is_big_endian() && encoding == AUDIO_ENCODING_SLINEAR_BE) {
- if (precision == 16) {
- return ma_format_s16;
- } else if (precision == 24) {
- return ma_format_s24;
- } else if (precision == 32) {
- return ma_format_s32;
- }
- }
+#ifdef MA_WIN32_DESKTOP
+ /* Ole32.dll */
+ pContext->win32.hOle32DLL = ma_dlopen(pContext, "ole32.dll");
+ if (pContext->win32.hOle32DLL == NULL) {
+ return MA_FAILED_TO_INIT_BACKEND;
}
- return ma_format_unknown; /* Encoding not supported. */
-}
+ pContext->win32.CoInitializeEx = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoInitializeEx");
+ pContext->win32.CoUninitialize = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoUninitialize");
+ pContext->win32.CoCreateInstance = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoCreateInstance");
+ pContext->win32.CoTaskMemFree = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoTaskMemFree");
+ pContext->win32.PropVariantClear = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "PropVariantClear");
+ pContext->win32.StringFromGUID2 = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "StringFromGUID2");
-void ma_encoding_from_format__audio4(ma_format format, unsigned int* pEncoding, unsigned int* pPrecision)
-{
- ma_assert(format != ma_format_unknown);
- ma_assert(pEncoding != NULL);
- ma_assert(pPrecision != NULL);
- switch (format)
- {
- case ma_format_u8:
- {
- *pEncoding = AUDIO_ENCODING_ULINEAR;
- *pPrecision = 8;
- } break;
+ /* User32.dll */
+ pContext->win32.hUser32DLL = ma_dlopen(pContext, "user32.dll");
+ if (pContext->win32.hUser32DLL == NULL) {
+ return MA_FAILED_TO_INIT_BACKEND;
+ }
- case ma_format_s24:
- {
- *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE;
- *pPrecision = 24;
- } break;
+ pContext->win32.GetForegroundWindow = (ma_proc)ma_dlsym(pContext, pContext->win32.hUser32DLL, "GetForegroundWindow");
+ pContext->win32.GetDesktopWindow = (ma_proc)ma_dlsym(pContext, pContext->win32.hUser32DLL, "GetDesktopWindow");
- case ma_format_s32:
- {
- *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE;
- *pPrecision = 32;
- } break;
- case ma_format_s16:
- case ma_format_f32:
- default:
- {
- *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE;
- *pPrecision = 16;
- } break;
+ /* Advapi32.dll */
+ pContext->win32.hAdvapi32DLL = ma_dlopen(pContext, "advapi32.dll");
+ if (pContext->win32.hAdvapi32DLL == NULL) {
+ return MA_FAILED_TO_INIT_BACKEND;
}
-}
-ma_format ma_format_from_prinfo__audio4(struct audio_prinfo* prinfo)
-{
- return ma_format_from_encoding__audio4(prinfo->encoding, prinfo->precision);
+ pContext->win32.RegOpenKeyExA = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegOpenKeyExA");
+ pContext->win32.RegCloseKey = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegCloseKey");
+ pContext->win32.RegQueryValueExA = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegQueryValueExA");
+#endif
+
+ ma_CoInitializeEx(pContext, NULL, MA_COINIT_VALUE);
+ return MA_SUCCESS;
}
#else
-ma_format ma_format_from_swpar__audio4(struct audio_swpar* par)
+static ma_result ma_context_uninit_backend_apis__nix(ma_context* pContext)
{
- if (par->bits == 8 && par->bps == 1 && par->sig == 0) {
- return ma_format_u8;
- }
- if (par->bits == 16 && par->bps == 2 && par->sig == 1 && par->le == ma_is_little_endian()) {
- return ma_format_s16;
- }
- if (par->bits == 24 && par->bps == 3 && par->sig == 1 && par->le == ma_is_little_endian()) {
- return ma_format_s24;
+#if defined(MA_USE_RUNTIME_LINKING_FOR_PTHREAD) && !defined(MA_NO_RUNTIME_LINKING)
+ ma_dlclose(pContext, pContext->posix.pthreadSO);
+#else
+ (void)pContext;
+#endif
+
+ return MA_SUCCESS;
+}
+
+static ma_result ma_context_init_backend_apis__nix(ma_context* pContext)
+{
+ /* pthread */
+#if defined(MA_USE_RUNTIME_LINKING_FOR_PTHREAD) && !defined(MA_NO_RUNTIME_LINKING)
+ const char* libpthreadFileNames[] = {
+ "libpthread.so",
+ "libpthread.so.0",
+ "libpthread.dylib"
+ };
+ size_t i;
+
+ for (i = 0; i < sizeof(libpthreadFileNames) / sizeof(libpthreadFileNames[0]); ++i) {
+ pContext->posix.pthreadSO = ma_dlopen(pContext, libpthreadFileNames[i]);
+ if (pContext->posix.pthreadSO != NULL) {
+ break;
+ }
}
- if (par->bits == 32 && par->bps == 4 && par->sig == 1 && par->le == ma_is_little_endian()) {
- return ma_format_f32;
+
+ if (pContext->posix.pthreadSO == NULL) {
+ return MA_FAILED_TO_INIT_BACKEND;
}
- /* Format not supported. */
- return ma_format_unknown;
+ pContext->posix.pthread_create = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_create");
+ pContext->posix.pthread_join = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_join");
+ pContext->posix.pthread_mutex_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_init");
+ pContext->posix.pthread_mutex_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_destroy");
+ pContext->posix.pthread_mutex_lock = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_lock");
+ pContext->posix.pthread_mutex_unlock = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_unlock");
+ pContext->posix.pthread_cond_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_init");
+ pContext->posix.pthread_cond_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_destroy");
+ pContext->posix.pthread_cond_wait = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_wait");
+ pContext->posix.pthread_cond_signal = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_signal");
+ pContext->posix.pthread_attr_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_init");
+ pContext->posix.pthread_attr_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_destroy");
+ pContext->posix.pthread_attr_setschedpolicy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_setschedpolicy");
+ pContext->posix.pthread_attr_getschedparam = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_getschedparam");
+ pContext->posix.pthread_attr_setschedparam = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_setschedparam");
+#else
+ pContext->posix.pthread_create = (ma_proc)pthread_create;
+ pContext->posix.pthread_join = (ma_proc)pthread_join;
+ pContext->posix.pthread_mutex_init = (ma_proc)pthread_mutex_init;
+ pContext->posix.pthread_mutex_destroy = (ma_proc)pthread_mutex_destroy;
+ pContext->posix.pthread_mutex_lock = (ma_proc)pthread_mutex_lock;
+ pContext->posix.pthread_mutex_unlock = (ma_proc)pthread_mutex_unlock;
+ pContext->posix.pthread_cond_init = (ma_proc)pthread_cond_init;
+ pContext->posix.pthread_cond_destroy = (ma_proc)pthread_cond_destroy;
+ pContext->posix.pthread_cond_wait = (ma_proc)pthread_cond_wait;
+ pContext->posix.pthread_cond_signal = (ma_proc)pthread_cond_signal;
+ pContext->posix.pthread_attr_init = (ma_proc)pthread_attr_init;
+ pContext->posix.pthread_attr_destroy = (ma_proc)pthread_attr_destroy;
+#if !defined(__EMSCRIPTEN__)
+ pContext->posix.pthread_attr_setschedpolicy = (ma_proc)pthread_attr_setschedpolicy;
+ pContext->posix.pthread_attr_getschedparam = (ma_proc)pthread_attr_getschedparam;
+ pContext->posix.pthread_attr_setschedparam = (ma_proc)pthread_attr_setschedparam;
+#endif
+#endif
+
+ return MA_SUCCESS;
}
#endif
-ma_result ma_context_get_device_info_from_fd__audio4(ma_context* pContext, ma_device_type deviceType, int fd, ma_device_info* pInfoOut)
+static ma_result ma_context_init_backend_apis(ma_context* pContext)
+{
+ ma_result result;
+#ifdef MA_WIN32
+ result = ma_context_init_backend_apis__win32(pContext);
+#else
+ result = ma_context_init_backend_apis__nix(pContext);
+#endif
+
+ return result;
+}
+
+static ma_result ma_context_uninit_backend_apis(ma_context* pContext)
{
- audio_device_t fdDevice;
-#if !defined(MA_AUDIO4_USE_NEW_API)
- int counter = 0;
- audio_info_t fdInfo;
+ ma_result result;
+#ifdef MA_WIN32
+ result = ma_context_uninit_backend_apis__win32(pContext);
#else
- struct audio_swpar fdPar;
- ma_format format;
+ result = ma_context_uninit_backend_apis__nix(pContext);
#endif
- ma_assert(pContext != NULL);
- ma_assert(fd >= 0);
- ma_assert(pInfoOut != NULL);
-
- (void)pContext;
- (void)deviceType;
+ return result;
+}
- if (ioctl(fd, AUDIO_GETDEV, &fdDevice) < 0) {
- return MA_ERROR; /* Failed to retrieve device info. */
- }
- /* Name. */
- ma_strcpy_s(pInfoOut->name, sizeof(pInfoOut->name), fdDevice.name);
+static ma_bool32 ma_context_is_backend_asynchronous(ma_context* pContext)
+{
+ return pContext->isBackendAsynchronous;
+}
-#if !defined(MA_AUDIO4_USE_NEW_API)
- /* Supported formats. We get this by looking at the encodings. */
- for (;;) {
- audio_encoding_t encoding;
- ma_format format;
- ma_zero_object(&encoding);
- encoding.index = counter;
- if (ioctl(fd, AUDIO_GETENC, &encoding) < 0) {
- break;
- }
+ma_context_config ma_context_config_init()
+{
+ ma_context_config config;
+ MA_ZERO_OBJECT(&config);
- format = ma_format_from_encoding__audio4(encoding.encoding, encoding.precision);
- if (format != ma_format_unknown) {
- pInfoOut->formats[pInfoOut->formatCount++] = format;
- }
+ return config;
+}
- counter += 1;
- }
+ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext)
+{
+ ma_result result;
+ ma_context_config config;
+ ma_backend defaultBackends[ma_backend_null+1];
+ ma_uint32 iBackend;
+ ma_backend* pBackendsToIterate;
+ ma_uint32 backendsToIterateCount;
- if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) {
- return MA_ERROR;
+ if (pContext == NULL) {
+ return MA_INVALID_ARGS;
}
- if (deviceType == ma_device_type_playback) {
- pInfoOut->minChannels = fdInfo.play.channels;
- pInfoOut->maxChannels = fdInfo.play.channels;
- pInfoOut->minSampleRate = fdInfo.play.sample_rate;
- pInfoOut->maxSampleRate = fdInfo.play.sample_rate;
+ MA_ZERO_OBJECT(pContext);
+
+ /* Always make sure the config is set first to ensure properties are available as soon as possible. */
+ if (pConfig != NULL) {
+ config = *pConfig;
} else {
- pInfoOut->minChannels = fdInfo.record.channels;
- pInfoOut->maxChannels = fdInfo.record.channels;
- pInfoOut->minSampleRate = fdInfo.record.sample_rate;
- pInfoOut->maxSampleRate = fdInfo.record.sample_rate;
+ config = ma_context_config_init();
}
-#else
- if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) {
- return MA_ERROR;
+
+ pContext->logCallback = config.logCallback;
+ pContext->threadPriority = config.threadPriority;
+ pContext->pUserData = config.pUserData;
+
+ result = ma_allocation_callbacks_init_copy(&pContext->allocationCallbacks, &config.allocationCallbacks);
+ if (result != MA_SUCCESS) {
+ return result;
}
-
- format = ma_format_from_swpar__audio4(&fdPar);
- if (format == ma_format_unknown) {
- return MA_FORMAT_NOT_SUPPORTED;
+
+ /* Backend APIs need to be initialized first. This is where external libraries will be loaded and linked. */
+ result = ma_context_init_backend_apis(pContext);
+ if (result != MA_SUCCESS) {
+ return result;
}
- pInfoOut->formats[pInfoOut->formatCount++] = format;
-
- if (deviceType == ma_device_type_playback) {
- pInfoOut->minChannels = fdPar.pchan;
- pInfoOut->maxChannels = fdPar.pchan;
- } else {
- pInfoOut->minChannels = fdPar.rchan;
- pInfoOut->maxChannels = fdPar.rchan;
+
+ for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) {
+ defaultBackends[iBackend] = (ma_backend)iBackend;
}
-
- pInfoOut->minSampleRate = fdPar.rate;
- pInfoOut->maxSampleRate = fdPar.rate;
-#endif
-
- return MA_SUCCESS;
-}
-ma_result ma_context_enumerate_devices__audio4(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
-{
- const int maxDevices = 64;
- char devpath[256];
- int iDevice;
+ pBackendsToIterate = (ma_backend*)backends;
+ backendsToIterateCount = backendCount;
+ if (pBackendsToIterate == NULL) {
+ pBackendsToIterate = (ma_backend*)defaultBackends;
+ backendsToIterateCount = ma_countof(defaultBackends);
+ }
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
-
- /*
- Every device will be named "/dev/audioN", with a "/dev/audioctlN" equivalent. We use the "/dev/audioctlN"
- version here since we can open it even when another process has control of the "/dev/audioN" device.
- */
- for (iDevice = 0; iDevice < maxDevices; ++iDevice) {
- struct stat st;
- int fd;
- ma_bool32 isTerminating = MA_FALSE;
+ MA_ASSERT(pBackendsToIterate != NULL);
- ma_strcpy_s(devpath, sizeof(devpath), "/dev/audioctl");
- ma_itoa_s(iDevice, devpath+strlen(devpath), sizeof(devpath)-strlen(devpath), 10);
-
- if (stat(devpath, &st) < 0) {
- break;
+ for (iBackend = 0; iBackend < backendsToIterateCount; ++iBackend) {
+ ma_backend backend = pBackendsToIterate[iBackend];
+
+ result = MA_NO_BACKEND;
+ switch (backend) {
+ #ifdef MA_HAS_WASAPI
+ case ma_backend_wasapi:
+ {
+ result = ma_context_init__wasapi(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_DSOUND
+ case ma_backend_dsound:
+ {
+ result = ma_context_init__dsound(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_WINMM
+ case ma_backend_winmm:
+ {
+ result = ma_context_init__winmm(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_ALSA
+ case ma_backend_alsa:
+ {
+ result = ma_context_init__alsa(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_PULSEAUDIO
+ case ma_backend_pulseaudio:
+ {
+ result = ma_context_init__pulse(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_JACK
+ case ma_backend_jack:
+ {
+ result = ma_context_init__jack(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_COREAUDIO
+ case ma_backend_coreaudio:
+ {
+ result = ma_context_init__coreaudio(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_SNDIO
+ case ma_backend_sndio:
+ {
+ result = ma_context_init__sndio(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_AUDIO4
+ case ma_backend_audio4:
+ {
+ result = ma_context_init__audio4(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_OSS
+ case ma_backend_oss:
+ {
+ result = ma_context_init__oss(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_AAUDIO
+ case ma_backend_aaudio:
+ {
+ result = ma_context_init__aaudio(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_OPENSL
+ case ma_backend_opensl:
+ {
+ result = ma_context_init__opensl(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_WEBAUDIO
+ case ma_backend_webaudio:
+ {
+ result = ma_context_init__webaudio(&config, pContext);
+ } break;
+ #endif
+ #ifdef MA_HAS_NULL
+ case ma_backend_null:
+ {
+ result = ma_context_init__null(&config, pContext);
+ } break;
+ #endif
+
+ default: break;
}
- /* The device exists, but we need to check if it's usable as playback and/or capture. */
-
- /* Playback. */
- if (!isTerminating) {
- fd = open(devpath, O_RDONLY, 0);
- if (fd >= 0) {
- /* Supports playback. */
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice);
- if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_playback, fd, &deviceInfo) == MA_SUCCESS) {
- isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
- }
-
- close(fd);
+ /* If this iteration was successful, return. */
+ if (result == MA_SUCCESS) {
+ result = ma_mutex_init(pContext, &pContext->deviceEnumLock);
+ if (result != MA_SUCCESS) {
+ ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device enumeration. ma_context_get_devices() is not thread safe.", result);
}
- }
-
- /* Capture. */
- if (!isTerminating) {
- fd = open(devpath, O_WRONLY, 0);
- if (fd >= 0) {
- /* Supports capture. */
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice);
- if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_capture, fd, &deviceInfo) == MA_SUCCESS) {
- isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
- }
-
- close(fd);
+ result = ma_mutex_init(pContext, &pContext->deviceInfoLock);
+ if (result != MA_SUCCESS) {
+ ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device info retrieval. ma_context_get_device_info() is not thread safe.", result);
}
- }
-
- if (isTerminating) {
- break;
- }
- }
-
- return MA_SUCCESS;
-}
-ma_result ma_context_get_device_info__audio4(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
-{
- int fd = -1;
- int deviceIndex = -1;
- char ctlid[256];
- ma_result result;
+#ifdef MA_DEBUG_OUTPUT
+ printf("[miniaudio] Endian: %s\n", ma_is_little_endian() ? "LE" : "BE");
+ printf("[miniaudio] SSE2: %s\n", ma_has_sse2() ? "YES" : "NO");
+ printf("[miniaudio] AVX2: %s\n", ma_has_avx2() ? "YES" : "NO");
+ printf("[miniaudio] AVX512F: %s\n", ma_has_avx512f() ? "YES" : "NO");
+ printf("[miniaudio] NEON: %s\n", ma_has_neon() ? "YES" : "NO");
+#endif
- ma_assert(pContext != NULL);
- (void)shareMode;
-
- /*
- We need to open the "/dev/audioctlN" device to get the info. To do this we need to extract the number
- from the device ID which will be in "/dev/audioN" format.
- */
- if (pDeviceID == NULL) {
- /* Default device. */
- ma_strcpy_s(ctlid, sizeof(ctlid), "/dev/audioctl");
- } else {
- /* Specific device. We need to convert from "/dev/audioN" to "/dev/audioctlN". */
- result = ma_extract_device_index_from_id__audio4(pDeviceID->audio4, "/dev/audio", &deviceIndex);
- if (result != MA_SUCCESS) {
+ pContext->backend = backend;
return result;
}
-
- ma_construct_device_id__audio4(ctlid, sizeof(ctlid), "/dev/audioctl", deviceIndex);
- }
-
- fd = open(ctlid, (deviceType == ma_device_type_playback) ? O_WRONLY : O_RDONLY, 0);
- if (fd == -1) {
- return MA_NO_DEVICE;
- }
-
- if (deviceIndex == -1) {
- ma_strcpy_s(pDeviceInfo->id.audio4, sizeof(pDeviceInfo->id.audio4), "/dev/audio");
- } else {
- ma_construct_device_id__audio4(pDeviceInfo->id.audio4, sizeof(pDeviceInfo->id.audio4), "/dev/audio", deviceIndex);
}
-
- result = ma_context_get_device_info_from_fd__audio4(pContext, deviceType, fd, pDeviceInfo);
-
- close(fd);
- return result;
-}
-
-void ma_device_uninit__audio4(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- close(pDevice->audio4.fdCapture);
- }
-
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- close(pDevice->audio4.fdPlayback);
- }
+ /* If we get here it means an error occurred. */
+ MA_ZERO_OBJECT(pContext); /* Safety. */
+ return MA_NO_BACKEND;
}
-ma_result ma_device_init_fd__audio4(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice)
+ma_result ma_context_uninit(ma_context* pContext)
{
- const char* pDefaultDeviceNames[] = {
- "/dev/audio",
- "/dev/audio0"
- };
- int fd;
- int fdFlags = 0;
-#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */
- audio_info_t fdInfo;
-#else
- struct audio_swpar fdPar;
-#endif
- ma_format internalFormat;
- ma_uint32 internalChannels;
- ma_uint32 internalSampleRate;
- ma_uint32 internalBufferSizeInFrames;
- ma_uint32 internalPeriods;
-
- ma_assert(pContext != NULL);
- ma_assert(pConfig != NULL);
- ma_assert(deviceType != ma_device_type_duplex);
- ma_assert(pDevice != NULL);
-
- (void)pContext;
-
- /* The first thing to do is open the file. */
- if (deviceType == ma_device_type_capture) {
- fdFlags = O_RDONLY;
- } else {
- fdFlags = O_WRONLY;
- }
- fdFlags |= O_NONBLOCK;
-
- if ((deviceType == ma_device_type_capture && pConfig->capture.pDeviceID == NULL) || (deviceType == ma_device_type_playback && pConfig->playback.pDeviceID == NULL)) {
- /* Default device. */
- size_t iDevice;
- for (iDevice = 0; iDevice < ma_countof(pDefaultDeviceNames); ++iDevice) {
- fd = open(pDefaultDeviceNames[iDevice], fdFlags, 0);
- if (fd != -1) {
- break;
- }
- }
- } else {
- /* Specific device. */
- fd = open((deviceType == ma_device_type_capture) ? pConfig->capture.pDeviceID->audio4 : pConfig->playback.pDeviceID->audio4, fdFlags, 0);
+ if (pContext == NULL) {
+ return MA_INVALID_ARGS;
}
- if (fd == -1) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to open device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ pContext->onUninit(pContext);
-#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */
- AUDIO_INITINFO(&fdInfo);
+ ma_mutex_uninit(&pContext->deviceEnumLock);
+ ma_mutex_uninit(&pContext->deviceInfoLock);
+ ma__free_from_callbacks(pContext->pDeviceInfos, &pContext->allocationCallbacks);
+ ma_context_uninit_backend_apis(pContext);
- /* We get the driver to do as much of the data conversion as possible. */
- if (deviceType == ma_device_type_capture) {
- fdInfo.mode = AUMODE_RECORD;
- ma_encoding_from_format__audio4(pConfig->capture.format, &fdInfo.record.encoding, &fdInfo.record.precision);
- fdInfo.record.channels = pConfig->capture.channels;
- fdInfo.record.sample_rate = pConfig->sampleRate;
- } else {
- fdInfo.mode = AUMODE_PLAY;
- ma_encoding_from_format__audio4(pConfig->playback.format, &fdInfo.play.encoding, &fdInfo.play.precision);
- fdInfo.play.channels = pConfig->playback.channels;
- fdInfo.play.sample_rate = pConfig->sampleRate;
- }
+ return MA_SUCCESS;
+}
- if (ioctl(fd, AUDIO_SETINFO, &fdInfo) < 0) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to set device format. AUDIO_SETINFO failed.", MA_FORMAT_NOT_SUPPORTED);
- }
-
- if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] AUDIO_GETINFO failed.", MA_FORMAT_NOT_SUPPORTED);
- }
- if (deviceType == ma_device_type_capture) {
- internalFormat = ma_format_from_prinfo__audio4(&fdInfo.record);
- internalChannels = fdInfo.record.channels;
- internalSampleRate = fdInfo.record.sample_rate;
- } else {
- internalFormat = ma_format_from_prinfo__audio4(&fdInfo.play);
- internalChannels = fdInfo.play.channels;
- internalSampleRate = fdInfo.play.sample_rate;
- }
+ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+ ma_result result;
- if (internalFormat == ma_format_unknown) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable.", MA_FORMAT_NOT_SUPPORTED);
+ if (pContext == NULL || pContext->onEnumDevices == NULL || callback == NULL) {
+ return MA_INVALID_ARGS;
}
- /* Buffer. */
+ ma_mutex_lock(&pContext->deviceEnumLock);
{
- ma_uint32 internalBufferSizeInBytes;
-
- internalBufferSizeInFrames = pConfig->bufferSizeInFrames;
- if (internalBufferSizeInFrames == 0) {
- internalBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, internalSampleRate);
- }
-
- internalBufferSizeInBytes = internalBufferSizeInFrames * ma_get_bytes_per_frame(internalFormat, internalChannels);
- if (internalBufferSizeInBytes < 16) {
- internalBufferSizeInBytes = 16;
- }
-
- internalPeriods = pConfig->periods;
- if (internalPeriods < 2) {
- internalPeriods = 2;
- }
-
- /* What miniaudio calls a fragment, audio4 calls a block. */
- AUDIO_INITINFO(&fdInfo);
- fdInfo.hiwat = internalPeriods;
- fdInfo.lowat = internalPeriods-1;
- fdInfo.blocksize = internalBufferSizeInBytes / internalPeriods;
- if (ioctl(fd, AUDIO_SETINFO, &fdInfo) < 0) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to set internal buffer size. AUDIO_SETINFO failed.", MA_FORMAT_NOT_SUPPORTED);
- }
-
- internalPeriods = fdInfo.hiwat;
- internalBufferSizeInFrames = (fdInfo.blocksize * fdInfo.hiwat) / ma_get_bytes_per_frame(internalFormat, internalChannels);
- }
-#else
- /* We need to retrieve the format of the device so we can know the channel count and sample rate. Then we can calculate the buffer size. */
- if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to retrieve initial device parameters.", MA_FORMAT_NOT_SUPPORTED);
+ result = pContext->onEnumDevices(pContext, callback, pUserData);
}
+ ma_mutex_unlock(&pContext->deviceEnumLock);
- internalFormat = ma_format_from_swpar__audio4(&fdPar);
- internalChannels = (deviceType == ma_device_type_capture) ? fdPar.rchan : fdPar.pchan;
- internalSampleRate = fdPar.rate;
+ return result;
+}
- if (internalFormat == ma_format_unknown) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable.", MA_FORMAT_NOT_SUPPORTED);
- }
- /* Buffer. */
- {
- ma_uint32 internalBufferSizeInBytes;
+static ma_bool32 ma_context_get_devices__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
+{
+ /*
+ We need to insert the device info into our main internal buffer. Where it goes depends on the device type. If it's a capture device
+ it's just appended to the end. If it's a playback device it's inserted just before the first capture device.
+ */
- internalBufferSizeInFrames = pConfig->bufferSizeInFrames;
- if (internalBufferSizeInFrames == 0) {
- internalBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, internalSampleRate);
- }
+ /*
+ First make sure we have room. Since the number of devices we add to the list is usually relatively small I've decided to use a
+ simple fixed size increment for buffer expansion.
+ */
+ const ma_uint32 bufferExpansionCount = 2;
+ const ma_uint32 totalDeviceInfoCount = pContext->playbackDeviceInfoCount + pContext->captureDeviceInfoCount;
- /* What miniaudio calls a fragment, audio4 calls a block. */
- internalBufferSizeInBytes = internalBufferSizeInFrames * ma_get_bytes_per_frame(internalFormat, internalChannels);
- if (internalBufferSizeInBytes < 16) {
- internalBufferSizeInBytes = 16;
- }
-
- fdPar.nblks = pConfig->periods;
- fdPar.round = internalBufferSizeInBytes / fdPar.nblks;
-
- if (ioctl(fd, AUDIO_SETPAR, &fdPar) < 0) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to set device parameters.", MA_FORMAT_NOT_SUPPORTED);
+ if (pContext->deviceInfoCapacity >= totalDeviceInfoCount) {
+ ma_uint32 oldCapacity = pContext->deviceInfoCapacity;
+ ma_uint32 newCapacity = oldCapacity + bufferExpansionCount;
+ ma_device_info* pNewInfos = (ma_device_info*)ma__realloc_from_callbacks(pContext->pDeviceInfos, sizeof(*pContext->pDeviceInfos)*newCapacity, sizeof(*pContext->pDeviceInfos)*oldCapacity, &pContext->allocationCallbacks);
+ if (pNewInfos == NULL) {
+ return MA_FALSE; /* Out of memory. */
}
- if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to retrieve actual device parameters.", MA_FORMAT_NOT_SUPPORTED);
- }
+ pContext->pDeviceInfos = pNewInfos;
+ pContext->deviceInfoCapacity = newCapacity;
}
- internalFormat = ma_format_from_swpar__audio4(&fdPar);
- internalChannels = (deviceType == ma_device_type_capture) ? fdPar.rchan : fdPar.pchan;
- internalSampleRate = fdPar.rate;
- internalPeriods = fdPar.nblks;
- internalBufferSizeInFrames = (fdPar.nblks * fdPar.round) / ma_get_bytes_per_frame(internalFormat, internalChannels);
-#endif
+ if (deviceType == ma_device_type_playback) {
+ /* Playback. Insert just before the first capture device. */
- if (internalFormat == ma_format_unknown) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable.", MA_FORMAT_NOT_SUPPORTED);
- }
+ /* The first thing to do is move all of the capture devices down a slot. */
+ ma_uint32 iFirstCaptureDevice = pContext->playbackDeviceInfoCount;
+ size_t iCaptureDevice;
+ for (iCaptureDevice = totalDeviceInfoCount; iCaptureDevice > iFirstCaptureDevice; --iCaptureDevice) {
+ pContext->pDeviceInfos[iCaptureDevice] = pContext->pDeviceInfos[iCaptureDevice-1];
+ }
- if (deviceType == ma_device_type_capture) {
- pDevice->audio4.fdCapture = fd;
- pDevice->capture.internalFormat = internalFormat;
- pDevice->capture.internalChannels = internalChannels;
- pDevice->capture.internalSampleRate = internalSampleRate;
- ma_get_standard_channel_map(ma_standard_channel_map_sound4, internalChannels, pDevice->capture.internalChannelMap);
- pDevice->capture.internalBufferSizeInFrames = internalBufferSizeInFrames;
- pDevice->capture.internalPeriods = internalPeriods;
- } else {
- pDevice->audio4.fdPlayback = fd;
- pDevice->playback.internalFormat = internalFormat;
- pDevice->playback.internalChannels = internalChannels;
- pDevice->playback.internalSampleRate = internalSampleRate;
- ma_get_standard_channel_map(ma_standard_channel_map_sound4, internalChannels, pDevice->playback.internalChannelMap);
- pDevice->playback.internalBufferSizeInFrames = internalBufferSizeInFrames;
- pDevice->playback.internalPeriods = internalPeriods;
+ /* Now just insert where the first capture device was before moving it down a slot. */
+ pContext->pDeviceInfos[iFirstCaptureDevice] = *pInfo;
+ pContext->playbackDeviceInfoCount += 1;
+ } else {
+ /* Capture. Insert at the end. */
+ pContext->pDeviceInfos[totalDeviceInfoCount] = *pInfo;
+ pContext->captureDeviceInfoCount += 1;
}
- return MA_SUCCESS;
+ (void)pUserData;
+ return MA_TRUE;
}
-ma_result ma_device_init__audio4(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount)
{
- ma_assert(pDevice != NULL);
+ ma_result result;
- ma_zero_object(&pDevice->audio4);
-
- pDevice->audio4.fdCapture = -1;
- pDevice->audio4.fdPlayback = -1;
+ /* Safety. */
+ if (ppPlaybackDeviceInfos != NULL) *ppPlaybackDeviceInfos = NULL;
+ if (pPlaybackDeviceCount != NULL) *pPlaybackDeviceCount = 0;
+ if (ppCaptureDeviceInfos != NULL) *ppCaptureDeviceInfos = NULL;
+ if (pCaptureDeviceCount != NULL) *pCaptureDeviceCount = 0;
- /*
- The version of the operating system dictates whether or not the device is exclusive or shared. NetBSD
- introduced in-kernel mixing which means it's shared. All other BSD flavours are exclusive as far as
- I'm aware.
- */
-#if defined(__NetBSD_Version__) && __NetBSD_Version__ >= 800000000
- /* NetBSD 8.0+ */
- if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
- ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
+ if (pContext == NULL || pContext->onEnumDevices == NULL) {
+ return MA_INVALID_ARGS;
}
-#else
- /* All other flavors. */
-#endif
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- ma_result result = ma_device_init_fd__audio4(pContext, pConfig, ma_device_type_capture, pDevice);
- if (result != MA_SUCCESS) {
- return result;
- }
- }
+ /* Note that we don't use ma_context_enumerate_devices() here because we want to do locking at a higher level. */
+ ma_mutex_lock(&pContext->deviceEnumLock);
+ {
+ /* Reset everything first. */
+ pContext->playbackDeviceInfoCount = 0;
+ pContext->captureDeviceInfoCount = 0;
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- ma_result result = ma_device_init_fd__audio4(pContext, pConfig, ma_device_type_playback, pDevice);
- if (result != MA_SUCCESS) {
- if (pConfig->deviceType == ma_device_type_duplex) {
- close(pDevice->audio4.fdCapture);
+ /* Now enumerate over available devices. */
+ result = pContext->onEnumDevices(pContext, ma_context_get_devices__enum_callback, NULL);
+ if (result == MA_SUCCESS) {
+ /* Playback devices. */
+ if (ppPlaybackDeviceInfos != NULL) {
+ *ppPlaybackDeviceInfos = pContext->pDeviceInfos;
+ }
+ if (pPlaybackDeviceCount != NULL) {
+ *pPlaybackDeviceCount = pContext->playbackDeviceInfoCount;
+ }
+
+ /* Capture devices. */
+ if (ppCaptureDeviceInfos != NULL) {
+ *ppCaptureDeviceInfos = pContext->pDeviceInfos + pContext->playbackDeviceInfoCount; /* Capture devices come after playback devices. */
+ }
+ if (pCaptureDeviceCount != NULL) {
+ *pCaptureDeviceCount = pContext->captureDeviceInfoCount;
}
- return result;
}
}
+ ma_mutex_unlock(&pContext->deviceEnumLock);
- return MA_SUCCESS;
+ return result;
}
-#if 0
-ma_result ma_device_start__audio4(ma_device* pDevice)
+ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
{
- ma_assert(pDevice != NULL);
+ ma_device_info deviceInfo;
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- if (pDevice->audio4.fdCapture == -1) {
- return MA_INVALID_ARGS;
- }
+ /* NOTE: Do not clear pDeviceInfo on entry. The reason is the pDeviceID may actually point to pDeviceInfo->id which will break things. */
+ if (pContext == NULL || pDeviceInfo == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- if (pDevice->audio4.fdPlayback == -1) {
- return MA_INVALID_ARGS;
+ MA_ZERO_OBJECT(&deviceInfo);
+
+ /* Help the backend out by copying over the device ID if we have one. */
+ if (pDeviceID != NULL) {
+ MA_COPY_MEMORY(&deviceInfo.id, pDeviceID, sizeof(*pDeviceID));
+ }
+
+ /* The backend may have an optimized device info retrieval function. If so, try that first. */
+ if (pContext->onGetDeviceInfo != NULL) {
+ ma_result result;
+ ma_mutex_lock(&pContext->deviceInfoLock);
+ {
+ result = pContext->onGetDeviceInfo(pContext, deviceType, pDeviceID, shareMode, &deviceInfo);
}
+ ma_mutex_unlock(&pContext->deviceInfoLock);
+
+ /* Clamp ranges. */
+ deviceInfo.minChannels = ma_max(deviceInfo.minChannels, MA_MIN_CHANNELS);
+ deviceInfo.maxChannels = ma_min(deviceInfo.maxChannels, MA_MAX_CHANNELS);
+ deviceInfo.minSampleRate = ma_max(deviceInfo.minSampleRate, MA_MIN_SAMPLE_RATE);
+ deviceInfo.maxSampleRate = ma_min(deviceInfo.maxSampleRate, MA_MAX_SAMPLE_RATE);
+
+ *pDeviceInfo = deviceInfo;
+ return result;
}
- return MA_SUCCESS;
+ /* Getting here means onGetDeviceInfo has not been set. */
+ return MA_ERROR;
}
-#endif
-ma_result ma_device_stop_fd__audio4(ma_device* pDevice, int fd)
+ma_bool32 ma_context_is_loopback_supported(ma_context* pContext)
{
- if (fd == -1) {
- return MA_INVALID_ARGS;
- }
-
-#if !defined(MA_AUDIO4_USE_NEW_API)
- if (ioctl(fd, AUDIO_FLUSH, 0) < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to stop device. AUDIO_FLUSH failed.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
- }
-#else
- if (ioctl(fd, AUDIO_STOP, 0) < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to stop device. AUDIO_STOP failed.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
+ if (pContext == NULL) {
+ return MA_FALSE;
}
-#endif
- return MA_SUCCESS;
+ return ma_is_loopback_supported(pContext->backend);
}
-ma_result ma_device_stop__audio4(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ma_result result = ma_device_stop_fd__audio4(pDevice, pDevice->audio4.fdCapture);
- if (result != MA_SUCCESS) {
- return result;
- }
- }
+ma_device_config ma_device_config_init(ma_device_type deviceType)
+{
+ ma_device_config config;
+ MA_ZERO_OBJECT(&config);
+ config.deviceType = deviceType;
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- ma_result result = ma_device_stop_fd__audio4(pDevice, pDevice->audio4.fdPlayback);
- if (result != MA_SUCCESS) {
- return result;
- }
- }
+ /* Resampling defaults. We must never use the Speex backend by default because it uses licensed third party code. */
+ config.resampling.algorithm = ma_resample_algorithm_linear;
+ config.resampling.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER);
+ config.resampling.speex.quality = 3;
- return MA_SUCCESS;
+ return config;
}
-ma_result ma_device_write__audio4(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount)
+ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
{
- int result = write(pDevice->audio4.fdPlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
- if (result < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to write data to the device.", MA_FAILED_TO_SEND_DATA_TO_DEVICE);
+ ma_result result;
+ ma_device_config config;
+
+ if (pContext == NULL) {
+ return ma_device_init_ex(NULL, 0, NULL, pConfig, pDevice);
+ }
+ if (pDevice == NULL) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS);
+ }
+ if (pConfig == NULL) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid arguments (pConfig == NULL).", MA_INVALID_ARGS);
}
- return MA_SUCCESS;
-}
+ /* We need to make a copy of the config so we can set default values if they were left unset in the input config. */
+ config = *pConfig;
-ma_result ma_device_read__audio4(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount)
-{
- int result = read(pDevice->audio4.fdCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
- if (result < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[audio4] Failed to read data from the device.", MA_FAILED_TO_READ_DATA_FROM_DEVICE);
+ /* Basic config validation. */
+ if (config.deviceType != ma_device_type_playback && config.deviceType != ma_device_type_capture && config.deviceType != ma_device_type_duplex && config.deviceType != ma_device_type_loopback) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Device type is invalid. Make sure the device type has been set in the config.", MA_INVALID_DEVICE_CONFIG);
}
- return MA_SUCCESS;
-}
+ if (config.deviceType == ma_device_type_capture || config.deviceType == ma_device_type_duplex) {
+ if (config.capture.channels > MA_MAX_CHANNELS) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Capture channel count cannot exceed 32.", MA_INVALID_DEVICE_CONFIG);
+ }
+ if (!ma__is_channel_map_valid(config.capture.channelMap, config.capture.channels)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid config. Capture channel map is invalid.", MA_INVALID_DEVICE_CONFIG);
+ }
+ }
-ma_result ma_context_uninit__audio4(ma_context* pContext)
-{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_audio4);
+ if (config.deviceType == ma_device_type_playback || config.deviceType == ma_device_type_duplex || config.deviceType == ma_device_type_loopback) {
+ if (config.playback.channels > MA_MAX_CHANNELS) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Playback channel count cannot exceed 32.", MA_INVALID_DEVICE_CONFIG);
+ }
+ if (!ma__is_channel_map_valid(config.playback.channelMap, config.playback.channels)) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid config. Playback channel map is invalid.", MA_INVALID_DEVICE_CONFIG);
+ }
+ }
- (void)pContext;
- return MA_SUCCESS;
-}
-ma_result ma_context_init__audio4(const ma_context_config* pConfig, ma_context* pContext)
-{
- ma_assert(pContext != NULL);
+ MA_ZERO_OBJECT(pDevice);
+ pDevice->pContext = pContext;
- (void)pConfig;
+ /* Set the user data and log callback ASAP to ensure it is available for the entire initialization process. */
+ pDevice->pUserData = config.pUserData;
+ pDevice->onData = config.dataCallback;
+ pDevice->onStop = config.stopCallback;
+
+ if (((ma_uintptr)pDevice % sizeof(pDevice)) != 0) {
+ if (pContext->logCallback) {
+ pContext->logCallback(pContext, pDevice, MA_LOG_LEVEL_WARNING, "WARNING: ma_device_init() called for a device that is not properly aligned. Thread safety is not supported.");
+ }
+ }
- pContext->onUninit = ma_context_uninit__audio4;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__audio4;
- pContext->onEnumDevices = ma_context_enumerate_devices__audio4;
- pContext->onGetDeviceInfo = ma_context_get_device_info__audio4;
- pContext->onDeviceInit = ma_device_init__audio4;
- pContext->onDeviceUninit = ma_device_uninit__audio4;
- pContext->onDeviceStart = NULL;
- pContext->onDeviceStop = ma_device_stop__audio4;
- pContext->onDeviceWrite = ma_device_write__audio4;
- pContext->onDeviceRead = ma_device_read__audio4;
+ pDevice->noPreZeroedOutputBuffer = config.noPreZeroedOutputBuffer;
+ pDevice->noClip = config.noClip;
+ pDevice->masterVolumeFactor = 1;
- return MA_SUCCESS;
-}
-#endif /* audio4 */
+ /*
+ When passing in 0 for the format/channels/rate/chmap it means the device will be using whatever is chosen by the backend. If everything is set
+ to defaults it means the format conversion pipeline will run on a fast path where data transfer is just passed straight through to the backend.
+ */
+ if (config.sampleRate == 0) {
+ config.sampleRate = MA_DEFAULT_SAMPLE_RATE;
+ pDevice->usingDefaultSampleRate = MA_TRUE;
+ }
+ if (config.capture.format == ma_format_unknown) {
+ config.capture.format = MA_DEFAULT_FORMAT;
+ pDevice->capture.usingDefaultFormat = MA_TRUE;
+ }
+ if (config.capture.channels == 0) {
+ config.capture.channels = MA_DEFAULT_CHANNELS;
+ pDevice->capture.usingDefaultChannels = MA_TRUE;
+ }
+ if (config.capture.channelMap[0] == MA_CHANNEL_NONE) {
+ pDevice->capture.usingDefaultChannelMap = MA_TRUE;
+ }
-/******************************************************************************
+ if (config.playback.format == ma_format_unknown) {
+ config.playback.format = MA_DEFAULT_FORMAT;
+ pDevice->playback.usingDefaultFormat = MA_TRUE;
+ }
+ if (config.playback.channels == 0) {
+ config.playback.channels = MA_DEFAULT_CHANNELS;
+ pDevice->playback.usingDefaultChannels = MA_TRUE;
+ }
+ if (config.playback.channelMap[0] == MA_CHANNEL_NONE) {
+ pDevice->playback.usingDefaultChannelMap = MA_TRUE;
+ }
-OSS Backend
-******************************************************************************/
-#ifdef MA_HAS_OSS
-#include
-#include
-#include
-#include
+ /* Default periods. */
+ if (config.periods == 0) {
+ config.periods = MA_DEFAULT_PERIODS;
+ pDevice->usingDefaultPeriods = MA_TRUE;
+ }
-#ifndef SNDCTL_DSP_HALT
-#define SNDCTL_DSP_HALT SNDCTL_DSP_RESET
-#endif
+ /*
+ Must have at least 3 periods for full-duplex mode. The idea is that the playback and capture positions hang out in the middle period, with the surrounding
+ periods acting as a buffer in case the capture and playback devices get slightly out of sync.
+ */
+ if (config.deviceType == ma_device_type_duplex && config.periods < 3) {
+ config.periods = 3;
+ }
-int ma_open_temp_device__oss()
-{
- /* The OSS sample code uses "/dev/mixer" as the device for getting system properties so I'm going to do the same. */
- int fd = open("/dev/mixer", O_RDONLY, 0);
- if (fd >= 0) {
- return fd;
+ /* Default buffer size. */
+ if (config.periodSizeInMilliseconds == 0 && config.periodSizeInFrames == 0) {
+ config.periodSizeInMilliseconds = (config.performanceProfile == ma_performance_profile_low_latency) ? MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY : MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE;
+ pDevice->usingDefaultBufferSize = MA_TRUE;
}
+
- return -1;
-}
-ma_result ma_context_open_device__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, int* pfd)
-{
- const char* deviceName;
- int flags;
+ pDevice->type = config.deviceType;
+ pDevice->sampleRate = config.sampleRate;
+ pDevice->resampling.algorithm = config.resampling.algorithm;
+ pDevice->resampling.linear.lpfOrder = config.resampling.linear.lpfOrder;
+ pDevice->resampling.speex.quality = config.resampling.speex.quality;
+
+ pDevice->capture.shareMode = config.capture.shareMode;
+ pDevice->capture.format = config.capture.format;
+ pDevice->capture.channels = config.capture.channels;
+ ma_channel_map_copy(pDevice->capture.channelMap, config.capture.channelMap, config.capture.channels);
+
+ pDevice->playback.shareMode = config.playback.shareMode;
+ pDevice->playback.format = config.playback.format;
+ pDevice->playback.channels = config.playback.channels;
+ ma_channel_map_copy(pDevice->playback.channelMap, config.playback.channelMap, config.playback.channels);
- ma_assert(pContext != NULL);
- ma_assert(pfd != NULL);
- (void)pContext;
- *pfd = -1;
+ /* The internal format, channel count and sample rate can be modified by the backend. */
+ pDevice->capture.internalFormat = pDevice->capture.format;
+ pDevice->capture.internalChannels = pDevice->capture.channels;
+ pDevice->capture.internalSampleRate = pDevice->sampleRate;
+ ma_channel_map_copy(pDevice->capture.internalChannelMap, pDevice->capture.channelMap, pDevice->capture.channels);
- /* This function should only be called for playback or capture, not duplex. */
- if (deviceType == ma_device_type_duplex) {
- return MA_INVALID_ARGS;
+ pDevice->playback.internalFormat = pDevice->playback.format;
+ pDevice->playback.internalChannels = pDevice->playback.channels;
+ pDevice->playback.internalSampleRate = pDevice->sampleRate;
+ ma_channel_map_copy(pDevice->playback.internalChannelMap, pDevice->playback.channelMap, pDevice->playback.channels);
+
+ result = ma_mutex_init(pContext, &pDevice->lock);
+ if (result != MA_SUCCESS) {
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create mutex.", result);
}
- deviceName = "/dev/dsp";
- if (pDeviceID != NULL) {
- deviceName = pDeviceID->oss;
+ /*
+ When the device is started, the worker thread is the one that does the actual startup of the backend device. We
+ use a semaphore to wait for the background thread to finish the work. The same applies for stopping the device.
+
+ Each of these semaphores is released internally by the worker thread when the work is completed. The start
+ semaphore is also used to wake up the worker thread.
+ */
+ result = ma_event_init(pContext, &pDevice->wakeupEvent);
+ if (result != MA_SUCCESS) {
+ ma_mutex_uninit(&pDevice->lock);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread wakeup event.", result);
}
- flags = (deviceType == ma_device_type_playback) ? O_WRONLY : O_RDONLY;
- if (shareMode == ma_share_mode_exclusive) {
- flags |= O_EXCL;
+ result = ma_event_init(pContext, &pDevice->startEvent);
+ if (result != MA_SUCCESS) {
+ ma_event_uninit(&pDevice->wakeupEvent);
+ ma_mutex_uninit(&pDevice->lock);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread start event.", result);
}
- *pfd = open(deviceName, flags, 0);
- if (*pfd == -1) {
- return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+ result = ma_event_init(pContext, &pDevice->stopEvent);
+ if (result != MA_SUCCESS) {
+ ma_event_uninit(&pDevice->startEvent);
+ ma_event_uninit(&pDevice->wakeupEvent);
+ ma_mutex_uninit(&pDevice->lock);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread stop event.", result);
}
- return MA_SUCCESS;
-}
-
-ma_bool32 ma_context_is_device_id_equal__oss(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
-{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
- return ma_strcmp(pID0->oss, pID1->oss) == 0;
-}
+ result = pContext->onDeviceInit(pContext, &config, pDevice);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-ma_result ma_context_enumerate_devices__oss(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
-{
- int fd;
- oss_sysinfo si;
- int result;
+ ma_device__post_init_setup(pDevice, pConfig->deviceType);
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
- fd = ma_open_temp_device__oss();
- if (fd == -1) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open a temporary device for retrieving system information used for device enumeration.", MA_NO_BACKEND);
+ /* If the backend did not fill out a name for the device, try a generic method. */
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ if (pDevice->capture.name[0] == '\0') {
+ if (ma_context__try_get_device_name_by_id(pContext, ma_device_type_capture, config.capture.pDeviceID, pDevice->capture.name, sizeof(pDevice->capture.name)) != MA_SUCCESS) {
+ ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), (config.capture.pDeviceID == NULL) ? MA_DEFAULT_CAPTURE_DEVICE_NAME : "Capture Device", (size_t)-1);
+ }
+ }
+ }
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) {
+ if (pDevice->playback.name[0] == '\0') {
+ if (ma_context__try_get_device_name_by_id(pContext, ma_device_type_playback, config.playback.pDeviceID, pDevice->playback.name, sizeof(pDevice->playback.name)) != MA_SUCCESS) {
+ ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), (config.playback.pDeviceID == NULL) ? MA_DEFAULT_PLAYBACK_DEVICE_NAME : "Playback Device", (size_t)-1);
+ }
+ }
}
- result = ioctl(fd, SNDCTL_SYSINFO, &si);
- if (result != -1) {
- int iAudioDevice;
- for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) {
- oss_audioinfo ai;
- ai.dev = iAudioDevice;
- result = ioctl(fd, SNDCTL_AUDIOINFO, &ai);
- if (result != -1) {
- if (ai.devnode[0] != '\0') { /* <-- Can be blank, according to documentation. */
- ma_device_info deviceInfo;
- ma_bool32 isTerminating = MA_FALSE;
-
- ma_zero_object(&deviceInfo);
- /* ID */
- ma_strncpy_s(deviceInfo.id.oss, sizeof(deviceInfo.id.oss), ai.devnode, (size_t)-1);
+ /* Some backends don't require the worker thread. */
+ if (!ma_context_is_backend_asynchronous(pContext)) {
+ /* The worker thread. */
+ result = ma_thread_create(pContext, &pDevice->thread, ma_worker_thread, pDevice);
+ if (result != MA_SUCCESS) {
+ ma_device_uninit(pDevice);
+ return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread.", result);
+ }
- /*
- The human readable device name should be in the "ai.handle" variable, but it can
- sometimes be empty in which case we just fall back to "ai.name" which is less user
- friendly, but usually has a value.
- */
- if (ai.handle[0] != '\0') {
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.handle, (size_t)-1);
- } else {
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.name, (size_t)-1);
- }
+ /* Wait for the worker thread to put the device into its stopped state for real. */
+ ma_event_wait(&pDevice->stopEvent);
+ } else {
+ ma_device__set_state(pDevice, MA_STATE_STOPPED);
+ }
- /* The device can be both playback and capture. */
- if (!isTerminating && (ai.caps & PCM_CAP_OUTPUT) != 0) {
- isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
- }
- if (!isTerminating && (ai.caps & PCM_CAP_INPUT) != 0) {
- isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
- }
- if (isTerminating) {
- break;
- }
- }
- }
- }
- } else {
- close(fd);
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve system information for device enumeration.", MA_NO_BACKEND);
+#ifdef MA_DEBUG_OUTPUT
+ printf("[%s]\n", ma_get_backend_name(pDevice->pContext->backend));
+ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+ printf(" %s (%s)\n", pDevice->capture.name, "Capture");
+ printf(" Format: %s -> %s\n", ma_get_format_name(pDevice->capture.format), ma_get_format_name(pDevice->capture.internalFormat));
+ printf(" Channels: %d -> %d\n", pDevice->capture.channels, pDevice->capture.internalChannels);
+ printf(" Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->capture.internalSampleRate);
+ printf(" Buffer Size: %d*%d (%d)\n", pDevice->capture.internalPeriodSizeInFrames, pDevice->capture.internalPeriods, (pDevice->capture.internalPeriodSizeInFrames * pDevice->capture.internalPeriods));
+ printf(" Conversion:\n");
+ printf(" Pre Format Conversion: %s\n", pDevice->capture.converter.hasPreFormatConversion ? "YES" : "NO");
+ printf(" Post Format Conversion: %s\n", pDevice->capture.converter.hasPostFormatConversion ? "YES" : "NO");
+ printf(" Channel Routing: %s\n", pDevice->capture.converter.hasChannelConverter ? "YES" : "NO");
+ printf(" Resampling: %s\n", pDevice->capture.converter.hasResampler ? "YES" : "NO");
+ printf(" Passthrough: %s\n", pDevice->capture.converter.isPassthrough ? "YES" : "NO");
+ }
+ if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+ printf(" %s (%s)\n", pDevice->playback.name, "Playback");
+ printf(" Format: %s -> %s\n", ma_get_format_name(pDevice->playback.format), ma_get_format_name(pDevice->playback.internalFormat));
+ printf(" Channels: %d -> %d\n", pDevice->playback.channels, pDevice->playback.internalChannels);
+ printf(" Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->playback.internalSampleRate);
+ printf(" Buffer Size: %d*%d (%d)\n", pDevice->playback.internalPeriodSizeInFrames, pDevice->playback.internalPeriods, (pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods));
+ printf(" Conversion:\n");
+ printf(" Pre Format Conversion: %s\n", pDevice->playback.converter.hasPreFormatConversion ? "YES" : "NO");
+ printf(" Post Format Conversion: %s\n", pDevice->playback.converter.hasPostFormatConversion ? "YES" : "NO");
+ printf(" Channel Routing: %s\n", pDevice->playback.converter.hasChannelConverter ? "YES" : "NO");
+ printf(" Resampling: %s\n", pDevice->playback.converter.hasResampler ? "YES" : "NO");
+ printf(" Passthrough: %s\n", pDevice->playback.converter.isPassthrough ? "YES" : "NO");
}
+#endif
- close(fd);
+
+ MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STOPPED);
return MA_SUCCESS;
}
-ma_result ma_context_get_device_info__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice)
{
- ma_bool32 foundDevice;
- int fdTemp;
- oss_sysinfo si;
- int result;
+ ma_result result;
+ ma_context* pContext;
+ ma_backend defaultBackends[ma_backend_null+1];
+ ma_uint32 iBackend;
+ ma_backend* pBackendsToIterate;
+ ma_uint32 backendsToIterateCount;
+ ma_allocation_callbacks allocationCallbacks;
- ma_assert(pContext != NULL);
- (void)shareMode;
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
- /* Handle the default device a little differently. */
- if (pDeviceID == NULL) {
- if (deviceType == ma_device_type_playback) {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- } else {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ if (pContextConfig != NULL) {
+ result = ma_allocation_callbacks_init_copy(&allocationCallbacks, &pContextConfig->allocationCallbacks);
+ if (result != MA_SUCCESS) {
+ return result;
}
-
- return MA_SUCCESS;
+ } else {
+ allocationCallbacks = ma_allocation_callbacks_init_default();
}
+
-
- /* If we get here it means we are _not_ using the default device. */
- foundDevice = MA_FALSE;
-
- fdTemp = ma_open_temp_device__oss();
- if (fdTemp == -1) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open a temporary device for retrieving system information used for device enumeration.", MA_NO_BACKEND);
+ pContext = (ma_context*)ma__malloc_from_callbacks(sizeof(*pContext), &allocationCallbacks);
+ if (pContext == NULL) {
+ return MA_OUT_OF_MEMORY;
}
- result = ioctl(fdTemp, SNDCTL_SYSINFO, &si);
- if (result != -1) {
- int iAudioDevice;
- for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) {
- oss_audioinfo ai;
- ai.dev = iAudioDevice;
- result = ioctl(fdTemp, SNDCTL_AUDIOINFO, &ai);
- if (result != -1) {
- if (ma_strcmp(ai.devnode, pDeviceID->oss) == 0) {
- /* It has the same name, so now just confirm the type. */
- if ((deviceType == ma_device_type_playback && ((ai.caps & PCM_CAP_OUTPUT) != 0)) ||
- (deviceType == ma_device_type_capture && ((ai.caps & PCM_CAP_INPUT) != 0))) {
- unsigned int formatMask;
-
- /* ID */
- ma_strncpy_s(pDeviceInfo->id.oss, sizeof(pDeviceInfo->id.oss), ai.devnode, (size_t)-1);
-
- /*
- The human readable device name should be in the "ai.handle" variable, but it can
- sometimes be empty in which case we just fall back to "ai.name" which is less user
- friendly, but usually has a value.
- */
- if (ai.handle[0] != '\0') {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ai.handle, (size_t)-1);
- } else {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ai.name, (size_t)-1);
- }
-
- pDeviceInfo->minChannels = ai.min_channels;
- pDeviceInfo->maxChannels = ai.max_channels;
- pDeviceInfo->minSampleRate = ai.min_rate;
- pDeviceInfo->maxSampleRate = ai.max_rate;
- pDeviceInfo->formatCount = 0;
+ for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) {
+ defaultBackends[iBackend] = (ma_backend)iBackend;
+ }
- if (deviceType == ma_device_type_playback) {
- formatMask = ai.oformats;
- } else {
- formatMask = ai.iformats;
- }
+ pBackendsToIterate = (ma_backend*)backends;
+ backendsToIterateCount = backendCount;
+ if (pBackendsToIterate == NULL) {
+ pBackendsToIterate = (ma_backend*)defaultBackends;
+ backendsToIterateCount = ma_countof(defaultBackends);
+ }
- if ((formatMask & AFMT_U8) != 0) {
- pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_u8;
- }
- if (((formatMask & AFMT_S16_LE) != 0 && ma_is_little_endian()) || (AFMT_S16_BE && ma_is_big_endian())) {
- pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s16;
- }
- if (((formatMask & AFMT_S32_LE) != 0 && ma_is_little_endian()) || (AFMT_S32_BE && ma_is_big_endian())) {
- pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_s32;
- }
+ result = MA_NO_BACKEND;
- foundDevice = MA_TRUE;
- break;
- }
- }
+ for (iBackend = 0; iBackend < backendsToIterateCount; ++iBackend) {
+ result = ma_context_init(&pBackendsToIterate[iBackend], 1, pContextConfig, pContext);
+ if (result == MA_SUCCESS) {
+ result = ma_device_init(pContext, pConfig, pDevice);
+ if (result == MA_SUCCESS) {
+ break; /* Success. */
+ } else {
+ ma_context_uninit(pContext); /* Failure. */
}
}
- } else {
- close(fdTemp);
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve system information for device enumeration.", MA_NO_BACKEND);
}
-
- close(fdTemp);
-
- if (!foundDevice) {
- return MA_NO_DEVICE;
+ if (result != MA_SUCCESS) {
+ ma__free_from_callbacks(pContext, &allocationCallbacks);
+ return result;
}
- return MA_SUCCESS;
+ pDevice->isOwnerOfContext = MA_TRUE;
+ return result;
}
-void ma_device_uninit__oss(ma_device* pDevice)
+void ma_device_uninit(ma_device* pDevice)
{
- ma_assert(pDevice != NULL);
-
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- close(pDevice->oss.fdCapture);
- }
-
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- close(pDevice->oss.fdPlayback);
+ if (!ma_device__is_initialized(pDevice)) {
+ return;
}
-}
-int ma_format_to_oss(ma_format format)
-{
- int ossFormat = AFMT_U8;
- switch (format) {
- case ma_format_s16: ossFormat = (ma_is_little_endian()) ? AFMT_S16_LE : AFMT_S16_BE; break;
- case ma_format_s24: ossFormat = (ma_is_little_endian()) ? AFMT_S32_LE : AFMT_S32_BE; break;
- case ma_format_s32: ossFormat = (ma_is_little_endian()) ? AFMT_S32_LE : AFMT_S32_BE; break;
- case ma_format_f32: ossFormat = (ma_is_little_endian()) ? AFMT_S16_LE : AFMT_S16_BE; break;
- case ma_format_u8:
- default: ossFormat = AFMT_U8; break;
+ /* Make sure the device is stopped first. The backends will probably handle this naturally, but I like to do it explicitly for my own sanity. */
+ if (ma_device_is_started(pDevice)) {
+ ma_device_stop(pDevice);
}
- return ossFormat;
-}
+ /* Putting the device into an uninitialized state will make the worker thread return. */
+ ma_device__set_state(pDevice, MA_STATE_UNINITIALIZED);
-ma_format ma_format_from_oss(int ossFormat)
-{
- if (ossFormat == AFMT_U8) {
- return ma_format_u8;
- } else {
- if (ma_is_little_endian()) {
- switch (ossFormat) {
- case AFMT_S16_LE: return ma_format_s16;
- case AFMT_S32_LE: return ma_format_s32;
- default: return ma_format_unknown;
- }
- } else {
- switch (ossFormat) {
- case AFMT_S16_BE: return ma_format_s16;
- case AFMT_S32_BE: return ma_format_s32;
- default: return ma_format_unknown;
- }
- }
+ /* Wake up the worker thread and wait for it to properly terminate. */
+ if (!ma_context_is_backend_asynchronous(pDevice->pContext)) {
+ ma_event_signal(&pDevice->wakeupEvent);
+ ma_thread_wait(&pDevice->thread);
}
- return ma_format_unknown;
-}
-
-ma_result ma_device_init_fd__oss(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice)
-{
- ma_result result;
- int ossResult;
- int fd;
- const ma_device_id* pDeviceID = NULL;
- ma_share_mode shareMode;
- int ossFormat;
- int ossChannels;
- int ossSampleRate;
- int ossFragment;
+ pDevice->pContext->onDeviceUninit(pDevice);
- ma_assert(pContext != NULL);
- ma_assert(pConfig != NULL);
- ma_assert(deviceType != ma_device_type_duplex);
- ma_assert(pDevice != NULL);
+ ma_event_uninit(&pDevice->stopEvent);
+ ma_event_uninit(&pDevice->startEvent);
+ ma_event_uninit(&pDevice->wakeupEvent);
+ ma_mutex_uninit(&pDevice->lock);
- (void)pContext;
+ if (pDevice->isOwnerOfContext) {
+ ma_allocation_callbacks allocationCallbacks = pDevice->pContext->allocationCallbacks;
- if (deviceType == ma_device_type_capture) {
- pDeviceID = pConfig->capture.pDeviceID;
- shareMode = pConfig->capture.shareMode;
- ossFormat = ma_format_to_oss(pConfig->capture.format);
- ossChannels = (int)pConfig->capture.channels;
- ossSampleRate = (int)pConfig->sampleRate;
- } else {
- pDeviceID = pConfig->playback.pDeviceID;
- shareMode = pConfig->playback.shareMode;
- ossFormat = ma_format_to_oss(pConfig->playback.format);
- ossChannels = (int)pConfig->playback.channels;
- ossSampleRate = (int)pConfig->sampleRate;
+ ma_context_uninit(pDevice->pContext);
+ ma__free_from_callbacks(pDevice->pContext, &allocationCallbacks);
}
- result = ma_context_open_device__oss(pContext, deviceType, pDeviceID, shareMode, &fd);
- if (result != MA_SUCCESS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ MA_ZERO_OBJECT(pDevice);
+}
- /*
- The OSS documantation is very clear about the order we should be initializing the device's properties:
- 1) Format
- 2) Channels
- 3) Sample rate.
- */
+ma_result ma_device_start(ma_device* pDevice)
+{
+ ma_result result;
- /* Format. */
- ossResult = ioctl(fd, SNDCTL_DSP_SETFMT, &ossFormat);
- if (ossResult == -1) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set format.", MA_FORMAT_NOT_SUPPORTED);
+ if (pDevice == NULL) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_start() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS);
}
- /* Channels. */
- ossResult = ioctl(fd, SNDCTL_DSP_CHANNELS, &ossChannels);
- if (ossResult == -1) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set channel count.", MA_FORMAT_NOT_SUPPORTED);
+ if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_start() called for an uninitialized device.", MA_DEVICE_NOT_INITIALIZED);
}
- /* Sample Rate. */
- ossResult = ioctl(fd, SNDCTL_DSP_SPEED, &ossSampleRate);
- if (ossResult == -1) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set sample rate.", MA_FORMAT_NOT_SUPPORTED);
+ if (ma_device__get_state(pDevice) == MA_STATE_STARTED) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_WARNING, "ma_device_start() called when the device is already started.", MA_INVALID_OPERATION); /* Already started. Returning an error to let the application know because it probably means they're doing something wrong. */
}
- /*
- Buffer.
-
- The documentation says that the fragment settings should be set as soon as possible, but I'm not sure if
- it should be done before or after format/channels/rate.
-
- OSS wants the fragment size in bytes and a power of 2. When setting, we specify the power, not the actual
- value.
- */
+ result = MA_ERROR;
+ ma_mutex_lock(&pDevice->lock);
{
- ma_uint32 fragmentSizeInBytes;
- ma_uint32 bufferSizeInFrames;
- ma_uint32 ossFragmentSizePower;
-
- bufferSizeInFrames = pConfig->bufferSizeInFrames;
- if (bufferSizeInFrames == 0) {
- bufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, (ma_uint32)ossSampleRate);
- }
-
- fragmentSizeInBytes = ma_round_to_power_of_2((bufferSizeInFrames / pConfig->periods) * ma_get_bytes_per_frame(ma_format_from_oss(ossFormat), ossChannels));
- if (fragmentSizeInBytes < 16) {
- fragmentSizeInBytes = 16;
- }
-
- ossFragmentSizePower = 4;
- fragmentSizeInBytes >>= 4;
- while (fragmentSizeInBytes >>= 1) {
- ossFragmentSizePower += 1;
- }
-
- ossFragment = (int)((pConfig->periods << 16) | ossFragmentSizePower);
- ossResult = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &ossFragment);
- if (ossResult == -1) {
- close(fd);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to set fragment size and period count.", MA_FORMAT_NOT_SUPPORTED);
- }
- }
+ /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a stopped or paused state. */
+ MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STOPPED);
- /* Internal settings. */
- if (deviceType == ma_device_type_capture) {
- pDevice->oss.fdCapture = fd;
- pDevice->capture.internalFormat = ma_format_from_oss(ossFormat);
- pDevice->capture.internalChannels = ossChannels;
- pDevice->capture.internalSampleRate = ossSampleRate;
- ma_get_standard_channel_map(ma_standard_channel_map_sound4, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
- pDevice->capture.internalPeriods = (ma_uint32)(ossFragment >> 16);
- pDevice->capture.internalBufferSizeInFrames = (((ma_uint32)(1 << (ossFragment & 0xFFFF))) * pDevice->capture.internalPeriods) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+ ma_device__set_state(pDevice, MA_STATE_STARTING);
- if (pDevice->capture.internalFormat == ma_format_unknown) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] The device's internal format is not supported by miniaudio.", MA_FORMAT_NOT_SUPPORTED);
- }
- } else {
- pDevice->oss.fdPlayback = fd;
- pDevice->playback.internalFormat = ma_format_from_oss(ossFormat);
- pDevice->playback.internalChannels = ossChannels;
- pDevice->playback.internalSampleRate = ossSampleRate;
- ma_get_standard_channel_map(ma_standard_channel_map_sound4, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
- pDevice->playback.internalPeriods = (ma_uint32)(ossFragment >> 16);
- pDevice->playback.internalBufferSizeInFrames = (((ma_uint32)(1 << (ossFragment & 0xFFFF))) * pDevice->playback.internalPeriods) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+ /* Asynchronous backends need to be handled differently. */
+ if (ma_context_is_backend_asynchronous(pDevice->pContext)) {
+ result = pDevice->pContext->onDeviceStart(pDevice);
+ if (result == MA_SUCCESS) {
+ ma_device__set_state(pDevice, MA_STATE_STARTED);
+ }
+ } else {
+ /*
+ Synchronous backends are started by signaling an event that's being waited on in the worker thread. We first wake up the
+ thread and then wait for the start event.
+ */
+ ma_event_signal(&pDevice->wakeupEvent);
- if (pDevice->playback.internalFormat == ma_format_unknown) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] The device's internal format is not supported by miniaudio.", MA_FORMAT_NOT_SUPPORTED);
+ /*
+ Wait for the worker thread to finish starting the device. Note that the worker thread will be the one who puts the device
+ into the started state. Don't call ma_device__set_state() here.
+ */
+ ma_event_wait(&pDevice->startEvent);
+ result = pDevice->workResult;
}
}
+ ma_mutex_unlock(&pDevice->lock);
- return MA_SUCCESS;
+ return result;
}
-ma_result ma_device_init__oss(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+ma_result ma_device_stop(ma_device* pDevice)
{
- ma_assert(pContext != NULL);
- ma_assert(pConfig != NULL);
- ma_assert(pDevice != NULL);
+ ma_result result;
- ma_zero_object(&pDevice->oss);
+ if (pDevice == NULL) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_stop() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS);
+ }
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- ma_result result = ma_device_init_fd__oss(pContext, pConfig, ma_device_type_capture, pDevice);
- if (result != MA_SUCCESS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_stop() called for an uninitialized device.", MA_DEVICE_NOT_INITIALIZED);
}
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- ma_result result = ma_device_init_fd__oss(pContext, pConfig, ma_device_type_playback, pDevice);
- if (result != MA_SUCCESS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ if (ma_device__get_state(pDevice) == MA_STATE_STOPPED) {
+ return ma_post_error(pDevice, MA_LOG_LEVEL_WARNING, "ma_device_stop() called when the device is already stopped.", MA_INVALID_OPERATION); /* Already stopped. Returning an error to let the application know because it probably means they're doing something wrong. */
}
- return MA_SUCCESS;
-}
+ result = MA_ERROR;
+ ma_mutex_lock(&pDevice->lock);
+ {
+ /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a started or paused state. */
+ MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STARTED);
-ma_result ma_device_stop__oss(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
+ ma_device__set_state(pDevice, MA_STATE_STOPPING);
- /*
- We want to use SNDCTL_DSP_HALT. From the documentation:
-
- In multithreaded applications SNDCTL_DSP_HALT (SNDCTL_DSP_RESET) must only be called by the thread
- that actually reads/writes the audio device. It must not be called by some master thread to kill the
- audio thread. The audio thread will not stop or get any kind of notification that the device was
- stopped by the master thread. The device gets stopped but the next read or write call will silently
- restart the device.
-
- This is actually safe in our case, because this function is only ever called from within our worker
- thread anyway. Just keep this in mind, though...
- */
+ /* There's no need to wake up the thread like we do when starting. */
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- int result = ioctl(pDevice->oss.fdCapture, SNDCTL_DSP_HALT, 0);
- if (result == -1) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to stop device. SNDCTL_DSP_HALT failed.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
+ if (pDevice->pContext->onDeviceStop) {
+ result = pDevice->pContext->onDeviceStop(pDevice);
+ } else {
+ result = MA_SUCCESS;
}
- }
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- int result = ioctl(pDevice->oss.fdPlayback, SNDCTL_DSP_HALT, 0);
- if (result == -1) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to stop device. SNDCTL_DSP_HALT failed.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
+ /* Asynchronous backends need to be handled differently. */
+ if (ma_context_is_backend_asynchronous(pDevice->pContext)) {
+ ma_device__set_state(pDevice, MA_STATE_STOPPED);
+ } else {
+ /* Synchronous backends. */
+
+ /*
+ We need to wait for the worker thread to become available for work before returning. Note that the worker thread will be
+ the one who puts the device into the stopped state. Don't call ma_device__set_state() here.
+ */
+ ma_event_wait(&pDevice->stopEvent);
+ result = MA_SUCCESS;
}
}
+ ma_mutex_unlock(&pDevice->lock);
- return MA_SUCCESS;
+ return result;
}
-ma_result ma_device_write__oss(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount)
+ma_bool32 ma_device_is_started(ma_device* pDevice)
{
- int resultOSS = write(pDevice->oss.fdPlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
- if (resultOSS < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to send data from the client to the device.", MA_FAILED_TO_SEND_DATA_TO_DEVICE);
+ if (pDevice == NULL) {
+ return MA_FALSE;
}
-
- return MA_SUCCESS;
+
+ return ma_device__get_state(pDevice) == MA_STATE_STARTED;
}
-ma_result ma_device_read__oss(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount)
+ma_result ma_device_set_master_volume(ma_device* pDevice, float volume)
{
- int resultOSS = read(pDevice->oss.fdCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
- if (resultOSS < 0) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OSS] Failed to read data from the device to be sent to the client.", MA_FAILED_TO_READ_DATA_FROM_DEVICE);
+ if (pDevice == NULL) {
+ return MA_INVALID_ARGS;
}
-
+
+ if (volume < 0.0f || volume > 1.0f) {
+ return MA_INVALID_ARGS;
+ }
+
+ pDevice->masterVolumeFactor = volume;
+
return MA_SUCCESS;
}
-ma_result ma_context_uninit__oss(ma_context* pContext)
+ma_result ma_device_get_master_volume(ma_device* pDevice, float* pVolume)
{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_oss);
+ if (pVolume == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ if (pDevice == NULL) {
+ *pVolume = 0;
+ return MA_INVALID_ARGS;
+ }
+
+ *pVolume = pDevice->masterVolumeFactor;
- (void)pContext;
return MA_SUCCESS;
}
-ma_result ma_context_init__oss(const ma_context_config* pConfig, ma_context* pContext)
+ma_result ma_device_set_master_gain_db(ma_device* pDevice, float gainDB)
{
- int fd;
- int ossVersion;
- int result;
+ if (gainDB > 0) {
+ return MA_INVALID_ARGS;
+ }
- ma_assert(pContext != NULL);
+ return ma_device_set_master_volume(pDevice, ma_gain_db_to_factor(gainDB));
+}
- (void)pConfig;
+ma_result ma_device_get_master_gain_db(ma_device* pDevice, float* pGainDB)
+{
+ float factor;
+ ma_result result;
- /* Try opening a temporary device first so we can get version information. This is closed at the end. */
- fd = ma_open_temp_device__oss();
- if (fd == -1) {
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to open temporary device for retrieving system properties.", MA_NO_BACKEND); /* Looks liks OSS isn't installed, or there are no available devices. */
+ if (pGainDB == NULL) {
+ return MA_INVALID_ARGS;
}
- /* Grab the OSS version. */
- ossVersion = 0;
- result = ioctl(fd, OSS_GETVERSION, &ossVersion);
- if (result == -1) {
- close(fd);
- return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve OSS version.", MA_NO_BACKEND);
+ result = ma_device_get_master_volume(pDevice, &factor);
+ if (result != MA_SUCCESS) {
+ *pGainDB = 0;
+ return result;
}
- pContext->oss.versionMajor = ((ossVersion & 0xFF0000) >> 16);
- pContext->oss.versionMinor = ((ossVersion & 0x00FF00) >> 8);
-
- pContext->onUninit = ma_context_uninit__oss;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__oss;
- pContext->onEnumDevices = ma_context_enumerate_devices__oss;
- pContext->onGetDeviceInfo = ma_context_get_device_info__oss;
- pContext->onDeviceInit = ma_device_init__oss;
- pContext->onDeviceUninit = ma_device_uninit__oss;
- pContext->onDeviceStart = NULL; /* Not required for synchronous backends. */
- pContext->onDeviceStop = ma_device_stop__oss;
- pContext->onDeviceWrite = ma_device_write__oss;
- pContext->onDeviceRead = ma_device_read__oss;
+ *pGainDB = ma_factor_to_gain_db(factor);
- close(fd);
return MA_SUCCESS;
}
-#endif /* OSS */
+#endif /* MA_NO_DEVICE_IO */
-/******************************************************************************
+/**************************************************************************************************************************************************************
-AAudio Backend
+Biquad Filter
-******************************************************************************/
-#ifdef MA_HAS_AAUDIO
-/*#include */
+**************************************************************************************************************************************************************/
+#ifndef MA_BIQUAD_FIXED_POINT_SHIFT
+#define MA_BIQUAD_FIXED_POINT_SHIFT 14
+#endif
-#define MA_AAUDIO_UNSPECIFIED 0
+static ma_int32 ma_biquad_float_to_fp(double x)
+{
+ return (ma_int32)(x * (1 << MA_BIQUAD_FIXED_POINT_SHIFT));
+}
-typedef int32_t ma_aaudio_result_t;
-typedef int32_t ma_aaudio_direction_t;
-typedef int32_t ma_aaudio_sharing_mode_t;
-typedef int32_t ma_aaudio_format_t;
-typedef int32_t ma_aaudio_stream_state_t;
-typedef int32_t ma_aaudio_performance_mode_t;
-typedef int32_t ma_aaudio_data_callback_result_t;
+ma_biquad_config ma_biquad_config_init(ma_format format, ma_uint32 channels, double b0, double b1, double b2, double a0, double a1, double a2)
+{
+ ma_biquad_config config;
-/* Result codes. miniaudio only cares about the success code. */
-#define MA_AAUDIO_OK 0
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.b0 = b0;
+ config.b1 = b1;
+ config.b2 = b2;
+ config.a0 = a0;
+ config.a1 = a1;
+ config.a2 = a2;
-/* Directions. */
-#define MA_AAUDIO_DIRECTION_OUTPUT 0
-#define MA_AAUDIO_DIRECTION_INPUT 1
+ return config;
+}
-/* Sharing modes. */
-#define MA_AAUDIO_SHARING_MODE_EXCLUSIVE 0
-#define MA_AAUDIO_SHARING_MODE_SHARED 1
+ma_result ma_biquad_init(const ma_biquad_config* pConfig, ma_biquad* pBQ)
+{
+ if (pBQ == NULL) {
+ return MA_INVALID_ARGS;
+ }
-/* Formats. */
-#define MA_AAUDIO_FORMAT_PCM_I16 1
-#define MA_AAUDIO_FORMAT_PCM_FLOAT 2
+ MA_ZERO_OBJECT(pBQ);
-/* Stream states. */
-#define MA_AAUDIO_STREAM_STATE_UNINITIALIZED 0
-#define MA_AAUDIO_STREAM_STATE_UNKNOWN 1
-#define MA_AAUDIO_STREAM_STATE_OPEN 2
-#define MA_AAUDIO_STREAM_STATE_STARTING 3
-#define MA_AAUDIO_STREAM_STATE_STARTED 4
-#define MA_AAUDIO_STREAM_STATE_PAUSING 5
-#define MA_AAUDIO_STREAM_STATE_PAUSED 6
-#define MA_AAUDIO_STREAM_STATE_FLUSHING 7
-#define MA_AAUDIO_STREAM_STATE_FLUSHED 8
-#define MA_AAUDIO_STREAM_STATE_STOPPING 9
-#define MA_AAUDIO_STREAM_STATE_STOPPED 10
-#define MA_AAUDIO_STREAM_STATE_CLOSING 11
-#define MA_AAUDIO_STREAM_STATE_CLOSED 12
-#define MA_AAUDIO_STREAM_STATE_DISCONNECTED 13
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
-/* Performance modes. */
-#define MA_AAUDIO_PERFORMANCE_MODE_NONE 10
-#define MA_AAUDIO_PERFORMANCE_MODE_POWER_SAVING 11
-#define MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY 12
+ return ma_biquad_reinit(pConfig, pBQ);
+}
-/* Callback results. */
-#define MA_AAUDIO_CALLBACK_RESULT_CONTINUE 0
-#define MA_AAUDIO_CALLBACK_RESULT_STOP 1
+ma_result ma_biquad_reinit(const ma_biquad_config* pConfig, ma_biquad* pBQ)
+{
+ if (pBQ == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
-/* Objects. */
-typedef struct ma_AAudioStreamBuilder_t* ma_AAudioStreamBuilder;
-typedef struct ma_AAudioStream_t* ma_AAudioStream;
+ if (pConfig->a0 == 0) {
+ return MA_INVALID_ARGS; /* Division by zero. */
+ }
-typedef ma_aaudio_data_callback_result_t (*ma_AAudioStream_dataCallback)(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t numFrames);
+ /* Only supporting f32 and s16. */
+ if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) {
+ return MA_INVALID_ARGS;
+ }
-typedef ma_aaudio_result_t (* MA_PFN_AAudio_createStreamBuilder) (ma_AAudioStreamBuilder** ppBuilder);
-typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_delete) (ma_AAudioStreamBuilder* pBuilder);
-typedef void (* MA_PFN_AAudioStreamBuilder_setDeviceId) (ma_AAudioStreamBuilder* pBuilder, int32_t deviceId);
-typedef void (* MA_PFN_AAudioStreamBuilder_setDirection) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_direction_t direction);
-typedef void (* MA_PFN_AAudioStreamBuilder_setSharingMode) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_sharing_mode_t sharingMode);
-typedef void (* MA_PFN_AAudioStreamBuilder_setFormat) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_format_t format);
-typedef void (* MA_PFN_AAudioStreamBuilder_setChannelCount) (ma_AAudioStreamBuilder* pBuilder, int32_t channelCount);
-typedef void (* MA_PFN_AAudioStreamBuilder_setSampleRate) (ma_AAudioStreamBuilder* pBuilder, int32_t sampleRate);
-typedef void (* MA_PFN_AAudioStreamBuilder_setBufferCapacityInFrames)(ma_AAudioStreamBuilder* pBuilder, int32_t numFrames);
-typedef void (* MA_PFN_AAudioStreamBuilder_setFramesPerDataCallback) (ma_AAudioStreamBuilder* pBuilder, int32_t numFrames);
-typedef void (* MA_PFN_AAudioStreamBuilder_setDataCallback) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream_dataCallback callback, void* pUserData);
-typedef void (* MA_PFN_AAudioStreamBuilder_setPerformanceMode) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_performance_mode_t mode);
-typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_openStream) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream** ppStream);
-typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_close) (ma_AAudioStream* pStream);
-typedef ma_aaudio_stream_state_t (* MA_PFN_AAudioStream_getState) (ma_AAudioStream* pStream);
-typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_waitForStateChange) (ma_AAudioStream* pStream, ma_aaudio_stream_state_t inputState, ma_aaudio_stream_state_t* pNextState, int64_t timeoutInNanoseconds);
-typedef ma_aaudio_format_t (* MA_PFN_AAudioStream_getFormat) (ma_AAudioStream* pStream);
-typedef int32_t (* MA_PFN_AAudioStream_getChannelCount) (ma_AAudioStream* pStream);
-typedef int32_t (* MA_PFN_AAudioStream_getSampleRate) (ma_AAudioStream* pStream);
-typedef int32_t (* MA_PFN_AAudioStream_getBufferCapacityInFrames) (ma_AAudioStream* pStream);
-typedef int32_t (* MA_PFN_AAudioStream_getFramesPerDataCallback) (ma_AAudioStream* pStream);
-typedef int32_t (* MA_PFN_AAudioStream_getFramesPerBurst) (ma_AAudioStream* pStream);
-typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStart) (ma_AAudioStream* pStream);
-typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStop) (ma_AAudioStream* pStream);
+ /* The format cannot be changed after initialization. */
+ if (pBQ->format != ma_format_unknown && pBQ->format != pConfig->format) {
+ return MA_INVALID_OPERATION;
+ }
-ma_result ma_result_from_aaudio(ma_aaudio_result_t resultAA)
-{
- switch (resultAA)
- {
- case MA_AAUDIO_OK: return MA_SUCCESS;
- default: break;
+ /* The channel count cannot be changed after initialization. */
+ if (pBQ->channels != 0 && pBQ->channels != pConfig->channels) {
+ return MA_INVALID_OPERATION;
}
- return MA_ERROR;
-}
-ma_aaudio_data_callback_result_t ma_stream_data_callback_capture__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount)
-{
- ma_device* pDevice = (ma_device*)pUserData;
- ma_assert(pDevice != NULL);
+ pBQ->format = pConfig->format;
+ pBQ->channels = pConfig->channels;
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_capture(pDevice, frameCount, pAudioData, &pDevice->aaudio.duplexRB);
+ /* Normalize. */
+ if (pConfig->format == ma_format_f32) {
+ pBQ->b0.f32 = (float)(pConfig->b0 / pConfig->a0);
+ pBQ->b1.f32 = (float)(pConfig->b1 / pConfig->a0);
+ pBQ->b2.f32 = (float)(pConfig->b2 / pConfig->a0);
+ pBQ->a1.f32 = (float)(pConfig->a1 / pConfig->a0);
+ pBQ->a2.f32 = (float)(pConfig->a2 / pConfig->a0);
} else {
- ma_device__send_frames_to_client(pDevice, frameCount, pAudioData); /* Send directly to the client. */
+ pBQ->b0.s32 = ma_biquad_float_to_fp(pConfig->b0 / pConfig->a0);
+ pBQ->b1.s32 = ma_biquad_float_to_fp(pConfig->b1 / pConfig->a0);
+ pBQ->b2.s32 = ma_biquad_float_to_fp(pConfig->b2 / pConfig->a0);
+ pBQ->a1.s32 = ma_biquad_float_to_fp(pConfig->a1 / pConfig->a0);
+ pBQ->a2.s32 = ma_biquad_float_to_fp(pConfig->a2 / pConfig->a0);
}
- (void)pStream;
- return MA_AAUDIO_CALLBACK_RESULT_CONTINUE;
+ return MA_SUCCESS;
}
-ma_aaudio_data_callback_result_t ma_stream_data_callback_playback__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount)
+static MA_INLINE void ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(ma_biquad* pBQ, float* pY, const float* pX)
{
- ma_device* pDevice = (ma_device*)pUserData;
- ma_assert(pDevice != NULL);
+ ma_uint32 c;
+ const float b0 = pBQ->b0.f32;
+ const float b1 = pBQ->b1.f32;
+ const float b2 = pBQ->b2.f32;
+ const float a1 = pBQ->a1.f32;
+ const float a2 = pBQ->a2.f32;
+
+ for (c = 0; c < pBQ->channels; c += 1) {
+ float r1 = pBQ->r1[c].f32;
+ float r2 = pBQ->r2[c].f32;
+ float x = pX[c];
+ float y;
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_playback(pDevice, frameCount, pAudioData, &pDevice->aaudio.duplexRB);
- } else {
- ma_device__read_frames_from_client(pDevice, frameCount, pAudioData); /* Read directly from the client. */
- }
+ y = b0*x + r1;
+ r1 = b1*x - a1*y + r2;
+ r2 = b2*x - a2*y;
- (void)pStream;
- return MA_AAUDIO_CALLBACK_RESULT_CONTINUE;
+ pY[c] = y;
+ pBQ->r1[c].f32 = r1;
+ pBQ->r2[c].f32 = r2;
+ }
}
-ma_result ma_open_stream__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, const ma_device_config* pConfig, const ma_device* pDevice, ma_AAudioStream** ppStream)
+static MA_INLINE void ma_biquad_process_pcm_frame_f32(ma_biquad* pBQ, float* pY, const float* pX)
{
- ma_AAudioStreamBuilder* pBuilder;
- ma_aaudio_result_t resultAA;
+ ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(pBQ, pY, pX);
+}
- ma_assert(deviceType != ma_device_type_duplex); /* This function should not be called for a full-duplex device type. */
+static MA_INLINE void ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(ma_biquad* pBQ, ma_int16* pY, const ma_int16* pX)
+{
+ ma_uint32 c;
+ const ma_int32 b0 = pBQ->b0.s32;
+ const ma_int32 b1 = pBQ->b1.s32;
+ const ma_int32 b2 = pBQ->b2.s32;
+ const ma_int32 a1 = pBQ->a1.s32;
+ const ma_int32 a2 = pBQ->a2.s32;
+
+ for (c = 0; c < pBQ->channels; c += 1) {
+ ma_int32 r1 = pBQ->r1[c].s32;
+ ma_int32 r2 = pBQ->r2[c].s32;
+ ma_int32 x = pX[c];
+ ma_int32 y;
- *ppStream = NULL;
+ y = (b0*x + r1) >> MA_BIQUAD_FIXED_POINT_SHIFT;
+ r1 = (b1*x - a1*y + r2);
+ r2 = (b2*x - a2*y);
- resultAA = ((MA_PFN_AAudio_createStreamBuilder)pContext->aaudio.AAudio_createStreamBuilder)(&pBuilder);
- if (resultAA != MA_AAUDIO_OK) {
- return ma_result_from_aaudio(resultAA);
+ pY[c] = (ma_int16)ma_clamp(y, -32768, 32767);
+ pBQ->r1[c].s32 = r1;
+ pBQ->r2[c].s32 = r2;
}
+}
- if (pDeviceID != NULL) {
- ((MA_PFN_AAudioStreamBuilder_setDeviceId)pContext->aaudio.AAudioStreamBuilder_setDeviceId)(pBuilder, pDeviceID->aaudio);
- }
+static MA_INLINE void ma_biquad_process_pcm_frame_s16(ma_biquad* pBQ, ma_int16* pY, const ma_int16* pX)
+{
+ ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(pBQ, pY, pX);
+}
- ((MA_PFN_AAudioStreamBuilder_setDirection)pContext->aaudio.AAudioStreamBuilder_setDirection)(pBuilder, (deviceType == ma_device_type_playback) ? MA_AAUDIO_DIRECTION_OUTPUT : MA_AAUDIO_DIRECTION_INPUT);
- ((MA_PFN_AAudioStreamBuilder_setSharingMode)pContext->aaudio.AAudioStreamBuilder_setSharingMode)(pBuilder, (shareMode == ma_share_mode_shared) ? MA_AAUDIO_SHARING_MODE_SHARED : MA_AAUDIO_SHARING_MODE_EXCLUSIVE);
+ma_result ma_biquad_process_pcm_frames(ma_biquad* pBQ, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ ma_uint32 n;
- if (pConfig != NULL) {
- ma_uint32 bufferCapacityInFrames;
+ if (pBQ == NULL || pFramesOut == NULL || pFramesIn == NULL) {
+ return MA_INVALID_ARGS;
+ }
- if (pDevice == NULL || !pDevice->usingDefaultSampleRate) {
- ((MA_PFN_AAudioStreamBuilder_setSampleRate)pContext->aaudio.AAudioStreamBuilder_setSampleRate)(pBuilder, pConfig->sampleRate);
- }
+ /* Note that the logic below needs to support in-place filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */
- if (deviceType == ma_device_type_capture) {
- if (pDevice == NULL || !pDevice->capture.usingDefaultChannels) {
- ((MA_PFN_AAudioStreamBuilder_setChannelCount)pContext->aaudio.AAudioStreamBuilder_setChannelCount)(pBuilder, pConfig->capture.channels);
- }
- if (pDevice == NULL || !pDevice->capture.usingDefaultFormat) {
- ((MA_PFN_AAudioStreamBuilder_setFormat)pContext->aaudio.AAudioStreamBuilder_setFormat)(pBuilder, (pConfig->capture.format == ma_format_s16) ? MA_AAUDIO_FORMAT_PCM_I16 : MA_AAUDIO_FORMAT_PCM_FLOAT);
- }
- } else {
- if (pDevice == NULL || !pDevice->playback.usingDefaultChannels) {
- ((MA_PFN_AAudioStreamBuilder_setChannelCount)pContext->aaudio.AAudioStreamBuilder_setChannelCount)(pBuilder, pConfig->playback.channels);
- }
- if (pDevice == NULL || !pDevice->playback.usingDefaultFormat) {
- ((MA_PFN_AAudioStreamBuilder_setFormat)pContext->aaudio.AAudioStreamBuilder_setFormat)(pBuilder, (pConfig->playback.format == ma_format_s16) ? MA_AAUDIO_FORMAT_PCM_I16 : MA_AAUDIO_FORMAT_PCM_FLOAT);
- }
- }
+ if (pBQ->format == ma_format_f32) {
+ /* */ float* pY = ( float*)pFramesOut;
+ const float* pX = (const float*)pFramesIn;
- bufferCapacityInFrames = pConfig->bufferSizeInFrames;
- if (bufferCapacityInFrames == 0) {
- bufferCapacityInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, pConfig->sampleRate);
+ for (n = 0; n < frameCount; n += 1) {
+ ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(pBQ, pY, pX);
+ pY += pBQ->channels;
+ pX += pBQ->channels;
}
- ((MA_PFN_AAudioStreamBuilder_setBufferCapacityInFrames)pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames)(pBuilder, bufferCapacityInFrames);
+ } else if (pBQ->format == ma_format_s16) {
+ /* */ ma_int16* pY = ( ma_int16*)pFramesOut;
+ const ma_int16* pX = (const ma_int16*)pFramesIn;
- ((MA_PFN_AAudioStreamBuilder_setFramesPerDataCallback)pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback)(pBuilder, bufferCapacityInFrames / pConfig->periods);
-
- if (deviceType == ma_device_type_capture) {
- ((MA_PFN_AAudioStreamBuilder_setDataCallback)pContext->aaudio.AAudioStreamBuilder_setDataCallback)(pBuilder, ma_stream_data_callback_capture__aaudio, (void*)pDevice);
- } else {
- ((MA_PFN_AAudioStreamBuilder_setDataCallback)pContext->aaudio.AAudioStreamBuilder_setDataCallback)(pBuilder, ma_stream_data_callback_playback__aaudio, (void*)pDevice);
+ for (n = 0; n < frameCount; n += 1) {
+ ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(pBQ, pY, pX);
+ pY += pBQ->channels;
+ pX += pBQ->channels;
}
-
- /* Not sure how this affects things, but since there's a mapping between miniaudio's performance profiles and AAudio's performance modes, let go ahead and set it. */
- ((MA_PFN_AAudioStreamBuilder_setPerformanceMode)pContext->aaudio.AAudioStreamBuilder_setPerformanceMode)(pBuilder, (pConfig->performanceProfile == ma_performance_profile_low_latency) ? MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY : MA_AAUDIO_PERFORMANCE_MODE_NONE);
- }
-
- resultAA = ((MA_PFN_AAudioStreamBuilder_openStream)pContext->aaudio.AAudioStreamBuilder_openStream)(pBuilder, ppStream);
- if (resultAA != MA_AAUDIO_OK) {
- *ppStream = NULL;
- ((MA_PFN_AAudioStreamBuilder_delete)pContext->aaudio.AAudioStreamBuilder_delete)(pBuilder);
- return ma_result_from_aaudio(resultAA);
+ } else {
+ MA_ASSERT(MA_FALSE);
+ return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). */
}
- ((MA_PFN_AAudioStreamBuilder_delete)pContext->aaudio.AAudioStreamBuilder_delete)(pBuilder);
return MA_SUCCESS;
}
-ma_result ma_close_stream__aaudio(ma_context* pContext, ma_AAudioStream* pStream)
+ma_uint32 ma_biquad_get_latency(ma_biquad* pBQ)
{
- return ma_result_from_aaudio(((MA_PFN_AAudioStream_close)pContext->aaudio.AAudioStream_close)(pStream));
+ if (pBQ == NULL) {
+ return 0;
+ }
+
+ return 2;
}
-ma_bool32 ma_has_default_device__aaudio(ma_context* pContext, ma_device_type deviceType)
-{
- /* The only way to know this is to try creating a stream. */
- ma_AAudioStream* pStream;
- ma_result result = ma_open_stream__aaudio(pContext, deviceType, NULL, ma_share_mode_shared, NULL, NULL, &pStream);
- if (result != MA_SUCCESS) {
- return MA_FALSE;
- }
- ma_close_stream__aaudio(pContext, pStream);
- return MA_TRUE;
+/**************************************************************************************************************************************************************
+
+Low-Pass Filter
+
+**************************************************************************************************************************************************************/
+ma_lpf1_config ma_lpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency)
+{
+ ma_lpf2_config config;
+
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.cutoffFrequency = cutoffFrequency;
+ config.q = 0.5;
+
+ return config;
}
-ma_result ma_wait_for_simple_state_transition__aaudio(ma_context* pContext, ma_AAudioStream* pStream, ma_aaudio_stream_state_t oldState, ma_aaudio_stream_state_t newState)
+ma_lpf2_config ma_lpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q)
{
- ma_aaudio_stream_state_t actualNewState;
- ma_aaudio_result_t resultAA = ((MA_PFN_AAudioStream_waitForStateChange)pContext->aaudio.AAudioStream_waitForStateChange)(pStream, oldState, &actualNewState, 5000000000); /* 5 second timeout. */
- if (resultAA != MA_AAUDIO_OK) {
- return ma_result_from_aaudio(resultAA);
- }
+ ma_lpf1_config config;
+
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.cutoffFrequency = cutoffFrequency;
+ config.q = q;
- if (newState != actualNewState) {
- return MA_ERROR; /* Failed to transition into the expected state. */
+ /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. */
+ if (config.q == 0) {
+ config.q = 0.707107;
}
- return MA_SUCCESS;
+ return config;
}
-ma_bool32 ma_context_is_device_id_equal__aaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+ma_result ma_lpf1_init(const ma_lpf1_config* pConfig, ma_lpf1* pLPF)
{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
+ if (pLPF == NULL) {
+ return MA_INVALID_ARGS;
+ }
- return pID0->aaudio == pID1->aaudio;
+ MA_ZERO_OBJECT(pLPF);
+
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ return ma_lpf1_reinit(pConfig, pLPF);
}
-ma_result ma_context_enumerate_devices__aaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+ma_result ma_lpf1_reinit(const ma_lpf1_config* pConfig, ma_lpf1* pLPF)
{
- ma_bool32 cbResult = MA_TRUE;
+ double a;
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
+ if (pLPF == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
- /* Unfortunately AAudio does not have an enumeration API. Therefore I'm only going to report default devices, but only if it can instantiate a stream. */
+ /* Only supporting f32 and s16. */
+ if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) {
+ return MA_INVALID_ARGS;
+ }
- /* Playback. */
- if (cbResult) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED;
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+ /* The format cannot be changed after initialization. */
+ if (pLPF->format != ma_format_unknown && pLPF->format != pConfig->format) {
+ return MA_INVALID_OPERATION;
+ }
- if (ma_has_default_device__aaudio(pContext, ma_device_type_playback)) {
- cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
- }
+ /* The channel count cannot be changed after initialization. */
+ if (pLPF->channels != 0 && pLPF->channels != pConfig->channels) {
+ return MA_INVALID_OPERATION;
}
- /* Capture. */
- if (cbResult) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED;
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ pLPF->format = pConfig->format;
+ pLPF->channels = pConfig->channels;
- if (ma_has_default_device__aaudio(pContext, ma_device_type_capture)) {
- cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
- }
+ a = ma_exp(-2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate);
+ if (pConfig->format == ma_format_f32) {
+ pLPF->a.f32 = (float)a;
+ } else {
+ pLPF->a.s32 = ma_biquad_float_to_fp(a);
}
return MA_SUCCESS;
}
-ma_result ma_context_get_device_info__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+static MA_INLINE void ma_lpf1_process_pcm_frame_f32(ma_lpf1* pLPF, float* pY, const float* pX)
{
- ma_AAudioStream* pStream;
- ma_result result;
+ ma_uint32 c;
+ const float a = pLPF->a.f32;
+ const float b = 1 - a;
+
+ for (c = 0; c < pLPF->channels; c += 1) {
+ float r1 = pLPF->r1[c].f32;
+ float x = pX[c];
+ float y;
- ma_assert(pContext != NULL);
+ y = b*x + a*r1;
- /* No exclusive mode with AAudio. */
- if (shareMode == ma_share_mode_exclusive) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
+ pY[c] = y;
+ pLPF->r1[c].f32 = y;
}
+}
- /* ID */
- if (pDeviceID != NULL) {
- pDeviceInfo->id.aaudio = pDeviceID->aaudio;
- } else {
- pDeviceInfo->id.aaudio = MA_AAUDIO_UNSPECIFIED;
- }
+static MA_INLINE void ma_lpf1_process_pcm_frame_s16(ma_lpf1* pLPF, ma_int16* pY, const ma_int16* pX)
+{
+ ma_uint32 c;
+ const ma_int32 a = pLPF->a.s32;
+ const ma_int32 b = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - a);
- /* Name */
- if (deviceType == ma_device_type_playback) {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- } else {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ for (c = 0; c < pLPF->channels; c += 1) {
+ ma_int32 r1 = pLPF->r1[c].s32;
+ ma_int32 x = pX[c];
+ ma_int32 y;
+
+ y = (b*x + a*r1) >> MA_BIQUAD_FIXED_POINT_SHIFT;
+
+ pY[c] = (ma_int16)y;
+ pLPF->r1[c].s32 = (ma_int32)y;
}
+}
+ma_result ma_lpf1_process_pcm_frames(ma_lpf1* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ ma_uint32 n;
- /* We'll need to open the device to get accurate sample rate and channel count information. */
- result = ma_open_stream__aaudio(pContext, deviceType, pDeviceID, shareMode, NULL, NULL, &pStream);
- if (result != MA_SUCCESS) {
- return result;
+ if (pLPF == NULL || pFramesOut == NULL || pFramesIn == NULL) {
+ return MA_INVALID_ARGS;
}
- pDeviceInfo->minChannels = ((MA_PFN_AAudioStream_getChannelCount)pContext->aaudio.AAudioStream_getChannelCount)(pStream);
- pDeviceInfo->maxChannels = pDeviceInfo->minChannels;
- pDeviceInfo->minSampleRate = ((MA_PFN_AAudioStream_getSampleRate)pContext->aaudio.AAudioStream_getSampleRate)(pStream);
- pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate;
+ /* Note that the logic below needs to support in-place filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */
- ma_close_stream__aaudio(pContext, pStream);
- pStream = NULL;
+ if (pLPF->format == ma_format_f32) {
+ /* */ float* pY = ( float*)pFramesOut;
+ const float* pX = (const float*)pFramesIn;
+ for (n = 0; n < frameCount; n += 1) {
+ ma_lpf1_process_pcm_frame_f32(pLPF, pY, pX);
+ pY += pLPF->channels;
+ pX += pLPF->channels;
+ }
+ } else if (pLPF->format == ma_format_s16) {
+ /* */ ma_int16* pY = ( ma_int16*)pFramesOut;
+ const ma_int16* pX = (const ma_int16*)pFramesIn;
- /* AAudio supports s16 and f32. */
- pDeviceInfo->formatCount = 2;
- pDeviceInfo->formats[0] = ma_format_s16;
- pDeviceInfo->formats[1] = ma_format_f32;
+ for (n = 0; n < frameCount; n += 1) {
+ ma_lpf1_process_pcm_frame_s16(pLPF, pY, pX);
+ pY += pLPF->channels;
+ pX += pLPF->channels;
+ }
+ } else {
+ MA_ASSERT(MA_FALSE);
+ return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). */
+ }
return MA_SUCCESS;
}
+ma_uint32 ma_lpf1_get_latency(ma_lpf1* pLPF)
+{
+ if (pLPF == NULL) {
+ return 0;
+ }
+
+ return 1;
+}
-void ma_device_uninit__aaudio(ma_device* pDevice)
+
+static MA_INLINE ma_biquad_config ma_lpf2__get_biquad_config(const ma_lpf2_config* pConfig)
{
- ma_assert(pDevice != NULL);
+ ma_biquad_config bqConfig;
+ double q;
+ double w;
+ double s;
+ double c;
+ double a;
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
- pDevice->aaudio.pStreamCapture = NULL;
- }
+ MA_ASSERT(pConfig != NULL);
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
- pDevice->aaudio.pStreamPlayback = NULL;
- }
+ q = pConfig->q;
+ w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate;
+ s = ma_sin(w);
+ c = ma_cos(w);
+ a = s / (2*q);
- if (pDevice->type == ma_device_type_duplex) {
- ma_pcm_rb_uninit(&pDevice->aaudio.duplexRB);
- }
+ bqConfig.b0 = (1 - c) / 2;
+ bqConfig.b1 = 1 - c;
+ bqConfig.b2 = (1 - c) / 2;
+ bqConfig.a0 = 1 + a;
+ bqConfig.a1 = -2 * c;
+ bqConfig.a2 = 1 - a;
+
+ bqConfig.format = pConfig->format;
+ bqConfig.channels = pConfig->channels;
+
+ return bqConfig;
}
-ma_result ma_device_init__aaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+ma_result ma_lpf2_init(const ma_lpf2_config* pConfig, ma_lpf2* pLPF)
{
ma_result result;
+ ma_biquad_config bqConfig;
- ma_assert(pDevice != NULL);
-
- /* No exclusive mode with AAudio. */
- if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
- ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
+ if (pLPF == NULL) {
+ return MA_INVALID_ARGS;
}
- /* We first need to try opening the stream. */
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- int32_t framesPerPeriod;
-
- result = ma_open_stream__aaudio(pContext, ma_device_type_capture, pConfig->capture.pDeviceID, pConfig->capture.shareMode, pConfig, pDevice, (ma_AAudioStream**)&pDevice->aaudio.pStreamCapture);
- if (result != MA_SUCCESS) {
- return result; /* Failed to open the AAudio stream. */
- }
-
- pDevice->capture.internalFormat = (((MA_PFN_AAudioStream_getFormat)pContext->aaudio.AAudioStream_getFormat)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture) == MA_AAUDIO_FORMAT_PCM_I16) ? ma_format_s16 : ma_format_f32;
- pDevice->capture.internalChannels = ((MA_PFN_AAudioStream_getChannelCount)pContext->aaudio.AAudioStream_getChannelCount)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
- pDevice->capture.internalSampleRate = ((MA_PFN_AAudioStream_getSampleRate)pContext->aaudio.AAudioStream_getSampleRate)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
- ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); /* <-- Cannot find info on channel order, so assuming a default. */
- pDevice->capture.internalBufferSizeInFrames = ((MA_PFN_AAudioStream_getBufferCapacityInFrames)pContext->aaudio.AAudioStream_getBufferCapacityInFrames)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+ MA_ZERO_OBJECT(pLPF);
- /*
- TODO: When synchronous reading and writing is supported, use AAudioStream_getFramesPerBurst() instead of AAudioStream_getFramesPerDataCallback(). Keep
- using AAudioStream_getFramesPerDataCallback() for asynchronous mode, though.
- */
- framesPerPeriod = ((MA_PFN_AAudioStream_getFramesPerDataCallback)pContext->aaudio.AAudioStream_getFramesPerDataCallback)((ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
- if (framesPerPeriod > 0) {
- pDevice->capture.internalPeriods = 1;
- } else {
- pDevice->capture.internalPeriods = pDevice->capture.internalBufferSizeInFrames / framesPerPeriod;
- }
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- int32_t framesPerPeriod;
+ bqConfig = ma_lpf2__get_biquad_config(pConfig);
+ result = ma_biquad_init(&bqConfig, &pLPF->bq);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- result = ma_open_stream__aaudio(pContext, ma_device_type_playback, pConfig->playback.pDeviceID, pConfig->playback.shareMode, pConfig, pDevice, (ma_AAudioStream**)&pDevice->aaudio.pStreamPlayback);
- if (result != MA_SUCCESS) {
- return result; /* Failed to open the AAudio stream. */
- }
+ return MA_SUCCESS;
+}
- pDevice->playback.internalFormat = (((MA_PFN_AAudioStream_getFormat)pContext->aaudio.AAudioStream_getFormat)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback) == MA_AAUDIO_FORMAT_PCM_I16) ? ma_format_s16 : ma_format_f32;
- pDevice->playback.internalChannels = ((MA_PFN_AAudioStream_getChannelCount)pContext->aaudio.AAudioStream_getChannelCount)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
- pDevice->playback.internalSampleRate = ((MA_PFN_AAudioStream_getSampleRate)pContext->aaudio.AAudioStream_getSampleRate)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
- ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); /* <-- Cannot find info on channel order, so assuming a default. */
- pDevice->playback.internalBufferSizeInFrames = ((MA_PFN_AAudioStream_getBufferCapacityInFrames)pContext->aaudio.AAudioStream_getBufferCapacityInFrames)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+ma_result ma_lpf2_reinit(const ma_lpf2_config* pConfig, ma_lpf2* pLPF)
+{
+ ma_result result;
+ ma_biquad_config bqConfig;
- framesPerPeriod = ((MA_PFN_AAudioStream_getFramesPerDataCallback)pContext->aaudio.AAudioStream_getFramesPerDataCallback)((ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
- if (framesPerPeriod > 0) {
- pDevice->playback.internalPeriods = 1;
- } else {
- pDevice->playback.internalPeriods = pDevice->playback.internalBufferSizeInFrames / framesPerPeriod;
- }
+ if (pLPF == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pConfig->deviceType == ma_device_type_duplex) {
- ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames);
- ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->aaudio.duplexRB);
- if (result != MA_SUCCESS) {
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
- }
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
- }
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[AAudio] Failed to initialize ring buffer.", result);
- }
+ bqConfig = ma_lpf2__get_biquad_config(pConfig);
+ result = ma_biquad_reinit(&bqConfig, &pLPF->bq);
+ if (result != MA_SUCCESS) {
+ return result;
}
return MA_SUCCESS;
}
-ma_result ma_device_start_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream)
+static MA_INLINE void ma_lpf2_process_pcm_frame_s16(ma_lpf2* pLPF, ma_int16* pFrameOut, const ma_int16* pFrameIn)
{
- ma_aaudio_result_t resultAA;
- ma_aaudio_stream_state_t currentState;
+ ma_biquad_process_pcm_frame_s16(&pLPF->bq, pFrameOut, pFrameIn);
+}
- ma_assert(pDevice != NULL);
+static MA_INLINE void ma_lpf2_process_pcm_frame_f32(ma_lpf2* pLPF, float* pFrameOut, const float* pFrameIn)
+{
+ ma_biquad_process_pcm_frame_f32(&pLPF->bq, pFrameOut, pFrameIn);
+}
- resultAA = ((MA_PFN_AAudioStream_requestStart)pDevice->pContext->aaudio.AAudioStream_requestStart)(pStream);
- if (resultAA != MA_AAUDIO_OK) {
- return ma_result_from_aaudio(resultAA);
+ma_result ma_lpf2_process_pcm_frames(ma_lpf2* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ if (pLPF == NULL) {
+ return MA_INVALID_ARGS;
}
- /* Do we actually need to wait for the device to transition into it's started state? */
+ return ma_biquad_process_pcm_frames(&pLPF->bq, pFramesOut, pFramesIn, frameCount);
+}
- /* The device should be in either a starting or started state. If it's not set to started we need to wait for it to transition. It should go from starting to started. */
- currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream);
- if (currentState != MA_AAUDIO_STREAM_STATE_STARTED) {
- ma_result result;
+ma_uint32 ma_lpf2_get_latency(ma_lpf2* pLPF)
+{
+ if (pLPF == NULL) {
+ return 0;
+ }
- if (currentState != MA_AAUDIO_STREAM_STATE_STARTING) {
- return MA_ERROR; /* Expecting the stream to be a starting or started state. */
- }
+ return ma_biquad_get_latency(&pLPF->bq);
+}
- result = ma_wait_for_simple_state_transition__aaudio(pDevice->pContext, pStream, currentState, MA_AAUDIO_STREAM_STATE_STARTED);
- if (result != MA_SUCCESS) {
- return result;
- }
- }
- return MA_SUCCESS;
+ma_lpf_config ma_lpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order)
+{
+ ma_lpf_config config;
+
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.cutoffFrequency = cutoffFrequency;
+ config.order = ma_min(order, MA_MAX_FILTER_ORDER);
+
+ return config;
}
-ma_result ma_device_stop_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream)
+static ma_result ma_lpf_reinit__internal(const ma_lpf_config* pConfig, ma_lpf* pLPF, ma_bool32 isNew)
{
- ma_aaudio_result_t resultAA;
- ma_aaudio_stream_state_t currentState;
+ ma_result result;
+ ma_uint32 lpf1Count;
+ ma_uint32 lpf2Count;
+ ma_uint32 ilpf1;
+ ma_uint32 ilpf2;
- ma_assert(pDevice != NULL);
+ if (pLPF == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
- resultAA = ((MA_PFN_AAudioStream_requestStop)pDevice->pContext->aaudio.AAudioStream_requestStop)(pStream);
- if (resultAA != MA_AAUDIO_OK) {
- return ma_result_from_aaudio(resultAA);
+ /* Only supporting f32 and s16. */
+ if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) {
+ return MA_INVALID_ARGS;
}
- /* The device should be in either a stopping or stopped state. If it's not set to started we need to wait for it to transition. It should go from stopping to stopped. */
- currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream);
- if (currentState != MA_AAUDIO_STREAM_STATE_STOPPED) {
- ma_result result;
+ /* The format cannot be changed after initialization. */
+ if (pLPF->format != ma_format_unknown && pLPF->format != pConfig->format) {
+ return MA_INVALID_OPERATION;
+ }
- if (currentState != MA_AAUDIO_STREAM_STATE_STOPPING) {
- return MA_ERROR; /* Expecting the stream to be a stopping or stopped state. */
- }
+ /* The channel count cannot be changed after initialization. */
+ if (pLPF->channels != 0 && pLPF->channels != pConfig->channels) {
+ return MA_INVALID_OPERATION;
+ }
- result = ma_wait_for_simple_state_transition__aaudio(pDevice->pContext, pStream, currentState, MA_AAUDIO_STREAM_STATE_STOPPED);
- if (result != MA_SUCCESS) {
- return result;
- }
+ if (pConfig->order > MA_MAX_FILTER_ORDER) {
+ return MA_INVALID_ARGS;
}
- return MA_SUCCESS;
-}
+ lpf1Count = pConfig->order % 2;
+ lpf2Count = pConfig->order / 2;
-ma_result ma_device_start__aaudio(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
+ MA_ASSERT(lpf1Count <= ma_countof(pLPF->lpf1));
+ MA_ASSERT(lpf2Count <= ma_countof(pLPF->lpf2));
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ma_result result = ma_device_start_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
- if (result != MA_SUCCESS) {
- return result;
+ /* The filter order can't change between reinits. */
+ if (!isNew) {
+ if (pLPF->lpf1Count != lpf1Count || pLPF->lpf2Count != lpf2Count) {
+ return MA_INVALID_OPERATION;
}
}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- ma_result result = ma_device_start_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+ for (ilpf1 = 0; ilpf1 < lpf1Count; ilpf1 += 1) {
+ ma_lpf1_config lpf1Config = ma_lpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency);
+
+ if (isNew) {
+ result = ma_lpf1_init(&lpf1Config, &pLPF->lpf1[ilpf1]);
+ } else {
+ result = ma_lpf1_reinit(&lpf1Config, &pLPF->lpf1[ilpf1]);
+ }
+
if (result != MA_SUCCESS) {
- if (pDevice->type == ma_device_type_duplex) {
- ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
- }
return result;
}
}
- return MA_SUCCESS;
-}
-
-ma_result ma_device_stop__aaudio(ma_device* pDevice)
-{
- ma_stop_proc onStop;
+ for (ilpf2 = 0; ilpf2 < lpf2Count; ilpf2 += 1) {
+ ma_lpf2_config lpf2Config;
+ double q;
+ double a;
- ma_assert(pDevice != NULL);
+ /* Tempting to use 0.707107, but won't result in a Butterworth filter if the order is > 2. */
+ if (lpf1Count == 1) {
+ a = (1 + ilpf2*1) * (MA_PI_D/(pConfig->order*1)); /* Odd order. */
+ } else {
+ a = (1 + ilpf2*2) * (MA_PI_D/(pConfig->order*2)); /* Even order. */
+ }
+ q = 1 / (2*ma_cos(a));
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ma_result result = ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
- if (result != MA_SUCCESS) {
- return result;
+ lpf2Config = ma_lpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q);
+
+ if (isNew) {
+ result = ma_lpf2_init(&lpf2Config, &pLPF->lpf2[ilpf2]);
+ } else {
+ result = ma_lpf2_reinit(&lpf2Config, &pLPF->lpf2[ilpf2]);
}
- }
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- ma_result result = ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
if (result != MA_SUCCESS) {
return result;
}
}
- onStop = pDevice->onStop;
- if (onStop) {
- onStop(pDevice);
- }
+ pLPF->lpf1Count = lpf1Count;
+ pLPF->lpf2Count = lpf2Count;
+ pLPF->format = pConfig->format;
+ pLPF->channels = pConfig->channels;
return MA_SUCCESS;
}
-
-ma_result ma_context_uninit__aaudio(ma_context* pContext)
+ma_result ma_lpf_init(const ma_lpf_config* pConfig, ma_lpf* pLPF)
{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_aaudio);
-
- ma_dlclose(pContext, pContext->aaudio.hAAudio);
- pContext->aaudio.hAAudio = NULL;
+ if (pLPF == NULL) {
+ return MA_INVALID_ARGS;
+ }
- return MA_SUCCESS;
+ MA_ZERO_OBJECT(pLPF);
+
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ return ma_lpf_reinit__internal(pConfig, pLPF, /*isNew*/MA_TRUE);
}
-ma_result ma_context_init__aaudio(const ma_context_config* pConfig, ma_context* pContext)
+ma_result ma_lpf_reinit(const ma_lpf_config* pConfig, ma_lpf* pLPF)
{
- const char* libNames[] = {
- "libaaudio.so"
- };
- size_t i;
+ return ma_lpf_reinit__internal(pConfig, pLPF, /*isNew*/MA_FALSE);
+}
- for (i = 0; i < ma_countof(libNames); ++i) {
- pContext->aaudio.hAAudio = ma_dlopen(pContext, libNames[i]);
- if (pContext->aaudio.hAAudio != NULL) {
- break;
- }
+static MA_INLINE void ma_lpf_process_pcm_frame_f32(ma_lpf* pLPF, float* pY, const void* pX)
+{
+ ma_uint32 ilpf1;
+ ma_uint32 ilpf2;
+
+ MA_ASSERT(pLPF->format == ma_format_f32);
+
+ MA_COPY_MEMORY(pY, pX, ma_get_bytes_per_frame(pLPF->format, pLPF->channels));
+
+ for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) {
+ ma_lpf1_process_pcm_frame_f32(&pLPF->lpf1[ilpf1], pY, pY);
}
- if (pContext->aaudio.hAAudio == NULL) {
- return MA_FAILED_TO_INIT_BACKEND;
+ for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) {
+ ma_lpf2_process_pcm_frame_f32(&pLPF->lpf2[ilpf2], pY, pY);
}
+}
- pContext->aaudio.AAudio_createStreamBuilder = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudio_createStreamBuilder");
- pContext->aaudio.AAudioStreamBuilder_delete = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_delete");
- pContext->aaudio.AAudioStreamBuilder_setDeviceId = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDeviceId");
- pContext->aaudio.AAudioStreamBuilder_setDirection = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDirection");
- pContext->aaudio.AAudioStreamBuilder_setSharingMode = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setSharingMode");
- pContext->aaudio.AAudioStreamBuilder_setFormat = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setFormat");
- pContext->aaudio.AAudioStreamBuilder_setChannelCount = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setChannelCount");
- pContext->aaudio.AAudioStreamBuilder_setSampleRate = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setSampleRate");
- pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setBufferCapacityInFrames");
- pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setFramesPerDataCallback");
- pContext->aaudio.AAudioStreamBuilder_setDataCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDataCallback");
- pContext->aaudio.AAudioStreamBuilder_setPerformanceMode = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setPerformanceMode");
- pContext->aaudio.AAudioStreamBuilder_openStream = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_openStream");
- pContext->aaudio.AAudioStream_close = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_close");
- pContext->aaudio.AAudioStream_getState = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getState");
- pContext->aaudio.AAudioStream_waitForStateChange = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_waitForStateChange");
- pContext->aaudio.AAudioStream_getFormat = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getFormat");
- pContext->aaudio.AAudioStream_getChannelCount = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getChannelCount");
- pContext->aaudio.AAudioStream_getSampleRate = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getSampleRate");
- pContext->aaudio.AAudioStream_getBufferCapacityInFrames = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getBufferCapacityInFrames");
- pContext->aaudio.AAudioStream_getFramesPerDataCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getFramesPerDataCallback");
- pContext->aaudio.AAudioStream_getFramesPerBurst = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_getFramesPerBurst");
- pContext->aaudio.AAudioStream_requestStart = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_requestStart");
- pContext->aaudio.AAudioStream_requestStop = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_requestStop");
+static MA_INLINE void ma_lpf_process_pcm_frame_s16(ma_lpf* pLPF, ma_int16* pY, const ma_int16* pX)
+{
+ ma_uint32 ilpf1;
+ ma_uint32 ilpf2;
- pContext->isBackendAsynchronous = MA_TRUE;
+ MA_ASSERT(pLPF->format == ma_format_s16);
- pContext->onUninit = ma_context_uninit__aaudio;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__aaudio;
- pContext->onEnumDevices = ma_context_enumerate_devices__aaudio;
- pContext->onGetDeviceInfo = ma_context_get_device_info__aaudio;
- pContext->onDeviceInit = ma_device_init__aaudio;
- pContext->onDeviceUninit = ma_device_uninit__aaudio;
- pContext->onDeviceStart = ma_device_start__aaudio;
- pContext->onDeviceStop = ma_device_stop__aaudio;
+ MA_COPY_MEMORY(pY, pX, ma_get_bytes_per_frame(pLPF->format, pLPF->channels));
- (void)pConfig;
- return MA_SUCCESS;
+ for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) {
+ ma_lpf1_process_pcm_frame_s16(&pLPF->lpf1[ilpf1], pY, pY);
+ }
+
+ for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) {
+ ma_lpf2_process_pcm_frame_s16(&pLPF->lpf2[ilpf2], pY, pY);
+ }
}
-#endif /* AAudio */
+ma_result ma_lpf_process_pcm_frames(ma_lpf* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ ma_result result;
+ ma_uint32 ilpf1;
+ ma_uint32 ilpf2;
-/******************************************************************************
+ if (pLPF == NULL) {
+ return MA_INVALID_ARGS;
+ }
-OpenSL|ES Backend
+ /* Faster path for in-place. */
+ if (pFramesOut == pFramesIn) {
+ for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) {
+ result = ma_lpf1_process_pcm_frames(&pLPF->lpf1[ilpf1], pFramesOut, pFramesOut, frameCount);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
-******************************************************************************/
-#ifdef MA_HAS_OPENSL
-#include <SLES/OpenSLES.h>
-#ifdef MA_ANDROID
-#include <SLES/OpenSLES_Android.h>
-#endif
+ for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) {
+ result = ma_lpf2_process_pcm_frames(&pLPF->lpf2[ilpf2], pFramesOut, pFramesOut, frameCount);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
+ }
-/* OpenSL|ES has one-per-application objects :( */
-SLObjectItf g_maEngineObjectSL = NULL;
-SLEngineItf g_maEngineSL = NULL;
-ma_uint32 g_maOpenSLInitCounter = 0;
+ /* Slightly slower path for copying. */
+ if (pFramesOut != pFramesIn) {
+ ma_uint32 iFrame;
-#define MA_OPENSL_OBJ(p) (*((SLObjectItf)(p)))
-#define MA_OPENSL_OUTPUTMIX(p) (*((SLOutputMixItf)(p)))
-#define MA_OPENSL_PLAY(p) (*((SLPlayItf)(p)))
-#define MA_OPENSL_RECORD(p) (*((SLRecordItf)(p)))
+ /* */ if (pLPF->format == ma_format_f32) {
+ /* */ float* pFramesOutF32 = ( float*)pFramesOut;
+ const float* pFramesInF32 = (const float*)pFramesIn;
-#ifdef MA_ANDROID
-#define MA_OPENSL_BUFFERQUEUE(p) (*((SLAndroidSimpleBufferQueueItf)(p)))
-#else
-#define MA_OPENSL_BUFFERQUEUE(p) (*((SLBufferQueueItf)(p)))
-#endif
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_lpf_process_pcm_frame_f32(pLPF, pFramesOutF32, pFramesInF32);
+ pFramesOutF32 += pLPF->channels;
+ pFramesInF32 += pLPF->channels;
+ }
+ } else if (pLPF->format == ma_format_s16) {
+ /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut;
+ const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn;
-/* Converts an individual OpenSL-style channel identifier (SL_SPEAKER_FRONT_LEFT, etc.) to miniaudio. */
-ma_uint8 ma_channel_id_to_ma__opensl(SLuint32 id)
-{
- switch (id)
- {
- case SL_SPEAKER_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT;
- case SL_SPEAKER_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT;
- case SL_SPEAKER_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER;
- case SL_SPEAKER_LOW_FREQUENCY: return MA_CHANNEL_LFE;
- case SL_SPEAKER_BACK_LEFT: return MA_CHANNEL_BACK_LEFT;
- case SL_SPEAKER_BACK_RIGHT: return MA_CHANNEL_BACK_RIGHT;
- case SL_SPEAKER_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER;
- case SL_SPEAKER_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER;
- case SL_SPEAKER_BACK_CENTER: return MA_CHANNEL_BACK_CENTER;
- case SL_SPEAKER_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT;
- case SL_SPEAKER_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT;
- case SL_SPEAKER_TOP_CENTER: return MA_CHANNEL_TOP_CENTER;
- case SL_SPEAKER_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT;
- case SL_SPEAKER_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER;
- case SL_SPEAKER_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT;
- case SL_SPEAKER_TOP_BACK_LEFT: return MA_CHANNEL_TOP_BACK_LEFT;
- case SL_SPEAKER_TOP_BACK_CENTER: return MA_CHANNEL_TOP_BACK_CENTER;
- case SL_SPEAKER_TOP_BACK_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT;
- default: return 0;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_lpf_process_pcm_frame_s16(pLPF, pFramesOutS16, pFramesInS16);
+ pFramesOutS16 += pLPF->channels;
+ pFramesInS16 += pLPF->channels;
+ }
+ } else {
+ MA_ASSERT(MA_FALSE);
+ return MA_INVALID_OPERATION; /* Should never hit this. */
+ }
}
+
+ return MA_SUCCESS;
}
-/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to OpenSL-style. */
-SLuint32 ma_channel_id_to_opensl(ma_uint8 id)
+ma_uint32 ma_lpf_get_latency(ma_lpf* pLPF)
{
- switch (id)
- {
- case MA_CHANNEL_MONO: return SL_SPEAKER_FRONT_CENTER;
- case MA_CHANNEL_FRONT_LEFT: return SL_SPEAKER_FRONT_LEFT;
- case MA_CHANNEL_FRONT_RIGHT: return SL_SPEAKER_FRONT_RIGHT;
- case MA_CHANNEL_FRONT_CENTER: return SL_SPEAKER_FRONT_CENTER;
- case MA_CHANNEL_LFE: return SL_SPEAKER_LOW_FREQUENCY;
- case MA_CHANNEL_BACK_LEFT: return SL_SPEAKER_BACK_LEFT;
- case MA_CHANNEL_BACK_RIGHT: return SL_SPEAKER_BACK_RIGHT;
- case MA_CHANNEL_FRONT_LEFT_CENTER: return SL_SPEAKER_FRONT_LEFT_OF_CENTER;
- case MA_CHANNEL_FRONT_RIGHT_CENTER: return SL_SPEAKER_FRONT_RIGHT_OF_CENTER;
- case MA_CHANNEL_BACK_CENTER: return SL_SPEAKER_BACK_CENTER;
- case MA_CHANNEL_SIDE_LEFT: return SL_SPEAKER_SIDE_LEFT;
- case MA_CHANNEL_SIDE_RIGHT: return SL_SPEAKER_SIDE_RIGHT;
- case MA_CHANNEL_TOP_CENTER: return SL_SPEAKER_TOP_CENTER;
- case MA_CHANNEL_TOP_FRONT_LEFT: return SL_SPEAKER_TOP_FRONT_LEFT;
- case MA_CHANNEL_TOP_FRONT_CENTER: return SL_SPEAKER_TOP_FRONT_CENTER;
- case MA_CHANNEL_TOP_FRONT_RIGHT: return SL_SPEAKER_TOP_FRONT_RIGHT;
- case MA_CHANNEL_TOP_BACK_LEFT: return SL_SPEAKER_TOP_BACK_LEFT;
- case MA_CHANNEL_TOP_BACK_CENTER: return SL_SPEAKER_TOP_BACK_CENTER;
- case MA_CHANNEL_TOP_BACK_RIGHT: return SL_SPEAKER_TOP_BACK_RIGHT;
- default: return 0;
+ if (pLPF == NULL) {
+ return 0;
}
+
+ return pLPF->lpf2Count*2 + pLPF->lpf1Count;
}
-/* Converts a channel mapping to an OpenSL-style channel mask. */
-SLuint32 ma_channel_map_to_channel_mask__opensl(const ma_channel channelMap[MA_MAX_CHANNELS], ma_uint32 channels)
+
+/**************************************************************************************************************************************************************
+
+High-Pass Filtering
+
+**************************************************************************************************************************************************************/
+ma_hpf1_config ma_hpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency)
{
- SLuint32 channelMask = 0;
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- channelMask |= ma_channel_id_to_opensl(channelMap[iChannel]);
- }
+ ma_hpf1_config config;
+
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.cutoffFrequency = cutoffFrequency;
- return channelMask;
+ return config;
}
-/* Converts an OpenSL-style channel mask to a miniaudio channel map. */
-void ma_channel_mask_to_channel_map__opensl(SLuint32 channelMask, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+ma_hpf2_config ma_hpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q)
{
- if (channels == 1 && channelMask == 0) {
- channelMap[0] = MA_CHANNEL_MONO;
- } else if (channels == 2 && channelMask == 0) {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- } else {
- if (channels == 1 && (channelMask & SL_SPEAKER_FRONT_CENTER) != 0) {
- channelMap[0] = MA_CHANNEL_MONO;
- } else {
- /* Just iterate over each bit. */
- ma_uint32 iChannel = 0;
- ma_uint32 iBit;
- for (iBit = 0; iBit < 32; ++iBit) {
- SLuint32 bitValue = (channelMask & (1UL << iBit));
- if (bitValue != 0) {
- /* The bit is set. */
- channelMap[iChannel] = ma_channel_id_to_ma__opensl(bitValue);
- iChannel += 1;
- }
- }
- }
+ ma_hpf2_config config;
+
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.cutoffFrequency = cutoffFrequency;
+ config.q = q;
+
+ /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. */
+ if (config.q == 0) {
+ config.q = 0.707107;
}
+
+ return config;
}
-SLuint32 ma_round_to_standard_sample_rate__opensl(SLuint32 samplesPerSec)
+
+ma_result ma_hpf1_init(const ma_hpf1_config* pConfig, ma_hpf1* pHPF)
{
- if (samplesPerSec <= SL_SAMPLINGRATE_8) {
- return SL_SAMPLINGRATE_8;
- }
- if (samplesPerSec <= SL_SAMPLINGRATE_11_025) {
- return SL_SAMPLINGRATE_11_025;
- }
- if (samplesPerSec <= SL_SAMPLINGRATE_12) {
- return SL_SAMPLINGRATE_12;
- }
- if (samplesPerSec <= SL_SAMPLINGRATE_16) {
- return SL_SAMPLINGRATE_16;
- }
- if (samplesPerSec <= SL_SAMPLINGRATE_22_05) {
- return SL_SAMPLINGRATE_22_05;
- }
- if (samplesPerSec <= SL_SAMPLINGRATE_24) {
- return SL_SAMPLINGRATE_24;
- }
- if (samplesPerSec <= SL_SAMPLINGRATE_32) {
- return SL_SAMPLINGRATE_32;
- }
- if (samplesPerSec <= SL_SAMPLINGRATE_44_1) {
- return SL_SAMPLINGRATE_44_1;
- }
- if (samplesPerSec <= SL_SAMPLINGRATE_48) {
- return SL_SAMPLINGRATE_48;
+ if (pHPF == NULL) {
+ return MA_INVALID_ARGS;
}
- /* Android doesn't support more than 48000. */
-#ifndef MA_ANDROID
- if (samplesPerSec <= SL_SAMPLINGRATE_64) {
- return SL_SAMPLINGRATE_64;
- }
- if (samplesPerSec <= SL_SAMPLINGRATE_88_2) {
- return SL_SAMPLINGRATE_88_2;
+ MA_ZERO_OBJECT(pHPF);
+
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- if (samplesPerSec <= SL_SAMPLINGRATE_96) {
- return SL_SAMPLINGRATE_96;
+
+ return ma_hpf1_reinit(pConfig, pHPF);
+}
+
+ma_result ma_hpf1_reinit(const ma_hpf1_config* pConfig, ma_hpf1* pHPF)
+{
+ double a;
+
+ if (pHPF == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- if (samplesPerSec <= SL_SAMPLINGRATE_192) {
- return SL_SAMPLINGRATE_192;
+
+ /* Only supporting f32 and s16. */
+ if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) {
+ return MA_INVALID_ARGS;
}
-#endif
- return SL_SAMPLINGRATE_16;
-}
+ /* The format cannot be changed after initialization. */
+ if (pHPF->format != ma_format_unknown && pHPF->format != pConfig->format) {
+ return MA_INVALID_OPERATION;
+ }
+ /* The channel count cannot be changed after initialization. */
+ if (pHPF->channels != 0 && pHPF->channels != pConfig->channels) {
+ return MA_INVALID_OPERATION;
+ }
-ma_bool32 ma_context_is_device_id_equal__opensl(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
-{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
+ pHPF->format = pConfig->format;
+ pHPF->channels = pConfig->channels;
- return pID0->opensl == pID1->opensl;
+ a = ma_exp(-2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate);
+ if (pConfig->format == ma_format_f32) {
+ pHPF->a.f32 = (float)a;
+ } else {
+ pHPF->a.s32 = ma_biquad_float_to_fp(a);
+ }
+
+ return MA_SUCCESS;
}
-ma_result ma_context_enumerate_devices__opensl(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+static MA_INLINE void ma_hpf1_process_pcm_frame_f32(ma_hpf1* pHPF, float* pY, const float* pX)
{
- ma_bool32 cbResult;
+ ma_uint32 c;
+ const float a = 1 - pHPF->a.f32;
+ const float b = 1 - a;
+
+ for (c = 0; c < pHPF->channels; c += 1) {
+ float r1 = pHPF->r1[c].f32;
+ float x = pX[c];
+ float y;
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
+ y = b*x - a*r1;
- ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to enumerate devices. */
- if (g_maOpenSLInitCounter == 0) {
- return MA_INVALID_OPERATION;
+ pY[c] = y;
+ pHPF->r1[c].f32 = y;
}
+}
- /*
- TODO: Test Me.
+static MA_INLINE void ma_hpf1_process_pcm_frame_s16(ma_hpf1* pHPF, ma_int16* pY, const ma_int16* pX)
+{
+ ma_uint32 c;
+ const ma_int32 a = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - pHPF->a.s32);
+ const ma_int32 b = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - a);
- This is currently untested, so for now we are just returning default devices.
- */
-#if 0 && !defined(MA_ANDROID)
- ma_bool32 isTerminated = MA_FALSE;
+ for (c = 0; c < pHPF->channels; c += 1) {
+ ma_int32 r1 = pHPF->r1[c].s32;
+ ma_int32 x = pX[c];
+ ma_int32 y;
- SLuint32 pDeviceIDs[128];
- SLint32 deviceCount = sizeof(pDeviceIDs) / sizeof(pDeviceIDs[0]);
+ y = (b*x - a*r1) >> MA_BIQUAD_FIXED_POINT_SHIFT;
- SLAudioIODeviceCapabilitiesItf deviceCaps;
- SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps);
- if (resultSL != SL_RESULT_SUCCESS) {
- /* The interface may not be supported so just report a default device. */
- goto return_default_device;
+ pY[c] = (ma_int16)y;
+ pHPF->r1[c].s32 = (ma_int32)y;
}
+}
- /* Playback */
- if (!isTerminated) {
- resultSL = (*deviceCaps)->GetAvailableAudioOutputs(deviceCaps, &deviceCount, pDeviceIDs);
- if (resultSL != SL_RESULT_SUCCESS) {
- return MA_NO_DEVICE;
- }
-
- for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- deviceInfo.id.opensl = pDeviceIDs[iDevice];
-
- SLAudioOutputDescriptor desc;
- resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc);
- if (resultSL == SL_RESULT_SUCCESS) {
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.pDeviceName, (size_t)-1);
+ma_result ma_hpf1_process_pcm_frames(ma_hpf1* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ ma_uint32 n;
- ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
- if (cbResult == MA_FALSE) {
- isTerminated = MA_TRUE;
- break;
- }
- }
- }
+ if (pHPF == NULL || pFramesOut == NULL || pFramesIn == NULL) {
+ return MA_INVALID_ARGS;
}
- /* Capture */
- if (!isTerminated) {
- resultSL = (*deviceCaps)->GetAvailableAudioInputs(deviceCaps, &deviceCount, pDeviceIDs);
- if (resultSL != SL_RESULT_SUCCESS) {
- return MA_NO_DEVICE;
- }
+ /* Note that the logic below needs to support in-place filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */
- for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- deviceInfo.id.opensl = pDeviceIDs[iDevice];
+ if (pHPF->format == ma_format_f32) {
+ /* */ float* pY = ( float*)pFramesOut;
+ const float* pX = (const float*)pFramesIn;
- SLAudioInputDescriptor desc;
- resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc);
- if (resultSL == SL_RESULT_SUCCESS) {
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.deviceName, (size_t)-1);
+ for (n = 0; n < frameCount; n += 1) {
+ ma_hpf1_process_pcm_frame_f32(pHPF, pY, pX);
+ pY += pHPF->channels;
+ pX += pHPF->channels;
+ }
+ } else if (pHPF->format == ma_format_s16) {
+ /* */ ma_int16* pY = ( ma_int16*)pFramesOut;
+ const ma_int16* pX = (const ma_int16*)pFramesIn;
- ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
- if (cbResult == MA_FALSE) {
- isTerminated = MA_TRUE;
- break;
- }
- }
+ for (n = 0; n < frameCount; n += 1) {
+ ma_hpf1_process_pcm_frame_s16(pHPF, pY, pX);
+ pY += pHPF->channels;
+ pX += pHPF->channels;
}
+ } else {
+ MA_ASSERT(MA_FALSE);
+ return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). */
}
return MA_SUCCESS;
-#else
- goto return_default_device;
-#endif
-
-return_default_device:;
- cbResult = MA_TRUE;
-
- /* Playback. */
- if (cbResult) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
- }
+}
- /* Capture. */
- if (cbResult) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
- cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+ma_uint32 ma_hpf1_get_latency(ma_hpf1* pHPF)
+{
+ if (pHPF == NULL) {
+ return 0;
}
- return MA_SUCCESS;
+ return 1;
}
-ma_result ma_context_get_device_info__opensl(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+
+static MA_INLINE ma_biquad_config ma_hpf2__get_biquad_config(const ma_hpf2_config* pConfig)
{
- ma_assert(pContext != NULL);
+ ma_biquad_config bqConfig;
+ double q;
+ double w;
+ double s;
+ double c;
+ double a;
- ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to get device info. */
- if (g_maOpenSLInitCounter == 0) {
- return MA_INVALID_OPERATION;
- }
+ MA_ASSERT(pConfig != NULL);
- /* No exclusive mode with OpenSL|ES. */
- if (shareMode == ma_share_mode_exclusive) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
- }
+ q = pConfig->q;
+ w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate;
+ s = ma_sin(w);
+ c = ma_cos(w);
+ a = s / (2*q);
- /*
- TODO: Test Me.
-
- This is currently untested, so for now we are just returning default devices.
- */
-#if 0 && !defined(MA_ANDROID)
- SLAudioIODeviceCapabilitiesItf deviceCaps;
- SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps);
- if (resultSL != SL_RESULT_SUCCESS) {
- /* The interface may not be supported so just report a default device. */
- goto return_default_device;
- }
+ bqConfig.b0 = (1 + c) / 2;
+ bqConfig.b1 = -(1 + c);
+ bqConfig.b2 = (1 + c) / 2;
+ bqConfig.a0 = 1 + a;
+ bqConfig.a1 = -2 * c;
+ bqConfig.a2 = 1 - a;
- if (deviceType == ma_device_type_playback) {
- SLAudioOutputDescriptor desc;
- resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, pDeviceID->opensl, &desc);
- if (resultSL != SL_RESULT_SUCCESS) {
- return MA_NO_DEVICE;
- }
+ bqConfig.format = pConfig->format;
+ bqConfig.channels = pConfig->channels;
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.pDeviceName, (size_t)-1);
- } else {
- SLAudioInputDescriptor desc;
- resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, pDeviceID->opensl, &desc);
- if (resultSL != SL_RESULT_SUCCESS) {
- return MA_NO_DEVICE;
- }
+ return bqConfig;
+}
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.deviceName, (size_t)-1);
+ma_result ma_hpf2_init(const ma_hpf2_config* pConfig, ma_hpf2* pHPF)
+{
+ ma_result result;
+ ma_biquad_config bqConfig;
+
+ if (pHPF == NULL) {
+ return MA_INVALID_ARGS;
}
- goto return_detailed_info;
-#else
- goto return_default_device;
-#endif
+ MA_ZERO_OBJECT(pHPF);
-return_default_device:
- if (pDeviceID != NULL) {
- if ((deviceType == ma_device_type_playback && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOOUTPUT) ||
- (deviceType == ma_device_type_capture && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOINPUT)) {
- return MA_NO_DEVICE; /* Don't know the device. */
- }
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- /* Name / Description */
- if (deviceType == ma_device_type_playback) {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- } else {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+ bqConfig = ma_hpf2__get_biquad_config(pConfig);
+ result = ma_biquad_init(&bqConfig, &pHPF->bq);
+ if (result != MA_SUCCESS) {
+ return result;
}
- goto return_detailed_info;
+ return MA_SUCCESS;
+}
+ma_result ma_hpf2_reinit(const ma_hpf2_config* pConfig, ma_hpf2* pHPF)
+{
+ ma_result result;
+ ma_biquad_config bqConfig;
-return_detailed_info:
+ if (pHPF == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
- /*
- For now we're just outputting a set of values that are supported by the API but not necessarily supported
- by the device natively. Later on we should work on this so that it more closely reflects the device's
- actual native format.
- */
- pDeviceInfo->minChannels = 1;
- pDeviceInfo->maxChannels = 2;
- pDeviceInfo->minSampleRate = 8000;
- pDeviceInfo->maxSampleRate = 48000;
- pDeviceInfo->formatCount = 2;
- pDeviceInfo->formats[0] = ma_format_u8;
- pDeviceInfo->formats[1] = ma_format_s16;
-#if defined(MA_ANDROID) && __ANDROID_API__ >= 21
- pDeviceInfo->formats[pDeviceInfo->formatCount] = ma_format_f32;
- pDeviceInfo->formatCount += 1;
-#endif
+ bqConfig = ma_hpf2__get_biquad_config(pConfig);
+ result = ma_biquad_reinit(&bqConfig, &pHPF->bq);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
return MA_SUCCESS;
}
-
-#ifdef MA_ANDROID
-/*void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, SLuint32 eventFlags, const void* pBuffer, SLuint32 bufferSize, SLuint32 dataUsed, void* pContext)*/
-void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData)
+static MA_INLINE void ma_hpf2_process_pcm_frame_s16(ma_hpf2* pHPF, ma_int16* pFrameOut, const ma_int16* pFrameIn)
{
- ma_device* pDevice = (ma_device*)pUserData;
- size_t periodSizeInBytes;
- ma_uint8* pBuffer;
- SLresult resultSL;
-
- ma_assert(pDevice != NULL);
-
- (void)pBufferQueue;
+ ma_biquad_process_pcm_frame_s16(&pHPF->bq, pFrameOut, pFrameIn);
+}
- /*
- For now, don't do anything unless the buffer was fully processed. From what I can tell, it looks like
- OpenSL|ES 1.1 improves on buffer queues to the point that we could much more intelligently handle this,
- but unfortunately it looks like Android is only supporting OpenSL|ES 1.0.1 for now :(
- */
+static MA_INLINE void ma_hpf2_process_pcm_frame_f32(ma_hpf2* pHPF, float* pFrameOut, const float* pFrameIn)
+{
+ ma_biquad_process_pcm_frame_f32(&pHPF->bq, pFrameOut, pFrameIn);
+}
- /* Don't do anything if the device is not started. */
- if (pDevice->state != MA_STATE_STARTED) {
- return;
+ma_result ma_hpf2_process_pcm_frames(ma_hpf2* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ if (pHPF == NULL) {
+ return MA_INVALID_ARGS;
}
- periodSizeInBytes = (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods) * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- pBuffer = pDevice->opensl.pBufferCapture + (pDevice->opensl.currentBufferIndexCapture * periodSizeInBytes);
-
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_capture(pDevice, (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods), pBuffer, &pDevice->opensl.duplexRB);
- } else {
- ma_device__send_frames_to_client(pDevice, (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods), pBuffer);
- }
+ return ma_biquad_process_pcm_frames(&pHPF->bq, pFramesOut, pFramesIn, frameCount);
+}
- resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pBuffer, periodSizeInBytes);
- if (resultSL != SL_RESULT_SUCCESS) {
- return;
+ma_uint32 ma_hpf2_get_latency(ma_hpf2* pHPF)
+{
+ if (pHPF == NULL) {
+ return 0;
}
- pDevice->opensl.currentBufferIndexCapture = (pDevice->opensl.currentBufferIndexCapture + 1) % pDevice->capture.internalPeriods;
+ return ma_biquad_get_latency(&pHPF->bq);
}
-void ma_buffer_queue_callback_playback__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData)
-{
- ma_device* pDevice = (ma_device*)pUserData;
- size_t periodSizeInBytes;
- ma_uint8* pBuffer;
- SLresult resultSL;
- ma_assert(pDevice != NULL);
+ma_hpf_config ma_hpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order)
+{
+ ma_hpf_config config;
- (void)pBufferQueue;
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.cutoffFrequency = cutoffFrequency;
+ config.order = ma_min(order, MA_MAX_FILTER_ORDER);
- /* Don't do anything if the device is not started. */
- if (pDevice->state != MA_STATE_STARTED) {
- return;
- }
+ return config;
+}
- periodSizeInBytes = (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods) * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- pBuffer = pDevice->opensl.pBufferPlayback + (pDevice->opensl.currentBufferIndexPlayback * periodSizeInBytes);
+static ma_result ma_hpf_reinit__internal(const ma_hpf_config* pConfig, ma_hpf* pHPF, ma_bool32 isNew)
+{
+ ma_result result;
+ ma_uint32 hpf1Count;
+ ma_uint32 hpf2Count;
+ ma_uint32 ihpf1;
+ ma_uint32 ihpf2;
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_playback(pDevice, (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods), pBuffer, &pDevice->opensl.duplexRB);
- } else {
- ma_device__read_frames_from_client(pDevice, (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods), pBuffer);
+ if (pHPF == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pBuffer, periodSizeInBytes);
- if (resultSL != SL_RESULT_SUCCESS) {
- return;
+ /* Only supporting f32 and s16. */
+ if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) {
+ return MA_INVALID_ARGS;
}
- pDevice->opensl.currentBufferIndexPlayback = (pDevice->opensl.currentBufferIndexPlayback + 1) % pDevice->playback.internalPeriods;
-}
-#endif
+ /* The format cannot be changed after initialization. */
+ if (pHPF->format != ma_format_unknown && pHPF->format != pConfig->format) {
+ return MA_INVALID_OPERATION;
+ }
-void ma_device_uninit__opensl(ma_device* pDevice)
-{
- ma_assert(pDevice != NULL);
+ /* The channel count cannot be changed after initialization. */
+ if (pHPF->channels != 0 && pHPF->channels != pConfig->channels) {
+ return MA_INVALID_OPERATION;
+ }
- ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before uninitializing the device. */
- if (g_maOpenSLInitCounter == 0) {
- return;
+ if (pConfig->order > MA_MAX_FILTER_ORDER) {
+ return MA_INVALID_ARGS;
}
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- if (pDevice->opensl.pAudioRecorderObj) {
- MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioRecorderObj);
- }
+ hpf1Count = pConfig->order % 2;
+ hpf2Count = pConfig->order / 2;
- ma_free(pDevice->opensl.pBufferCapture);
- }
+ MA_ASSERT(hpf1Count <= ma_countof(pHPF->hpf1));
+ MA_ASSERT(hpf2Count <= ma_countof(pHPF->hpf2));
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- if (pDevice->opensl.pAudioPlayerObj) {
- MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioPlayerObj);
+ /* The filter order can't change between reinits. */
+ if (!isNew) {
+ if (pHPF->hpf1Count != hpf1Count || pHPF->hpf2Count != hpf2Count) {
+ return MA_INVALID_OPERATION;
}
- if (pDevice->opensl.pOutputMixObj) {
- MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Destroy((SLObjectItf)pDevice->opensl.pOutputMixObj);
- }
-
- ma_free(pDevice->opensl.pBufferPlayback);
}
- if (pDevice->type == ma_device_type_duplex) {
- ma_pcm_rb_uninit(&pDevice->opensl.duplexRB);
- }
-}
+ for (ihpf1 = 0; ihpf1 < hpf1Count; ihpf1 += 1) {
+ ma_hpf1_config hpf1Config = ma_hpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency);
-#if defined(MA_ANDROID) && __ANDROID_API__ >= 21
-typedef SLAndroidDataFormat_PCM_EX ma_SLDataFormat_PCM;
-#else
-typedef SLDataFormat_PCM ma_SLDataFormat_PCM;
-#endif
+ if (isNew) {
+ result = ma_hpf1_init(&hpf1Config, &pHPF->hpf1[ihpf1]);
+ } else {
+ result = ma_hpf1_reinit(&hpf1Config, &pHPF->hpf1[ihpf1]);
+ }
-ma_result ma_SLDataFormat_PCM_init__opensl(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* channelMap, ma_SLDataFormat_PCM* pDataFormat)
-{
-#if defined(MA_ANDROID) && __ANDROID_API__ >= 21
- if (format == ma_format_f32) {
- pDataFormat->formatType = SL_ANDROID_DATAFORMAT_PCM_EX;
- pDataFormat->representation = SL_ANDROID_PCM_REPRESENTATION_FLOAT;
- } else {
- pDataFormat->formatType = SL_DATAFORMAT_PCM;
+ if (result != MA_SUCCESS) {
+ return result;
+ }
}
-#else
- pDataFormat->formatType = SL_DATAFORMAT_PCM;
-#endif
- pDataFormat->numChannels = channels;
- ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = ma_round_to_standard_sample_rate__opensl(sampleRate * 1000); /* In millihertz. Annoyingly, the sample rate variable is named differently between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM */
- pDataFormat->bitsPerSample = ma_get_bytes_per_sample(format)*8;
- pDataFormat->channelMask = ma_channel_map_to_channel_mask__opensl(channelMap, channels);
- pDataFormat->endianness = (ma_is_little_endian()) ? SL_BYTEORDER_LITTLEENDIAN : SL_BYTEORDER_BIGENDIAN;
+ for (ihpf2 = 0; ihpf2 < hpf2Count; ihpf2 += 1) {
+ ma_hpf2_config hpf2Config;
+ double q;
+ double a;
- /*
- Android has a few restrictions on the format as documented here: https://developer.android.com/ndk/guides/audio/opensl-for-android.html
- - Only mono and stereo is supported.
- - Only u8 and s16 formats are supported.
- - Maximum sample rate of 48000.
- */
-#ifdef MA_ANDROID
- if (pDataFormat->numChannels > 2) {
- pDataFormat->numChannels = 2;
- }
-#if __ANDROID_API__ >= 21
- if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) {
- /* It's floating point. */
- ma_assert(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT);
- if (pDataFormat->bitsPerSample > 32) {
- pDataFormat->bitsPerSample = 32;
+ /* Tempting to use 0.707107, but won't result in a Butterworth filter if the order is > 2. */
+ if (hpf1Count == 1) {
+ a = (1 + ihpf2*1) * (MA_PI_D/(pConfig->order*1)); /* Odd order. */
+ } else {
+ a = (1 + ihpf2*2) * (MA_PI_D/(pConfig->order*2)); /* Even order. */
}
- } else {
- if (pDataFormat->bitsPerSample > 16) {
- pDataFormat->bitsPerSample = 16;
+ q = 1 / (2*ma_cos(a));
+
+ hpf2Config = ma_hpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q);
+
+ if (isNew) {
+ result = ma_hpf2_init(&hpf2Config, &pHPF->hpf2[ihpf2]);
+ } else {
+ result = ma_hpf2_reinit(&hpf2Config, &pHPF->hpf2[ihpf2]);
+ }
+
+ if (result != MA_SUCCESS) {
+ return result;
}
}
-#else
- if (pDataFormat->bitsPerSample > 16) {
- pDataFormat->bitsPerSample = 16;
- }
-#endif
- if (((SLDataFormat_PCM*)pDataFormat)->samplesPerSec > SL_SAMPLINGRATE_48) {
- ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = SL_SAMPLINGRATE_48;
- }
-#endif
- pDataFormat->containerSize = pDataFormat->bitsPerSample; /* Always tightly packed for now. */
+ pHPF->hpf1Count = hpf1Count;
+ pHPF->hpf2Count = hpf2Count;
+ pHPF->format = pConfig->format;
+ pHPF->channels = pConfig->channels;
return MA_SUCCESS;
}
-ma_result ma_deconstruct_SLDataFormat_PCM__opensl(ma_SLDataFormat_PCM* pDataFormat, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap)
+ma_result ma_hpf_init(const ma_hpf_config* pConfig, ma_hpf* pHPF)
{
- ma_bool32 isFloatingPoint = MA_FALSE;
-#if defined(MA_ANDROID) && __ANDROID_API__ >= 21
- if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) {
- ma_assert(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT);
- isFloatingPoint = MA_TRUE;
- }
-#endif
- if (isFloatingPoint) {
- if (pDataFormat->bitsPerSample == 32) {
- *pFormat = ma_format_f32;
- }
- } else {
- if (pDataFormat->bitsPerSample == 8) {
- *pFormat = ma_format_u8;
- } else if (pDataFormat->bitsPerSample == 16) {
- *pFormat = ma_format_s16;
- } else if (pDataFormat->bitsPerSample == 24) {
- *pFormat = ma_format_s24;
- } else if (pDataFormat->bitsPerSample == 32) {
- *pFormat = ma_format_s32;
- }
+ if (pHPF == NULL) {
+ return MA_INVALID_ARGS;
}
- *pChannels = pDataFormat->numChannels;
- *pSampleRate = ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec / 1000;
- ma_channel_mask_to_channel_map__opensl(pDataFormat->channelMask, pDataFormat->numChannels, pChannelMap);
+ MA_ZERO_OBJECT(pHPF);
- return MA_SUCCESS;
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ return ma_hpf_reinit__internal(pConfig, pHPF, /*isNew*/MA_TRUE);
}
-ma_result ma_device_init__opensl(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+ma_result ma_hpf_reinit(const ma_hpf_config* pConfig, ma_hpf* pHPF)
{
-#ifdef MA_ANDROID
- SLDataLocator_AndroidSimpleBufferQueue queue;
- SLresult resultSL;
- ma_uint32 bufferSizeInFrames;
- size_t bufferSizeInBytes;
- const SLInterfaceID itfIDs1[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
- const SLboolean itfIDsRequired1[] = {SL_BOOLEAN_TRUE};
-#endif
+ return ma_hpf_reinit__internal(pConfig, pHPF, /*isNew*/MA_FALSE);
+}
- (void)pContext;
+ma_result ma_hpf_process_pcm_frames(ma_hpf* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ ma_result result;
+ ma_uint32 ihpf1;
+ ma_uint32 ihpf2;
- ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to initialize a new device. */
- if (g_maOpenSLInitCounter == 0) {
- return MA_INVALID_OPERATION;
+ if (pHPF == NULL) {
+ return MA_INVALID_ARGS;
}
- /*
- For now, only supporting Android implementations of OpenSL|ES since that's the only one I've
- been able to test with and I currently depend on Android-specific extensions (simple buffer
- queues).
- */
-#ifdef MA_ANDROID
- /* No exclusive mode with OpenSL|ES. */
- if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
- ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
+ /* Faster path for in-place. */
+ if (pFramesOut == pFramesIn) {
+ for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) {
+ result = ma_hpf1_process_pcm_frames(&pHPF->hpf1[ihpf1], pFramesOut, pFramesOut, frameCount);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
+
+ for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) {
+ result = ma_hpf2_process_pcm_frames(&pHPF->hpf2[ihpf2], pFramesOut, pFramesOut, frameCount);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
}
- /* Now we can start initializing the device properly. */
- ma_assert(pDevice != NULL);
- ma_zero_object(&pDevice->opensl);
+ /* Slightly slower path for copying. */
+ if (pFramesOut != pFramesIn) {
+        ma_uint64 iFrame;
- queue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
- queue.numBuffers = pConfig->periods;
+ /* */ if (pHPF->format == ma_format_f32) {
+ /* */ float* pFramesOutF32 = ( float*)pFramesOut;
+ const float* pFramesInF32 = (const float*)pFramesIn;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ MA_COPY_MEMORY(pFramesOutF32, pFramesInF32, ma_get_bytes_per_frame(pHPF->format, pHPF->channels));
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- ma_SLDataFormat_PCM pcm;
- SLDataLocator_IODevice locatorDevice;
- SLDataSource source;
- SLDataSink sink;
+ for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) {
+ ma_hpf1_process_pcm_frame_f32(&pHPF->hpf1[ihpf1], pFramesOutF32, pFramesOutF32);
+ }
- ma_SLDataFormat_PCM_init__opensl(pConfig->capture.format, pConfig->capture.channels, pConfig->sampleRate, pConfig->capture.channelMap, &pcm);
+ for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) {
+ ma_hpf2_process_pcm_frame_f32(&pHPF->hpf2[ihpf2], pFramesOutF32, pFramesOutF32);
+ }
- locatorDevice.locatorType = SL_DATALOCATOR_IODEVICE;
- locatorDevice.deviceType = SL_IODEVICE_AUDIOINPUT;
- locatorDevice.deviceID = (pConfig->capture.pDeviceID == NULL) ? SL_DEFAULTDEVICEID_AUDIOINPUT : pConfig->capture.pDeviceID->opensl;
- locatorDevice.device = NULL;
+ pFramesOutF32 += pHPF->channels;
+ pFramesInF32 += pHPF->channels;
+ }
+ } else if (pHPF->format == ma_format_s16) {
+ /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut;
+ const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn;
- source.pLocator = &locatorDevice;
- source.pFormat = NULL;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ MA_COPY_MEMORY(pFramesOutS16, pFramesInS16, ma_get_bytes_per_frame(pHPF->format, pHPF->channels));
- sink.pLocator = &queue;
- sink.pFormat = (SLDataFormat_PCM*)&pcm;
+ for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) {
+ ma_hpf1_process_pcm_frame_s16(&pHPF->hpf1[ihpf1], pFramesOutS16, pFramesOutS16);
+ }
- resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, 1, itfIDs1, itfIDsRequired1);
- if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) {
- /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */
- pcm.formatType = SL_DATAFORMAT_PCM;
- pcm.numChannels = 1;
- ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16; /* The name of the sample rate variable is different between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM. */
- pcm.bitsPerSample = 16;
- pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. */
- pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
- resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, 1, itfIDs1, itfIDsRequired1);
- }
+ for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) {
+ ma_hpf2_process_pcm_frame_s16(&pHPF->hpf2[ihpf2], pFramesOutS16, pFramesOutS16);
+ }
- if (resultSL != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio recorder.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
+ pFramesOutS16 += pHPF->channels;
+ pFramesInS16 += pHPF->channels;
+ }
+ } else {
+ MA_ASSERT(MA_FALSE);
+ return MA_INVALID_OPERATION; /* Should never hit this. */
}
+ }
- if (MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Realize((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_BOOLEAN_FALSE) != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio recorder.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ return MA_SUCCESS;
+}
- if (MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_IID_RECORD, &pDevice->opensl.pAudioRecorder) != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_RECORD interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ma_uint32 ma_hpf_get_latency(ma_hpf* pHPF)
+{
+ if (pHPF == NULL) {
+ return 0;
+ }
- if (MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueueCapture) != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ return pHPF->hpf2Count*2 + pHPF->hpf1Count;
+}
- if (MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, ma_buffer_queue_callback_capture__opensl_android, pDevice) != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
- /* The internal format is determined by the "pcm" object. */
- ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDevice->capture.internalFormat, &pDevice->capture.internalChannels, &pDevice->capture.internalSampleRate, pDevice->capture.internalChannelMap);
+/**************************************************************************************************************************************************************
- /* Buffer. */
- bufferSizeInFrames = pConfig->bufferSizeInFrames;
- if (bufferSizeInFrames == 0) {
- bufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, pDevice->capture.internalSampleRate);
- }
- pDevice->capture.internalPeriods = pConfig->periods;
- pDevice->capture.internalBufferSizeInFrames = (bufferSizeInFrames / pDevice->capture.internalPeriods) * pDevice->capture.internalPeriods;
- pDevice->opensl.currentBufferIndexCapture = 0;
+Band-Pass Filtering
- bufferSizeInBytes = pDevice->capture.internalBufferSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- pDevice->opensl.pBufferCapture = (ma_uint8*)ma_malloc(bufferSizeInBytes);
- if (pDevice->opensl.pBufferCapture == NULL) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer.", MA_OUT_OF_MEMORY);
- }
- MA_ZERO_MEMORY(pDevice->opensl.pBufferCapture, bufferSizeInBytes);
- }
+**************************************************************************************************************************************************************/
+ma_bpf2_config ma_bpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q)
+{
+ ma_bpf2_config config;
+
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.cutoffFrequency = cutoffFrequency;
+ config.q = q;
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- ma_SLDataFormat_PCM pcm;
- SLDataSource source;
- SLDataLocator_OutputMix outmixLocator;
- SLDataSink sink;
+ /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. */
+ if (config.q == 0) {
+ config.q = 0.707107;
+ }
- ma_SLDataFormat_PCM_init__opensl(pConfig->playback.format, pConfig->playback.channels, pConfig->sampleRate, pConfig->playback.channelMap, &pcm);
+ return config;
+}
- resultSL = (*g_maEngineSL)->CreateOutputMix(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pOutputMixObj, 0, NULL, NULL);
- if (resultSL != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create output mix.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
- if (MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Realize((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_BOOLEAN_FALSE)) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize output mix object.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+static MA_INLINE ma_biquad_config ma_bpf2__get_biquad_config(const ma_bpf2_config* pConfig)
+{
+ ma_biquad_config bqConfig;
+ double q;
+ double w;
+ double s;
+ double c;
+ double a;
- if (MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->GetInterface((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_IID_OUTPUTMIX, &pDevice->opensl.pOutputMix) != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_OUTPUTMIX interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ MA_ASSERT(pConfig != NULL);
- /* Set the output device. */
- if (pConfig->playback.pDeviceID != NULL) {
- SLuint32 deviceID_OpenSL = pConfig->playback.pDeviceID->opensl;
- MA_OPENSL_OUTPUTMIX(pDevice->opensl.pOutputMix)->ReRoute((SLOutputMixItf)pDevice->opensl.pOutputMix, 1, &deviceID_OpenSL);
- }
-
- source.pLocator = &queue;
- source.pFormat = (SLDataFormat_PCM*)&pcm;
+ q = pConfig->q;
+ w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate;
+ s = ma_sin(w);
+ c = ma_cos(w);
+ a = s / (2*q);
- outmixLocator.locatorType = SL_DATALOCATOR_OUTPUTMIX;
- outmixLocator.outputMix = (SLObjectItf)pDevice->opensl.pOutputMixObj;
+ bqConfig.b0 = q * a;
+ bqConfig.b1 = 0;
+ bqConfig.b2 = -q * a;
+ bqConfig.a0 = 1 + a;
+ bqConfig.a1 = -2 * c;
+ bqConfig.a2 = 1 - a;
- sink.pLocator = &outmixLocator;
- sink.pFormat = NULL;
+ bqConfig.format = pConfig->format;
+ bqConfig.channels = pConfig->channels;
- resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, 1, itfIDs1, itfIDsRequired1);
- if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) {
- /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */
- pcm.formatType = SL_DATAFORMAT_PCM;
- pcm.numChannels = 2;
- ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16;
- pcm.bitsPerSample = 16;
- pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. */
- pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
- resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, 1, itfIDs1, itfIDsRequired1);
- }
+ return bqConfig;
+}
- if (resultSL != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio player.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ma_result ma_bpf2_init(const ma_bpf2_config* pConfig, ma_bpf2* pBPF)
+{
+ ma_result result;
+ ma_biquad_config bqConfig;
- if (MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Realize((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_BOOLEAN_FALSE) != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio player.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ if (pBPF == NULL) {
+ return MA_INVALID_ARGS;
+ }
- if (MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_IID_PLAY, &pDevice->opensl.pAudioPlayer) != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_PLAY interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ MA_ZERO_OBJECT(pBPF);
- if (MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueuePlayback) != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
- if (MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, ma_buffer_queue_callback_playback__opensl_android, pDevice) != SL_RESULT_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
- }
+ bqConfig = ma_bpf2__get_biquad_config(pConfig);
+ result = ma_biquad_init(&bqConfig, &pBPF->bq);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- /* The internal format is determined by the "pcm" object. */
- ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDevice->playback.internalFormat, &pDevice->playback.internalChannels, &pDevice->playback.internalSampleRate, pDevice->playback.internalChannelMap);
+ return MA_SUCCESS;
+}
- /* Buffer. */
- bufferSizeInFrames = pConfig->bufferSizeInFrames;
- if (bufferSizeInFrames == 0) {
- bufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, pDevice->playback.internalSampleRate);
- }
- pDevice->playback.internalPeriods = pConfig->periods;
- pDevice->playback.internalBufferSizeInFrames = (bufferSizeInFrames / pDevice->playback.internalPeriods) * pDevice->playback.internalPeriods;
- pDevice->opensl.currentBufferIndexPlayback = 0;
+ma_result ma_bpf2_reinit(const ma_bpf2_config* pConfig, ma_bpf2* pBPF)
+{
+ ma_result result;
+ ma_biquad_config bqConfig;
- bufferSizeInBytes = pDevice->playback.internalBufferSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- pDevice->opensl.pBufferPlayback = (ma_uint8*)ma_malloc(bufferSizeInBytes);
- if (pDevice->opensl.pBufferPlayback == NULL) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer.", MA_OUT_OF_MEMORY);
- }
- MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, bufferSizeInBytes);
+ if (pBPF == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pConfig->deviceType == ma_device_type_duplex) {
- ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames);
- ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->opensl.duplexRB);
- if (result != MA_SUCCESS) {
- ma_device_uninit__opensl(pDevice);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to initialize ring buffer.", result);
- }
+ bqConfig = ma_bpf2__get_biquad_config(pConfig);
+ result = ma_biquad_reinit(&bqConfig, &pBPF->bq);
+ if (result != MA_SUCCESS) {
+ return result;
}
return MA_SUCCESS;
-#else
- return MA_NO_BACKEND; /* Non-Android implementations are not supported. */
-#endif
}
-ma_result ma_device_start__opensl(ma_device* pDevice)
+static MA_INLINE void ma_bpf2_process_pcm_frame_s16(ma_bpf2* pBPF, ma_int16* pFrameOut, const ma_int16* pFrameIn)
{
- SLresult resultSL;
- size_t periodSizeInBytes;
- ma_uint32 iPeriod;
+ ma_biquad_process_pcm_frame_s16(&pBPF->bq, pFrameOut, pFrameIn);
+}
- ma_assert(pDevice != NULL);
+static MA_INLINE void ma_bpf2_process_pcm_frame_f32(ma_bpf2* pBPF, float* pFrameOut, const float* pFrameIn)
+{
+ ma_biquad_process_pcm_frame_f32(&pBPF->bq, pFrameOut, pFrameIn);
+}
- ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to start the device. */
- if (g_maOpenSLInitCounter == 0) {
- return MA_INVALID_OPERATION;
+ma_result ma_bpf2_process_pcm_frames(ma_bpf2* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ if (pBPF == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_RECORDING);
- if (resultSL != SL_RESULT_SUCCESS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal capture device.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
+ return ma_biquad_process_pcm_frames(&pBPF->bq, pFramesOut, pFramesIn, frameCount);
+}
- periodSizeInBytes = (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods) * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) {
- resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pDevice->opensl.pBufferCapture + (periodSizeInBytes * iPeriod), periodSizeInBytes);
- if (resultSL != SL_RESULT_SUCCESS) {
- MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for capture device.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
- }
+ma_uint32 ma_bpf2_get_latency(ma_bpf2* pBPF)
+{
+ if (pBPF == NULL) {
+ return 0;
}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_PLAYING);
- if (resultSL != SL_RESULT_SUCCESS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal playback device.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
+ return ma_biquad_get_latency(&pBPF->bq);
+}
- /* In playback mode (no duplex) we need to load some initial buffers. In duplex mode we need to enqueu silent buffers. */
- if (pDevice->type == ma_device_type_duplex) {
- MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, pDevice->playback.internalBufferSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
- } else {
- ma_device__read_frames_from_client(pDevice, pDevice->playback.internalBufferSizeInFrames, pDevice->opensl.pBufferPlayback);
- }
- periodSizeInBytes = (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods) * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) {
- resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pDevice->opensl.pBufferPlayback + (periodSizeInBytes * iPeriod), periodSizeInBytes);
- if (resultSL != SL_RESULT_SUCCESS) {
- MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED);
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for playback device.", MA_FAILED_TO_START_BACKEND_DEVICE);
- }
- }
- }
+ma_bpf_config ma_bpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order)
+{
+ ma_bpf_config config;
- return MA_SUCCESS;
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.cutoffFrequency = cutoffFrequency;
+ config.order = ma_min(order, MA_MAX_FILTER_ORDER);
+
+ return config;
}
-ma_result ma_device_stop__opensl(ma_device* pDevice)
+static ma_result ma_bpf_reinit__internal(const ma_bpf_config* pConfig, ma_bpf* pBPF, ma_bool32 isNew)
{
- SLresult resultSL;
- ma_stop_proc onStop;
+ ma_result result;
+ ma_uint32 bpf2Count;
+ ma_uint32 ibpf2;
- ma_assert(pDevice != NULL);
+ if (pBPF == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
- ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before stopping/uninitializing the device. */
- if (g_maOpenSLInitCounter == 0) {
+ /* Only supporting f32 and s16. */
+ if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) {
+ return MA_INVALID_ARGS;
+ }
+
+ /* The format cannot be changed after initialization. */
+ if (pBPF->format != ma_format_unknown && pBPF->format != pConfig->format) {
return MA_INVALID_OPERATION;
}
- /* TODO: Wait until all buffers have been processed. Hint: Maybe SLAndroidSimpleBufferQueue::GetState() could be used in a loop? */
+ /* The channel count cannot be changed after initialization. */
+ if (pBPF->channels != 0 && pBPF->channels != pConfig->channels) {
+ return MA_INVALID_OPERATION;
+ }
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED);
- if (resultSL != SL_RESULT_SUCCESS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal capture device.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
- }
+ if (pConfig->order > MA_MAX_FILTER_ORDER) {
+ return MA_INVALID_ARGS;
+ }
- MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture);
+    /* The filter order must be even, since the filter is built from cascaded second-order sections. */
+ if ((pConfig->order & 0x1) != 0) {
+ return MA_INVALID_ARGS;
}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED);
- if (resultSL != SL_RESULT_SUCCESS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal playback device.", MA_FAILED_TO_STOP_BACKEND_DEVICE);
- }
+ bpf2Count = pConfig->order / 2;
- MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback);
- }
+ MA_ASSERT(bpf2Count <= ma_countof(pBPF->bpf2));
- /* Make sure the client is aware that the device has stopped. There may be an OpenSL|ES callback for this, but I haven't found it. */
- onStop = pDevice->onStop;
- if (onStop) {
- onStop(pDevice);
+ /* The filter order can't change between reinits. */
+ if (!isNew) {
+ if (pBPF->bpf2Count != bpf2Count) {
+ return MA_INVALID_OPERATION;
+ }
}
- return MA_SUCCESS;
-}
+ for (ibpf2 = 0; ibpf2 < bpf2Count; ibpf2 += 1) {
+ ma_bpf2_config bpf2Config;
+ double q;
+ /* TODO: Calculate Q to make this a proper Butterworth filter. */
+ q = 0.707107;
-ma_result ma_context_uninit__opensl(ma_context* pContext)
-{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_opensl);
- (void)pContext;
+ bpf2Config = ma_bpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q);
- /* Uninit global data. */
- if (g_maOpenSLInitCounter > 0) {
- if (ma_atomic_decrement_32(&g_maOpenSLInitCounter) == 0) {
- (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL);
+ if (isNew) {
+ result = ma_bpf2_init(&bpf2Config, &pBPF->bpf2[ibpf2]);
+ } else {
+ result = ma_bpf2_reinit(&bpf2Config, &pBPF->bpf2[ibpf2]);
+ }
+
+ if (result != MA_SUCCESS) {
+ return result;
}
}
+ pBPF->bpf2Count = bpf2Count;
+ pBPF->format = pConfig->format;
+ pBPF->channels = pConfig->channels;
+
return MA_SUCCESS;
}
-ma_result ma_context_init__opensl(const ma_context_config* pConfig, ma_context* pContext)
+ma_result ma_bpf_init(const ma_bpf_config* pConfig, ma_bpf* pBPF)
{
- ma_assert(pContext != NULL);
+ if (pBPF == NULL) {
+ return MA_INVALID_ARGS;
+ }
- (void)pConfig;
+ MA_ZERO_OBJECT(pBPF);
- /* Initialize global data first if applicable. */
- if (ma_atomic_increment_32(&g_maOpenSLInitCounter) == 1) {
- SLresult resultSL = slCreateEngine(&g_maEngineObjectSL, 0, NULL, 0, NULL, NULL);
- if (resultSL != SL_RESULT_SUCCESS) {
- ma_atomic_decrement_32(&g_maOpenSLInitCounter);
- return MA_NO_BACKEND;
- }
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
- (*g_maEngineObjectSL)->Realize(g_maEngineObjectSL, SL_BOOLEAN_FALSE);
+ return ma_bpf_reinit__internal(pConfig, pBPF, /*isNew*/MA_TRUE);
+}
- resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_ENGINE, &g_maEngineSL);
- if (resultSL != SL_RESULT_SUCCESS) {
- (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL);
- ma_atomic_decrement_32(&g_maOpenSLInitCounter);
- return MA_NO_BACKEND;
+ma_result ma_bpf_reinit(const ma_bpf_config* pConfig, ma_bpf* pBPF)
+{
+ return ma_bpf_reinit__internal(pConfig, pBPF, /*isNew*/MA_FALSE);
+}
+
+ma_result ma_bpf_process_pcm_frames(ma_bpf* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ ma_result result;
+ ma_uint32 ibpf2;
+
+ if (pBPF == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ /* Faster path for in-place. */
+ if (pFramesOut == pFramesIn) {
+ for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) {
+ result = ma_bpf2_process_pcm_frames(&pBPF->bpf2[ibpf2], pFramesOut, pFramesOut, frameCount);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
}
}
- pContext->isBackendAsynchronous = MA_TRUE;
+ /* Slightly slower path for copying. */
+ if (pFramesOut != pFramesIn) {
+ ma_uint32 iFrame;
- pContext->onUninit = ma_context_uninit__opensl;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__opensl;
- pContext->onEnumDevices = ma_context_enumerate_devices__opensl;
- pContext->onGetDeviceInfo = ma_context_get_device_info__opensl;
- pContext->onDeviceInit = ma_device_init__opensl;
- pContext->onDeviceUninit = ma_device_uninit__opensl;
- pContext->onDeviceStart = ma_device_start__opensl;
- pContext->onDeviceStop = ma_device_stop__opensl;
+ /* */ if (pBPF->format == ma_format_f32) {
+ /* */ float* pFramesOutF32 = ( float*)pFramesOut;
+ const float* pFramesInF32 = (const float*)pFramesIn;
- return MA_SUCCESS;
-}
-#endif /* OpenSL|ES */
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ MA_COPY_MEMORY(pFramesOutF32, pFramesInF32, ma_get_bytes_per_frame(pBPF->format, pBPF->channels));
+ for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) {
+ ma_bpf2_process_pcm_frame_f32(&pBPF->bpf2[ibpf2], pFramesOutF32, pFramesOutF32);
+ }
-/******************************************************************************
+ pFramesOutF32 += pBPF->channels;
+ pFramesInF32 += pBPF->channels;
+ }
+ } else if (pBPF->format == ma_format_s16) {
+ /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut;
+ const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn;
-Web Audio Backend
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ MA_COPY_MEMORY(pFramesOutS16, pFramesInS16, ma_get_bytes_per_frame(pBPF->format, pBPF->channels));
-******************************************************************************/
-#ifdef MA_HAS_WEBAUDIO
-#include
+ for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) {
+ ma_bpf2_process_pcm_frame_s16(&pBPF->bpf2[ibpf2], pFramesOutS16, pFramesOutS16);
+ }
-ma_bool32 ma_is_capture_supported__webaudio()
-{
- return EM_ASM_INT({
- return (navigator.mediaDevices !== undefined && navigator.mediaDevices.getUserMedia !== undefined);
- }, 0) != 0; /* Must pass in a dummy argument for C99 compatibility. */
+ pFramesOutS16 += pBPF->channels;
+ pFramesInS16 += pBPF->channels;
+ }
+ } else {
+ MA_ASSERT(MA_FALSE);
+ return MA_INVALID_OPERATION; /* Should never hit this. */
+ }
+ }
+
+ return MA_SUCCESS;
}
-#ifdef __cplusplus
-extern "C" {
-#endif
-EMSCRIPTEN_KEEPALIVE void ma_device_process_pcm_frames_capture__webaudio(ma_device* pDevice, int frameCount, float* pFrames)
+ma_uint32 ma_bpf_get_latency(ma_bpf* pBPF)
{
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_capture(pDevice, (ma_uint32)frameCount, pFrames, &pDevice->webaudio.duplexRB);
- } else {
- ma_device__send_frames_to_client(pDevice, (ma_uint32)frameCount, pFrames); /* Send directly to the client. */
+ if (pBPF == NULL) {
+ return 0;
}
+
+ return pBPF->bpf2Count*2;
}
-EMSCRIPTEN_KEEPALIVE void ma_device_process_pcm_frames_playback__webaudio(ma_device* pDevice, int frameCount, float* pFrames)
+
+/**************************************************************************************************************************************************************
+
+Notching Filter
+
+**************************************************************************************************************************************************************/
+ma_notch2_config ma_notch2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency)
{
- if (pDevice->type == ma_device_type_duplex) {
- ma_device__handle_duplex_callback_playback(pDevice, (ma_uint32)frameCount, pFrames, &pDevice->webaudio.duplexRB);
- } else {
- ma_device__read_frames_from_client(pDevice, (ma_uint32)frameCount, pFrames); /* Read directly from the device. */
+ ma_notch2_config config;
+
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.q = q;
+ config.frequency = frequency;
+
+ if (config.q == 0) {
+ config.q = 0.707107;
}
+
+ return config;
}
-#ifdef __cplusplus
-}
-#endif
-ma_bool32 ma_context_is_device_id_equal__webaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1)
+
+static MA_INLINE ma_biquad_config ma_notch2__get_biquad_config(const ma_notch2_config* pConfig)
{
- ma_assert(pContext != NULL);
- ma_assert(pID0 != NULL);
- ma_assert(pID1 != NULL);
- (void)pContext;
+ ma_biquad_config bqConfig;
+ double q;
+ double w;
+ double s;
+ double c;
+ double a;
- return ma_strcmp(pID0->webaudio, pID1->webaudio) == 0;
+ MA_ASSERT(pConfig != NULL);
+
+ q = pConfig->q;
+ w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate;
+ s = ma_sin(w);
+ c = ma_cos(w);
+ a = s / (2*q);
+
+ bqConfig.b0 = 1;
+ bqConfig.b1 = -2 * c;
+ bqConfig.b2 = 1;
+ bqConfig.a0 = 1 + a;
+ bqConfig.a1 = -2 * c;
+ bqConfig.a2 = 1 - a;
+
+ bqConfig.format = pConfig->format;
+ bqConfig.channels = pConfig->channels;
+
+ return bqConfig;
}
-ma_result ma_context_enumerate_devices__webaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+ma_result ma_notch2_init(const ma_notch2_config* pConfig, ma_notch2* pFilter)
{
- ma_bool32 cbResult = MA_TRUE;
+ ma_result result;
+ ma_biquad_config bqConfig;
- ma_assert(pContext != NULL);
- ma_assert(callback != NULL);
+ if (pFilter == NULL) {
+ return MA_INVALID_ARGS;
+ }
- /* Only supporting default devices for now. */
+ MA_ZERO_OBJECT(pFilter);
- /* Playback. */
- if (cbResult) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- /* Capture. */
- if (cbResult) {
- if (ma_is_capture_supported__webaudio()) {
- ma_device_info deviceInfo;
- ma_zero_object(&deviceInfo);
- ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
- cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
- }
+ bqConfig = ma_notch2__get_biquad_config(pConfig);
+ result = ma_biquad_init(&bqConfig, &pFilter->bq);
+ if (result != MA_SUCCESS) {
+ return result;
}
- return MA_SUCCESS;
+ return MA_SUCCESS;
+}
+
+ma_result ma_notch2_reinit(const ma_notch2_config* pConfig, ma_notch2* pFilter)
+{
+ ma_result result;
+ ma_biquad_config bqConfig;
+
+ if (pFilter == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ bqConfig = ma_notch2__get_biquad_config(pConfig);
+ result = ma_biquad_reinit(&bqConfig, &pFilter->bq);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ return MA_SUCCESS;
+}
+
+static MA_INLINE void ma_notch2_process_pcm_frame_s16(ma_notch2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn)
+{
+ ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn);
}
-ma_result ma_context_get_device_info__webaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+static MA_INLINE void ma_notch2_process_pcm_frame_f32(ma_notch2* pFilter, float* pFrameOut, const float* pFrameIn)
{
- ma_assert(pContext != NULL);
+ ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn);
+}
- /* No exclusive mode with Web Audio. */
- if (shareMode == ma_share_mode_exclusive) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
+ma_result ma_notch2_process_pcm_frames(ma_notch2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ if (pFilter == NULL) {
+ return MA_INVALID_ARGS;
}
- if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) {
- return MA_NO_DEVICE;
+ return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount);
+}
+
+ma_uint32 ma_notch2_get_latency(ma_notch2* pFilter)
+{
+ if (pFilter == NULL) {
+ return 0;
}
+ return ma_biquad_get_latency(&pFilter->bq);
+}
- ma_zero_memory(pDeviceInfo->id.webaudio, sizeof(pDeviceInfo->id.webaudio));
- /* Only supporting default devices for now. */
- if (deviceType == ma_device_type_playback) {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
- } else {
- ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
- }
- /* Web Audio can support any number of channels and sample rates. It only supports f32 formats, however. */
- pDeviceInfo->minChannels = 1;
- pDeviceInfo->maxChannels = MA_MAX_CHANNELS;
- if (pDeviceInfo->maxChannels > 32) {
- pDeviceInfo->maxChannels = 32; /* Maximum output channel count is 32 for createScriptProcessor() (JavaScript). */
- }
+/**************************************************************************************************************************************************************
- /* We can query the sample rate by just using a temporary audio context. */
- pDeviceInfo->minSampleRate = EM_ASM_INT({
- try {
- var temp = new (window.AudioContext || window.webkitAudioContext)();
- var sampleRate = temp.sampleRate;
- temp.close();
- return sampleRate;
- } catch(e) {
- return 0;
- }
- }, 0); /* Must pass in a dummy argument for C99 compatibility. */
- pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate;
- if (pDeviceInfo->minSampleRate == 0) {
- return MA_NO_DEVICE;
- }
+Peaking EQ Filter
- /* Web Audio only supports f32. */
- pDeviceInfo->formatCount = 1;
- pDeviceInfo->formats[0] = ma_format_f32;
+**************************************************************************************************************************************************************/
+ma_peak2_config ma_peak2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency)
+{
+ ma_peak2_config config;
- return MA_SUCCESS;
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.gainDB = gainDB;
+ config.q = q;
+ config.frequency = frequency;
+
+ if (config.q == 0) {
+ config.q = 0.707107;
+ }
+
+ return config;
}
-void ma_device_uninit_by_index__webaudio(ma_device* pDevice, ma_device_type deviceType, int deviceIndex)
+static MA_INLINE ma_biquad_config ma_peak2__get_biquad_config(const ma_peak2_config* pConfig)
{
- ma_assert(pDevice != NULL);
+ ma_biquad_config bqConfig;
+ double q;
+ double w;
+ double s;
+ double c;
+ double a;
+ double A;
- EM_ASM({
- var device = miniaudio.get_device_by_index($0);
+ MA_ASSERT(pConfig != NULL);
- /* Make sure all nodes are disconnected and marked for collection. */
- if (device.scriptNode !== undefined) {
- device.scriptNode.onaudioprocess = function(e) {}; /* We want to reset the callback to ensure it doesn't get called after AudioContext.close() has returned. Shouldn't happen since we're disconnecting, but just to be safe... */
- device.scriptNode.disconnect();
- device.scriptNode = undefined;
- }
- if (device.streamNode !== undefined) {
- device.streamNode.disconnect();
- device.streamNode = undefined;
- }
+ q = pConfig->q;
+ w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate;
+ s = ma_sin(w);
+ c = ma_cos(w);
+ a = s / (2*q);
+ A = ma_pow(10, (pConfig->gainDB / 40));
- /*
- Stop the device. I think there is a chance the callback could get fired after calling this, hence why we want
- to clear the callback before closing.
- */
- device.webaudio.close();
- device.webaudio = undefined;
+ bqConfig.b0 = 1 + (a * A);
+ bqConfig.b1 = -2 * c;
+ bqConfig.b2 = 1 - (a * A);
+ bqConfig.a0 = 1 + (a / A);
+ bqConfig.a1 = -2 * c;
+ bqConfig.a2 = 1 - (a / A);
- /* Can't forget to free the intermediary buffer. This is the buffer that's shared between JavaScript and C. */
- if (device.intermediaryBuffer !== undefined) {
- Module._free(device.intermediaryBuffer);
- device.intermediaryBuffer = undefined;
- device.intermediaryBufferView = undefined;
- device.intermediaryBufferSizeInBytes = undefined;
- }
+ bqConfig.format = pConfig->format;
+ bqConfig.channels = pConfig->channels;
- /* Make sure the device is untracked so the slot can be reused later. */
- miniaudio.untrack_device_by_index($0);
- }, deviceIndex, deviceType);
+ return bqConfig;
}
-void ma_device_uninit__webaudio(ma_device* pDevice)
+ma_result ma_peak2_init(const ma_peak2_config* pConfig, ma_peak2* pFilter)
{
- ma_assert(pDevice != NULL);
+ ma_result result;
+ ma_biquad_config bqConfig;
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture);
+ if (pFilter == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_playback, pDevice->webaudio.indexPlayback);
+ MA_ZERO_OBJECT(pFilter);
+
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pDevice->type == ma_device_type_duplex) {
- ma_pcm_rb_uninit(&pDevice->webaudio.duplexRB);
+ bqConfig = ma_peak2__get_biquad_config(pConfig);
+ result = ma_biquad_init(&bqConfig, &pFilter->bq);
+ if (result != MA_SUCCESS) {
+ return result;
}
+
+ return MA_SUCCESS;
}
-ma_result ma_device_init_by_type__webaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice)
+ma_result ma_peak2_reinit(const ma_peak2_config* pConfig, ma_peak2* pFilter)
{
- int deviceIndex;
- ma_uint32 internalBufferSizeInFrames;
-
- ma_assert(pContext != NULL);
- ma_assert(pConfig != NULL);
- ma_assert(deviceType != ma_device_type_duplex);
- ma_assert(pDevice != NULL);
-
- if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) {
- return MA_NO_DEVICE;
- }
+ ma_result result;
+ ma_biquad_config bqConfig;
- /* Try calculating an appropriate buffer size. */
- internalBufferSizeInFrames = pConfig->bufferSizeInFrames;
- if (internalBufferSizeInFrames == 0) {
- internalBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, pConfig->sampleRate);
+ if (pFilter == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- /* The size of the buffer must be a power of 2 and between 256 and 16384. */
- if (internalBufferSizeInFrames < 256) {
- internalBufferSizeInFrames = 256;
- } else if (internalBufferSizeInFrames > 16384) {
- internalBufferSizeInFrames = 16384;
- } else {
- internalBufferSizeInFrames = ma_next_power_of_2(internalBufferSizeInFrames);
+ bqConfig = ma_peak2__get_biquad_config(pConfig);
+ result = ma_biquad_reinit(&bqConfig, &pFilter->bq);
+ if (result != MA_SUCCESS) {
+ return result;
}
- /* We create the device on the JavaScript side and reference it using an index. We use this to make it possible to reference the device between JavaScript and C. */
- deviceIndex = EM_ASM_INT({
- var channels = $0;
- var sampleRate = $1;
- var bufferSize = $2; /* In PCM frames. */
- var isCapture = $3;
- var pDevice = $4;
-
- if (typeof(miniaudio) === 'undefined') {
- return -1; /* Context not initialized. */
- }
-
- var device = {};
-
- /* The AudioContext must be created in a suspended state. */
- device.webaudio = new (window.AudioContext || window.webkitAudioContext)({sampleRate:sampleRate});
- device.webaudio.suspend();
-
- /*
- We need an intermediary buffer which we use for JavaScript and C interop. This buffer stores interleaved f32 PCM data. Because it's passed between
- JavaScript and C it needs to be allocated and freed using Module._malloc() and Module._free().
- */
- device.intermediaryBufferSizeInBytes = channels * bufferSize * 4;
- device.intermediaryBuffer = Module._malloc(device.intermediaryBufferSizeInBytes);
- device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, device.intermediaryBuffer, device.intermediaryBufferSizeInBytes);
-
- /*
- Both playback and capture devices use a ScriptProcessorNode for performing per-sample operations.
-
- ScriptProcessorNode is actually deprecated so this is likely to be temporary. The way this works for playback is very simple. You just set a callback
- that's periodically fired, just like a normal audio callback function. But apparently this design is "flawed" and is now deprecated in favour of
- something called AudioWorklets which _forces_ you to load a _separate_ .js file at run time... nice... Hopefully ScriptProcessorNode will continue to
- work for years to come, but this may need to change to use AudioSourceBufferNode instead, which I think is what Emscripten uses for it's built-in SDL
- implementation. I'll be avoiding that insane AudioWorklet API like the plague...
-
- For capture it is a bit unintuitive. We use the ScriptProccessorNode _only_ to get the raw PCM data. It is connected to an AudioContext just like the
- playback case, however we just output silence to the AudioContext instead of passing any real data. It would make more sense to me to use the
- MediaRecorder API, but unfortunately you need to specify a MIME time (Opus, Vorbis, etc.) for the binary blob that's returned to the client, but I've
- been unable to figure out how to get this as raw PCM. The closes I can think is to use the MIME type for WAV files and just parse it, but I don't know
- how well this would work. Although ScriptProccessorNode is deprecated, in practice it seems to have pretty good browser support so I'm leaving it like
- this for now. If anything knows how I could get raw PCM data using the MediaRecorder API please let me know!
- */
- device.scriptNode = device.webaudio.createScriptProcessor(bufferSize, channels, channels);
-
- if (isCapture) {
- device.scriptNode.onaudioprocess = function(e) {
- if (device.intermediaryBuffer === undefined) {
- return; /* This means the device has been uninitialized. */
- }
-
- /* Make sure silence it output to the AudioContext destination. Not doing this will cause sound to come out of the speakers! */
- for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
- e.outputBuffer.getChannelData(iChannel).fill(0.0);
- }
+ return MA_SUCCESS;
+}
- /* There are some situations where we may want to send silence to the client. */
- var sendSilence = false;
- if (device.streamNode === undefined) {
- sendSilence = true;
- }
+static MA_INLINE void ma_peak2_process_pcm_frame_s16(ma_peak2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn)
+{
+ ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn);
+}
- /* Sanity check. This will never happen, right? */
- if (e.inputBuffer.numberOfChannels != channels) {
- console.log("Capture: Channel count mismatch. " + e.inputBufer.numberOfChannels + " != " + channels + ". Sending silence.");
- sendSilence = true;
- }
+static MA_INLINE void ma_peak2_process_pcm_frame_f32(ma_peak2* pFilter, float* pFrameOut, const float* pFrameIn)
+{
+ ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn);
+}
- /* This looped design guards against the situation where e.inputBuffer is a different size to the original buffer size. Should never happen in practice. */
- var totalFramesProcessed = 0;
- while (totalFramesProcessed < e.inputBuffer.length) {
- var framesRemaining = e.inputBuffer.length - totalFramesProcessed;
- var framesToProcess = framesRemaining;
- if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) {
- framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4);
- }
+ma_result ma_peak2_process_pcm_frames(ma_peak2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ if (pFilter == NULL) {
+ return MA_INVALID_ARGS;
+ }
- /* We need to do the reverse of the playback case. We need to interleave the input data and copy it into the intermediary buffer. Then we send it to the client. */
- if (sendSilence) {
- device.intermediaryBufferView.fill(0.0);
- } else {
- for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) {
- for (var iChannel = 0; iChannel < e.inputBuffer.numberOfChannels; ++iChannel) {
- device.intermediaryBufferView[iFrame*channels + iChannel] = e.inputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame];
- }
- }
- }
+ return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount);
+}
- /* Send data to the client from our intermediary buffer. */
- ccall("ma_device_process_pcm_frames_capture__webaudio", "undefined", ["number", "number", "number"], [pDevice, framesToProcess, device.intermediaryBuffer]);
+ma_uint32 ma_peak2_get_latency(ma_peak2* pFilter)
+{
+ if (pFilter == NULL) {
+ return 0;
+ }
- totalFramesProcessed += framesToProcess;
- }
- };
+ return ma_biquad_get_latency(&pFilter->bq);
+}
- navigator.mediaDevices.getUserMedia({audio:true, video:false})
- .then(function(stream) {
- device.streamNode = device.webaudio.createMediaStreamSource(stream);
- device.streamNode.connect(device.scriptNode);
- device.scriptNode.connect(device.webaudio.destination);
- })
- .catch(function(error) {
- /* I think this should output silence... */
- device.scriptNode.connect(device.webaudio.destination);
- });
- } else {
- device.scriptNode.onaudioprocess = function(e) {
- if (device.intermediaryBuffer === undefined) {
- return; /* This means the device has been uninitialized. */
- }
- var outputSilence = false;
+/**************************************************************************************************************************************************************
- /* Sanity check. This will never happen, right? */
- if (e.outputBuffer.numberOfChannels != channels) {
- console.log("Playback: Channel count mismatch. " + e.outputBufer.numberOfChannels + " != " + channels + ". Outputting silence.");
- outputSilence = true;
- return;
- }
+Low Shelf Filter
- /* This looped design guards against the situation where e.outputBuffer is a different size to the original buffer size. Should never happen in practice. */
- var totalFramesProcessed = 0;
- while (totalFramesProcessed < e.outputBuffer.length) {
- var framesRemaining = e.outputBuffer.length - totalFramesProcessed;
- var framesToProcess = framesRemaining;
- if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) {
- framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4);
- }
+**************************************************************************************************************************************************************/
+ma_loshelf2_config ma_loshelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency)
+{
+ ma_loshelf2_config config;
- /* Read data from the client into our intermediary buffer. */
- ccall("ma_device_process_pcm_frames_playback__webaudio", "undefined", ["number", "number", "number"], [pDevice, framesToProcess, device.intermediaryBuffer]);
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.gainDB = gainDB;
+ config.shelfSlope = shelfSlope;
+ config.frequency = frequency;
- /* At this point we'll have data in our intermediary buffer which we now need to deinterleave and copy over to the output buffers. */
- if (outputSilence) {
- for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
- e.outputBuffer.getChannelData(iChannel).fill(0.0);
- }
- } else {
- for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
- for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) {
- e.outputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame] = device.intermediaryBufferView[iFrame*channels + iChannel];
- }
- }
- }
+ return config;
+}
- totalFramesProcessed += framesToProcess;
- }
- };
- device.scriptNode.connect(device.webaudio.destination);
- }
+static MA_INLINE ma_biquad_config ma_loshelf2__get_biquad_config(const ma_loshelf2_config* pConfig)
+{
+ ma_biquad_config bqConfig;
+ double w;
+ double s;
+ double c;
+ double A;
+ double S;
+ double a;
+ double sqrtA;
- return miniaudio.track_device(device);
- }, (deviceType == ma_device_type_capture) ? pConfig->capture.channels : pConfig->playback.channels, pConfig->sampleRate, internalBufferSizeInFrames, deviceType == ma_device_type_capture, pDevice);
+ MA_ASSERT(pConfig != NULL);
- if (deviceIndex < 0) {
- return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
- }
+ w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate;
+ s = ma_sin(w);
+ c = ma_cos(w);
+ A = ma_pow(10, (pConfig->gainDB / 40));
+ S = pConfig->shelfSlope;
+ a = s/2 * ma_sqrt((A + 1/A) * (1/S - 1) + 2);
+ sqrtA = 2*ma_sqrt(A)*a;
- if (deviceType == ma_device_type_capture) {
- pDevice->webaudio.indexCapture = deviceIndex;
- pDevice->capture.internalFormat = ma_format_f32;
- pDevice->capture.internalChannels = pConfig->capture.channels;
- ma_get_standard_channel_map(ma_standard_channel_map_webaudio, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap);
- pDevice->capture.internalSampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);
- pDevice->capture.internalBufferSizeInFrames = internalBufferSizeInFrames;
- pDevice->capture.internalPeriods = 1;
- } else {
- pDevice->webaudio.indexPlayback = deviceIndex;
- pDevice->playback.internalFormat = ma_format_f32;
- pDevice->playback.internalChannels = pConfig->playback.channels;
- ma_get_standard_channel_map(ma_standard_channel_map_webaudio, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
- pDevice->playback.internalSampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);
- pDevice->playback.internalBufferSizeInFrames = internalBufferSizeInFrames;
- pDevice->playback.internalPeriods = 1;
- }
+ bqConfig.b0 = A * ((A + 1) - (A - 1)*c + sqrtA);
+ bqConfig.b1 = 2 * A * ((A - 1) - (A + 1)*c);
+ bqConfig.b2 = A * ((A + 1) - (A - 1)*c - sqrtA);
+ bqConfig.a0 = (A + 1) + (A - 1)*c + sqrtA;
+ bqConfig.a1 = -2 * ((A - 1) + (A + 1)*c);
+ bqConfig.a2 = (A + 1) + (A - 1)*c - sqrtA;
- return MA_SUCCESS;
+ bqConfig.format = pConfig->format;
+ bqConfig.channels = pConfig->channels;
+
+ return bqConfig;
}
-ma_result ma_device_init__webaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+ma_result ma_loshelf2_init(const ma_loshelf2_config* pConfig, ma_loshelf2* pFilter)
{
ma_result result;
+ ma_biquad_config bqConfig;
- /* No exclusive mode with Web Audio. */
- if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) ||
- ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) {
- return MA_SHARE_MODE_NOT_SUPPORTED;
+ if (pFilter == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
- result = ma_device_init_by_type__webaudio(pContext, pConfig, ma_device_type_capture, pDevice);
- if (result != MA_SUCCESS) {
- return result;
- }
- }
+ MA_ZERO_OBJECT(pFilter);
- if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
- result = ma_device_init_by_type__webaudio(pContext, pConfig, ma_device_type_playback, pDevice);
- if (result != MA_SUCCESS) {
- if (pConfig->deviceType == ma_device_type_duplex) {
- ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture);
- }
- return result;
- }
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- /*
- We need a ring buffer for moving data from the capture device to the playback device. The capture callback is the producer
- and the playback callback is the consumer. The buffer needs to be large enough to hold internalBufferSizeInFrames based on
- the external sample rate.
- */
- if (pConfig->deviceType == ma_device_type_duplex) {
- ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames) * 2;
- result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->webaudio.duplexRB);
- if (result != MA_SUCCESS) {
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture);
- }
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_playback, pDevice->webaudio.indexPlayback);
- }
- return result;
- }
+ bqConfig = ma_loshelf2__get_biquad_config(pConfig);
+ result = ma_biquad_init(&bqConfig, &pFilter->bq);
+ if (result != MA_SUCCESS) {
+ return result;
}
return MA_SUCCESS;
}
-ma_result ma_device_start__webaudio(ma_device* pDevice)
+ma_result ma_loshelf2_reinit(const ma_loshelf2_config* pConfig, ma_loshelf2* pFilter)
{
- ma_assert(pDevice != NULL);
+ ma_result result;
+ ma_biquad_config bqConfig;
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- EM_ASM({
- miniaudio.get_device_by_index($0).webaudio.resume();
- }, pDevice->webaudio.indexCapture);
+ if (pFilter == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- EM_ASM({
- miniaudio.get_device_by_index($0).webaudio.resume();
- }, pDevice->webaudio.indexPlayback);
+ bqConfig = ma_loshelf2__get_biquad_config(pConfig);
+ result = ma_biquad_reinit(&bqConfig, &pFilter->bq);
+ if (result != MA_SUCCESS) {
+ return result;
}
return MA_SUCCESS;
}
-ma_result ma_device_stop__webaudio(ma_device* pDevice)
+static MA_INLINE void ma_loshelf2_process_pcm_frame_s16(ma_loshelf2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn)
{
- ma_assert(pDevice != NULL);
+ ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn);
+}
- if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
- EM_ASM({
- miniaudio.get_device_by_index($0).webaudio.suspend();
- }, pDevice->webaudio.indexCapture);
+static MA_INLINE void ma_loshelf2_process_pcm_frame_f32(ma_loshelf2* pFilter, float* pFrameOut, const float* pFrameIn)
+{
+ ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn);
+}
+
+ma_result ma_loshelf2_process_pcm_frames(ma_loshelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ if (pFilter == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
- EM_ASM({
- miniaudio.get_device_by_index($0).webaudio.suspend();
- }, pDevice->webaudio.indexPlayback);
+ return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount);
+}
+
+ma_uint32 ma_loshelf2_get_latency(ma_loshelf2* pFilter)
+{
+ if (pFilter == NULL) {
+ return 0;
}
- ma_stop_proc onStop = pDevice->onStop;
- if (onStop) {
- onStop(pDevice);
+ return ma_biquad_get_latency(&pFilter->bq);
+}
+
+
+/**************************************************************************************************************************************************************
+
+High Shelf Filter
+
+**************************************************************************************************************************************************************/
+ma_hishelf2_config ma_hishelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency)
+{
+ ma_hishelf2_config config;
+
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.gainDB = gainDB;
+ config.shelfSlope = shelfSlope;
+ config.frequency = frequency;
+
+ return config;
+}
+
+
+static MA_INLINE ma_biquad_config ma_hishelf2__get_biquad_config(const ma_hishelf2_config* pConfig)
+{
+ ma_biquad_config bqConfig;
+ double w;
+ double s;
+ double c;
+ double A;
+ double S;
+ double a;
+ double sqrtA;
+
+ MA_ASSERT(pConfig != NULL);
+
+ w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate;
+ s = ma_sin(w);
+ c = ma_cos(w);
+ A = ma_pow(10, (pConfig->gainDB / 40));
+ S = pConfig->shelfSlope;
+ a = s/2 * ma_sqrt((A + 1/A) * (1/S - 1) + 2);
+ sqrtA = 2*ma_sqrt(A)*a;
+
+ bqConfig.b0 = A * ((A + 1) + (A - 1)*c + sqrtA);
+ bqConfig.b1 = -2 * A * ((A - 1) + (A + 1)*c);
+ bqConfig.b2 = A * ((A + 1) + (A - 1)*c - sqrtA);
+ bqConfig.a0 = (A + 1) - (A - 1)*c + sqrtA;
+ bqConfig.a1 = 2 * ((A - 1) - (A + 1)*c);
+ bqConfig.a2 = (A + 1) - (A - 1)*c - sqrtA;
+
+ bqConfig.format = pConfig->format;
+ bqConfig.channels = pConfig->channels;
+
+ return bqConfig;
+}
+
+ma_result ma_hishelf2_init(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter)
+{
+ ma_result result;
+ ma_biquad_config bqConfig;
+
+ if (pFilter == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ MA_ZERO_OBJECT(pFilter);
+
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ bqConfig = ma_hishelf2__get_biquad_config(pConfig);
+ result = ma_biquad_init(&bqConfig, &pFilter->bq);
+ if (result != MA_SUCCESS) {
+ return result;
}
return MA_SUCCESS;
}
-ma_result ma_context_uninit__webaudio(ma_context* pContext)
+ma_result ma_hishelf2_reinit(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter)
{
- ma_assert(pContext != NULL);
- ma_assert(pContext->backend == ma_backend_webaudio);
+ ma_result result;
+ ma_biquad_config bqConfig;
- /* Nothing needs to be done here. */
- (void)pContext;
+ if (pFilter == NULL || pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ bqConfig = ma_hishelf2__get_biquad_config(pConfig);
+ result = ma_biquad_reinit(&bqConfig, &pFilter->bq);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
return MA_SUCCESS;
}
-ma_result ma_context_init__webaudio(const ma_context_config* pConfig, ma_context* pContext)
+static MA_INLINE void ma_hishelf2_process_pcm_frame_s16(ma_hishelf2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn)
{
- int resultFromJS;
+ ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn);
+}
- ma_assert(pContext != NULL);
+static MA_INLINE void ma_hishelf2_process_pcm_frame_f32(ma_hishelf2* pFilter, float* pFrameOut, const float* pFrameIn)
+{
+ ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn);
+}
- /* Here is where our global JavaScript object is initialized. */
- resultFromJS = EM_ASM_INT({
- if ((window.AudioContext || window.webkitAudioContext) === undefined) {
- return 0; /* Web Audio not supported. */
- }
+ma_result ma_hishelf2_process_pcm_frames(ma_hishelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ if (pFilter == NULL) {
+ return MA_INVALID_ARGS;
+ }
- if (typeof(miniaudio) === 'undefined') {
- miniaudio = {};
- miniaudio.devices = []; /* Device cache for mapping devices to indexes for JavaScript/C interop. */
-
- miniaudio.track_device = function(device) {
- /* Try inserting into a free slot first. */
- for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) {
- if (miniaudio.devices[iDevice] == null) {
- miniaudio.devices[iDevice] = device;
- return iDevice;
- }
- }
-
- /* Getting here means there is no empty slots in the array so we just push to the end. */
- miniaudio.devices.push(device);
- return miniaudio.devices.length - 1;
- };
-
- miniaudio.untrack_device_by_index = function(deviceIndex) {
- /* We just set the device's slot to null. The slot will get reused in the next call to ma_track_device. */
- miniaudio.devices[deviceIndex] = null;
-
- /* Trim the array if possible. */
- while (miniaudio.devices.length > 0) {
- if (miniaudio.devices[miniaudio.devices.length-1] == null) {
- miniaudio.devices.pop();
- } else {
- break;
- }
- }
- };
-
- miniaudio.untrack_device = function(device) {
- for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) {
- if (miniaudio.devices[iDevice] == device) {
- return miniaudio.untrack_device_by_index(iDevice);
- }
- }
- };
-
- miniaudio.get_device_by_index = function(deviceIndex) {
- return miniaudio.devices[deviceIndex];
- };
- }
-
- return 1;
- }, 0); /* Must pass in a dummy argument for C99 compatibility. */
+ return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount);
+}
- if (resultFromJS != 1) {
- return MA_FAILED_TO_INIT_BACKEND;
+ma_uint32 ma_hishelf2_get_latency(ma_hishelf2* pFilter)
+{
+ if (pFilter == NULL) {
+ return 0;
}
+ return ma_biquad_get_latency(&pFilter->bq);
+}
- pContext->isBackendAsynchronous = MA_TRUE;
- pContext->onUninit = ma_context_uninit__webaudio;
- pContext->onDeviceIDEqual = ma_context_is_device_id_equal__webaudio;
- pContext->onEnumDevices = ma_context_enumerate_devices__webaudio;
- pContext->onGetDeviceInfo = ma_context_get_device_info__webaudio;
- pContext->onDeviceInit = ma_device_init__webaudio;
- pContext->onDeviceUninit = ma_device_uninit__webaudio;
- pContext->onDeviceStart = ma_device_start__webaudio;
- pContext->onDeviceStop = ma_device_stop__webaudio;
- (void)pConfig; /* Unused. */
- return MA_SUCCESS;
-}
-#endif /* Web Audio */
+/**************************************************************************************************************************************************************
+
+Resampling
+**************************************************************************************************************************************************************/
+ma_linear_resampler_config ma_linear_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut)
+{
+ ma_linear_resampler_config config;
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRateIn = sampleRateIn;
+ config.sampleRateOut = sampleRateOut;
+ config.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER);
+ config.lpfNyquistFactor = 1;
+ return config;
+}
-ma_bool32 ma__is_channel_map_valid(const ma_channel* channelMap, ma_uint32 channels)
+static ma_result ma_linear_resampler_set_rate_internal(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_bool32 isResamplerAlreadyInitialized)
{
- /* A blank channel map should be allowed, in which case it should use an appropriate default which will depend on context. */
- if (channelMap[0] != MA_CHANNEL_NONE) {
- ma_uint32 iChannel;
+ ma_uint32 gcf;
- if (channels == 0) {
- return MA_FALSE; /* No channels. */
+ if (pResampler == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ if (sampleRateIn == 0 || sampleRateOut == 0) {
+ return MA_INVALID_ARGS;
+ }
+
+ /* Simplify the sample rate. */
+ gcf = ma_gcf_u32(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut);
+ pResampler->config.sampleRateIn /= gcf;
+ pResampler->config.sampleRateOut /= gcf;
+
+ if (pResampler->config.lpfOrder > 0) {
+ ma_result result;
+ ma_uint32 lpfSampleRate;
+ double lpfCutoffFrequency;
+ ma_lpf_config lpfConfig;
+
+ if (pResampler->config.lpfOrder > MA_MAX_FILTER_ORDER) {
+ return MA_INVALID_ARGS;
}
- /* A channel cannot be present in the channel map more than once. */
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- ma_uint32 jChannel;
- for (jChannel = iChannel + 1; jChannel < channels; ++jChannel) {
- if (channelMap[iChannel] == channelMap[jChannel]) {
- return MA_FALSE;
- }
- }
+ lpfSampleRate = (ma_uint32)(ma_max(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut));
+ lpfCutoffFrequency = ( double)(ma_min(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut) * 0.5 * pResampler->config.lpfNyquistFactor);
+
+ lpfConfig = ma_lpf_config_init(pResampler->config.format, pResampler->config.channels, lpfSampleRate, lpfCutoffFrequency, pResampler->config.lpfOrder);
+
+    /*
+    If the resampler is already initialized we don't want to do a fresh initialization of the low-pass filter because it will result in the cached frames
+    getting cleared. Instead we re-initialize the filter which will maintain any cached frames.
+    */
+ if (isResamplerAlreadyInitialized) {
+ result = ma_lpf_reinit(&lpfConfig, &pResampler->lpf);
+ } else {
+ result = ma_lpf_init(&lpfConfig, &pResampler->lpf);
+ }
+
+ if (result != MA_SUCCESS) {
+ return result;
}
}
- return MA_TRUE;
-}
+ pResampler->inAdvanceInt = pResampler->config.sampleRateIn / pResampler->config.sampleRateOut;
+ pResampler->inAdvanceFrac = pResampler->config.sampleRateIn % pResampler->config.sampleRateOut;
+
+ /* Make sure the fractional part is less than the output sample rate. */
+ pResampler->inTimeInt += pResampler->inTimeFrac / pResampler->config.sampleRateOut;
+ pResampler->inTimeFrac = pResampler->inTimeFrac % pResampler->config.sampleRateOut;
+ return MA_SUCCESS;
+}
-void ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType)
+ma_result ma_linear_resampler_init(const ma_linear_resampler_config* pConfig, ma_linear_resampler* pResampler)
{
- ma_assert(pDevice != NULL);
+ ma_result result;
- if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) {
- if (pDevice->capture.usingDefaultFormat) {
- pDevice->capture.format = pDevice->capture.internalFormat;
- }
- if (pDevice->capture.usingDefaultChannels) {
- pDevice->capture.channels = pDevice->capture.internalChannels;
- }
- if (pDevice->capture.usingDefaultChannelMap) {
- if (pDevice->capture.internalChannels == pDevice->capture.channels) {
- ma_channel_map_copy(pDevice->capture.channelMap, pDevice->capture.internalChannelMap, pDevice->capture.channels);
- } else {
- ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->capture.channels, pDevice->capture.channelMap);
- }
- }
+ if (pResampler == NULL) {
+ return MA_INVALID_ARGS;
}
- if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
- if (pDevice->playback.usingDefaultFormat) {
- pDevice->playback.format = pDevice->playback.internalFormat;
- }
- if (pDevice->playback.usingDefaultChannels) {
- pDevice->playback.channels = pDevice->playback.internalChannels;
- }
- if (pDevice->playback.usingDefaultChannelMap) {
- if (pDevice->playback.internalChannels == pDevice->playback.channels) {
- ma_channel_map_copy(pDevice->playback.channelMap, pDevice->playback.internalChannelMap, pDevice->playback.channels);
- } else {
- ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->playback.channels, pDevice->playback.channelMap);
- }
- }
- }
+ MA_ZERO_OBJECT(pResampler);
- if (pDevice->usingDefaultSampleRate) {
- if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) {
- pDevice->sampleRate = pDevice->capture.internalSampleRate;
- } else {
- pDevice->sampleRate = pDevice->playback.internalSampleRate;
- }
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
}
- /* PCM converters. */
- if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) {
- /* Converting from internal device format to public format. */
- ma_pcm_converter_config converterConfig = ma_pcm_converter_config_init_new();
- converterConfig.neverConsumeEndOfInput = MA_TRUE;
- converterConfig.pUserData = pDevice;
- converterConfig.formatIn = pDevice->capture.internalFormat;
- converterConfig.channelsIn = pDevice->capture.internalChannels;
- converterConfig.sampleRateIn = pDevice->capture.internalSampleRate;
- ma_channel_map_copy(converterConfig.channelMapIn, pDevice->capture.internalChannelMap, pDevice->capture.internalChannels);
- converterConfig.formatOut = pDevice->capture.format;
- converterConfig.channelsOut = pDevice->capture.channels;
- converterConfig.sampleRateOut = pDevice->sampleRate;
- ma_channel_map_copy(converterConfig.channelMapOut, pDevice->capture.channelMap, pDevice->capture.channels);
- converterConfig.onRead = ma_device__pcm_converter__on_read_from_buffer_capture;
- ma_pcm_converter_init(&converterConfig, &pDevice->capture.converter);
+ pResampler->config = *pConfig;
+
+ /* Setting the rate will set up the filter and time advances for us. */
+ result = ma_linear_resampler_set_rate_internal(pResampler, pConfig->sampleRateIn, pConfig->sampleRateOut, /* isResamplerAlreadyInitialized = */ MA_FALSE);
+ if (result != MA_SUCCESS) {
+ return result;
}
- if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
- /* Converting from public format to device format. */
- ma_pcm_converter_config converterConfig = ma_pcm_converter_config_init_new();
- converterConfig.neverConsumeEndOfInput = MA_TRUE;
- converterConfig.pUserData = pDevice;
- converterConfig.formatIn = pDevice->playback.format;
- converterConfig.channelsIn = pDevice->playback.channels;
- converterConfig.sampleRateIn = pDevice->sampleRate;
- ma_channel_map_copy(converterConfig.channelMapIn, pDevice->playback.channelMap, pDevice->playback.channels);
- converterConfig.formatOut = pDevice->playback.internalFormat;
- converterConfig.channelsOut = pDevice->playback.internalChannels;
- converterConfig.sampleRateOut = pDevice->playback.internalSampleRate;
- ma_channel_map_copy(converterConfig.channelMapOut, pDevice->playback.internalChannelMap, pDevice->playback.internalChannels);
- if (deviceType == ma_device_type_playback) {
- if (pDevice->type == ma_device_type_playback) {
- converterConfig.onRead = ma_device__on_read_from_client;
- } else {
- converterConfig.onRead = ma_device__pcm_converter__on_read_from_buffer_playback;
- }
- } else {
- converterConfig.onRead = ma_device__pcm_converter__on_read_from_buffer_playback;
- }
- ma_pcm_converter_init(&converterConfig, &pDevice->playback.converter);
+ pResampler->inTimeInt = 1; /* Set this to one to force an input sample to always be loaded for the first output frame. */
+ pResampler->inTimeFrac = 0;
+
+ return MA_SUCCESS;
+}
+
+void ma_linear_resampler_uninit(ma_linear_resampler* pResampler)
+{
+ if (pResampler == NULL) {
+ return;
}
}
+static MA_INLINE ma_int16 ma_linear_resampler_mix_s16(ma_int16 x, ma_int16 y, ma_int32 a, const ma_int32 shift)
+{
+    ma_int32 b;
+    ma_int32 c;
+    ma_int32 r;
+
+    MA_ASSERT(a <= (1<<shift));
+
+    b = x * ((1<<shift) - a);
+    c = y * a;
+    r = b + c;
+
+    return (ma_int16)(r >> shift);
+}
-ma_thread_result MA_THREADCALL ma_worker_thread(void* pData)
+static void ma_linear_resampler_interpolate_frame_s16(ma_linear_resampler* pResampler, ma_int16* pFrameOut)
{
- ma_device* pDevice = (ma_device*)pData;
- ma_assert(pDevice != NULL);
+ ma_uint32 c;
+ ma_uint32 a;
+ const ma_uint32 shift = 12;
-#ifdef MA_WIN32
- ma_CoInitializeEx(pDevice->pContext, NULL, MA_COINIT_VALUE);
-#endif
+ MA_ASSERT(pResampler != NULL);
+ MA_ASSERT(pFrameOut != NULL);
- /*
- When the device is being initialized it's initial state is set to MA_STATE_UNINITIALIZED. Before returning from
- ma_device_init(), the state needs to be set to something valid. In miniaudio the device's default state immediately
- after initialization is stopped, so therefore we need to mark the device as such. miniaudio will wait on the worker
- thread to signal an event to know when the worker thread is ready for action.
- */
- ma_device__set_state(pDevice, MA_STATE_STOPPED);
- ma_event_signal(&pDevice->stopEvent);
+ a = (pResampler->inTimeFrac << shift) / pResampler->config.sampleRateOut;
- for (;;) { /* <-- This loop just keeps the thread alive. The main audio loop is inside. */
- ma_stop_proc onStop;
+ for (c = 0; c < pResampler->config.channels; c += 1) {
+ ma_int16 s = ma_linear_resampler_mix_s16(pResampler->x0.s16[c], pResampler->x1.s16[c], a, shift);
+ pFrameOut[c] = s;
+ }
+}
- /* We wait on an event to know when something has requested that the device be started and the main loop entered. */
- ma_event_wait(&pDevice->wakeupEvent);
- /* Default result code. */
- pDevice->workResult = MA_SUCCESS;
+static void ma_linear_resampler_interpolate_frame_f32(ma_linear_resampler* pResampler, float* pFrameOut)
+{
+ ma_uint32 c;
+ float a;
- /* If the reason for the wake up is that we are terminating, just break from the loop. */
- if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) {
- break;
- }
+ MA_ASSERT(pResampler != NULL);
+ MA_ASSERT(pFrameOut != NULL);
- /*
- Getting to this point means the device is wanting to get started. The function that has requested that the device
- be started will be waiting on an event (pDevice->startEvent) which means we need to make sure we signal the event
- in both the success and error case. It's important that the state of the device is set _before_ signaling the event.
- */
- ma_assert(ma_device__get_state(pDevice) == MA_STATE_STARTING);
+ a = (float)pResampler->inTimeFrac / pResampler->config.sampleRateOut;
- /* Make sure the state is set appropriately. */
- ma_device__set_state(pDevice, MA_STATE_STARTED);
- ma_event_signal(&pDevice->startEvent);
+ for (c = 0; c < pResampler->config.channels; c += 1) {
+ float s = ma_mix_f32_fast(pResampler->x0.f32[c], pResampler->x1.f32[c], a);
+ pFrameOut[c] = s;
+ }
+}
- if (pDevice->pContext->onDeviceMainLoop != NULL) {
- pDevice->pContext->onDeviceMainLoop(pDevice);
- } else {
- ma_uint32 periodSizeInFrames;
+static ma_result ma_linear_resampler_process_pcm_frames_s16_downsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
+{
+ const ma_int16* pFramesInS16;
+ /* */ ma_int16* pFramesOutS16;
+ ma_uint64 frameCountIn;
+ ma_uint64 frameCountOut;
+ ma_uint64 framesProcessedIn;
+ ma_uint64 framesProcessedOut;
- /* When a device is using miniaudio's generic worker thread they must implement onDeviceRead or onDeviceWrite, depending on the device type. */
- ma_assert(
- (pDevice->type == ma_device_type_playback && pDevice->pContext->onDeviceWrite != NULL) ||
- (pDevice->type == ma_device_type_capture && pDevice->pContext->onDeviceRead != NULL) ||
- (pDevice->type == ma_device_type_duplex && pDevice->pContext->onDeviceWrite != NULL && pDevice->pContext->onDeviceRead != NULL)
- );
+ MA_ASSERT(pResampler != NULL);
+ MA_ASSERT(pFrameCountIn != NULL);
+ MA_ASSERT(pFrameCountOut != NULL);
+
+ pFramesInS16 = (const ma_int16*)pFramesIn;
+ pFramesOutS16 = ( ma_int16*)pFramesOut;
+ frameCountIn = *pFrameCountIn;
+ frameCountOut = *pFrameCountOut;
+ framesProcessedIn = 0;
+ framesProcessedOut = 0;
+
+ for (;;) {
+ if (framesProcessedOut >= frameCountOut) {
+ break;
+ }
+
+ /* Before interpolating we need to load the buffers. When doing this we need to ensure we run every input sample through the filter. */
+ while (pResampler->inTimeInt > 0 && frameCountIn > 0) {
+ ma_uint32 iChannel;
- if (pDevice->type == ma_device_type_capture) {
- ma_assert(pDevice->capture.internalBufferSizeInFrames >= pDevice->capture.internalPeriods);
- periodSizeInFrames = pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods;
- } else if (pDevice->type == ma_device_type_playback) {
- ma_assert(pDevice->playback.internalBufferSizeInFrames >= pDevice->playback.internalPeriods);
- periodSizeInFrames = pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods;
+ if (pFramesInS16 != NULL) {
+ for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) {
+ pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel];
+ pResampler->x1.s16[iChannel] = pFramesInS16[iChannel];
+ }
+ pFramesInS16 += pResampler->config.channels;
} else {
- ma_assert(pDevice->capture.internalBufferSizeInFrames >= pDevice->capture.internalPeriods);
- ma_assert(pDevice->playback.internalBufferSizeInFrames >= pDevice->playback.internalPeriods);
- periodSizeInFrames = ma_min(
- pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods,
- pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods
- );
+ for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) {
+ pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel];
+ pResampler->x1.s16[iChannel] = 0;
+ }
}
- /*
- With the blocking API, the device is started automatically in read()/write(). All we need to do is enter the loop and just keep reading
- or writing based on the period size.
- */
-
- /* Main Loop */
- ma_assert(periodSizeInFrames >= 1);
- while (ma_device__get_state(pDevice) == MA_STATE_STARTED) {
- ma_result result = MA_SUCCESS;
- ma_uint32 totalFramesProcessed = 0;
-
- if (pDevice->type == ma_device_type_duplex) {
- /* The process is device_read -> convert -> callback -> convert -> device_write. */
- ma_uint8 captureDeviceData[4096];
- ma_uint32 captureDeviceDataCapInFrames = sizeof(captureDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
-
- while (totalFramesProcessed < periodSizeInFrames) {
- ma_device_callback_proc onData;
- ma_uint32 framesRemaining = periodSizeInFrames - totalFramesProcessed;
- ma_uint32 framesToProcess = framesRemaining;
- if (framesToProcess > captureDeviceDataCapInFrames) {
- framesToProcess = captureDeviceDataCapInFrames;
- }
+ /* Filter. */
+ ma_lpf_process_pcm_frame_s16(&pResampler->lpf, pResampler->x1.s16, pResampler->x1.s16);
- result = pDevice->pContext->onDeviceRead(pDevice, captureDeviceData, framesToProcess);
- if (result != MA_SUCCESS) {
- break;
- }
-
- onData = pDevice->onData;
- if (onData != NULL) {
- pDevice->capture._dspFrameCount = framesToProcess;
- pDevice->capture._dspFrames = captureDeviceData;
+ frameCountIn -= 1;
+ framesProcessedIn += 1;
+ pResampler->inTimeInt -= 1;
+ }
- /* We need to process every input frame. */
- for (;;) {
- ma_uint8 capturedData[4096]; /* In capture.format/channels format */
- ma_uint8 playbackData[4096]; /* In playback.format/channels format */
+ if (pResampler->inTimeInt > 0) {
+ break; /* Ran out of input data. */
+ }
- ma_uint32 capturedDataCapInFrames = sizeof(capturedData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
- ma_uint32 playbackDataCapInFrames = sizeof(playbackData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+ /* Getting here means the frames have been loaded and filtered and we can generate the next output frame. */
+ if (pFramesOutS16 != NULL) {
+ MA_ASSERT(pResampler->inTimeInt == 0);
+ ma_linear_resampler_interpolate_frame_s16(pResampler, pFramesOutS16);
- ma_uint32 capturedFramesToTryProcessing = ma_min(capturedDataCapInFrames, playbackDataCapInFrames);
- ma_uint32 capturedFramesToProcess = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, capturedData, capturedFramesToTryProcessing);
- if (capturedFramesToProcess == 0) {
- break; /* Don't fire the data callback with zero frames. */
- }
-
- onData(pDevice, playbackData, capturedData, capturedFramesToProcess);
+ pFramesOutS16 += pResampler->config.channels;
+ }
- /* At this point the playbackData buffer should be holding data that needs to be written to the device. */
- pDevice->playback._dspFrameCount = capturedFramesToProcess;
- pDevice->playback._dspFrames = playbackData;
- for (;;) {
- ma_uint8 playbackDeviceData[4096];
+ framesProcessedOut += 1;
- ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- ma_uint32 playbackDeviceFramesCount = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, playbackDeviceData, playbackDeviceDataCapInFrames);
- if (playbackDeviceFramesCount == 0) {
- break;
- }
+ /* Advance time forward. */
+ pResampler->inTimeInt += pResampler->inAdvanceInt;
+ pResampler->inTimeFrac += pResampler->inAdvanceFrac;
+ if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) {
+ pResampler->inTimeFrac -= pResampler->config.sampleRateOut;
+ pResampler->inTimeInt += 1;
+ }
+ }
- result = pDevice->pContext->onDeviceWrite(pDevice, playbackDeviceData, playbackDeviceFramesCount);
- if (result != MA_SUCCESS) {
- break;
- }
+ *pFrameCountIn = framesProcessedIn;
+ *pFrameCountOut = framesProcessedOut;
- if (playbackDeviceFramesCount < playbackDeviceDataCapInFrames) {
- break;
- }
- }
+ return MA_SUCCESS;
+}
- if (capturedFramesToProcess < capturedFramesToTryProcessing) {
- break;
- }
+static ma_result ma_linear_resampler_process_pcm_frames_s16_upsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
+{
+ const ma_int16* pFramesInS16;
+ /* */ ma_int16* pFramesOutS16;
+ ma_uint64 frameCountIn;
+ ma_uint64 frameCountOut;
+ ma_uint64 framesProcessedIn;
+ ma_uint64 framesProcessedOut;
- /* In case an error happened from onDeviceWrite()... */
- if (result != MA_SUCCESS) {
- break;
- }
- }
- }
+ MA_ASSERT(pResampler != NULL);
+ MA_ASSERT(pFrameCountIn != NULL);
+ MA_ASSERT(pFrameCountOut != NULL);
- totalFramesProcessed += framesToProcess;
- }
- } else {
- ma_uint8 buffer[4096];
- ma_uint32 bufferSizeInFrames;
- if (pDevice->type == ma_device_type_capture) {
- bufferSizeInFrames = sizeof(buffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
- } else {
- bufferSizeInFrames = sizeof(buffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
- }
+ pFramesInS16 = (const ma_int16*)pFramesIn;
+ pFramesOutS16 = ( ma_int16*)pFramesOut;
+ frameCountIn = *pFrameCountIn;
+ frameCountOut = *pFrameCountOut;
+ framesProcessedIn = 0;
+ framesProcessedOut = 0;
- while (totalFramesProcessed < periodSizeInFrames) {
- ma_uint32 framesRemaining = periodSizeInFrames - totalFramesProcessed;
- ma_uint32 framesToProcess = framesRemaining;
- if (framesToProcess > bufferSizeInFrames) {
- framesToProcess = bufferSizeInFrames;
- }
+ for (;;) {
+ if (framesProcessedOut >= frameCountOut) {
+ break;
+ }
- if (pDevice->type == ma_device_type_playback) {
- ma_device__read_frames_from_client(pDevice, framesToProcess, buffer);
- result = pDevice->pContext->onDeviceWrite(pDevice, buffer, framesToProcess);
- } else {
- result = pDevice->pContext->onDeviceRead(pDevice, buffer, framesToProcess);
- ma_device__send_frames_to_client(pDevice, framesToProcess, buffer);
- }
+ /* Before interpolating we need to load the buffers. */
+ while (pResampler->inTimeInt > 0 && frameCountIn > 0) {
+ ma_uint32 iChannel;
- totalFramesProcessed += framesToProcess;
- }
+ if (pFramesInS16 != NULL) {
+ for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) {
+ pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel];
+ pResampler->x1.s16[iChannel] = pFramesInS16[iChannel];
}
-
- /* Get out of the loop if read()/write() returned an error. It probably means the device has been stopped. */
- if (result != MA_SUCCESS) {
- break;
+ pFramesInS16 += pResampler->config.channels;
+ } else {
+ for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) {
+ pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel];
+ pResampler->x1.s16[iChannel] = 0;
}
}
+
+ frameCountIn -= 1;
+ framesProcessedIn += 1;
+ pResampler->inTimeInt -= 1;
}
- /*
- Getting here means we have broken from the main loop which happens the application has requested that device be stopped. Note that this
- may have actually already happened above if the device was lost and miniaudio has attempted to re-initialize the device. In this case we
- don't want to be doing this a second time.
- */
- if (ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED) {
- if (pDevice->pContext->onDeviceStop) {
- pDevice->pContext->onDeviceStop(pDevice);
- }
+ if (pResampler->inTimeInt > 0) {
+ break; /* Ran out of input data. */
}
- /* After the device has stopped, make sure an event is posted. */
- onStop = pDevice->onStop;
- if (onStop) {
- onStop(pDevice);
+ /* Getting here means the frames have been loaded and we can generate the next output frame. */
+ if (pFramesOutS16 != NULL) {
+ MA_ASSERT(pResampler->inTimeInt == 0);
+ ma_linear_resampler_interpolate_frame_s16(pResampler, pFramesOutS16);
+
+ /* Filter. */
+ ma_lpf_process_pcm_frame_s16(&pResampler->lpf, pFramesOutS16, pFramesOutS16);
+
+ pFramesOutS16 += pResampler->config.channels;
}
- /*
- A function somewhere is waiting for the device to have stopped for real so we need to signal an event to allow it to continue. Note that
- it's possible that the device has been uninitialized which means we need to _not_ change the status to stopped. We cannot go from an
- uninitialized state to stopped state.
- */
- if (ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED) {
- ma_device__set_state(pDevice, MA_STATE_STOPPED);
- ma_event_signal(&pDevice->stopEvent);
+ framesProcessedOut += 1;
+
+ /* Advance time forward. */
+ pResampler->inTimeInt += pResampler->inAdvanceInt;
+ pResampler->inTimeFrac += pResampler->inAdvanceFrac;
+ if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) {
+ pResampler->inTimeFrac -= pResampler->config.sampleRateOut;
+ pResampler->inTimeInt += 1;
}
}
- /* Make sure we aren't continuously waiting on a stop event. */
- ma_event_signal(&pDevice->stopEvent); /* <-- Is this still needed? */
-
-#ifdef MA_WIN32
- ma_CoUninitialize(pDevice->pContext);
-#endif
+ *pFrameCountIn = framesProcessedIn;
+ *pFrameCountOut = framesProcessedOut;
- return (ma_thread_result)0;
+ return MA_SUCCESS;
}
-
-/* Helper for determining whether or not the given device is initialized. */
-ma_bool32 ma_device__is_initialized(ma_device* pDevice)
+static ma_result ma_linear_resampler_process_pcm_frames_s16(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- if (pDevice == NULL) {
- return MA_FALSE;
- }
+ MA_ASSERT(pResampler != NULL);
- return ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED;
+ if (pResampler->config.sampleRateIn > pResampler->config.sampleRateOut) {
+ return ma_linear_resampler_process_pcm_frames_s16_downsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ } else {
+ return ma_linear_resampler_process_pcm_frames_s16_upsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ }
}
-#ifdef MA_WIN32
-ma_result ma_context_uninit_backend_apis__win32(ma_context* pContext)
+static ma_result ma_linear_resampler_process_pcm_frames_f32_downsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- ma_CoUninitialize(pContext);
- ma_dlclose(pContext, pContext->win32.hUser32DLL);
- ma_dlclose(pContext, pContext->win32.hOle32DLL);
- ma_dlclose(pContext, pContext->win32.hAdvapi32DLL);
+ const float* pFramesInF32;
+ /* */ float* pFramesOutF32;
+ ma_uint64 frameCountIn;
+ ma_uint64 frameCountOut;
+ ma_uint64 framesProcessedIn;
+ ma_uint64 framesProcessedOut;
- return MA_SUCCESS;
-}
+ MA_ASSERT(pResampler != NULL);
+ MA_ASSERT(pFrameCountIn != NULL);
+ MA_ASSERT(pFrameCountOut != NULL);
-ma_result ma_context_init_backend_apis__win32(ma_context* pContext)
-{
-#ifdef MA_WIN32_DESKTOP
- /* Ole32.dll */
- pContext->win32.hOle32DLL = ma_dlopen(pContext, "ole32.dll");
- if (pContext->win32.hOle32DLL == NULL) {
- return MA_FAILED_TO_INIT_BACKEND;
- }
+ pFramesInF32 = (const float*)pFramesIn;
+ pFramesOutF32 = ( float*)pFramesOut;
+ frameCountIn = *pFrameCountIn;
+ frameCountOut = *pFrameCountOut;
+ framesProcessedIn = 0;
+ framesProcessedOut = 0;
- pContext->win32.CoInitializeEx = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoInitializeEx");
- pContext->win32.CoUninitialize = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoUninitialize");
- pContext->win32.CoCreateInstance = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoCreateInstance");
- pContext->win32.CoTaskMemFree = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoTaskMemFree");
- pContext->win32.PropVariantClear = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "PropVariantClear");
- pContext->win32.StringFromGUID2 = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "StringFromGUID2");
+ for (;;) {
+ if (framesProcessedOut >= frameCountOut) {
+ break;
+ }
+ /* Before interpolating we need to load the buffers. When doing this we need to ensure we run every input sample through the filter. */
+ while (pResampler->inTimeInt > 0 && frameCountIn > 0) {
+ ma_uint32 iChannel;
- /* User32.dll */
- pContext->win32.hUser32DLL = ma_dlopen(pContext, "user32.dll");
- if (pContext->win32.hUser32DLL == NULL) {
- return MA_FAILED_TO_INIT_BACKEND;
- }
+ if (pFramesInF32 != NULL) {
+ for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) {
+ pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel];
+ pResampler->x1.f32[iChannel] = pFramesInF32[iChannel];
+ }
+ pFramesInF32 += pResampler->config.channels;
+ } else {
+ for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) {
+ pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel];
+ pResampler->x1.f32[iChannel] = 0;
+ }
+ }
+
+ /* Filter. */
+ ma_lpf_process_pcm_frame_f32(&pResampler->lpf, pResampler->x1.f32, pResampler->x1.f32);
+
+ frameCountIn -= 1;
+ framesProcessedIn += 1;
+ pResampler->inTimeInt -= 1;
+ }
+
+ if (pResampler->inTimeInt > 0) {
+ break; /* Ran out of input data. */
+ }
- pContext->win32.GetForegroundWindow = (ma_proc)ma_dlsym(pContext, pContext->win32.hUser32DLL, "GetForegroundWindow");
- pContext->win32.GetDesktopWindow = (ma_proc)ma_dlsym(pContext, pContext->win32.hUser32DLL, "GetDesktopWindow");
+ /* Getting here means the frames have been loaded and filtered and we can generate the next output frame. */
+ if (pFramesOutF32 != NULL) {
+ MA_ASSERT(pResampler->inTimeInt == 0);
+ ma_linear_resampler_interpolate_frame_f32(pResampler, pFramesOutF32);
+ pFramesOutF32 += pResampler->config.channels;
+ }
- /* Advapi32.dll */
- pContext->win32.hAdvapi32DLL = ma_dlopen(pContext, "advapi32.dll");
- if (pContext->win32.hAdvapi32DLL == NULL) {
- return MA_FAILED_TO_INIT_BACKEND;
+ framesProcessedOut += 1;
+
+ /* Advance time forward. */
+ pResampler->inTimeInt += pResampler->inAdvanceInt;
+ pResampler->inTimeFrac += pResampler->inAdvanceFrac;
+ if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) {
+ pResampler->inTimeFrac -= pResampler->config.sampleRateOut;
+ pResampler->inTimeInt += 1;
+ }
}
- pContext->win32.RegOpenKeyExA = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegOpenKeyExA");
- pContext->win32.RegCloseKey = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegCloseKey");
- pContext->win32.RegQueryValueExA = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegQueryValueExA");
-#endif
+ *pFrameCountIn = framesProcessedIn;
+ *pFrameCountOut = framesProcessedOut;
- ma_CoInitializeEx(pContext, NULL, MA_COINIT_VALUE);
return MA_SUCCESS;
}
-#else
-ma_result ma_context_uninit_backend_apis__nix(ma_context* pContext)
+
+static ma_result ma_linear_resampler_process_pcm_frames_f32_upsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
-#if defined(MA_USE_RUNTIME_LINKING_FOR_PTHREAD) && !defined(MA_NO_RUNTIME_LINKING)
- ma_dlclose(pContext, pContext->posix.pthreadSO);
-#else
- (void)pContext;
-#endif
+ const float* pFramesInF32;
+ /* */ float* pFramesOutF32;
+ ma_uint64 frameCountIn;
+ ma_uint64 frameCountOut;
+ ma_uint64 framesProcessedIn;
+ ma_uint64 framesProcessedOut;
- return MA_SUCCESS;
-}
+ MA_ASSERT(pResampler != NULL);
+ MA_ASSERT(pFrameCountIn != NULL);
+ MA_ASSERT(pFrameCountOut != NULL);
-ma_result ma_context_init_backend_apis__nix(ma_context* pContext)
-{
- /* pthread */
-#if defined(MA_USE_RUNTIME_LINKING_FOR_PTHREAD) && !defined(MA_NO_RUNTIME_LINKING)
- const char* libpthreadFileNames[] = {
- "libpthread.so",
- "libpthread.so.0",
- "libpthread.dylib"
- };
- size_t i;
+ pFramesInF32 = (const float*)pFramesIn;
+ pFramesOutF32 = ( float*)pFramesOut;
+ frameCountIn = *pFrameCountIn;
+ frameCountOut = *pFrameCountOut;
+ framesProcessedIn = 0;
+ framesProcessedOut = 0;
- for (i = 0; i < sizeof(libpthreadFileNames) / sizeof(libpthreadFileNames[0]); ++i) {
- pContext->posix.pthreadSO = ma_dlopen(pContext, libpthreadFileNames[i]);
- if (pContext->posix.pthreadSO != NULL) {
+ for (;;) {
+ if (framesProcessedOut >= frameCountOut) {
break;
}
- }
- if (pContext->posix.pthreadSO == NULL) {
- return MA_FAILED_TO_INIT_BACKEND;
+ /* Before interpolating we need to load the buffers. */
+ while (pResampler->inTimeInt > 0 && frameCountIn > 0) {
+ ma_uint32 iChannel;
+
+ if (pFramesInF32 != NULL) {
+ for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) {
+ pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel];
+ pResampler->x1.f32[iChannel] = pFramesInF32[iChannel];
+ }
+ pFramesInF32 += pResampler->config.channels;
+ } else {
+ for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) {
+ pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel];
+ pResampler->x1.f32[iChannel] = 0;
+ }
+ }
+
+ frameCountIn -= 1;
+ framesProcessedIn += 1;
+ pResampler->inTimeInt -= 1;
+ }
+
+ if (pResampler->inTimeInt > 0) {
+ break; /* Ran out of input data. */
+ }
+
+ /* Getting here means the frames have been loaded and we can generate the next output frame. */
+ if (pFramesOutF32 != NULL) {
+ MA_ASSERT(pResampler->inTimeInt == 0);
+ ma_linear_resampler_interpolate_frame_f32(pResampler, pFramesOutF32);
+
+ /* Filter. */
+ ma_lpf_process_pcm_frame_f32(&pResampler->lpf, pFramesOutF32, pFramesOutF32);
+
+ pFramesOutF32 += pResampler->config.channels;
+ }
+
+ framesProcessedOut += 1;
+
+ /* Advance time forward. */
+ pResampler->inTimeInt += pResampler->inAdvanceInt;
+ pResampler->inTimeFrac += pResampler->inAdvanceFrac;
+ if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) {
+ pResampler->inTimeFrac -= pResampler->config.sampleRateOut;
+ pResampler->inTimeInt += 1;
+ }
}
- pContext->posix.pthread_create = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_create");
- pContext->posix.pthread_join = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_join");
- pContext->posix.pthread_mutex_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_init");
- pContext->posix.pthread_mutex_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_destroy");
- pContext->posix.pthread_mutex_lock = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_lock");
- pContext->posix.pthread_mutex_unlock = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_unlock");
- pContext->posix.pthread_cond_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_init");
- pContext->posix.pthread_cond_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_destroy");
- pContext->posix.pthread_cond_wait = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_wait");
- pContext->posix.pthread_cond_signal = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_signal");
- pContext->posix.pthread_attr_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_init");
- pContext->posix.pthread_attr_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_destroy");
- pContext->posix.pthread_attr_setschedpolicy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_setschedpolicy");
- pContext->posix.pthread_attr_getschedparam = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_getschedparam");
- pContext->posix.pthread_attr_setschedparam = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_setschedparam");
-#else
- pContext->posix.pthread_create = (ma_proc)pthread_create;
- pContext->posix.pthread_join = (ma_proc)pthread_join;
- pContext->posix.pthread_mutex_init = (ma_proc)pthread_mutex_init;
- pContext->posix.pthread_mutex_destroy = (ma_proc)pthread_mutex_destroy;
- pContext->posix.pthread_mutex_lock = (ma_proc)pthread_mutex_lock;
- pContext->posix.pthread_mutex_unlock = (ma_proc)pthread_mutex_unlock;
- pContext->posix.pthread_cond_init = (ma_proc)pthread_cond_init;
- pContext->posix.pthread_cond_destroy = (ma_proc)pthread_cond_destroy;
- pContext->posix.pthread_cond_wait = (ma_proc)pthread_cond_wait;
- pContext->posix.pthread_cond_signal = (ma_proc)pthread_cond_signal;
- pContext->posix.pthread_attr_init = (ma_proc)pthread_attr_init;
- pContext->posix.pthread_attr_destroy = (ma_proc)pthread_attr_destroy;
-#if !defined(__EMSCRIPTEN__)
- pContext->posix.pthread_attr_setschedpolicy = (ma_proc)pthread_attr_setschedpolicy;
- pContext->posix.pthread_attr_getschedparam = (ma_proc)pthread_attr_getschedparam;
- pContext->posix.pthread_attr_setschedparam = (ma_proc)pthread_attr_setschedparam;
-#endif
-#endif
+ *pFrameCountIn = framesProcessedIn;
+ *pFrameCountOut = framesProcessedOut;
return MA_SUCCESS;
}
-#endif
-ma_result ma_context_init_backend_apis(ma_context* pContext)
+static ma_result ma_linear_resampler_process_pcm_frames_f32(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- ma_result result;
-#ifdef MA_WIN32
- result = ma_context_init_backend_apis__win32(pContext);
-#else
- result = ma_context_init_backend_apis__nix(pContext);
-#endif
+ MA_ASSERT(pResampler != NULL);
- return result;
+ if (pResampler->config.sampleRateIn > pResampler->config.sampleRateOut) {
+ return ma_linear_resampler_process_pcm_frames_f32_downsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ } else {
+ return ma_linear_resampler_process_pcm_frames_f32_upsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ }
}
-ma_result ma_context_uninit_backend_apis(ma_context* pContext)
+
+ma_result ma_linear_resampler_process_pcm_frames(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- ma_result result;
-#ifdef MA_WIN32
- result = ma_context_uninit_backend_apis__win32(pContext);
-#else
- result = ma_context_uninit_backend_apis__nix(pContext);
-#endif
+ if (pResampler == NULL) {
+ return MA_INVALID_ARGS;
+ }
- return result;
+ /* */ if (pResampler->config.format == ma_format_s16) {
+ return ma_linear_resampler_process_pcm_frames_s16(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ } else if (pResampler->config.format == ma_format_f32) {
+ return ma_linear_resampler_process_pcm_frames_f32(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ } else {
+ /* Should never get here. Getting here means the format is not supported and you didn't check the return value of ma_linear_resampler_init(). */
+ MA_ASSERT(MA_FALSE);
+ return MA_INVALID_ARGS;
+ }
}
-ma_bool32 ma_context_is_backend_asynchronous(ma_context* pContext)
+ma_result ma_linear_resampler_set_rate(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut)
{
- return pContext->isBackendAsynchronous;
+ return ma_linear_resampler_set_rate_internal(pResampler, sampleRateIn, sampleRateOut, /* isResamplerAlreadyInitialized = */ MA_TRUE);
}
-ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext)
+ma_result ma_linear_resampler_set_rate_ratio(ma_linear_resampler* pResampler, float ratioInOut)
{
- ma_result result;
- ma_context_config config;
- ma_backend defaultBackends[ma_backend_null+1];
- ma_uint32 iBackend;
- ma_backend* pBackendsToIterate;
- ma_uint32 backendsToIterateCount;
+ ma_uint32 n;
+ ma_uint32 d;
- if (pContext == NULL) {
- return MA_INVALID_ARGS;
+ d = 1000000; /* We use up to 6 decimal places. */
+ n = (ma_uint32)(ratioInOut * d);
+
+ if (n == 0) {
+ return MA_INVALID_ARGS; /* Ratio too small. */
}
- ma_zero_object(pContext);
+ MA_ASSERT(n != 0);
+
+ return ma_linear_resampler_set_rate(pResampler, n, d);
+}
- /* Always make sure the config is set first to ensure properties are available as soon as possible. */
- if (pConfig != NULL) {
- config = *pConfig;
- } else {
- config = ma_context_config_init();
- }
- pContext->logCallback = config.logCallback;
- pContext->threadPriority = config.threadPriority;
- pContext->pUserData = config.pUserData;
+ma_uint64 ma_linear_resampler_get_required_input_frame_count(ma_linear_resampler* pResampler, ma_uint64 outputFrameCount)
+{
+ ma_uint64 count;
- /* Backend APIs need to be initialized first. This is where external libraries will be loaded and linked. */
- result = ma_context_init_backend_apis(pContext);
- if (result != MA_SUCCESS) {
- return result;
+ if (pResampler == NULL) {
+ return 0;
}
- for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) {
- defaultBackends[iBackend] = (ma_backend)iBackend;
+ if (outputFrameCount == 0) {
+ return 0;
}
- pBackendsToIterate = (ma_backend*)backends;
- backendsToIterateCount = backendCount;
- if (pBackendsToIterate == NULL) {
- pBackendsToIterate = (ma_backend*)defaultBackends;
- backendsToIterateCount = ma_countof(defaultBackends);
- }
+ /* Any whole input frames are consumed before the first output frame is generated. */
+ count = pResampler->inTimeInt;
+ outputFrameCount -= 1;
- ma_assert(pBackendsToIterate != NULL);
+ /* The rest of the output frames can be calculated in constant time. */
+ count += outputFrameCount * pResampler->inAdvanceInt;
+ count += (pResampler->inTimeFrac + (outputFrameCount * pResampler->inAdvanceFrac)) / pResampler->config.sampleRateOut;
- for (iBackend = 0; iBackend < backendsToIterateCount; ++iBackend) {
- ma_backend backend = pBackendsToIterate[iBackend];
+ return count;
+}
- result = MA_NO_BACKEND;
- switch (backend) {
- #ifdef MA_HAS_WASAPI
- case ma_backend_wasapi:
- {
- result = ma_context_init__wasapi(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_DSOUND
- case ma_backend_dsound:
- {
- result = ma_context_init__dsound(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_WINMM
- case ma_backend_winmm:
- {
- result = ma_context_init__winmm(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_ALSA
- case ma_backend_alsa:
- {
- result = ma_context_init__alsa(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_PULSEAUDIO
- case ma_backend_pulseaudio:
- {
- result = ma_context_init__pulse(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_JACK
- case ma_backend_jack:
- {
- result = ma_context_init__jack(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_COREAUDIO
- case ma_backend_coreaudio:
- {
- result = ma_context_init__coreaudio(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_SNDIO
- case ma_backend_sndio:
- {
- result = ma_context_init__sndio(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_AUDIO4
- case ma_backend_audio4:
- {
- result = ma_context_init__audio4(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_OSS
- case ma_backend_oss:
- {
- result = ma_context_init__oss(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_AAUDIO
- case ma_backend_aaudio:
- {
- result = ma_context_init__aaudio(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_OPENSL
- case ma_backend_opensl:
- {
- result = ma_context_init__opensl(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_WEBAUDIO
- case ma_backend_webaudio:
- {
- result = ma_context_init__webaudio(&config, pContext);
- } break;
- #endif
- #ifdef MA_HAS_NULL
- case ma_backend_null:
- {
- result = ma_context_init__null(&config, pContext);
- } break;
- #endif
+ma_uint64 ma_linear_resampler_get_expected_output_frame_count(ma_linear_resampler* pResampler, ma_uint64 inputFrameCount)
+{
+ ma_uint64 outputFrameCount;
+ ma_uint64 inTimeInt;
+ ma_uint64 inTimeFrac;
- default: break;
+ if (pResampler == NULL) {
+ return 0;
+ }
+
+ /* TODO: Try making this run in constant time. */
+
+ outputFrameCount = 0;
+ inTimeInt = pResampler->inTimeInt;
+ inTimeFrac = pResampler->inTimeFrac;
+
+ for (;;) {
+ while (inTimeInt > 0 && inputFrameCount > 0) {
+ inputFrameCount -= 1;
+ inTimeInt -= 1;
}
- /* If this iteration was successful, return. */
- if (result == MA_SUCCESS) {
- result = ma_mutex_init(pContext, &pContext->deviceEnumLock);
- if (result != MA_SUCCESS) {
- ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device enumeration. ma_context_get_devices() is not thread safe.", MA_FAILED_TO_CREATE_MUTEX);
- }
- result = ma_mutex_init(pContext, &pContext->deviceInfoLock);
- if (result != MA_SUCCESS) {
- ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device info retrieval. ma_context_get_device_info() is not thread safe.", MA_FAILED_TO_CREATE_MUTEX);
- }
+ if (inTimeInt > 0) {
+ break;
+ }
-#ifdef MA_DEBUG_OUTPUT
- printf("[miniaudio] Endian: %s\n", ma_is_little_endian() ? "LE" : "BE");
- printf("[miniaudio] SSE2: %s\n", ma_has_sse2() ? "YES" : "NO");
- printf("[miniaudio] AVX2: %s\n", ma_has_avx2() ? "YES" : "NO");
- printf("[miniaudio] AVX512F: %s\n", ma_has_avx512f() ? "YES" : "NO");
- printf("[miniaudio] NEON: %s\n", ma_has_neon() ? "YES" : "NO");
-#endif
+ outputFrameCount += 1;
- pContext->backend = backend;
- return result;
+ /* Advance time forward. */
+ inTimeInt += pResampler->inAdvanceInt;
+ inTimeFrac += pResampler->inAdvanceFrac;
+ if (inTimeFrac >= pResampler->config.sampleRateOut) {
+ inTimeFrac -= pResampler->config.sampleRateOut;
+ inTimeInt += 1;
}
}
- /* If we get here it means an error occurred. */
- ma_zero_object(pContext); /* Safety. */
- return MA_NO_BACKEND;
+ return outputFrameCount;
}
-ma_result ma_context_uninit(ma_context* pContext)
+ma_uint64 ma_linear_resampler_get_input_latency(ma_linear_resampler* pResampler)
{
- if (pContext == NULL) {
- return MA_INVALID_ARGS;
+ if (pResampler == NULL) {
+ return 0;
}
- pContext->onUninit(pContext);
+ return 1 + ma_lpf_get_latency(&pResampler->lpf);
+}
- ma_mutex_uninit(&pContext->deviceEnumLock);
- ma_mutex_uninit(&pContext->deviceInfoLock);
- ma_free(pContext->pDeviceInfos);
- ma_context_uninit_backend_apis(pContext);
+ma_uint64 ma_linear_resampler_get_output_latency(ma_linear_resampler* pResampler)
+{
+ if (pResampler == NULL) {
+ return 0;
+ }
- return MA_SUCCESS;
+ return ma_linear_resampler_get_input_latency(pResampler) * pResampler->config.sampleRateOut / pResampler->config.sampleRateIn;
}
-ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+#if defined(ma_speex_resampler_h)
+#define MA_HAS_SPEEX_RESAMPLER
+
+static ma_result ma_result_from_speex_err(int err)
+{
+ switch (err)
+ {
+ case RESAMPLER_ERR_SUCCESS: return MA_SUCCESS;
+ case RESAMPLER_ERR_ALLOC_FAILED: return MA_OUT_OF_MEMORY;
+ case RESAMPLER_ERR_BAD_STATE: return MA_ERROR;
+ case RESAMPLER_ERR_INVALID_ARG: return MA_INVALID_ARGS;
+ case RESAMPLER_ERR_PTR_OVERLAP: return MA_INVALID_ARGS;
+ case RESAMPLER_ERR_OVERFLOW: return MA_ERROR;
+ default: return MA_ERROR;
+ }
+}
+#endif /* ma_speex_resampler_h */
+
+ma_resampler_config ma_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_resample_algorithm algorithm)
+{
+ ma_resampler_config config;
+
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRateIn = sampleRateIn;
+ config.sampleRateOut = sampleRateOut;
+ config.algorithm = algorithm;
+
+ /* Linear. */
+ config.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER);
+ config.linear.lpfNyquistFactor = 1;
+
+ /* Speex. */
+ config.speex.quality = 3; /* Cannot leave this as 0 as that is actually a valid value for Speex resampling quality. */
+
+ return config;
+}
+
+ma_result ma_resampler_init(const ma_resampler_config* pConfig, ma_resampler* pResampler)
{
ma_result result;
- if (pContext == NULL || pContext->onEnumDevices == NULL || callback == NULL) {
+ if (pResampler == NULL) {
return MA_INVALID_ARGS;
}
- ma_mutex_lock(&pContext->deviceEnumLock);
+ MA_ZERO_OBJECT(pResampler);
+
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) {
+ return MA_INVALID_ARGS;
+ }
+
+ pResampler->config = *pConfig;
+
+ switch (pConfig->algorithm)
{
- result = pContext->onEnumDevices(pContext, callback, pUserData);
+ case ma_resample_algorithm_linear:
+ {
+ ma_linear_resampler_config linearConfig;
+ linearConfig = ma_linear_resampler_config_init(pConfig->format, pConfig->channels, pConfig->sampleRateIn, pConfig->sampleRateOut);
+ linearConfig.lpfOrder = pConfig->linear.lpfOrder;
+ linearConfig.lpfNyquistFactor = pConfig->linear.lpfNyquistFactor;
+
+ result = ma_linear_resampler_init(&linearConfig, &pResampler->state.linear);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ } break;
+
+ case ma_resample_algorithm_speex:
+ {
+ #if defined(MA_HAS_SPEEX_RESAMPLER)
+ int speexErr;
+ pResampler->state.speex.pSpeexResamplerState = speex_resampler_init(pConfig->channels, pConfig->sampleRateIn, pConfig->sampleRateOut, pConfig->speex.quality, &speexErr);
+ if (pResampler->state.speex.pSpeexResamplerState == NULL) {
+ return ma_result_from_speex_err(speexErr);
+ }
+ #else
+ /* Speex resampler not available. */
+ return MA_NO_BACKEND;
+ #endif
+ } break;
+
+ default: return MA_INVALID_ARGS;
}
- ma_mutex_unlock(&pContext->deviceEnumLock);
- return result;
+ return MA_SUCCESS;
+}
+
+void ma_resampler_uninit(ma_resampler* pResampler)
+{
+ if (pResampler == NULL) {
+ return;
+ }
+
+ if (pResampler->config.algorithm == ma_resample_algorithm_linear) {
+ ma_linear_resampler_uninit(&pResampler->state.linear);
+ }
+
+#if defined(MA_HAS_SPEEX_RESAMPLER)
+ if (pResampler->config.algorithm == ma_resample_algorithm_speex) {
+ speex_resampler_destroy((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState);
+ }
+#endif
}
+static ma_result ma_resampler_process_pcm_frames__read__linear(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
+{
+ return ma_linear_resampler_process_pcm_frames(&pResampler->state.linear, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+}
-ma_bool32 ma_context_get_devices__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
+#if defined(MA_HAS_SPEEX_RESAMPLER)
+static ma_result ma_resampler_process_pcm_frames__read__speex(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- /*
- We need to insert the device info into our main internal buffer. Where it goes depends on the device type. If it's a capture device
- it's just appended to the end. If it's a playback device it's inserted just before the first capture device.
- */
+ int speexErr;
+ ma_uint64 frameCountOut;
+ ma_uint64 frameCountIn;
+ ma_uint64 framesProcessedOut;
+ ma_uint64 framesProcessedIn;
+ unsigned int framesPerIteration = UINT_MAX;
+
+ MA_ASSERT(pResampler != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+ MA_ASSERT(pFrameCountOut != NULL);
+ MA_ASSERT(pFrameCountIn != NULL);
/*
- First make sure we have room. Since the number of devices we add to the list is usually relatively small I've decided to use a
- simple fixed size increment for buffer expansion.
+    Reading from the Speex resampler requires a bit of dancing around for a few reasons. The first thing is that its frame counts
+    are in unsigned ints whereas ours are in ma_uint64. We therefore need to run the conversion in a loop. The other, more complicated
+    problem is that we need to keep track of the input time, similar to what we do with the linear resampler. The reason we need to
+    do this is for ma_resampler_get_required_input_frame_count() and ma_resampler_get_expected_output_frame_count().
*/
- const ma_uint32 bufferExpansionCount = 2;
- const ma_uint32 totalDeviceInfoCount = pContext->playbackDeviceInfoCount + pContext->captureDeviceInfoCount;
+ frameCountOut = *pFrameCountOut;
+ frameCountIn = *pFrameCountIn;
+ framesProcessedOut = 0;
+ framesProcessedIn = 0;
- if (pContext->deviceInfoCapacity >= totalDeviceInfoCount) {
- ma_uint32 newCapacity = totalDeviceInfoCount + bufferExpansionCount;
- ma_device_info* pNewInfos = (ma_device_info*)ma_realloc(pContext->pDeviceInfos, sizeof(*pContext->pDeviceInfos)*newCapacity);
- if (pNewInfos == NULL) {
- return MA_FALSE; /* Out of memory. */
+ while (framesProcessedOut < frameCountOut && framesProcessedIn < frameCountIn) {
+ unsigned int frameCountInThisIteration;
+ unsigned int frameCountOutThisIteration;
+ const void* pFramesInThisIteration;
+ void* pFramesOutThisIteration;
+
+ frameCountInThisIteration = framesPerIteration;
+ if ((ma_uint64)frameCountInThisIteration > (frameCountIn - framesProcessedIn)) {
+ frameCountInThisIteration = (unsigned int)(frameCountIn - framesProcessedIn);
}
- pContext->pDeviceInfos = pNewInfos;
- pContext->deviceInfoCapacity = newCapacity;
- }
+ frameCountOutThisIteration = framesPerIteration;
+ if ((ma_uint64)frameCountOutThisIteration > (frameCountOut - framesProcessedOut)) {
+ frameCountOutThisIteration = (unsigned int)(frameCountOut - framesProcessedOut);
+ }
- if (deviceType == ma_device_type_playback) {
- /* Playback. Insert just before the first capture device. */
+ pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pResampler->config.format, pResampler->config.channels));
+ pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pResampler->config.format, pResampler->config.channels));
- /* The first thing to do is move all of the capture devices down a slot. */
- ma_uint32 iFirstCaptureDevice = pContext->playbackDeviceInfoCount;
- size_t iCaptureDevice;
- for (iCaptureDevice = totalDeviceInfoCount; iCaptureDevice > iFirstCaptureDevice; --iCaptureDevice) {
- pContext->pDeviceInfos[iCaptureDevice] = pContext->pDeviceInfos[iCaptureDevice-1];
+ if (pResampler->config.format == ma_format_f32) {
+ speexErr = speex_resampler_process_interleaved_float((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, (const float*)pFramesInThisIteration, &frameCountInThisIteration, (float*)pFramesOutThisIteration, &frameCountOutThisIteration);
+ } else if (pResampler->config.format == ma_format_s16) {
+ speexErr = speex_resampler_process_interleaved_int((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, (const spx_int16_t*)pFramesInThisIteration, &frameCountInThisIteration, (spx_int16_t*)pFramesOutThisIteration, &frameCountOutThisIteration);
+ } else {
+ /* Format not supported. Should never get here. */
+ MA_ASSERT(MA_FALSE);
+ return MA_INVALID_OPERATION;
}
- /* Now just insert where the first capture device was before moving it down a slot. */
- pContext->pDeviceInfos[iFirstCaptureDevice] = *pInfo;
- pContext->playbackDeviceInfoCount += 1;
- } else {
- /* Capture. Insert at the end. */
- pContext->pDeviceInfos[totalDeviceInfoCount] = *pInfo;
- pContext->captureDeviceInfoCount += 1;
+ if (speexErr != RESAMPLER_ERR_SUCCESS) {
+ return ma_result_from_speex_err(speexErr);
+ }
+
+ framesProcessedIn += frameCountInThisIteration;
+ framesProcessedOut += frameCountOutThisIteration;
}
- (void)pUserData;
- return MA_TRUE;
+ *pFrameCountOut = framesProcessedOut;
+ *pFrameCountIn = framesProcessedIn;
+
+ return MA_SUCCESS;
}
+#endif
-ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount)
+static ma_result ma_resampler_process_pcm_frames__read(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- ma_result result;
+ MA_ASSERT(pResampler != NULL);
+ MA_ASSERT(pFramesOut != NULL);
- /* Safety. */
- if (ppPlaybackDeviceInfos != NULL) *ppPlaybackDeviceInfos = NULL;
- if (pPlaybackDeviceCount != NULL) *pPlaybackDeviceCount = 0;
- if (ppCaptureDeviceInfos != NULL) *ppCaptureDeviceInfos = NULL;
- if (pCaptureDeviceCount != NULL) *pCaptureDeviceCount = 0;
+ /* pFramesOut is not NULL, which means we must have a capacity. */
+ if (pFrameCountOut == NULL) {
+ return MA_INVALID_ARGS;
+ }
- if (pContext == NULL || pContext->onEnumDevices == NULL) {
+ /* It doesn't make sense to not have any input frames to process. */
+ if (pFrameCountIn == NULL || pFramesIn == NULL) {
return MA_INVALID_ARGS;
}
- /* Note that we don't use ma_context_enumerate_devices() here because we want to do locking at a higher level. */
- ma_mutex_lock(&pContext->deviceEnumLock);
+ switch (pResampler->config.algorithm)
{
- /* Reset everything first. */
- pContext->playbackDeviceInfoCount = 0;
- pContext->captureDeviceInfoCount = 0;
+ case ma_resample_algorithm_linear:
+ {
+ return ma_resampler_process_pcm_frames__read__linear(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ }
- /* Now enumerate over available devices. */
- result = pContext->onEnumDevices(pContext, ma_context_get_devices__enum_callback, NULL);
- if (result == MA_SUCCESS) {
- /* Playback devices. */
- if (ppPlaybackDeviceInfos != NULL) {
- *ppPlaybackDeviceInfos = pContext->pDeviceInfos;
+ case ma_resample_algorithm_speex:
+ {
+ #if defined(MA_HAS_SPEEX_RESAMPLER)
+ return ma_resampler_process_pcm_frames__read__speex(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ #else
+ break;
+ #endif
+ }
+
+ default: break;
+ }
+
+ /* Should never get here. */
+ MA_ASSERT(MA_FALSE);
+ return MA_INVALID_ARGS;
+}
+
+
+static ma_result ma_resampler_process_pcm_frames__seek__linear(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, ma_uint64* pFrameCountOut)
+{
+ MA_ASSERT(pResampler != NULL);
+
+ /* Seeking is supported natively by the linear resampler. */
+ return ma_linear_resampler_process_pcm_frames(&pResampler->state.linear, pFramesIn, pFrameCountIn, NULL, pFrameCountOut);
+}
+
+#if defined(MA_HAS_SPEEX_RESAMPLER)
+static ma_result ma_resampler_process_pcm_frames__seek__speex(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, ma_uint64* pFrameCountOut)
+{
+    /* The generic seek method is implemented on top of ma_resampler_process_pcm_frames__read() by just processing into a dummy buffer. */
+ float devnull[8192];
+ ma_uint64 totalOutputFramesToProcess;
+ ma_uint64 totalOutputFramesProcessed;
+ ma_uint64 totalInputFramesProcessed;
+ ma_uint32 bpf;
+ ma_result result;
+
+ MA_ASSERT(pResampler != NULL);
+
+ totalOutputFramesProcessed = 0;
+ totalInputFramesProcessed = 0;
+ bpf = ma_get_bytes_per_frame(pResampler->config.format, pResampler->config.channels);
+
+ if (pFrameCountOut != NULL) {
+ /* Seek by output frames. */
+ totalOutputFramesToProcess = *pFrameCountOut;
+ } else {
+ /* Seek by input frames. */
+ MA_ASSERT(pFrameCountIn != NULL);
+ totalOutputFramesToProcess = ma_resampler_get_expected_output_frame_count(pResampler, *pFrameCountIn);
+ }
+
+ if (pFramesIn != NULL) {
+ /* Process input data. */
+ MA_ASSERT(pFrameCountIn != NULL);
+ while (totalOutputFramesProcessed < totalOutputFramesToProcess && totalInputFramesProcessed < *pFrameCountIn) {
+ ma_uint64 inputFramesToProcessThisIteration = (*pFrameCountIn - totalInputFramesProcessed);
+ ma_uint64 outputFramesToProcessThisIteration = (totalOutputFramesToProcess - totalOutputFramesProcessed);
+ if (outputFramesToProcessThisIteration > sizeof(devnull) / bpf) {
+ outputFramesToProcessThisIteration = sizeof(devnull) / bpf;
}
- if (pPlaybackDeviceCount != NULL) {
- *pPlaybackDeviceCount = pContext->playbackDeviceInfoCount;
+
+ result = ma_resampler_process_pcm_frames__read(pResampler, ma_offset_ptr(pFramesIn, totalInputFramesProcessed*bpf), &inputFramesToProcessThisIteration, ma_offset_ptr(devnull, totalOutputFramesProcessed*bpf), &outputFramesToProcessThisIteration);
+ if (result != MA_SUCCESS) {
+ return result;
}
- /* Capture devices. */
- if (ppCaptureDeviceInfos != NULL) {
- *ppCaptureDeviceInfos = pContext->pDeviceInfos + pContext->playbackDeviceInfoCount; /* Capture devices come after playback devices. */
+ totalOutputFramesProcessed += outputFramesToProcessThisIteration;
+ totalInputFramesProcessed += inputFramesToProcessThisIteration;
+ }
+ } else {
+ /* Don't process input data - just update timing and filter state as if zeroes were passed in. */
+ while (totalOutputFramesProcessed < totalOutputFramesToProcess) {
+ ma_uint64 inputFramesToProcessThisIteration = 16384;
+ ma_uint64 outputFramesToProcessThisIteration = (totalOutputFramesToProcess - totalOutputFramesProcessed);
+ if (outputFramesToProcessThisIteration > sizeof(devnull) / bpf) {
+ outputFramesToProcessThisIteration = sizeof(devnull) / bpf;
}
- if (pCaptureDeviceCount != NULL) {
- *pCaptureDeviceCount = pContext->captureDeviceInfoCount;
+
+ result = ma_resampler_process_pcm_frames__read(pResampler, NULL, &inputFramesToProcessThisIteration, ma_offset_ptr(devnull, totalOutputFramesProcessed*bpf), &outputFramesToProcessThisIteration);
+ if (result != MA_SUCCESS) {
+ return result;
}
+
+ totalOutputFramesProcessed += outputFramesToProcessThisIteration;
+ totalInputFramesProcessed += inputFramesToProcessThisIteration;
}
}
- ma_mutex_unlock(&pContext->deviceEnumLock);
- return result;
+
+ if (pFrameCountIn != NULL) {
+ *pFrameCountIn = totalInputFramesProcessed;
+ }
+ if (pFrameCountOut != NULL) {
+ *pFrameCountOut = totalOutputFramesProcessed;
+ }
+
+ return MA_SUCCESS;
}
+#endif
-ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo)
+static ma_result ma_resampler_process_pcm_frames__seek(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, ma_uint64* pFrameCountOut)
{
- ma_device_info deviceInfo;
+ MA_ASSERT(pResampler != NULL);
- /* NOTE: Do not clear pDeviceInfo on entry. The reason is the pDeviceID may actually point to pDeviceInfo->id which will break things. */
- if (pContext == NULL || pDeviceInfo == NULL) {
+ switch (pResampler->config.algorithm)
+ {
+ case ma_resample_algorithm_linear:
+ {
+ return ma_resampler_process_pcm_frames__seek__linear(pResampler, pFramesIn, pFrameCountIn, pFrameCountOut);
+ } break;
+
+ case ma_resample_algorithm_speex:
+ {
+ #if defined(MA_HAS_SPEEX_RESAMPLER)
+ return ma_resampler_process_pcm_frames__seek__speex(pResampler, pFramesIn, pFrameCountIn, pFrameCountOut);
+ #else
+ break;
+ #endif
+ };
+
+ default: break;
+ }
+
+ /* Should never hit this. */
+ MA_ASSERT(MA_FALSE);
+ return MA_INVALID_ARGS;
+}
+
+
+ma_result ma_resampler_process_pcm_frames(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
+{
+ if (pResampler == NULL) {
return MA_INVALID_ARGS;
}
- ma_zero_object(&deviceInfo);
+ if (pFrameCountOut == NULL && pFrameCountIn == NULL) {
+ return MA_INVALID_ARGS;
+ }
- /* Help the backend out by copying over the device ID if we have one. */
- if (pDeviceID != NULL) {
- ma_copy_memory(&deviceInfo.id, pDeviceID, sizeof(*pDeviceID));
+ if (pFramesOut != NULL) {
+ /* Reading. */
+ return ma_resampler_process_pcm_frames__read(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ } else {
+ /* Seeking. */
+ return ma_resampler_process_pcm_frames__seek(pResampler, pFramesIn, pFrameCountIn, pFrameCountOut);
}
+}
- /* The backend may have an optimized device info retrieval function. If so, try that first. */
- if (pContext->onGetDeviceInfo != NULL) {
- ma_result result;
- ma_mutex_lock(&pContext->deviceInfoLock);
+ma_result ma_resampler_set_rate(ma_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut)
+{
+ if (pResampler == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ if (sampleRateIn == 0 || sampleRateOut == 0) {
+ return MA_INVALID_ARGS;
+ }
+
+ pResampler->config.sampleRateIn = sampleRateIn;
+ pResampler->config.sampleRateOut = sampleRateOut;
+
+ switch (pResampler->config.algorithm)
+ {
+ case ma_resample_algorithm_linear:
{
- result = pContext->onGetDeviceInfo(pContext, deviceType, pDeviceID, shareMode, &deviceInfo);
- }
- ma_mutex_unlock(&pContext->deviceInfoLock);
+ return ma_linear_resampler_set_rate(&pResampler->state.linear, sampleRateIn, sampleRateOut);
+ } break;
- /* Clamp ranges. */
- deviceInfo.minChannels = ma_max(deviceInfo.minChannels, MA_MIN_CHANNELS);
- deviceInfo.maxChannels = ma_min(deviceInfo.maxChannels, MA_MAX_CHANNELS);
- deviceInfo.minSampleRate = ma_max(deviceInfo.minSampleRate, MA_MIN_SAMPLE_RATE);
- deviceInfo.maxSampleRate = ma_min(deviceInfo.maxSampleRate, MA_MAX_SAMPLE_RATE);
+ case ma_resample_algorithm_speex:
+ {
+ #if defined(MA_HAS_SPEEX_RESAMPLER)
+ return ma_result_from_speex_err(speex_resampler_set_rate((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, sampleRateIn, sampleRateOut));
+ #else
+ break;
+ #endif
+ };
- *pDeviceInfo = deviceInfo;
- return result;
+ default: break;
}
- /* Getting here means onGetDeviceInfo has not been set. */
- return MA_ERROR;
+ /* Should never get here. */
+ MA_ASSERT(MA_FALSE);
+ return MA_INVALID_OPERATION;
}
-
-ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+ma_result ma_resampler_set_rate_ratio(ma_resampler* pResampler, float ratio)
{
- ma_result result;
- ma_device_config config;
-
- if (pContext == NULL) {
- return ma_device_init_ex(NULL, 0, NULL, pConfig, pDevice);
- }
- if (pDevice == NULL) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS);
- }
- if (pConfig == NULL) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid arguments (pConfig == NULL).", MA_INVALID_ARGS);
+ if (pResampler == NULL) {
+ return MA_INVALID_ARGS;
}
- /* We need to make a copy of the config so we can set default values if they were left unset in the input config. */
- config = *pConfig;
+ if (pResampler->config.algorithm == ma_resample_algorithm_linear) {
+ return ma_linear_resampler_set_rate_ratio(&pResampler->state.linear, ratio);
+ } else {
+ /* Getting here means the backend does not have native support for setting the rate as a ratio so we just do it generically. */
+ ma_uint32 n;
+ ma_uint32 d;
- /* Basic config validation. */
- if (config.deviceType != ma_device_type_playback && config.deviceType != ma_device_type_capture && config.deviceType != ma_device_type_duplex) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Device type is invalid. Make sure the device type has been set in the config.", MA_INVALID_DEVICE_CONFIG);
- }
+ d = 1000000; /* We use up to 6 decimal places. */
+ n = (ma_uint32)(ratio * d);
- if (config.deviceType == ma_device_type_capture || config.deviceType == ma_device_type_duplex) {
- if (config.capture.channels > MA_MAX_CHANNELS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Capture channel count cannot exceed 32.", MA_INVALID_DEVICE_CONFIG);
- }
- if (!ma__is_channel_map_valid(config.capture.channelMap, config.capture.channels)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid config. Capture channel map is invalid.", MA_INVALID_DEVICE_CONFIG);
+ if (n == 0) {
+ return MA_INVALID_ARGS; /* Ratio too small. */
}
+
+ MA_ASSERT(n != 0);
+
+ return ma_resampler_set_rate(pResampler, n, d);
}
+}
- if (config.deviceType == ma_device_type_playback || config.deviceType == ma_device_type_duplex) {
- if (config.playback.channels > MA_MAX_CHANNELS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Playback channel count cannot exceed 32.", MA_INVALID_DEVICE_CONFIG);
- }
- if (!ma__is_channel_map_valid(config.playback.channelMap, config.playback.channels)) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid config. Playback channel map is invalid.", MA_INVALID_DEVICE_CONFIG);
- }
+ma_uint64 ma_resampler_get_required_input_frame_count(ma_resampler* pResampler, ma_uint64 outputFrameCount)
+{
+ if (pResampler == NULL) {
+ return 0;
}
+ if (outputFrameCount == 0) {
+ return 0;
+ }
- ma_zero_object(pDevice);
- pDevice->pContext = pContext;
+ switch (pResampler->config.algorithm)
+ {
+ case ma_resample_algorithm_linear:
+ {
+ return ma_linear_resampler_get_required_input_frame_count(&pResampler->state.linear, outputFrameCount);
+ }
- /* Set the user data and log callback ASAP to ensure it is available for the entire initialization process. */
- pDevice->pUserData = config.pUserData;
- pDevice->onData = config.dataCallback;
- pDevice->onStop = config.stopCallback;
+ case ma_resample_algorithm_speex:
+ {
+ #if defined(MA_HAS_SPEEX_RESAMPLER)
+ ma_uint64 count;
+ int speexErr = ma_speex_resampler_get_required_input_frame_count((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, outputFrameCount, &count);
+ if (speexErr != RESAMPLER_ERR_SUCCESS) {
+ return 0;
+ }
- if (((ma_uintptr)pDevice % sizeof(pDevice)) != 0) {
- if (pContext->logCallback) {
- pContext->logCallback(pContext, pDevice, MA_LOG_LEVEL_WARNING, "WARNING: ma_device_init() called for a device that is not properly aligned. Thread safety is not supported.");
+ return count;
+ #else
+ break;
+ #endif
}
- }
- /*
- When passing in 0 for the format/channels/rate/chmap it means the device will be using whatever is chosen by the backend. If everything is set
- to defaults it means the format conversion pipeline will run on a fast path where data transfer is just passed straight through to the backend.
- */
- if (config.sampleRate == 0) {
- config.sampleRate = MA_DEFAULT_SAMPLE_RATE;
- pDevice->usingDefaultSampleRate = MA_TRUE;
+ default: break;
}
- if (config.capture.format == ma_format_unknown) {
- config.capture.format = MA_DEFAULT_FORMAT;
- pDevice->capture.usingDefaultFormat = MA_TRUE;
- }
- if (config.capture.channels == 0) {
- config.capture.channels = MA_DEFAULT_CHANNELS;
- pDevice->capture.usingDefaultChannels = MA_TRUE;
- }
- if (config.capture.channelMap[0] == MA_CHANNEL_NONE) {
- pDevice->capture.usingDefaultChannelMap = MA_TRUE;
- }
+ /* Should never get here. */
+ MA_ASSERT(MA_FALSE);
+ return 0;
+}
- if (config.playback.format == ma_format_unknown) {
- config.playback.format = MA_DEFAULT_FORMAT;
- pDevice->playback.usingDefaultFormat = MA_TRUE;
+ma_uint64 ma_resampler_get_expected_output_frame_count(ma_resampler* pResampler, ma_uint64 inputFrameCount)
+{
+ if (pResampler == NULL) {
+ return 0; /* Invalid args. */
}
- if (config.playback.channels == 0) {
- config.playback.channels = MA_DEFAULT_CHANNELS;
- pDevice->playback.usingDefaultChannels = MA_TRUE;
+
+ if (inputFrameCount == 0) {
+ return 0;
}
- if (config.playback.channelMap[0] == MA_CHANNEL_NONE) {
- pDevice->playback.usingDefaultChannelMap = MA_TRUE;
+
+ switch (pResampler->config.algorithm)
+ {
+ case ma_resample_algorithm_linear:
+ {
+ return ma_linear_resampler_get_expected_output_frame_count(&pResampler->state.linear, inputFrameCount);
+ }
+
+ case ma_resample_algorithm_speex:
+ {
+ #if defined(MA_HAS_SPEEX_RESAMPLER)
+ ma_uint64 count;
+ int speexErr = ma_speex_resampler_get_expected_output_frame_count((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, inputFrameCount, &count);
+ if (speexErr != RESAMPLER_ERR_SUCCESS) {
+ return 0;
+ }
+
+ return count;
+ #else
+ break;
+ #endif
+ }
+
+ default: break;
}
+ /* Should never get here. */
+ MA_ASSERT(MA_FALSE);
+ return 0;
+}
- /* Default buffer size. */
- if (config.bufferSizeInMilliseconds == 0 && config.bufferSizeInFrames == 0) {
- config.bufferSizeInMilliseconds = (config.performanceProfile == ma_performance_profile_low_latency) ? MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_LOW_LATENCY : MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_CONSERVATIVE;
- pDevice->usingDefaultBufferSize = MA_TRUE;
+ma_uint64 ma_resampler_get_input_latency(ma_resampler* pResampler)
+{
+ if (pResampler == NULL) {
+ return 0;
}
- /* Default periods. */
- if (config.periods == 0) {
- config.periods = MA_DEFAULT_PERIODS;
- pDevice->usingDefaultPeriods = MA_TRUE;
+ switch (pResampler->config.algorithm)
+ {
+ case ma_resample_algorithm_linear:
+ {
+ return ma_linear_resampler_get_input_latency(&pResampler->state.linear);
+ }
+
+ case ma_resample_algorithm_speex:
+ {
+ #if defined(MA_HAS_SPEEX_RESAMPLER)
+ return (ma_uint64)ma_speex_resampler_get_input_latency((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState);
+ #else
+ break;
+ #endif
+ }
+
+ default: break;
}
- /*
- Must have at least 3 periods for full-duplex mode. The idea is that the playback and capture positions hang out in the middle period, with the surrounding
- periods acting as a buffer in case the capture and playback devices get's slightly out of sync.
- */
- if (config.deviceType == ma_device_type_duplex && config.periods < 3) {
- config.periods = 3;
+ /* Should never get here. */
+ MA_ASSERT(MA_FALSE);
+ return 0;
+}
+
+ma_uint64 ma_resampler_get_output_latency(ma_resampler* pResampler)
+{
+ if (pResampler == NULL) {
+ return 0;
}
+ switch (pResampler->config.algorithm)
+ {
+ case ma_resample_algorithm_linear:
+ {
+ return ma_linear_resampler_get_output_latency(&pResampler->state.linear);
+ }
- pDevice->type = config.deviceType;
- pDevice->sampleRate = config.sampleRate;
+ case ma_resample_algorithm_speex:
+ {
+ #if defined(MA_HAS_SPEEX_RESAMPLER)
+ return (ma_uint64)ma_speex_resampler_get_output_latency((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState);
+ #else
+ break;
+ #endif
+ }
- pDevice->capture.shareMode = config.capture.shareMode;
- pDevice->capture.format = config.capture.format;
- pDevice->capture.channels = config.capture.channels;
- ma_channel_map_copy(pDevice->capture.channelMap, config.capture.channelMap, config.capture.channels);
+ default: break;
+ }
- pDevice->playback.shareMode = config.playback.shareMode;
- pDevice->playback.format = config.playback.format;
- pDevice->playback.channels = config.playback.channels;
- ma_channel_map_copy(pDevice->playback.channelMap, config.playback.channelMap, config.playback.channels);
+ /* Should never get here. */
+ MA_ASSERT(MA_FALSE);
+ return 0;
+}
+/**************************************************************************************************************************************************************
- /* The internal format, channel count and sample rate can be modified by the backend. */
- pDevice->capture.internalFormat = pDevice->capture.format;
- pDevice->capture.internalChannels = pDevice->capture.channels;
- pDevice->capture.internalSampleRate = pDevice->sampleRate;
- ma_channel_map_copy(pDevice->capture.internalChannelMap, pDevice->capture.channelMap, pDevice->capture.channels);
+Channel Conversion
- pDevice->playback.internalFormat = pDevice->playback.format;
- pDevice->playback.internalChannels = pDevice->playback.channels;
- pDevice->playback.internalSampleRate = pDevice->sampleRate;
- ma_channel_map_copy(pDevice->playback.internalChannelMap, pDevice->playback.channelMap, pDevice->playback.channels);
-
+**************************************************************************************************************************************************************/
+#ifndef MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT
+#define MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT 12
+#endif
- if (ma_mutex_init(pContext, &pDevice->lock) != MA_SUCCESS) {
- return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to create mutex.", MA_FAILED_TO_CREATE_MUTEX);
- }
+#define MA_PLANE_LEFT 0
+#define MA_PLANE_RIGHT 1
+#define MA_PLANE_FRONT 2
+#define MA_PLANE_BACK 3
+#define MA_PLANE_BOTTOM 4
+#define MA_PLANE_TOP 5
+
+float g_maChannelPlaneRatios[MA_CHANNEL_POSITION_COUNT][6] = {
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_NONE */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_MONO */
+ { 0.5f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT */
+ { 0.0f, 0.5f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT */
+ { 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_CENTER */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_LFE */
+ { 0.5f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_LEFT */
+ { 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_RIGHT */
+ { 0.25f, 0.0f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT_CENTER */
+ { 0.0f, 0.25f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT_CENTER */
+ { 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_CENTER */
+ { 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_LEFT */
+ { 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_RIGHT */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f}, /* MA_CHANNEL_TOP_CENTER */
+ { 0.33f, 0.0f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_LEFT */
+ { 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_FRONT_CENTER */
+ { 0.0f, 0.33f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_RIGHT */
+ { 0.33f, 0.0f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_LEFT */
+ { 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_BACK_CENTER */
+ { 0.0f, 0.33f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_RIGHT */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_0 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_1 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_2 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_3 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_4 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_5 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_6 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_7 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_8 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_9 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_10 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_11 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_12 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_13 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_14 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_15 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_16 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_17 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_18 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_19 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_20 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_21 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_22 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_23 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_24 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_25 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_26 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_27 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_28 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_29 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_30 */
+ { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_31 */
+};
+float ma_calculate_channel_position_rectangular_weight(ma_channel channelPositionA, ma_channel channelPositionB)
+{
    /*
-    When the device is started, the worker thread is the one that does the actual startup of the backend device. We
-    use a semaphore to wait for the background thread to finish the work. The same applies for stopping the device.
+    Imagine the following simplified example: You have a single input speaker which is the front/left speaker which you want to convert to
+    the following output configuration:
+
+    - front/left
+    - side/left
+    - back/left
+
+    The front/left output is easy - it is the same speaker position so it receives the full contribution of the front/left input. The amount
+    of contribution to apply to the side/left and back/left speakers, however, is a bit more complicated.
+
+    Imagine the front/left speaker as emitting audio from two planes - the front plane and the left plane. You can think of the front/left
+    speaker emitting half of its total volume from the front, and the other half from the left. Since part of its volume is being emitted
+    from the left side, and the side/left and back/left channels also emit audio from the left plane, one would expect that they would
+    receive some amount of contribution from front/left speaker. The amount of contribution depends on how many planes are shared between
+    the two speakers. Note that in the examples below I've added a top/front/left speaker as an example just to show how the math works
+    across 3 spatial dimensions.
+
+    The first thing to do is figure out how each speaker's volume is spread over each plane:
+    - front/left: 2 planes (front and left) = 1/2 = half its total volume on each plane
+    - side/left: 1 plane (left only) = 1/1 = entire volume from left plane
+    - back/left: 2 planes (back and left) = 1/2 = half its total volume on each plane
+    - top/front/left: 3 planes (top, front and left) = 1/3 = one third its total volume on each plane
-    Each of these semaphores is released internally by the worker thread when the work is completed. The start
-    semaphore is also used to wake up the worker thread.
+    The amount of volume each channel contributes to each of its planes is what controls how much it is willing to give to and take from other
+    channels on the same plane. The volume that is willing to be given by one channel is multiplied by the volume that is willing to be
+    taken by the other to produce the final contribution.
    */
-    if (ma_event_init(pContext, &pDevice->wakeupEvent) != MA_SUCCESS) {
-        ma_mutex_uninit(&pDevice->lock);
-        return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to create worker thread wakeup event.", MA_FAILED_TO_CREATE_EVENT);
-    }
-    if (ma_event_init(pContext, &pDevice->startEvent) != MA_SUCCESS) {
-        ma_event_uninit(&pDevice->wakeupEvent);
-        ma_mutex_uninit(&pDevice->lock);
-        return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to create worker thread start event.", MA_FAILED_TO_CREATE_EVENT);
-    }
-    if (ma_event_init(pContext, &pDevice->stopEvent) != MA_SUCCESS) {
-        ma_event_uninit(&pDevice->startEvent);
-        ma_event_uninit(&pDevice->wakeupEvent);
-        ma_mutex_uninit(&pDevice->lock);
-        return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to create worker thread stop event.", MA_FAILED_TO_CREATE_EVENT);
-    }
-
-    result = pContext->onDeviceInit(pContext, &config, pDevice);
-    if (result != MA_SUCCESS) {
-        return MA_NO_BACKEND; /* The error message will have been posted with ma_post_error() by the source of the error so don't bother calling it here. */
-    }
+    /* Contribution = Sum(Volume to Give * Volume to Take) */
+    float contribution =
+        g_maChannelPlaneRatios[channelPositionA][0] * g_maChannelPlaneRatios[channelPositionB][0] +
+        g_maChannelPlaneRatios[channelPositionA][1] * g_maChannelPlaneRatios[channelPositionB][1] +
+        g_maChannelPlaneRatios[channelPositionA][2] * g_maChannelPlaneRatios[channelPositionB][2] +
+        g_maChannelPlaneRatios[channelPositionA][3] * g_maChannelPlaneRatios[channelPositionB][3] +
+        g_maChannelPlaneRatios[channelPositionA][4] * g_maChannelPlaneRatios[channelPositionB][4] +
+        g_maChannelPlaneRatios[channelPositionA][5] * g_maChannelPlaneRatios[channelPositionB][5];
-    ma_device__post_init_setup(pDevice, pConfig->deviceType);
+    return contribution;
+}
+/* Initializes a channel converter config. The channel maps are copied into the config, so the caller's arrays need not outlive it; custom weights (if any) are zeroed by MA_ZERO_OBJECT and filled in separately. */
+ma_channel_converter_config ma_channel_converter_config_init(ma_format format, ma_uint32 channelsIn, const ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint32 channelsOut, const ma_channel channelMapOut[MA_MAX_CHANNELS], ma_channel_mix_mode mixingMode)
+{
+    ma_channel_converter_config config;
+    MA_ZERO_OBJECT(&config);
+    config.format = format;
+    config.channelsIn = channelsIn;
+    config.channelsOut = channelsOut;
+    ma_channel_map_copy(config.channelMapIn, channelMapIn, channelsIn);
+    ma_channel_map_copy(config.channelMapOut, channelMapOut, channelsOut);
+    config.mixingMode = mixingMode;
-    /* If the backend did not fill out a name for the device, try a generic method. */
-    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
-        if (pDevice->capture.name[0] == '\0') {
-            if (ma_context__try_get_device_name_by_id(pContext, ma_device_type_capture, config.capture.pDeviceID, pDevice->capture.name, sizeof(pDevice->capture.name)) != MA_SUCCESS) {
-                ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), (config.capture.pDeviceID == NULL) ? MA_DEFAULT_CAPTURE_DEVICE_NAME : "Capture Device", (size_t)-1);
-            }
-        }
-    }
-    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
-        if (pDevice->playback.name[0] == '\0') {
-            if (ma_context__try_get_device_name_by_id(pContext, ma_device_type_playback, config.playback.pDeviceID, pDevice->playback.name, sizeof(pDevice->playback.name)) != MA_SUCCESS) {
-                ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), (config.playback.pDeviceID == NULL) ? MA_DEFAULT_PLAYBACK_DEVICE_NAME : "Playback Device", (size_t)-1);
-            }
-        }
-    }
+    return config;
+}
+/* Converts a floating-point weight to fixed point (Q format defined by MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT) for the s16 path. */
+static ma_int32 ma_channel_converter_float_to_fp(float x)
+{
+    return (ma_int32)(x * (1<<MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT));
+}
+
-    if (ma_thread_create(pContext, &pDevice->thread, ma_worker_thread, pDevice) != MA_SUCCESS) {
-        ma_device_uninit(pDevice);
-        return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to create worker thread.", MA_FAILED_TO_CREATE_THREAD);
-    }
+/* Returns MA_TRUE when the channel position occupies physical space and participates in plane-based blending. NONE, MONO and LFE are explicitly non-spatial; AUX channels have all-zero plane ratios and therefore also return MA_FALSE. */
+static ma_bool32 ma_is_spatial_channel_position(ma_channel channelPosition)
+{
+    int i;
-        /* Wait for the worker thread to put the device into it's stopped state for real. */
-        ma_event_wait(&pDevice->stopEvent);
-    } else {
-        ma_device__set_state(pDevice, MA_STATE_STOPPED);
+    if (channelPosition == MA_CHANNEL_NONE || channelPosition == MA_CHANNEL_MONO || channelPosition == MA_CHANNEL_LFE) {
+        return MA_FALSE;
    }
-
-#ifdef MA_DEBUG_OUTPUT
-    printf("[%s]\n", ma_get_backend_name(pDevice->pContext->backend));
-    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
-        printf("  %s (%s)\n", pDevice->capture.name, "Capture");
-        printf("    Format:      %s -> %s\n", ma_get_format_name(pDevice->capture.format), ma_get_format_name(pDevice->capture.internalFormat));
-        printf("    Channels:    %d -> %d\n", pDevice->capture.channels, pDevice->capture.internalChannels);
-        printf("    Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->capture.internalSampleRate);
-        printf("    Buffer Size: %d/%d (%d)\n", pDevice->capture.internalBufferSizeInFrames, pDevice->capture.internalPeriods, (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods));
-        printf("    Conversion:\n");
-        printf("        Pre Format Conversion:    %s\n", pDevice->capture.converter.isPreFormatConversionRequired ? "YES" : "NO");
-        printf("        Post Format Conversion:   %s\n", pDevice->capture.converter.isPostFormatConversionRequired ? "YES" : "NO");
-        printf("        Channel Routing:          %s\n", pDevice->capture.converter.isChannelRoutingRequired ? "YES" : "NO");
-        printf("        SRC:                      %s\n", pDevice->capture.converter.isSRCRequired ? "YES" : "NO");
-        printf("        Channel Routing at Start: %s\n", pDevice->capture.converter.isChannelRoutingAtStart ? "YES" : "NO");
-        printf("        Passthrough:              %s\n", pDevice->capture.converter.isPassthrough ? "YES" : "NO");
-    }
-    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
-        printf("  %s (%s)\n", pDevice->playback.name, "Playback");
-        printf("    Format:      %s -> %s\n", ma_get_format_name(pDevice->playback.format), ma_get_format_name(pDevice->playback.internalFormat));
-        printf("    Channels:    %d -> %d\n", pDevice->playback.channels, pDevice->playback.internalChannels);
-        printf("    Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->playback.internalSampleRate);
-        printf("    Buffer Size: %d/%d (%d)\n", pDevice->playback.internalBufferSizeInFrames, pDevice->playback.internalPeriods, (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods));
-        printf("    Conversion:\n");
-        printf("        Pre Format Conversion:    %s\n", pDevice->playback.converter.isPreFormatConversionRequired ? "YES" : "NO");
-        printf("        Post Format Conversion:   %s\n", pDevice->playback.converter.isPostFormatConversionRequired ? "YES" : "NO");
-        printf("        Channel Routing:          %s\n", pDevice->playback.converter.isChannelRoutingRequired ? "YES" : "NO");
-        printf("        SRC:                      %s\n", pDevice->playback.converter.isSRCRequired ? "YES" : "NO");
-        printf("        Channel Routing at Start: %s\n", pDevice->playback.converter.isChannelRoutingAtStart ? "YES" : "NO");
-        printf("        Passthrough:              %s\n", pDevice->playback.converter.isPassthrough ? "YES" : "NO");
+    for (i = 0; i < 6; ++i) {   /* Each side of a cube. */
+        if (g_maChannelPlaneRatios[channelPosition][i] != 0) {
+            return MA_TRUE;
+        }
    }
-#endif
-
-    ma_assert(ma_device__get_state(pDevice) == MA_STATE_STOPPED);
-    return MA_SUCCESS;
+    return MA_FALSE;
}
-ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice)
+/* Initializes a channel converter from a config. Validates both channel maps and the format (only s16 and f32 are supported), then precomputes the mixing weights and fast-path flags. */
+ma_result ma_channel_converter_init(const ma_channel_converter_config* pConfig, ma_channel_converter* pConverter)
{
-    ma_result result;
-    ma_context* pContext;
-    ma_backend defaultBackends[ma_backend_null+1];
-    ma_uint32 iBackend;
-    ma_backend* pBackendsToIterate;
-    ma_uint32 backendsToIterateCount;
+    ma_uint32 iChannelIn;
+    ma_uint32 iChannelOut;
-    if (pConfig == NULL) {
+    if (pConverter == NULL) {
        return MA_INVALID_ARGS;
    }
-    pContext = (ma_context*)ma_malloc(sizeof(*pContext));
-    if (pContext == NULL) {
-        return MA_OUT_OF_MEMORY;
+    MA_ZERO_OBJECT(pConverter);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
    }
-    for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) {
-        defaultBackends[iBackend] = (ma_backend)iBackend;
+    if (!ma_channel_map_valid(pConfig->channelsIn, pConfig->channelMapIn)) {
+        return MA_INVALID_ARGS; /* Invalid input channel map. */
+    }
+    if (!ma_channel_map_valid(pConfig->channelsOut, pConfig->channelMapOut)) {
+        return MA_INVALID_ARGS; /* Invalid output channel map. */
    }
-    pBackendsToIterate = (ma_backend*)backends;
-    backendsToIterateCount = backendCount;
-    if (pBackendsToIterate == NULL) {
-        pBackendsToIterate = (ma_backend*)defaultBackends;
-        backendsToIterateCount = ma_countof(defaultBackends);
+    if (pConfig->format != ma_format_s16 && pConfig->format != ma_format_f32) {
+        return MA_INVALID_ARGS; /* Invalid format. */
    }
-    result = MA_NO_BACKEND;
+    pConverter->format = pConfig->format;
+    pConverter->channelsIn = pConfig->channelsIn;
+    pConverter->channelsOut = pConfig->channelsOut;
+    ma_channel_map_copy(pConverter->channelMapIn, pConfig->channelMapIn, pConfig->channelsIn);
+    ma_channel_map_copy(pConverter->channelMapOut, pConfig->channelMapOut, pConfig->channelsOut);
+    pConverter->mixingMode = pConfig->mixingMode;
-    for (iBackend = 0; iBackend < backendsToIterateCount; ++iBackend) {
-        result = ma_context_init(&pBackendsToIterate[iBackend], 1, pContextConfig, pContext);
-        if (result == MA_SUCCESS) {
-            result = ma_device_init(pContext, pConfig, pDevice);
-            if (result == MA_SUCCESS) {
-                break;  /* Success. */
+    /* Copy the caller-supplied weights, converting to fixed point for the s16 path. NOTE(review): the original diff had the s16 and f32 tables swapped here, which would zero out custom weights for s16 streams; every other weight assignment in this function writes weights.s16 for ma_format_s16. */
+    for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; iChannelIn += 1) {
+        for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+            if (pConverter->format == ma_format_s16) {
+                pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(pConfig->weights[iChannelIn][iChannelOut]);
            } else {
-                ma_context_uninit(pContext); /* Failure. */
+                pConverter->weights.f32[iChannelIn][iChannelOut] = pConfig->weights[iChannelIn][iChannelOut];
            }
        }
    }
+
-    if (result != MA_SUCCESS) {
-        ma_free(pContext);
-        return result;
-    }
-
-    pDevice->isOwnerOfContext = MA_TRUE;
-    return result;
-}
-
-void ma_device_uninit(ma_device* pDevice)
-{
-    if (!ma_device__is_initialized(pDevice)) {
-        return;
-    }
-
-    /* Make sure the device is stopped first. The backends will probably handle this naturally, but I like to do it explicitly for my own sanity. */
-    if (ma_device_is_started(pDevice)) {
-        ma_device_stop(pDevice);
-    }
-
-    /* Putting the device into an uninitialized state will make the worker thread return. */
-    ma_device__set_state(pDevice, MA_STATE_UNINITIALIZED);
-
-    /* Wake up the worker thread and wait for it to properly terminate. */
-    if (!ma_context_is_backend_asynchronous(pDevice->pContext)) {
-        ma_event_signal(&pDevice->wakeupEvent);
-        ma_thread_wait(&pDevice->thread);
-    }
-
-    pDevice->pContext->onDeviceUninit(pDevice);
-
-    ma_event_uninit(&pDevice->stopEvent);
-    ma_event_uninit(&pDevice->startEvent);
-    ma_event_uninit(&pDevice->wakeupEvent);
-    ma_mutex_uninit(&pDevice->lock);
-
-    if (pDevice->isOwnerOfContext) {
-        ma_context_uninit(pDevice->pContext);
-        ma_free(pDevice->pContext);
-    }
-
-    ma_zero_object(pDevice);
-}
-
-void ma_device_set_stop_callback(ma_device* pDevice, ma_stop_proc proc)
-{
-    if (pDevice == NULL) {
-        return;
-    }
-
-    ma_atomic_exchange_ptr(&pDevice->onStop, proc);
-}
-
-ma_result ma_device_start(ma_device* pDevice)
-{
-    ma_result result;
-    if (pDevice == NULL) {
-        return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_start() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS);
+    /* If the input and output channels and channel maps are the same we should use a passthrough. */
+    if (pConverter->channelsIn == pConverter->channelsOut) {
+        if (ma_channel_map_equal(pConverter->channelsIn, pConverter->channelMapIn, pConverter->channelMapOut)) {
+            pConverter->isPassthrough = MA_TRUE;
+        }
+        if (ma_channel_map_blank(pConverter->channelsIn, pConverter->channelMapIn) || ma_channel_map_blank(pConverter->channelsOut, pConverter->channelMapOut)) {
+            pConverter->isPassthrough = MA_TRUE;
+        }
    }
-    if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) {
-        return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_start() called for an uninitialized device.", MA_DEVICE_NOT_INITIALIZED);
-    }
    /*
-    Starting the device doesn't do anything in synchronous mode because in that case it's started automatically with
-    ma_device_write() and ma_device_read(). It's best to return an error so that the application can be aware that
-    it's not doing it right.
+    We can use a simple case for expanding the mono channel. This will be used when expanding a mono input into any output so long
+    as no LFE is present in the output.
    */
-    if (!ma_device__is_async(pDevice)) {
-        return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_start() called in synchronous mode. This should only be used in asynchronous/callback mode.", MA_DEVICE_NOT_INITIALIZED);
-    }
-
-    result = MA_ERROR;
-    ma_mutex_lock(&pDevice->lock);
-    {
-        /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a stopped or paused state. */
-        ma_assert(ma_device__get_state(pDevice) == MA_STATE_STOPPED);
-
-        ma_device__set_state(pDevice, MA_STATE_STARTING);
-
-        /* Asynchronous backends need to be handled differently. */
-        if (ma_context_is_backend_asynchronous(pDevice->pContext)) {
-            result = pDevice->pContext->onDeviceStart(pDevice);
-            if (result == MA_SUCCESS) {
-                ma_device__set_state(pDevice, MA_STATE_STARTED);
+    if (!pConverter->isPassthrough) {
+        if (pConverter->channelsIn == 1 && pConverter->channelMapIn[0] == MA_CHANNEL_MONO) {
+            /* Optimal case if no LFE is in the output channel map. */
+            pConverter->isSimpleMonoExpansion = MA_TRUE;
+            if (ma_channel_map_contains_channel_position(pConverter->channelsOut, pConverter->channelMapOut, MA_CHANNEL_LFE)) {
+                pConverter->isSimpleMonoExpansion = MA_FALSE;
            }
-        } else {
-            /*
-            Synchronous backends are started by signaling an event that's being waited on in the worker thread. We first wake up the
-            thread and then wait for the start event.
-            */
-            ma_event_signal(&pDevice->wakeupEvent);
-
-            /*
-            Wait for the worker thread to finish starting the device. Note that the worker thread will be the one who puts the device
-            into the started state. Don't call ma_device__set_state() here.
-            */
-            ma_event_wait(&pDevice->startEvent);
-            result = pDevice->workResult;
        }
    }
-    ma_mutex_unlock(&pDevice->lock);
-
-    return result;
-}
-
-ma_result ma_device_stop(ma_device* pDevice)
-{
-    ma_result result;
-    if (pDevice == NULL) {
-        return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_stop() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS);
+    /* Another optimized case is stereo to mono. */
+    if (!pConverter->isPassthrough) {
+        if (pConverter->channelsOut == 1 && pConverter->channelMapOut[0] == MA_CHANNEL_MONO && pConverter->channelsIn == 2) {
+            /* Optimal case if no LFE is in the input channel map. */
+            pConverter->isStereoToMono = MA_TRUE;
+            if (ma_channel_map_contains_channel_position(pConverter->channelsIn, pConverter->channelMapIn, MA_CHANNEL_LFE)) {
+                pConverter->isStereoToMono = MA_FALSE;
+            }
+        }
    }
-    if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) {
-        return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_stop() called for an uninitialized device.", MA_DEVICE_NOT_INITIALIZED);
-    }
    /*
-    Stopping is slightly different for synchronous mode. In this case it just tells the driver to stop the internal processing of the device. Also,
-    stopping in synchronous mode does not require state checking.
+    Here is where we do a bit of pre-processing to know how each channel should be combined to make up the output. Rules:
+
+    1) If it's a passthrough, do nothing - it's just a simple memcpy().
+    2) If the channel counts are the same and every channel position in the input map is present in the output map, use a
+       simple shuffle. An example might be different 5.1 channel layouts.
+    3) Otherwise channels are blended based on spatial locality.
    */
-    if (!ma_device__is_async(pDevice)) {
-        if (pDevice->pContext->onDeviceStop) {
-            return pDevice->pContext->onDeviceStop(pDevice);
-        }
-    }
-
-    result = MA_ERROR;
-    ma_mutex_lock(&pDevice->lock);
-    {
-        /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a started or paused state. */
-        ma_assert(ma_device__get_state(pDevice) == MA_STATE_STARTED);
+    if (!pConverter->isPassthrough) {
+        if (pConverter->channelsIn == pConverter->channelsOut) {
+            ma_bool32 areAllChannelPositionsPresent = MA_TRUE;
+            for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+                ma_bool32 isInputChannelPositionInOutput = MA_FALSE;
+                for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+                    if (pConverter->channelMapIn[iChannelIn] == pConverter->channelMapOut[iChannelOut]) {
+                        isInputChannelPositionInOutput = MA_TRUE;
+                        break;
+                    }
+                }
-        ma_device__set_state(pDevice, MA_STATE_STOPPING);
+                if (!isInputChannelPositionInOutput) {
+                    areAllChannelPositionsPresent = MA_FALSE;
+                    break;
+                }
+            }
-        /* There's no need to wake up the thread like we do when starting. */
+            if (areAllChannelPositionsPresent) {
+                pConverter->isSimpleShuffle = MA_TRUE;
-        /* Asynchronous backends need to be handled differently. */
-        if (ma_context_is_backend_asynchronous(pDevice->pContext)) {
-            if (pDevice->pContext->onDeviceStop) {
-                result = pDevice->pContext->onDeviceStop(pDevice);
-            } else {
-                result = MA_SUCCESS;
+                /*
+                All the router will be doing is rearranging channels which means all we need to do is use a shuffling table which is just
+                a mapping between the index of the input channel to the index of the output channel.
+                */
+                for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+                    for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+                        if (pConverter->channelMapIn[iChannelIn] == pConverter->channelMapOut[iChannelOut]) {
+                            pConverter->shuffleTable[iChannelIn] = (ma_uint8)iChannelOut;
+                            break;
+                        }
+                    }
+                }
            }
-
-            ma_device__set_state(pDevice, MA_STATE_STOPPED);
-        } else {
-            /* Synchronous backends. */
-
-            /*
-            We need to wait for the worker thread to become available for work before returning. Note that the worker thread will be
-            the one who puts the device into the stopped state. Don't call ma_device__set_state() here.
-            */
-            ma_event_wait(&pDevice->stopEvent);
-            result = MA_SUCCESS;
        }
    }
-    ma_mutex_unlock(&pDevice->lock);
-
-    return result;
-}
-ma_bool32 ma_device_is_started(ma_device* pDevice)
-{
-    if (pDevice == NULL) {
-        return MA_FALSE;
-    }
-    return ma_device__get_state(pDevice) == MA_STATE_STARTED;
-}
+    /*
+    Here is where weights are calculated. Note that we calculate the weights at all times, even when using a passthrough and simple
+    shuffling. We use different algorithms for calculating weights depending on our mixing mode.
+
+    In simple mode we don't do any blending (except for converting between mono, which is done in a later step). Instead we just
+    map 1:1 matching channels. In this mode, if no channels in the input channel map correspond to anything in the output channel
+    map, nothing will be heard!
+    */
+    /* In all cases we need to make sure all channels that are present in both channel maps have a 1:1 mapping. */
+    for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+        ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn];
-ma_context_config ma_context_config_init()
-{
-    ma_context_config config;
-    ma_zero_object(&config);
+        for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+            ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut];
-    return config;
-}
+            if (channelPosIn == channelPosOut) {
+                if (pConverter->format == ma_format_s16) {
+                    pConverter->weights.s16[iChannelIn][iChannelOut] = (1 << MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT);
+                } else {
+                    pConverter->weights.f32[iChannelIn][iChannelOut] = 1;
+                }
+            }
+        }
+    }
-ma_device_config ma_device_config_init(ma_device_type deviceType)
-{
-    ma_device_config config;
-    ma_zero_object(&config);
-    config.deviceType = deviceType;
+    /*
+    The mono channel is accumulated on all other channels, except LFE. Make sure in this loop we exclude output mono channels since
+    they were handled in the pass above.
+    */
+    for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+        ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn];
-    return config;
-}
-#endif  /* MA_NO_DEVICE_IO */
+        if (channelPosIn == MA_CHANNEL_MONO) {
+            for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+                ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut];
+                if (channelPosOut != MA_CHANNEL_NONE && channelPosOut != MA_CHANNEL_MONO && channelPosOut != MA_CHANNEL_LFE) {
+                    if (pConverter->format == ma_format_s16) {
+                        pConverter->weights.s16[iChannelIn][iChannelOut] = (1 << MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT);
+                    } else {
+                        pConverter->weights.f32[iChannelIn][iChannelOut] = 1;
+                    }
+                }
+            }
+        }
+    }
-void ma_get_standard_channel_map_microsoft(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
-{
-    /* Based off the speaker configurations mentioned here: https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/ksmedia/ns-ksmedia-ksaudio_channel_config */
-    switch (channels)
+    /* The output mono channel is the average of all non-none, non-mono and non-lfe input channels. */
    {
-        case 1:
-        {
-            channelMap[0] = MA_CHANNEL_MONO;
-        } break;
-
-        case 2:
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-        } break;
-
-        case 3: /* Not defined, but best guess. */
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
-        } break;
-
-        case 4:
-        {
-#ifndef MA_USE_QUAD_MICROSOFT_CHANNEL_MAP
-            /* Surround. Using the Surround profile has the advantage of the 3rd channel (MA_CHANNEL_FRONT_CENTER) mapping nicely with higher channel counts. */
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
-            channelMap[3] = MA_CHANNEL_BACK_CENTER;
-#else
-            /* Quad. */
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_BACK_LEFT;
-            channelMap[3] = MA_CHANNEL_BACK_RIGHT;
-#endif
-        } break;
+        ma_uint32 len = 0;
+        for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+            ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn];
-        case 5: /* Not defined, but best guess. */
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
-            channelMap[3] = MA_CHANNEL_BACK_LEFT;
-            channelMap[4] = MA_CHANNEL_BACK_RIGHT;
-        } break;
+            if (channelPosIn != MA_CHANNEL_NONE && channelPosIn != MA_CHANNEL_MONO && channelPosIn != MA_CHANNEL_LFE) {
+                len += 1;
+            }
+        }
-        case 6:
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
-            channelMap[3] = MA_CHANNEL_LFE;
-            channelMap[4] = MA_CHANNEL_SIDE_LEFT;
-            channelMap[5] = MA_CHANNEL_SIDE_RIGHT;
-        } break;
+        if (len > 0) {
+            float monoWeight = 1.0f / len;
-        case 7: /* Not defined, but best guess. */
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
-            channelMap[3] = MA_CHANNEL_LFE;
-            channelMap[4] = MA_CHANNEL_BACK_CENTER;
-            channelMap[5] = MA_CHANNEL_SIDE_LEFT;
-            channelMap[6] = MA_CHANNEL_SIDE_RIGHT;
-        } break;
+            for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+                ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut];
-        case 8:
-        default:
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
-            channelMap[3] = MA_CHANNEL_LFE;
-            channelMap[4] = MA_CHANNEL_BACK_LEFT;
-            channelMap[5] = MA_CHANNEL_BACK_RIGHT;
-            channelMap[6] = MA_CHANNEL_SIDE_LEFT;
-            channelMap[7] = MA_CHANNEL_SIDE_RIGHT;
-        } break;
-    }
+                if (channelPosOut == MA_CHANNEL_MONO) {
+                    for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+                        ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn];
-    /* Remainder. */
-    if (channels > 8) {
-        ma_uint32 iChannel;
-        for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) {
-            channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8));
+                        if (channelPosIn != MA_CHANNEL_NONE && channelPosIn != MA_CHANNEL_MONO && channelPosIn != MA_CHANNEL_LFE) {
+                            if (pConverter->format == ma_format_s16) {
+                                pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(monoWeight);
+                            } else {
+                                pConverter->weights.f32[iChannelIn][iChannelOut] = monoWeight;
+                            }
+                        }
+                    }
+                }
+            }
        }
    }
-}
-void ma_get_standard_channel_map_alsa(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
-{
-    switch (channels)
+
+    /* Input and output channels that are not present on the other side need to be blended in based on spatial locality. */
+    switch (pConverter->mixingMode)
    {
-        case 1:
+        case ma_channel_mix_mode_rectangular:
        {
-            channelMap[0] = MA_CHANNEL_MONO;
-        } break;
+            /* Unmapped input channels. */
+            for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+                ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn];
-        case 2:
-        {
-            channelMap[0] = MA_CHANNEL_LEFT;
-            channelMap[1] = MA_CHANNEL_RIGHT;
-        } break;
+                if (ma_is_spatial_channel_position(channelPosIn)) {
+                    if (!ma_channel_map_contains_channel_position(pConverter->channelsOut, pConverter->channelMapOut, channelPosIn)) {
+                        for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+                            ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut];
-        case 3:
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
-        } break;
+                            if (ma_is_spatial_channel_position(channelPosOut)) {
+                                float weight = 0;
+                                if (pConverter->mixingMode == ma_channel_mix_mode_rectangular) {
+                                    weight = ma_calculate_channel_position_rectangular_weight(channelPosIn, channelPosOut);
+                                }
-        case 4:
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_BACK_LEFT;
-            channelMap[3] = MA_CHANNEL_BACK_RIGHT;
-        } break;
+                                /* Only apply the weight if we haven't already got some contribution from the respective channels. */
+                                if (pConverter->format == ma_format_s16) {
+                                    if (pConverter->weights.s16[iChannelIn][iChannelOut] == 0) {
+                                        pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(weight);
+                                    }
+                                } else {
+                                    if (pConverter->weights.f32[iChannelIn][iChannelOut] == 0) {
+                                        pConverter->weights.f32[iChannelIn][iChannelOut] = weight;
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
-        case 5:
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_BACK_LEFT;
-            channelMap[3] = MA_CHANNEL_BACK_RIGHT;
-            channelMap[4] = MA_CHANNEL_FRONT_CENTER;
-        } break;
+            /* Unmapped output channels. */
+            for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+                ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut];
-        case 6:
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_BACK_LEFT;
-            channelMap[3] = MA_CHANNEL_BACK_RIGHT;
-            channelMap[4] = MA_CHANNEL_FRONT_CENTER;
-            channelMap[5] = MA_CHANNEL_LFE;
-        } break;
+                if (ma_is_spatial_channel_position(channelPosOut)) {
+                    if (!ma_channel_map_contains_channel_position(pConverter->channelsIn, pConverter->channelMapIn, channelPosOut)) {
+                        for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+                            ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn];
-        case 7:
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_BACK_LEFT;
-            channelMap[3] = MA_CHANNEL_BACK_RIGHT;
-            channelMap[4] = MA_CHANNEL_FRONT_CENTER;
-            channelMap[5] = MA_CHANNEL_LFE;
-            channelMap[6] = MA_CHANNEL_BACK_CENTER;
+                            if (ma_is_spatial_channel_position(channelPosIn)) {
+                                float weight = 0;
+                                if (pConverter->mixingMode == ma_channel_mix_mode_rectangular) {
+                                    weight = ma_calculate_channel_position_rectangular_weight(channelPosIn, channelPosOut);
+                                }
+
+                                /* Only apply the weight if we haven't already got some contribution from the respective channels. */
+                                if (pConverter->format == ma_format_s16) {
+                                    if (pConverter->weights.s16[iChannelIn][iChannelOut] == 0) {
+                                        pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(weight);
+                                    }
+                                } else {
+                                    if (pConverter->weights.f32[iChannelIn][iChannelOut] == 0) {
+                                        pConverter->weights.f32[iChannelIn][iChannelOut] = weight;
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
        } break;
-        case 8:
+        case ma_channel_mix_mode_custom_weights:
+        case ma_channel_mix_mode_simple:
        default:
        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_BACK_LEFT;
-            channelMap[3] = MA_CHANNEL_BACK_RIGHT;
-            channelMap[4] = MA_CHANNEL_FRONT_CENTER;
-            channelMap[5] = MA_CHANNEL_LFE;
-            channelMap[6] = MA_CHANNEL_SIDE_LEFT;
-            channelMap[7] = MA_CHANNEL_SIDE_RIGHT;
+            /* Fallthrough. */
        } break;
    }
-    /* Remainder. */
-    if (channels > 8) {
-        ma_uint32 iChannel;
-        for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) {
-            channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8));
-        }
+
+    return MA_SUCCESS;
+}
+
+void ma_channel_converter_uninit(ma_channel_converter* pConverter)
+{
+ if (pConverter == NULL) {
+ return;
}
}
-void ma_get_standard_channel_map_rfc3551(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+static ma_result ma_channel_converter_process_pcm_frames__passthrough(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
{
- switch (channels)
- {
- case 1:
- {
- channelMap[0] = MA_CHANNEL_MONO;
- } break;
+ MA_ASSERT(pConverter != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+ MA_ASSERT(pFramesIn != NULL);
- case 2:
- {
- channelMap[0] = MA_CHANNEL_LEFT;
- channelMap[1] = MA_CHANNEL_RIGHT;
- } break;
+ ma_copy_memory_64(pFramesOut, pFramesIn, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut));
+ return MA_SUCCESS;
+}
- case 3:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_FRONT_CENTER;
- } break;
+static ma_result ma_channel_converter_process_pcm_frames__simple_shuffle(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ ma_uint32 iFrame;
+ ma_uint32 iChannelIn;
- case 4:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_CENTER;
- channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[3] = MA_CHANNEL_BACK_CENTER;
- } break;
+ MA_ASSERT(pConverter != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+ MA_ASSERT(pFramesIn != NULL);
+ MA_ASSERT(pConverter->channelsIn == pConverter->channelsOut);
- case 5:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_FRONT_CENTER;
- channelMap[3] = MA_CHANNEL_BACK_LEFT;
- channelMap[4] = MA_CHANNEL_BACK_RIGHT;
- } break;
+ if (pConverter->format == ma_format_s16) {
+ /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut;
+ const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn;
- case 6:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_SIDE_LEFT;
- channelMap[2] = MA_CHANNEL_FRONT_CENTER;
- channelMap[3] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[4] = MA_CHANNEL_SIDE_RIGHT;
- channelMap[5] = MA_CHANNEL_BACK_CENTER;
- } break;
- }
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+ pFramesOutS16[pConverter->shuffleTable[iChannelIn]] = pFramesInS16[iChannelIn];
+ }
+ }
+ } else {
+ /* */ float* pFramesOutF32 = ( float*)pFramesOut;
+ const float* pFramesInF32 = (const float*)pFramesIn;
- /* Remainder. */
- if (channels > 8) {
- ma_uint32 iChannel;
- for (iChannel = 6; iChannel < MA_MAX_CHANNELS; ++iChannel) {
- channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-6));
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+ pFramesOutF32[pConverter->shuffleTable[iChannelIn]] = pFramesInF32[iChannelIn];
+ }
}
}
+
+ return MA_SUCCESS;
}
-void ma_get_standard_channel_map_flac(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+static ma_result ma_channel_converter_process_pcm_frames__simple_mono_expansion(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
{
- switch (channels)
- {
- case 1:
- {
- channelMap[0] = MA_CHANNEL_MONO;
- } break;
+ ma_uint64 iFrame;
- case 2:
- {
- channelMap[0] = MA_CHANNEL_LEFT;
- channelMap[1] = MA_CHANNEL_RIGHT;
- } break;
+ MA_ASSERT(pConverter != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+ MA_ASSERT(pFramesIn != NULL);
- case 3:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_FRONT_CENTER;
- } break;
+ if (pConverter->format == ma_format_s16) {
+ /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut;
+ const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn;
- case 4:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_LEFT;
- channelMap[3] = MA_CHANNEL_BACK_RIGHT;
- } break;
+ if (pConverter->channelsOut == 2) {
+ for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+ pFramesOutS16[iFrame*2 + 0] = pFramesInS16[iFrame];
+ pFramesOutS16[iFrame*2 + 1] = pFramesInS16[iFrame];
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+ ma_uint32 iChannel;
+ for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) {
+ pFramesOutS16[iFrame*pConverter->channelsOut + iChannel] = pFramesInS16[iFrame];
+ }
+ }
+ }
+ } else {
+ /* */ float* pFramesOutF32 = ( float*)pFramesOut;
+ const float* pFramesInF32 = (const float*)pFramesIn;
- case 5:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_FRONT_CENTER;
- channelMap[3] = MA_CHANNEL_BACK_LEFT;
- channelMap[4] = MA_CHANNEL_BACK_RIGHT;
- } break;
+ if (pConverter->channelsOut == 2) {
+ for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+ pFramesOutF32[iFrame*2 + 0] = pFramesInF32[iFrame];
+ pFramesOutF32[iFrame*2 + 1] = pFramesInF32[iFrame];
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+ ma_uint32 iChannel;
+ for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) {
+ pFramesOutF32[iFrame*pConverter->channelsOut + iChannel] = pFramesInF32[iFrame];
+ }
+ }
+ }
+ }
- case 6:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_FRONT_CENTER;
- channelMap[3] = MA_CHANNEL_LFE;
- channelMap[4] = MA_CHANNEL_BACK_LEFT;
- channelMap[5] = MA_CHANNEL_BACK_RIGHT;
- } break;
+ return MA_SUCCESS;
+}
- case 7:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_FRONT_CENTER;
- channelMap[3] = MA_CHANNEL_LFE;
- channelMap[4] = MA_CHANNEL_BACK_CENTER;
- channelMap[5] = MA_CHANNEL_SIDE_LEFT;
- channelMap[6] = MA_CHANNEL_SIDE_RIGHT;
- } break;
+static ma_result ma_channel_converter_process_pcm_frames__stereo_to_mono(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ ma_uint64 iFrame;
- case 8:
- default:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_FRONT_CENTER;
- channelMap[3] = MA_CHANNEL_LFE;
- channelMap[4] = MA_CHANNEL_BACK_LEFT;
- channelMap[5] = MA_CHANNEL_BACK_RIGHT;
- channelMap[6] = MA_CHANNEL_SIDE_LEFT;
- channelMap[7] = MA_CHANNEL_SIDE_RIGHT;
- } break;
- }
+ MA_ASSERT(pConverter != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+ MA_ASSERT(pFramesIn != NULL);
+ MA_ASSERT(pConverter->channelsIn == 2);
+ MA_ASSERT(pConverter->channelsOut == 1);
- /* Remainder. */
- if (channels > 8) {
- ma_uint32 iChannel;
- for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) {
- channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8));
+ if (pConverter->format == ma_format_s16) {
+ /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut;
+ const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn;
+
+ for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+ pFramesOutS16[iFrame] = (ma_int16)(((ma_int32)pFramesInS16[iFrame*2+0] + (ma_int32)pFramesInS16[iFrame*2+1]) / 2);
+ }
+ } else {
+ /* */ float* pFramesOutF32 = ( float*)pFramesOut;
+ const float* pFramesInF32 = (const float*)pFramesIn;
+
+ for (iFrame = 0; iFrame < frameCount; ++iFrame) {
+            pFramesOutF32[iFrame] = (pFramesInF32[iFrame*2+0] + pFramesInF32[iFrame*2+1]) * 0.5f;
}
}
+
+ return MA_SUCCESS;
}
-void ma_get_standard_channel_map_vorbis(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+static ma_result ma_channel_converter_process_pcm_frames__weights(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
{
- /* In Vorbis' type 0 channel mapping, the first two channels are not always the standard left/right - it will have the center speaker where the right usually goes. Why?! */
- switch (channels)
- {
- case 1:
- {
- channelMap[0] = MA_CHANNEL_MONO;
- } break;
+ ma_uint32 iFrame;
+ ma_uint32 iChannelIn;
+ ma_uint32 iChannelOut;
- case 2:
- {
- channelMap[0] = MA_CHANNEL_LEFT;
- channelMap[1] = MA_CHANNEL_RIGHT;
- } break;
+ MA_ASSERT(pConverter != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+ MA_ASSERT(pFramesIn != NULL);
- case 3:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_CENTER;
- channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
- } break;
+ /* This is the more complicated case. Each of the output channels is accumulated with 0 or more input channels. */
- case 4:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_LEFT;
- channelMap[3] = MA_CHANNEL_BACK_RIGHT;
- } break;
+ /* Clear. */
+ ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut));
- case 5:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_CENTER;
- channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[3] = MA_CHANNEL_BACK_LEFT;
- channelMap[4] = MA_CHANNEL_BACK_RIGHT;
- } break;
+ /* Accumulate. */
+ if (pConverter->format == ma_format_s16) {
+ /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut;
+ const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn;
- case 6:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_CENTER;
- channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[3] = MA_CHANNEL_BACK_LEFT;
- channelMap[4] = MA_CHANNEL_BACK_RIGHT;
- channelMap[5] = MA_CHANNEL_LFE;
- } break;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+ for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+ ma_int32 s = pFramesOutS16[iFrame*pConverter->channelsOut + iChannelOut];
+ s += (pFramesInS16[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.s16[iChannelIn][iChannelOut]) >> MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT;
- case 7:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_CENTER;
- channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[3] = MA_CHANNEL_SIDE_LEFT;
- channelMap[4] = MA_CHANNEL_SIDE_RIGHT;
- channelMap[5] = MA_CHANNEL_BACK_CENTER;
- channelMap[6] = MA_CHANNEL_LFE;
- } break;
+ pFramesOutS16[iFrame*pConverter->channelsOut + iChannelOut] = (ma_int16)ma_clamp(s, -32768, 32767);
+ }
+ }
+ }
+ } else {
+ /* */ float* pFramesOutF32 = ( float*)pFramesOut;
+ const float* pFramesInF32 = (const float*)pFramesIn;
- case 8:
- default:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_CENTER;
- channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[3] = MA_CHANNEL_SIDE_LEFT;
- channelMap[4] = MA_CHANNEL_SIDE_RIGHT;
- channelMap[5] = MA_CHANNEL_BACK_LEFT;
- channelMap[6] = MA_CHANNEL_BACK_RIGHT;
- channelMap[7] = MA_CHANNEL_LFE;
- } break;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) {
+ for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) {
+ pFramesOutF32[iFrame*pConverter->channelsOut + iChannelOut] += pFramesInF32[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.f32[iChannelIn][iChannelOut];
+ }
+ }
+ }
+ }
+
+ return MA_SUCCESS;
+}
+
+ma_result ma_channel_converter_process_pcm_frames(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+ if (pConverter == NULL) {
+ return MA_INVALID_ARGS;
}
- /* Remainder. */
- if (channels > 8) {
- ma_uint32 iChannel;
- for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) {
- channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8));
- }
+ if (pFramesOut == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ if (pFramesIn == NULL) {
+ ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut));
+ return MA_SUCCESS;
+ }
+
+ if (pConverter->isPassthrough) {
+ return ma_channel_converter_process_pcm_frames__passthrough(pConverter, pFramesOut, pFramesIn, frameCount);
+ } else if (pConverter->isSimpleShuffle) {
+ return ma_channel_converter_process_pcm_frames__simple_shuffle(pConverter, pFramesOut, pFramesIn, frameCount);
+ } else if (pConverter->isSimpleMonoExpansion) {
+ return ma_channel_converter_process_pcm_frames__simple_mono_expansion(pConverter, pFramesOut, pFramesIn, frameCount);
+ } else if (pConverter->isStereoToMono) {
+ return ma_channel_converter_process_pcm_frames__stereo_to_mono(pConverter, pFramesOut, pFramesIn, frameCount);
+ } else {
+ return ma_channel_converter_process_pcm_frames__weights(pConverter, pFramesOut, pFramesIn, frameCount);
}
}
-void ma_get_standard_channel_map_sound4(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
-{
- switch (channels)
- {
- case 1:
- {
- channelMap[0] = MA_CHANNEL_MONO;
- } break;
- case 2:
- {
- channelMap[0] = MA_CHANNEL_LEFT;
- channelMap[1] = MA_CHANNEL_RIGHT;
- } break;
+/**************************************************************************************************************************************************************
- case 3:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_CENTER;
- } break;
+Data Conversion
- case 4:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_LEFT;
- channelMap[3] = MA_CHANNEL_BACK_RIGHT;
- } break;
+**************************************************************************************************************************************************************/
+ma_data_converter_config ma_data_converter_config_init_default()
+{
+ ma_data_converter_config config;
+ MA_ZERO_OBJECT(&config);
- case 5:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_LEFT;
- channelMap[3] = MA_CHANNEL_BACK_RIGHT;
- channelMap[4] = MA_CHANNEL_FRONT_CENTER;
- } break;
+ config.ditherMode = ma_dither_mode_none;
+ config.resampling.algorithm = ma_resample_algorithm_linear;
+ config.resampling.allowDynamicSampleRate = MA_FALSE; /* Disable dynamic sample rates by default because dynamic rate adjustments should be quite rare and it allows an optimization for cases when the in and out sample rates are the same. */
- case 6:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_LEFT;
- channelMap[3] = MA_CHANNEL_BACK_RIGHT;
- channelMap[4] = MA_CHANNEL_FRONT_CENTER;
- channelMap[5] = MA_CHANNEL_LFE;
- } break;
+ /* Linear resampling defaults. */
+ config.resampling.linear.lpfOrder = 1;
+ config.resampling.linear.lpfNyquistFactor = 1;
- case 7:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_LEFT;
- channelMap[3] = MA_CHANNEL_BACK_RIGHT;
- channelMap[4] = MA_CHANNEL_FRONT_CENTER;
- channelMap[5] = MA_CHANNEL_BACK_CENTER;
- channelMap[6] = MA_CHANNEL_LFE;
- } break;
+ /* Speex resampling defaults. */
+ config.resampling.speex.quality = 3;
- case 8:
- default:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_LEFT;
- channelMap[3] = MA_CHANNEL_BACK_RIGHT;
- channelMap[4] = MA_CHANNEL_FRONT_CENTER;
- channelMap[5] = MA_CHANNEL_LFE;
- channelMap[6] = MA_CHANNEL_SIDE_LEFT;
- channelMap[7] = MA_CHANNEL_SIDE_RIGHT;
- } break;
- }
+ return config;
+}
- /* Remainder. */
- if (channels > 8) {
- ma_uint32 iChannel;
- for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) {
- channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8));
- }
- }
+ma_data_converter_config ma_data_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channelsIn, ma_uint32 channelsOut, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut)
+{
+ ma_data_converter_config config = ma_data_converter_config_init_default();
+ config.formatIn = formatIn;
+ config.formatOut = formatOut;
+ config.channelsIn = channelsIn;
+ config.channelsOut = channelsOut;
+ config.sampleRateIn = sampleRateIn;
+ config.sampleRateOut = sampleRateOut;
+
+ return config;
}
-void ma_get_standard_channel_map_sndio(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+ma_result ma_data_converter_init(const ma_data_converter_config* pConfig, ma_data_converter* pConverter)
{
- switch (channels)
- {
- case 1:
- {
- channelMap[0] = MA_CHANNEL_MONO;
- } break;
+ ma_result result;
+ ma_format midFormat;
- case 2:
- {
- channelMap[0] = MA_CHANNEL_LEFT;
- channelMap[1] = MA_CHANNEL_RIGHT;
- } break;
+ if (pConverter == NULL) {
+ return MA_INVALID_ARGS;
+ }
- case 3:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_FRONT_CENTER;
- } break;
+ MA_ZERO_OBJECT(pConverter);
- case 4:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_LEFT;
- channelMap[3] = MA_CHANNEL_BACK_RIGHT;
- } break;
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
- case 5:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_LEFT;
- channelMap[3] = MA_CHANNEL_BACK_RIGHT;
- channelMap[4] = MA_CHANNEL_FRONT_CENTER;
- } break;
+ pConverter->config = *pConfig;
- case 6:
- default:
- {
- channelMap[0] = MA_CHANNEL_FRONT_LEFT;
- channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
- channelMap[2] = MA_CHANNEL_BACK_LEFT;
- channelMap[3] = MA_CHANNEL_BACK_RIGHT;
- channelMap[4] = MA_CHANNEL_FRONT_CENTER;
- channelMap[5] = MA_CHANNEL_LFE;
- } break;
+ /*
+ We want to avoid as much data conversion as possible. The channel converter and resampler both support s16 and f32 natively. We need to decide
+ on the format to use for this stage. We call this the mid format because it's used in the middle stage of the conversion pipeline. If the output
+ format is either s16 or f32 we use that one. If that is not the case it will do the same thing for the input format. If it's neither we just
+ use f32.
+ */
+ /* */ if (pConverter->config.formatOut == ma_format_s16 || pConverter->config.formatOut == ma_format_f32) {
+ midFormat = pConverter->config.formatOut;
+ } else if (pConverter->config.formatIn == ma_format_s16 || pConverter->config.formatIn == ma_format_f32) {
+ midFormat = pConverter->config.formatIn;
+ } else {
+ midFormat = ma_format_f32;
}
- /* Remainder. */
- if (channels > 6) {
- ma_uint32 iChannel;
- for (iChannel = 6; iChannel < MA_MAX_CHANNELS; ++iChannel) {
- channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-6));
- }
+ if (pConverter->config.formatIn != midFormat) {
+ pConverter->hasPreFormatConversion = MA_TRUE;
+ }
+ if (pConverter->config.formatOut != midFormat) {
+ pConverter->hasPostFormatConversion = MA_TRUE;
}
-}
-void ma_get_standard_channel_map(ma_standard_channel_map standardChannelMap, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
-{
- switch (standardChannelMap)
+
+ /* Channel converter. We always initialize this, but we check if it configures itself as a passthrough to determine whether or not it's needed. */
{
- case ma_standard_channel_map_alsa:
- {
- ma_get_standard_channel_map_alsa(channels, channelMap);
- } break;
+ ma_uint32 iChannelIn;
+ ma_uint32 iChannelOut;
+ ma_channel_converter_config channelConverterConfig;
- case ma_standard_channel_map_rfc3551:
- {
- ma_get_standard_channel_map_rfc3551(channels, channelMap);
- } break;
+ channelConverterConfig = ma_channel_converter_config_init(midFormat, pConverter->config.channelsIn, pConverter->config.channelMapIn, pConverter->config.channelsOut, pConverter->config.channelMapOut, pConverter->config.channelMixMode);
+
+ /* Channel weights. */
+ for (iChannelIn = 0; iChannelIn < pConverter->config.channelsIn; iChannelIn += 1) {
+ for (iChannelOut = 0; iChannelOut < pConverter->config.channelsOut; iChannelOut += 1) {
+ channelConverterConfig.weights[iChannelIn][iChannelOut] = pConverter->config.channelWeights[iChannelIn][iChannelOut];
+ }
+ }
+
+ result = ma_channel_converter_init(&channelConverterConfig, &pConverter->channelConverter);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- case ma_standard_channel_map_flac:
- {
- ma_get_standard_channel_map_flac(channels, channelMap);
- } break;
+ /* If the channel converter is not a passthrough we need to enable it. Otherwise we can skip it. */
+ if (pConverter->channelConverter.isPassthrough == MA_FALSE) {
+ pConverter->hasChannelConverter = MA_TRUE;
+ }
+ }
- case ma_standard_channel_map_vorbis:
- {
- ma_get_standard_channel_map_vorbis(channels, channelMap);
- } break;
- case ma_standard_channel_map_sound4:
- {
- ma_get_standard_channel_map_sound4(channels, channelMap);
- } break;
-
- case ma_standard_channel_map_sndio:
- {
- ma_get_standard_channel_map_sndio(channels, channelMap);
- } break;
+ /* Always enable dynamic sample rates if the input sample rate is different because we're always going to need a resampler in this case anyway. */
+ if (pConverter->config.resampling.allowDynamicSampleRate == MA_FALSE) {
+ pConverter->config.resampling.allowDynamicSampleRate = pConverter->config.sampleRateIn != pConverter->config.sampleRateOut;
+ }
- case ma_standard_channel_map_microsoft:
- default:
- {
- ma_get_standard_channel_map_microsoft(channels, channelMap);
- } break;
+ /* Resampler. */
+ if (pConverter->config.resampling.allowDynamicSampleRate) {
+ ma_resampler_config resamplerConfig;
+ ma_uint32 resamplerChannels;
+
+        /* The resampler is the most expensive part of the conversion process, so we need to do it at the stage where the channel count is at its lowest. */
+ if (pConverter->config.channelsIn < pConverter->config.channelsOut) {
+ resamplerChannels = pConverter->config.channelsIn;
+ } else {
+ resamplerChannels = pConverter->config.channelsOut;
+ }
+
+ resamplerConfig = ma_resampler_config_init(midFormat, resamplerChannels, pConverter->config.sampleRateIn, pConverter->config.sampleRateOut, pConverter->config.resampling.algorithm);
+ resamplerConfig.linear.lpfOrder = pConverter->config.resampling.linear.lpfOrder;
+ resamplerConfig.linear.lpfNyquistFactor = pConverter->config.resampling.linear.lpfNyquistFactor;
+ resamplerConfig.speex.quality = pConverter->config.resampling.speex.quality;
+
+ result = ma_resampler_init(&resamplerConfig, &pConverter->resampler);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ pConverter->hasResampler = MA_TRUE;
}
+
+ /* We can enable passthrough optimizations if applicable. Note that we'll only be able to do this if the sample rate is static. */
+ if (pConverter->hasPreFormatConversion == MA_FALSE &&
+ pConverter->hasPostFormatConversion == MA_FALSE &&
+ pConverter->hasChannelConverter == MA_FALSE &&
+ pConverter->hasResampler == MA_FALSE) {
+ pConverter->isPassthrough = MA_TRUE;
+ }
+
+ return MA_SUCCESS;
}
-void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels)
+void ma_data_converter_uninit(ma_data_converter* pConverter)
{
- if (pOut != NULL && pIn != NULL && channels > 0) {
- ma_copy_memory(pOut, pIn, sizeof(*pOut) * channels);
+ if (pConverter == NULL) {
+ return;
+ }
+
+ if (pConverter->hasResampler) {
+ ma_resampler_uninit(&pConverter->resampler);
}
}
-ma_bool32 ma_channel_map_valid(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS])
+static ma_result ma_data_converter_process_pcm_frames__passthrough(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- if (channelMap == NULL) {
- return MA_FALSE;
+ ma_uint64 frameCountIn;
+ ma_uint64 frameCountOut;
+ ma_uint64 frameCount;
+
+ MA_ASSERT(pConverter != NULL);
+
+ frameCountIn = 0;
+ if (pFrameCountIn != NULL) {
+ frameCountIn = *pFrameCountIn;
}
- /* A channel count of 0 is invalid. */
- if (channels == 0) {
- return MA_FALSE;
+ frameCountOut = 0;
+ if (pFrameCountOut != NULL) {
+ frameCountOut = *pFrameCountOut;
}
- /* It does not make sense to have a mono channel when there is more than 1 channel. */
- if (channels > 1) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- if (channelMap[iChannel] == MA_CHANNEL_MONO) {
- return MA_FALSE;
- }
+ frameCount = ma_min(frameCountIn, frameCountOut);
+
+ if (pFramesOut != NULL) {
+ if (pFramesIn != NULL) {
+ ma_copy_memory_64(pFramesOut, pFramesIn, frameCount * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut));
+ } else {
+ ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut));
}
}
- return MA_TRUE;
+ if (pFrameCountIn != NULL) {
+ *pFrameCountIn = frameCount;
+ }
+ if (pFrameCountOut != NULL) {
+ *pFrameCountOut = frameCount;
+ }
+
+ return MA_SUCCESS;
}
-ma_bool32 ma_channel_map_equal(ma_uint32 channels, const ma_channel channelMapA[MA_MAX_CHANNELS], const ma_channel channelMapB[MA_MAX_CHANNELS])
+static ma_result ma_data_converter_process_pcm_frames__format_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- ma_uint32 iChannel;
+ ma_uint64 frameCountIn;
+ ma_uint64 frameCountOut;
+ ma_uint64 frameCount;
- if (channelMapA == channelMapB) {
- return MA_FALSE;
+ MA_ASSERT(pConverter != NULL);
+
+ frameCountIn = 0;
+ if (pFrameCountIn != NULL) {
+ frameCountIn = *pFrameCountIn;
}
- if (channels == 0 || channels > MA_MAX_CHANNELS) {
- return MA_FALSE;
+ frameCountOut = 0;
+ if (pFrameCountOut != NULL) {
+ frameCountOut = *pFrameCountOut;
}
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- if (channelMapA[iChannel] != channelMapB[iChannel]) {
- return MA_FALSE;
+ frameCount = ma_min(frameCountIn, frameCountOut);
+
+ if (pFramesOut != NULL) {
+ if (pFramesIn != NULL) {
+ ma_convert_pcm_frames_format(pFramesOut, pConverter->config.formatOut, pFramesIn, pConverter->config.formatIn, frameCount, pConverter->config.channelsIn, pConverter->config.ditherMode);
+ } else {
+ ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut));
}
}
- return MA_TRUE;
+ if (pFrameCountIn != NULL) {
+ *pFrameCountIn = frameCount;
+ }
+ if (pFrameCountOut != NULL) {
+ *pFrameCountOut = frameCount;
+ }
+
+ return MA_SUCCESS;
}
-ma_bool32 ma_channel_map_blank(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS])
+
+static ma_result ma_data_converter_process_pcm_frames__resample_with_format_conversion(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- ma_uint32 iChannel;
+ ma_result result = MA_SUCCESS;
+ ma_uint64 frameCountIn;
+ ma_uint64 frameCountOut;
+ ma_uint64 framesProcessedIn;
+ ma_uint64 framesProcessedOut;
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- if (channelMap[iChannel] != MA_CHANNEL_NONE) {
- return MA_FALSE;
- }
- }
+ MA_ASSERT(pConverter != NULL);
- return MA_TRUE;
-}
+ frameCountIn = 0;
+ if (pFrameCountIn != NULL) {
+ frameCountIn = *pFrameCountIn;
+ }
-ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS], ma_channel channelPosition)
-{
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- if (channelMap[iChannel] == channelPosition) {
- return MA_TRUE;
- }
+ frameCountOut = 0;
+ if (pFrameCountOut != NULL) {
+ frameCountOut = *pFrameCountOut;
}
- return MA_FALSE;
-}
+ framesProcessedIn = 0;
+ framesProcessedOut = 0;
+ while (framesProcessedOut < frameCountOut) {
+ ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ const ma_uint32 tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels);
+ const void* pFramesInThisIteration;
+ /* */ void* pFramesOutThisIteration;
+ ma_uint64 frameCountInThisIteration;
+ ma_uint64 frameCountOutThisIteration;
+ if (pFramesIn != NULL) {
+ pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn));
+ } else {
+ pFramesInThisIteration = NULL;
+ }
+ if (pFramesOut != NULL) {
+ pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut));
+ } else {
+ pFramesOutThisIteration = NULL;
+ }
-/**************************************************************************************************************************************************************
+ /* Do a pre format conversion if necessary. */
+ if (pConverter->hasPreFormatConversion) {
+ ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ const ma_uint32 tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels);
-Format Conversion.
+ frameCountInThisIteration = (frameCountIn - framesProcessedIn);
+ if (frameCountInThisIteration > tempBufferInCap) {
+ frameCountInThisIteration = tempBufferInCap;
+ }
-**************************************************************************************************************************************************************/
-void ma_copy_memory_64(void* dst, const void* src, ma_uint64 sizeInBytes)
-{
-#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX
- ma_copy_memory(dst, src, (size_t)sizeInBytes);
-#else
- while (sizeInBytes > 0) {
- ma_uint64 bytesToCopyNow = sizeInBytes;
- if (bytesToCopyNow > MA_SIZE_MAX) {
- bytesToCopyNow = MA_SIZE_MAX;
- }
+ if (pConverter->hasPostFormatConversion) {
+ if (frameCountInThisIteration > tempBufferOutCap) {
+ frameCountInThisIteration = tempBufferOutCap;
+ }
+ }
- ma_copy_memory(dst, src, (size_t)bytesToCopyNow); /* Safe cast to size_t. */
+ if (pFramesInThisIteration != NULL) {
+ ma_convert_pcm_frames_format(pTempBufferIn, pConverter->resampler.config.format, pFramesInThisIteration, pConverter->config.formatIn, frameCountInThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode);
+ } else {
+ MA_ZERO_MEMORY(pTempBufferIn, sizeof(pTempBufferIn));
+ }
- sizeInBytes -= bytesToCopyNow;
- dst = ( void*)(( ma_uint8*)dst + bytesToCopyNow);
- src = (const void*)((const ma_uint8*)src + bytesToCopyNow);
- }
-#endif
-}
+ frameCountOutThisIteration = (frameCountOut - framesProcessedOut);
-void ma_zero_memory_64(void* dst, ma_uint64 sizeInBytes)
-{
-#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX
- ma_zero_memory(dst, (size_t)sizeInBytes);
-#else
- while (sizeInBytes > 0) {
- ma_uint64 bytesToZeroNow = sizeInBytes;
- if (bytesToZeroNow > MA_SIZE_MAX) {
- bytesToZeroNow = MA_SIZE_MAX;
- }
+ if (pConverter->hasPostFormatConversion) {
+ /* Both input and output conversion required. Output to the temp buffer. */
+ if (frameCountOutThisIteration > tempBufferOutCap) {
+ frameCountOutThisIteration = tempBufferOutCap;
+ }
- ma_zero_memory(dst, (size_t)bytesToZeroNow); /* Safe cast to size_t. */
+ result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferIn, &frameCountInThisIteration, pTempBufferOut, &frameCountOutThisIteration);
+ } else {
+ /* Only pre-format required. Output straight to the output buffer. */
+ result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferIn, &frameCountInThisIteration, pFramesOutThisIteration, &frameCountOutThisIteration);
+ }
- sizeInBytes -= bytesToZeroNow;
- dst = (void*)((ma_uint8*)dst + bytesToZeroNow);
- }
-#endif
-}
+ if (result != MA_SUCCESS) {
+ break;
+ }
+ } else {
+ /* No pre-format required. Just read straight from the input buffer. */
+ MA_ASSERT(pConverter->hasPostFormatConversion == MA_TRUE);
+ frameCountInThisIteration = (frameCountIn - framesProcessedIn);
+ frameCountOutThisIteration = (frameCountOut - framesProcessedOut);
+ if (frameCountOutThisIteration > tempBufferOutCap) {
+ frameCountOutThisIteration = tempBufferOutCap;
+ }
-/* u8 */
-void ma_pcm_u8_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- (void)ditherMode;
- ma_copy_memory_64(dst, src, count * sizeof(ma_uint8));
-}
+ result = ma_resampler_process_pcm_frames(&pConverter->resampler, pFramesInThisIteration, &frameCountInThisIteration, pTempBufferOut, &frameCountOutThisIteration);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+ }
+ /* If we are doing a post format conversion we need to do that now. */
+ if (pConverter->hasPostFormatConversion) {
+ if (pFramesOutThisIteration != NULL) {
+ ma_convert_pcm_frames_format(pFramesOutThisIteration, pConverter->config.formatOut, pTempBufferOut, pConverter->resampler.config.format, frameCountOutThisIteration, pConverter->resampler.config.channels, pConverter->config.ditherMode);
+ }
+ }
-void ma_pcm_u8_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_int16* dst_s16 = (ma_int16*)dst;
- const ma_uint8* src_u8 = (const ma_uint8*)src;
+ framesProcessedIn += frameCountInThisIteration;
+ framesProcessedOut += frameCountOutThisIteration;
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- ma_int16 x = src_u8[i];
- x = x - 128;
- x = x << 8;
- dst_s16[i] = x;
+ MA_ASSERT(framesProcessedIn <= frameCountIn);
+ MA_ASSERT(framesProcessedOut <= frameCountOut);
+
+ if (frameCountOutThisIteration == 0) {
+ break; /* Consumed all of our input data. */
+ }
}
- (void)ditherMode;
-}
+ if (pFrameCountIn != NULL) {
+ *pFrameCountIn = framesProcessedIn;
+ }
+ if (pFrameCountOut != NULL) {
+ *pFrameCountOut = framesProcessedOut;
+ }
-void ma_pcm_u8_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode);
+ return result;
}
-#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_u8_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_u8_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_u8_to_s16__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s16__avx2(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_NEON)
-void ma_pcm_u8_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static ma_result ma_data_converter_process_pcm_frames__resample_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode);
-}
-#endif
+ MA_ASSERT(pConverter != NULL);
-void ma_pcm_u8_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode);
-#else
- ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode);
-#endif
+ if (pConverter->hasPreFormatConversion == MA_FALSE && pConverter->hasPostFormatConversion == MA_FALSE) {
+ /* Neither pre- nor post-format required. This is simple case where only resampling is required. */
+ return ma_resampler_process_pcm_frames(&pConverter->resampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ } else {
+ /* Format conversion required. */
+ return ma_data_converter_process_pcm_frames__resample_with_format_conversion(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ }
}
-
-void ma_pcm_u8_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static ma_result ma_data_converter_process_pcm_frames__channels_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- ma_uint8* dst_s24 = (ma_uint8*)dst;
- const ma_uint8* src_u8 = (const ma_uint8*)src;
+ ma_result result;
+ ma_uint64 frameCountIn;
+ ma_uint64 frameCountOut;
+ ma_uint64 frameCount;
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- ma_int16 x = src_u8[i];
- x = x - 128;
+ MA_ASSERT(pConverter != NULL);
- dst_s24[i*3+0] = 0;
- dst_s24[i*3+1] = 0;
- dst_s24[i*3+2] = (ma_uint8)((ma_int8)x);
+ frameCountIn = 0;
+ if (pFrameCountIn != NULL) {
+ frameCountIn = *pFrameCountIn;
}
- (void)ditherMode;
-}
+ frameCountOut = 0;
+ if (pFrameCountOut != NULL) {
+ frameCountOut = *pFrameCountOut;
+ }
-void ma_pcm_u8_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode);
-}
+ frameCount = ma_min(frameCountIn, frameCountOut);
-#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_u8_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_u8_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_u8_to_s24__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s24__avx2(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_NEON)
-void ma_pcm_u8_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode);
-}
-#endif
+ if (pConverter->hasPreFormatConversion == MA_FALSE && pConverter->hasPostFormatConversion == MA_FALSE) {
+ /* No format conversion required. */
+ result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pFramesOut, pFramesIn, frameCount);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ } else {
+ /* Format conversion required. */
+ ma_uint64 framesProcessed = 0;
+
+ while (framesProcessed < frameCount) {
+ ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ const ma_uint32 tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut);
+ const void* pFramesInThisIteration;
+ /* */ void* pFramesOutThisIteration;
+ ma_uint64 frameCountThisIteration;
+
+ if (pFramesIn != NULL) {
+ pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessed * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn));
+ } else {
+ pFramesInThisIteration = NULL;
+ }
-void ma_pcm_u8_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode);
-#else
- ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode);
-#endif
-}
+ if (pFramesOut != NULL) {
+ pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessed * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut));
+ } else {
+ pFramesOutThisIteration = NULL;
+ }
+ /* Do a pre format conversion if necessary. */
+ if (pConverter->hasPreFormatConversion) {
+ ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+ const ma_uint32 tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsIn);
-void ma_pcm_u8_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_int32* dst_s32 = (ma_int32*)dst;
- const ma_uint8* src_u8 = (const ma_uint8*)src;
+ frameCountThisIteration = (frameCount - framesProcessed);
+ if (frameCountThisIteration > tempBufferInCap) {
+ frameCountThisIteration = tempBufferInCap;
+ }
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- ma_int32 x = src_u8[i];
- x = x - 128;
- x = x << 24;
- dst_s32[i] = x;
- }
+ if (pConverter->hasPostFormatConversion) {
+ if (frameCountThisIteration > tempBufferOutCap) {
+ frameCountThisIteration = tempBufferOutCap;
+ }
+ }
- (void)ditherMode;
-}
+ if (pFramesInThisIteration != NULL) {
+ ma_convert_pcm_frames_format(pTempBufferIn, pConverter->channelConverter.format, pFramesInThisIteration, pConverter->config.formatIn, frameCountThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode);
+ } else {
+ MA_ZERO_MEMORY(pTempBufferIn, sizeof(pTempBufferIn));
+ }
-void ma_pcm_u8_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode);
-}
+ if (pConverter->hasPostFormatConversion) {
+ /* Both input and output conversion required. Output to the temp buffer. */
+ result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferOut, pTempBufferIn, frameCountThisIteration);
+ } else {
+ /* Only pre-format required. Output straight to the output buffer. */
+ result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pFramesOutThisIteration, pTempBufferIn, frameCountThisIteration);
+ }
-#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_u8_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_u8_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_u8_to_s32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s32__avx2(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_NEON)
-void ma_pcm_u8_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode);
-}
-#endif
+ if (result != MA_SUCCESS) {
+ break;
+ }
+ } else {
+ /* No pre-format required. Just read straight from the input buffer. */
+ MA_ASSERT(pConverter->hasPostFormatConversion == MA_TRUE);
-void ma_pcm_u8_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode);
-#else
- ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode);
-#endif
-}
+ frameCountThisIteration = (frameCount - framesProcessed);
+ if (frameCountThisIteration > tempBufferOutCap) {
+ frameCountThisIteration = tempBufferOutCap;
+ }
+ result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferOut, pFramesInThisIteration, frameCountThisIteration);
+ if (result != MA_SUCCESS) {
+ break;
+ }
+ }
-void ma_pcm_u8_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- float* dst_f32 = (float*)dst;
- const ma_uint8* src_u8 = (const ma_uint8*)src;
+ /* If we are doing a post format conversion we need to do that now. */
+ if (pConverter->hasPostFormatConversion) {
+ if (pFramesOutThisIteration != NULL) {
+ ma_convert_pcm_frames_format(pFramesOutThisIteration, pConverter->config.formatOut, pTempBufferOut, pConverter->channelConverter.format, frameCountThisIteration, pConverter->channelConverter.channelsOut, pConverter->config.ditherMode);
+ }
+ }
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- float x = (float)src_u8[i];
- x = x * 0.00784313725490196078f; /* 0..255 to 0..2 */
- x = x - 1; /* 0..2 to -1..1 */
+ framesProcessed += frameCountThisIteration;
+ }
+ }
- dst_f32[i] = x;
+ if (pFrameCountIn != NULL) {
+ *pFrameCountIn = frameCount;
+ }
+ if (pFrameCountOut != NULL) {
+ *pFrameCountOut = frameCount;
}
- (void)ditherMode;
+ return MA_SUCCESS;
}
-void ma_pcm_u8_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static ma_result ma_data_converter_process_pcm_frames__resampling_first(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode);
-}
+ ma_result result;
+ ma_uint64 frameCountIn;
+ ma_uint64 frameCountOut;
+ ma_uint64 framesProcessedIn;
+ ma_uint64 framesProcessedOut;
+ ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format. */
+ ma_uint64 tempBufferInCap;
+ ma_uint8 pTempBufferMid[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format, channel converter input format. */
+ ma_uint64 tempBufferMidCap;
+ ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In channel converter output format. */
+ ma_uint64 tempBufferOutCap;
+
+ MA_ASSERT(pConverter != NULL);
+ MA_ASSERT(pConverter->resampler.config.format == pConverter->channelConverter.format);
+ MA_ASSERT(pConverter->resampler.config.channels == pConverter->channelConverter.channelsIn);
+ MA_ASSERT(pConverter->resampler.config.channels < pConverter->channelConverter.channelsOut);
+
+ frameCountIn = 0;
+ if (pFrameCountIn != NULL) {
+ frameCountIn = *pFrameCountIn;
+ }
+
+ frameCountOut = 0;
+ if (pFrameCountOut != NULL) {
+ frameCountOut = *pFrameCountOut;
+ }
-#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_u8_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_u8_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_u8_to_f32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_f32__avx2(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_NEON)
-void ma_pcm_u8_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode);
-}
-#endif
+ framesProcessedIn = 0;
+ framesProcessedOut = 0;
-void ma_pcm_u8_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode);
-#else
- ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode);
-#endif
-}
+ tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels);
+ tempBufferMidCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels);
+ tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut);
+ while (framesProcessedOut < frameCountOut) {
+ ma_uint64 frameCountInThisIteration;
+ ma_uint64 frameCountOutThisIteration;
+ const void* pRunningFramesIn = NULL;
+ void* pRunningFramesOut = NULL;
+ const void* pResampleBufferIn;
+ void* pChannelsBufferOut;
+ if (pFramesIn != NULL) {
+ pRunningFramesIn = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn));
+ }
+ if (pFramesOut != NULL) {
+ pRunningFramesOut = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut));
+ }
-void ma_pcm_interleave_u8__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
-{
- ma_uint8* dst_u8 = (ma_uint8*)dst;
- const ma_uint8** src_u8 = (const ma_uint8**)src;
+ /* Run input data through the resampler and output it to the temporary buffer. */
+ frameCountInThisIteration = (frameCountIn - framesProcessedIn);
- ma_uint64 iFrame;
- for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; iChannel += 1) {
- dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame];
+ if (pConverter->hasPreFormatConversion) {
+ if (frameCountInThisIteration > tempBufferInCap) {
+ frameCountInThisIteration = tempBufferInCap;
+ }
}
- }
-}
-void ma_pcm_interleave_u8__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
-{
- ma_uint8* dst_u8 = (ma_uint8*)dst;
- const ma_uint8** src_u8 = (const ma_uint8**)src;
+ frameCountOutThisIteration = (frameCountOut - framesProcessedOut);
+ if (frameCountOutThisIteration > tempBufferMidCap) {
+ frameCountOutThisIteration = tempBufferMidCap;
+ }
- if (channels == 1) {
- ma_copy_memory_64(dst, src[0], frameCount * sizeof(ma_uint8));
- } else if (channels == 2) {
- ma_uint64 iFrame;
- for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
- dst_u8[iFrame*2 + 0] = src_u8[0][iFrame];
- dst_u8[iFrame*2 + 1] = src_u8[1][iFrame];
+ /* We can't read more frames than can fit in the output buffer. */
+ if (pConverter->hasPostFormatConversion) {
+ if (frameCountOutThisIteration > tempBufferOutCap) {
+ frameCountOutThisIteration = tempBufferOutCap;
+ }
}
- } else {
- ma_uint64 iFrame;
- for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; iChannel += 1) {
- dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame];
+
+ /* We need to ensure we don't try to process too many input frames that we run out of room in the output buffer. If this happens we'll end up glitching. */
+ {
+ ma_uint64 requiredInputFrameCount = ma_resampler_get_required_input_frame_count(&pConverter->resampler, frameCountOutThisIteration);
+ if (frameCountInThisIteration > requiredInputFrameCount) {
+ frameCountInThisIteration = requiredInputFrameCount;
}
}
- }
-}
-void ma_pcm_interleave_u8(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
-{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_interleave_u8__reference(dst, src, frameCount, channels);
-#else
- ma_pcm_interleave_u8__optimized(dst, src, frameCount, channels);
-#endif
-}
+ if (pConverter->hasPreFormatConversion) {
+ if (pFramesIn != NULL) {
+ ma_convert_pcm_frames_format(pTempBufferIn, pConverter->resampler.config.format, pRunningFramesIn, pConverter->config.formatIn, frameCountInThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode);
+ pResampleBufferIn = pTempBufferIn;
+ } else {
+ pResampleBufferIn = NULL;
+ }
+ } else {
+ pResampleBufferIn = pRunningFramesIn;
+ }
+ result = ma_resampler_process_pcm_frames(&pConverter->resampler, pResampleBufferIn, &frameCountInThisIteration, pTempBufferMid, &frameCountOutThisIteration);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-void ma_pcm_deinterleave_u8__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
-{
- ma_uint8** dst_u8 = (ma_uint8**)dst;
- const ma_uint8* src_u8 = (const ma_uint8*)src;
- ma_uint64 iFrame;
- for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; iChannel += 1) {
- dst_u8[iChannel][iFrame] = src_u8[iFrame*channels + iChannel];
+ /*
+ The input data has been resampled so now we need to run it through the channel converter. The input data is always contained in pTempBufferMid. We only need to do
+ this part if we have an output buffer.
+ */
+ if (pFramesOut != NULL) {
+ if (pConverter->hasPostFormatConversion) {
+ pChannelsBufferOut = pTempBufferOut;
+ } else {
+ pChannelsBufferOut = pRunningFramesOut;
+ }
+
+ result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pChannelsBufferOut, pTempBufferMid, frameCountOutThisIteration);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ /* Finally we do post format conversion. */
+ if (pConverter->hasPostFormatConversion) {
+ ma_convert_pcm_frames_format(pRunningFramesOut, pConverter->config.formatOut, pChannelsBufferOut, pConverter->channelConverter.format, frameCountOutThisIteration, pConverter->channelConverter.channelsOut, pConverter->config.ditherMode);
+ }
+ }
+
+
+ framesProcessedIn += frameCountInThisIteration;
+ framesProcessedOut += frameCountOutThisIteration;
+
+ MA_ASSERT(framesProcessedIn <= frameCountIn);
+ MA_ASSERT(framesProcessedOut <= frameCountOut);
+
+ if (frameCountOutThisIteration == 0) {
+ break; /* Consumed all of our input data. */
}
}
-}
-void ma_pcm_deinterleave_u8__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
-{
- ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels);
+ if (pFrameCountIn != NULL) {
+ *pFrameCountIn = framesProcessedIn;
+ }
+ if (pFrameCountOut != NULL) {
+ *pFrameCountOut = framesProcessedOut;
+ }
+
+ return MA_SUCCESS;
}
-void ma_pcm_deinterleave_u8(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+static ma_result ma_data_converter_process_pcm_frames__channels_first(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels);
-#else
- ma_pcm_deinterleave_u8__optimized(dst, src, frameCount, channels);
-#endif
-}
+ ma_result result;
+ ma_uint64 frameCountIn;
+ ma_uint64 frameCountOut;
+ ma_uint64 framesProcessedIn;
+ ma_uint64 framesProcessedOut;
+ ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format. */
+ ma_uint64 tempBufferInCap;
+ ma_uint8 pTempBufferMid[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format, channel converter input format. */
+ ma_uint64 tempBufferMidCap;
+ ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In channel converter output format. */
+ ma_uint64 tempBufferOutCap;
+
+ MA_ASSERT(pConverter != NULL);
+ MA_ASSERT(pConverter->resampler.config.format == pConverter->channelConverter.format);
+ MA_ASSERT(pConverter->resampler.config.channels == pConverter->channelConverter.channelsOut);
+ MA_ASSERT(pConverter->resampler.config.channels < pConverter->channelConverter.channelsIn);
+
+ frameCountIn = 0;
+ if (pFrameCountIn != NULL) {
+ frameCountIn = *pFrameCountIn;
+ }
+
+ frameCountOut = 0;
+ if (pFrameCountOut != NULL) {
+ frameCountOut = *pFrameCountOut;
+ }
+ framesProcessedIn = 0;
+ framesProcessedOut = 0;
-/* s16 */
-void ma_pcm_s16_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_uint8* dst_u8 = (ma_uint8*)dst;
- const ma_int16* src_s16 = (const ma_int16*)src;
+ tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsIn);
+ tempBufferMidCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut);
+ tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels);
- if (ditherMode == ma_dither_mode_none) {
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- ma_int16 x = src_s16[i];
- x = x >> 8;
- x = x + 128;
- dst_u8[i] = (ma_uint8)x;
+ while (framesProcessedOut < frameCountOut) {
+ ma_uint64 frameCountInThisIteration;
+ ma_uint64 frameCountOutThisIteration;
+ const void* pRunningFramesIn = NULL;
+ void* pRunningFramesOut = NULL;
+ const void* pChannelsBufferIn;
+ void* pResampleBufferOut;
+
+ if (pFramesIn != NULL) {
+ pRunningFramesIn = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn));
+ }
+ if (pFramesOut != NULL) {
+ pRunningFramesOut = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut));
}
- } else {
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- ma_int16 x = src_s16[i];
- /* Dither. Don't overflow. */
- ma_int32 dither = ma_dither_s32(ditherMode, -0x80, 0x7F);
- if ((x + dither) <= 0x7FFF) {
- x = (ma_int16)(x + dither);
+ /* Run input data through the channel converter and output it to the temporary buffer. */
+ frameCountInThisIteration = (frameCountIn - framesProcessedIn);
+
+ if (pConverter->hasPreFormatConversion) {
+ if (frameCountInThisIteration > tempBufferInCap) {
+ frameCountInThisIteration = tempBufferInCap;
+ }
+
+ if (pRunningFramesIn != NULL) {
+ ma_convert_pcm_frames_format(pTempBufferIn, pConverter->channelConverter.format, pRunningFramesIn, pConverter->config.formatIn, frameCountInThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode);
+ pChannelsBufferIn = pTempBufferIn;
} else {
- x = 0x7FFF;
+ pChannelsBufferIn = NULL;
}
+ } else {
+ pChannelsBufferIn = pRunningFramesIn;
+ }
- x = x >> 8;
- x = x + 128;
- dst_u8[i] = (ma_uint8)x;
+ /*
+ We can't convert more frames than will fit in the output buffer. We shouldn't actually need to do this check because the channel count is always reduced
+ in this case which means we should always have capacity, but I'm leaving it here just for safety for future maintenance.
+ */
+ if (frameCountInThisIteration > tempBufferMidCap) {
+ frameCountInThisIteration = tempBufferMidCap;
}
- }
-}
-void ma_pcm_s16_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode);
-}
+ /*
+ Make sure we don't read any more input frames than we need to fill the output frame count. If we do this we will end up in a situation where we lose some
+ input samples and will end up glitching.
+ */
+ frameCountOutThisIteration = (frameCountOut - framesProcessedOut);
+ if (frameCountOutThisIteration > tempBufferMidCap) {
+ frameCountOutThisIteration = tempBufferMidCap;
+ }
-#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s16_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s16_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s16_to_u8__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_u8__avx2(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s16_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode);
-}
-#endif
+ if (pConverter->hasPostFormatConversion) {
+ ma_uint64 requiredInputFrameCount;
-void ma_pcm_s16_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode);
-#else
- ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode);
-#endif
-}
+ if (frameCountOutThisIteration > tempBufferOutCap) {
+ frameCountOutThisIteration = tempBufferOutCap;
+ }
+ requiredInputFrameCount = ma_resampler_get_required_input_frame_count(&pConverter->resampler, frameCountOutThisIteration);
+ if (frameCountInThisIteration > requiredInputFrameCount) {
+ frameCountInThisIteration = requiredInputFrameCount;
+ }
+ }
-void ma_pcm_s16_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- (void)ditherMode;
- ma_copy_memory_64(dst, src, count * sizeof(ma_int16));
-}
+ result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferMid, pChannelsBufferIn, frameCountInThisIteration);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-void ma_pcm_s16_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_uint8* dst_s24 = (ma_uint8*)dst;
- const ma_int16* src_s16 = (const ma_int16*)src;
+ /* At this point we have converted the channels to the output channel count which we now need to resample. */
+ if (pConverter->hasPostFormatConversion) {
+ pResampleBufferOut = pTempBufferOut;
+ } else {
+ pResampleBufferOut = pRunningFramesOut;
+ }
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- dst_s24[i*3+0] = 0;
- dst_s24[i*3+1] = (ma_uint8)(src_s16[i] & 0xFF);
- dst_s24[i*3+2] = (ma_uint8)(src_s16[i] >> 8);
- }
+ result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferMid, &frameCountInThisIteration, pResampleBufferOut, &frameCountOutThisIteration);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- (void)ditherMode;
-}
+ /* Finally we can do the post format conversion. */
+ if (pConverter->hasPostFormatConversion) {
+ if (pRunningFramesOut != NULL) {
+ ma_convert_pcm_frames_format(pRunningFramesOut, pConverter->config.formatOut, pResampleBufferOut, pConverter->resampler.config.format, frameCountOutThisIteration, pConverter->config.channelsOut, pConverter->config.ditherMode);
+ }
+ }
-void ma_pcm_s16_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode);
-}
+ framesProcessedIn += frameCountInThisIteration;
+ framesProcessedOut += frameCountOutThisIteration;
-#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s16_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s16_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s16_to_s24__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_s24__avx2(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s16_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode);
-}
-#endif
+ MA_ASSERT(framesProcessedIn <= frameCountIn);
+ MA_ASSERT(framesProcessedOut <= frameCountOut);
-void ma_pcm_s16_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode);
-#else
- ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode);
-#endif
+ if (frameCountOutThisIteration == 0) {
+ break; /* Consumed all of our input data. */
+ }
+ }
+
+ if (pFrameCountIn != NULL) {
+ *pFrameCountIn = framesProcessedIn;
+ }
+ if (pFrameCountOut != NULL) {
+ *pFrameCountOut = framesProcessedOut;
+ }
+
+ return MA_SUCCESS;
}
-
-void ma_pcm_s16_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+ma_result ma_data_converter_process_pcm_frames(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
{
- ma_int32* dst_s32 = (ma_int32*)dst;
- const ma_int16* src_s16 = (const ma_int16*)src;
+ if (pConverter == NULL) {
+ return MA_INVALID_ARGS;
+ }
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- dst_s32[i] = src_s16[i] << 16;
+ if (pConverter->isPassthrough) {
+ return ma_data_converter_process_pcm_frames__passthrough(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
}
- (void)ditherMode;
-}
+ /*
+ Here is where the real work is done. Getting here means we're not using a passthrough and we need to move the data through each of the relevant stages. The order
+ of our stages depends on the input and output channel count. If the input channels is less than the output channels we want to do sample rate conversion first so
+ that it has less work (resampling is the most expensive part of format conversion).
+ */
+ if (pConverter->config.channelsIn < pConverter->config.channelsOut) {
+ /* Do resampling first, if necessary. */
+ MA_ASSERT(pConverter->hasChannelConverter == MA_TRUE);
-void ma_pcm_s16_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode);
+ if (pConverter->hasResampler) {
+ /* Resampling first. */
+ return ma_data_converter_process_pcm_frames__resampling_first(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ } else {
+ /* Resampling not required. */
+ return ma_data_converter_process_pcm_frames__channels_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ }
+ } else {
+ /* Do channel conversion first, if necessary. */
+ if (pConverter->hasChannelConverter) {
+ if (pConverter->hasResampler) {
+ /* Channel routing first. */
+ return ma_data_converter_process_pcm_frames__channels_first(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ } else {
+ /* Resampling not required. */
+ return ma_data_converter_process_pcm_frames__channels_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ }
+ } else {
+ /* Channel routing not required. */
+ if (pConverter->hasResampler) {
+ /* Resampling only. */
+ return ma_data_converter_process_pcm_frames__resample_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ } else {
+ /* No channel routing nor resampling required. Just format conversion. */
+ return ma_data_converter_process_pcm_frames__format_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut);
+ }
+ }
+ }
}
-#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s16_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s16_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s16_to_s32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_s32__avx2(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s16_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+ma_result ma_data_converter_set_rate(ma_data_converter* pConverter, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut)
{
- ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode);
-}
-#endif
+ if (pConverter == NULL) {
+ return MA_INVALID_ARGS;
+ }
-void ma_pcm_s16_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode);
-#else
- ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode);
-#endif
-}
+ if (pConverter->hasResampler == MA_FALSE) {
+ return MA_INVALID_OPERATION; /* Dynamic resampling not enabled. */
+ }
+ return ma_resampler_set_rate(&pConverter->resampler, sampleRateIn, sampleRateOut);
+}
-void ma_pcm_s16_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+ma_result ma_data_converter_set_rate_ratio(ma_data_converter* pConverter, float ratioInOut)
{
- float* dst_f32 = (float*)dst;
- const ma_int16* src_s16 = (const ma_int16*)src;
-
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- float x = (float)src_s16[i];
-
-#if 0
- /* The accurate way. */
- x = x + 32768.0f; /* -32768..32767 to 0..65535 */
- x = x * 0.00003051804379339284f; /* 0..65536 to 0..2 */
- x = x - 1; /* 0..2 to -1..1 */
-#else
- /* The fast way. */
- x = x * 0.000030517578125f; /* -32768..32767 to -1..0.999969482421875 */
-#endif
+ if (pConverter == NULL) {
+ return MA_INVALID_ARGS;
+ }
- dst_f32[i] = x;
+ if (pConverter->hasResampler == MA_FALSE) {
+ return MA_INVALID_OPERATION; /* Dynamic resampling not enabled. */
}
- (void)ditherMode;
+ return ma_resampler_set_rate_ratio(&pConverter->resampler, ratioInOut);
}
-void ma_pcm_s16_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+ma_uint64 ma_data_converter_get_required_input_frame_count(ma_data_converter* pConverter, ma_uint64 outputFrameCount)
{
- ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode);
-}
+ if (pConverter == NULL) {
+ return 0;
+ }
-#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s16_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s16_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s16_to_f32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_f32__avx2(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s16_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode);
+ if (pConverter->hasResampler) {
+ return ma_resampler_get_required_input_frame_count(&pConverter->resampler, outputFrameCount);
+ } else {
+ return outputFrameCount; /* 1:1 */
+ }
}
-#endif
-void ma_pcm_s16_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+ma_uint64 ma_data_converter_get_expected_output_frame_count(ma_data_converter* pConverter, ma_uint64 inputFrameCount)
{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode);
-#else
- ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode);
-#endif
-}
+ if (pConverter == NULL) {
+ return 0;
+ }
+ if (pConverter->hasResampler) {
+ return ma_resampler_get_expected_output_frame_count(&pConverter->resampler, inputFrameCount);
+ } else {
+ return inputFrameCount; /* 1:1 */
+ }
+}
-void ma_pcm_interleave_s16__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+ma_uint64 ma_data_converter_get_input_latency(ma_data_converter* pConverter)
{
- ma_int16* dst_s16 = (ma_int16*)dst;
- const ma_int16** src_s16 = (const ma_int16**)src;
+ if (pConverter == NULL) {
+ return 0;
+ }
- ma_uint64 iFrame;
- for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; iChannel += 1) {
- dst_s16[iFrame*channels + iChannel] = src_s16[iChannel][iFrame];
- }
+ if (pConverter->hasResampler) {
+ return ma_resampler_get_input_latency(&pConverter->resampler);
}
-}
-void ma_pcm_interleave_s16__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
-{
- ma_pcm_interleave_s16__reference(dst, src, frameCount, channels);
+ return 0; /* No latency without a resampler. */
}
-void ma_pcm_interleave_s16(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+ma_uint64 ma_data_converter_get_output_latency(ma_data_converter* pConverter)
{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_interleave_s16__reference(dst, src, frameCount, channels);
-#else
- ma_pcm_interleave_s16__optimized(dst, src, frameCount, channels);
-#endif
+ if (pConverter == NULL) {
+ return 0;
+ }
+
+ if (pConverter->hasResampler) {
+ return ma_resampler_get_output_latency(&pConverter->resampler);
+ }
+
+ return 0; /* No latency without a resampler. */
}
-void ma_pcm_deinterleave_s16__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
-{
- ma_int16** dst_s16 = (ma_int16**)dst;
- const ma_int16* src_s16 = (const ma_int16*)src;
- ma_uint64 iFrame;
- for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; iChannel += 1) {
- dst_s16[iChannel][iFrame] = src_s16[iFrame*channels + iChannel];
- }
- }
-}
+/**************************************************************************************************************************************************************
+
+Format Conversion
+
+**************************************************************************************************************************************************************/
-void ma_pcm_deinterleave_s16__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+static MA_INLINE ma_int16 ma_pcm_sample_f32_to_s16(float x)
{
- ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels);
+ return (ma_int16)(x * 32767.0f);
}
-void ma_pcm_deinterleave_s16(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+/* u8 */
+void ma_pcm_u8_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels);
-#else
- ma_pcm_deinterleave_s16__optimized(dst, src, frameCount, channels);
-#endif
+ (void)ditherMode;
+ ma_copy_memory_64(dst, src, count * sizeof(ma_uint8));
}
-/* s24 */
-void ma_pcm_s24_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_uint8* dst_u8 = (ma_uint8*)dst;
- const ma_uint8* src_s24 = (const ma_uint8*)src;
-
- if (ditherMode == ma_dither_mode_none) {
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- ma_int8 x = (ma_int8)src_s24[i*3 + 2] + 128;
- dst_u8[i] = (ma_uint8)x;
- }
- } else {
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24);
+ ma_int16* dst_s16 = (ma_int16*)dst;
+ const ma_uint8* src_u8 = (const ma_uint8*)src;
- /* Dither. Don't overflow. */
- ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF);
- if ((ma_int64)x + dither <= 0x7FFFFFFF) {
- x = x + dither;
- } else {
- x = 0x7FFFFFFF;
- }
-
- x = x >> 24;
- x = x + 128;
- dst_u8[i] = (ma_uint8)x;
- }
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ ma_int16 x = src_u8[i];
+ x = x - 128;
+ x = x << 8;
+ dst_s16[i] = x;
}
+
+ (void)ditherMode;
}
-void ma_pcm_s24_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode);
}
#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s24_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s24_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s24_to_u8__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_u8__avx2(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s24_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode);
}
#endif
-void ma_pcm_s24_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_u8_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode);
#else
- ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode);
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_u8_to_s16__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_u8_to_s16__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_u8_to_s16__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode);
+ }
#endif
}
-void ma_pcm_s24_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_int16* dst_s16 = (ma_int16*)dst;
- const ma_uint8* src_s24 = (const ma_uint8*)src;
-
- if (ditherMode == ma_dither_mode_none) {
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- ma_uint16 dst_lo = ((ma_uint16)src_s24[i*3 + 1]);
- ma_uint16 dst_hi = ((ma_uint16)src_s24[i*3 + 2]) << 8;
- dst_s16[i] = (ma_int16)dst_lo | dst_hi;
- }
- } else {
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24);
+ ma_uint8* dst_s24 = (ma_uint8*)dst;
+ const ma_uint8* src_u8 = (const ma_uint8*)src;
- /* Dither. Don't overflow. */
- ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF);
- if ((ma_int64)x + dither <= 0x7FFFFFFF) {
- x = x + dither;
- } else {
- x = 0x7FFFFFFF;
- }
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ ma_int16 x = src_u8[i];
+ x = x - 128;
- x = x >> 16;
- dst_s16[i] = (ma_int16)x;
- }
+ dst_s24[i*3+0] = 0;
+ dst_s24[i*3+1] = 0;
+ dst_s24[i*3+2] = (ma_uint8)((ma_int8)x);
}
+
+ (void)ditherMode;
}
-void ma_pcm_s24_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode);
}
#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s24_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s24_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s24_to_s16__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_s16__avx2(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s24_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode);
}
#endif
-void ma_pcm_s24_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_u8_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode);
#else
- ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode);
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_u8_to_s24__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_u8_to_s24__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_u8_to_s24__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode);
+ }
#endif
}
-void ma_pcm_s24_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- (void)ditherMode;
-
- ma_copy_memory_64(dst, src, count * 3);
-}
-
-
-void ma_pcm_s24_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
ma_int32* dst_s32 = (ma_int32*)dst;
- const ma_uint8* src_s24 = (const ma_uint8*)src;
+ const ma_uint8* src_u8 = (const ma_uint8*)src;
ma_uint64 i;
for (i = 0; i < count; i += 1) {
- dst_s32[i] = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24);
+ ma_int32 x = src_u8[i];
+ x = x - 128;
+ x = x << 24;
+ dst_s32[i] = x;
}
(void)ditherMode;
}
-void ma_pcm_s24_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode);
}
#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s24_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s24_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s24_to_s32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_s32__avx2(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s24_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode);
}
#endif
-void ma_pcm_s24_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_u8_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode);
+ ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode);
#else
- ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode);
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_u8_to_s32__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_u8_to_s32__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_u8_to_s32__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode);
+ }
#endif
}
-void ma_pcm_s24_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
float* dst_f32 = (float*)dst;
- const ma_uint8* src_s24 = (const ma_uint8*)src;
+ const ma_uint8* src_u8 = (const ma_uint8*)src;
ma_uint64 i;
for (i = 0; i < count; i += 1) {
- float x = (float)(((ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24)) >> 8);
-
-#if 0
- /* The accurate way. */
- x = x + 8388608.0f; /* -8388608..8388607 to 0..16777215 */
- x = x * 0.00000011920929665621f; /* 0..16777215 to 0..2 */
+ float x = (float)src_u8[i];
+ x = x * 0.00784313725490196078f; /* 0..255 to 0..2 */
x = x - 1; /* 0..2 to -1..1 */
-#else
- /* The fast way. */
- x = x * 0.00000011920928955078125f; /* -8388608..8388607 to -1..0.999969482421875 */
-#endif
dst_f32[i] = x;
}
@@ -26202,6802 +35427,6637 @@ void ma_pcm_s24_to_f32__reference(void* dst, const void* src, ma_uint64 count, m
(void)ditherMode;
}
-void ma_pcm_s24_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode);
+ ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode);
}
#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s24_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode);
+ ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s24_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s24_to_f32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_f32__avx2(dst, src, count, ditherMode);
+ ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s24_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_u8_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode);
+ ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode);
}
#endif
-void ma_pcm_s24_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_u8_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode);
+ ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode);
#else
- ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode);
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_u8_to_f32__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_u8_to_f32__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_u8_to_f32__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode);
+ }
#endif
}
-void ma_pcm_interleave_s24__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+static MA_INLINE void ma_pcm_interleave_u8__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
{
- ma_uint8* dst8 = (ma_uint8*)dst;
- const ma_uint8** src8 = (const ma_uint8**)src;
+ ma_uint8* dst_u8 = (ma_uint8*)dst;
+ const ma_uint8** src_u8 = (const ma_uint8**)src;
ma_uint64 iFrame;
for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
ma_uint32 iChannel;
for (iChannel = 0; iChannel < channels; iChannel += 1) {
- dst8[iFrame*3*channels + iChannel*3 + 0] = src8[iChannel][iFrame*3 + 0];
- dst8[iFrame*3*channels + iChannel*3 + 1] = src8[iChannel][iFrame*3 + 1];
- dst8[iFrame*3*channels + iChannel*3 + 2] = src8[iChannel][iFrame*3 + 2];
+ dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame];
}
}
}
-
-void ma_pcm_interleave_s24__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+#else
+static MA_INLINE void ma_pcm_interleave_u8__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
{
- ma_pcm_interleave_s24__reference(dst, src, frameCount, channels);
+ ma_uint8* dst_u8 = (ma_uint8*)dst;
+ const ma_uint8** src_u8 = (const ma_uint8**)src;
+
+ if (channels == 1) {
+ ma_copy_memory_64(dst, src[0], frameCount * sizeof(ma_uint8));
+ } else if (channels == 2) {
+ ma_uint64 iFrame;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ dst_u8[iFrame*2 + 0] = src_u8[0][iFrame];
+ dst_u8[iFrame*2 + 1] = src_u8[1][iFrame];
+ }
+ } else {
+ ma_uint64 iFrame;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_uint32 iChannel;
+ for (iChannel = 0; iChannel < channels; iChannel += 1) {
+ dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame];
+ }
+ }
+ }
}
+#endif
-void ma_pcm_interleave_s24(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+void ma_pcm_interleave_u8(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_interleave_s24__reference(dst, src, frameCount, channels);
+ ma_pcm_interleave_u8__reference(dst, src, frameCount, channels);
#else
- ma_pcm_interleave_s24__optimized(dst, src, frameCount, channels);
+ ma_pcm_interleave_u8__optimized(dst, src, frameCount, channels);
#endif
}
-void ma_pcm_deinterleave_s24__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+static MA_INLINE void ma_pcm_deinterleave_u8__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
{
- ma_uint8** dst8 = (ma_uint8**)dst;
- const ma_uint8* src8 = (const ma_uint8*)src;
+ ma_uint8** dst_u8 = (ma_uint8**)dst;
+ const ma_uint8* src_u8 = (const ma_uint8*)src;
- ma_uint32 iFrame;
+ ma_uint64 iFrame;
for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
ma_uint32 iChannel;
for (iChannel = 0; iChannel < channels; iChannel += 1) {
- dst8[iChannel][iFrame*3 + 0] = src8[iFrame*3*channels + iChannel*3 + 0];
- dst8[iChannel][iFrame*3 + 1] = src8[iFrame*3*channels + iChannel*3 + 1];
- dst8[iChannel][iFrame*3 + 2] = src8[iFrame*3*channels + iChannel*3 + 2];
+ dst_u8[iChannel][iFrame] = src_u8[iFrame*channels + iChannel];
}
}
}
-void ma_pcm_deinterleave_s24__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+static MA_INLINE void ma_pcm_deinterleave_u8__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
{
- ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels);
+ ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels);
}
-void ma_pcm_deinterleave_s24(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+void ma_pcm_deinterleave_u8(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels);
+ ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels);
#else
- ma_pcm_deinterleave_s24__optimized(dst, src, frameCount, channels);
+ ma_pcm_deinterleave_u8__optimized(dst, src, frameCount, channels);
#endif
}
-
-/* s32 */
-void ma_pcm_s32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+/* s16 */
+static MA_INLINE void ma_pcm_s16_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
ma_uint8* dst_u8 = (ma_uint8*)dst;
- const ma_int32* src_s32 = (const ma_int32*)src;
+ const ma_int16* src_s16 = (const ma_int16*)src;
if (ditherMode == ma_dither_mode_none) {
ma_uint64 i;
for (i = 0; i < count; i += 1) {
- ma_int32 x = src_s32[i];
- x = x >> 24;
+ ma_int16 x = src_s16[i];
+ x = x >> 8;
x = x + 128;
dst_u8[i] = (ma_uint8)x;
}
} else {
ma_uint64 i;
for (i = 0; i < count; i += 1) {
- ma_int32 x = src_s32[i];
+ ma_int16 x = src_s16[i];
/* Dither. Don't overflow. */
- ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF);
- if ((ma_int64)x + dither <= 0x7FFFFFFF) {
- x = x + dither;
+ ma_int32 dither = ma_dither_s32(ditherMode, -0x80, 0x7F);
+ if ((x + dither) <= 0x7FFF) {
+ x = (ma_int16)(x + dither);
} else {
- x = 0x7FFFFFFF;
+ x = 0x7FFF;
}
-
- x = x >> 24;
+
+ x = x >> 8;
x = x + 128;
dst_u8[i] = (ma_uint8)x;
}
}
}
-void ma_pcm_s32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode);
+ ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode);
}
#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode);
+ ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s32_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s32_to_u8__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_u8__avx2(dst, src, count, ditherMode);
+ ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode);
+ ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode);
}
#endif
-void ma_pcm_s32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_s16_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode);
+ ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode);
#else
- ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode);
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s16_to_u8__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s16_to_u8__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s16_to_u8__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode);
+ }
#endif
}
-void ma_pcm_s32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_s16_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_int16* dst_s16 = (ma_int16*)dst;
- const ma_int32* src_s32 = (const ma_int32*)src;
+ (void)ditherMode;
+ ma_copy_memory_64(dst, src, count * sizeof(ma_int16));
+}
- if (ditherMode == ma_dither_mode_none) {
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- ma_int32 x = src_s32[i];
- x = x >> 16;
- dst_s16[i] = (ma_int16)x;
- }
- } else {
- ma_uint64 i;
- for (i = 0; i < count; i += 1) {
- ma_int32 x = src_s32[i];
- /* Dither. Don't overflow. */
- ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF);
- if ((ma_int64)x + dither <= 0x7FFFFFFF) {
- x = x + dither;
- } else {
- x = 0x7FFFFFFF;
- }
-
- x = x >> 16;
- dst_s16[i] = (ma_int16)x;
- }
+static MA_INLINE void ma_pcm_s16_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_uint8* dst_s24 = (ma_uint8*)dst;
+ const ma_int16* src_s16 = (const ma_int16*)src;
+
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ dst_s24[i*3+0] = 0;
+ dst_s24[i*3+1] = (ma_uint8)(src_s16[i] & 0xFF);
+ dst_s24[i*3+2] = (ma_uint8)(src_s16[i] >> 8);
}
+
+ (void)ditherMode;
}
-void ma_pcm_s32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode);
+ ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode);
}
#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode);
+ ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s32_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s32_to_s16__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_s16__avx2(dst, src, count, ditherMode);
+ ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode);
+ ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode);
}
#endif
-void ma_pcm_s32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_s16_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode);
+ ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode);
#else
- ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode);
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s16_to_s24__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s16_to_s24__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s16_to_s24__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode);
+ }
#endif
}
-void ma_pcm_s32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_uint8* dst_s24 = (ma_uint8*)dst;
- const ma_int32* src_s32 = (const ma_int32*)src;
+ ma_int32* dst_s32 = (ma_int32*)dst;
+ const ma_int16* src_s16 = (const ma_int16*)src;
ma_uint64 i;
for (i = 0; i < count; i += 1) {
- ma_uint32 x = (ma_uint32)src_s32[i];
- dst_s24[i*3+0] = (ma_uint8)((x & 0x0000FF00) >> 8);
- dst_s24[i*3+1] = (ma_uint8)((x & 0x00FF0000) >> 16);
- dst_s24[i*3+2] = (ma_uint8)((x & 0xFF000000) >> 24);
+ dst_s32[i] = src_s16[i] << 16;
}
- (void)ditherMode; /* No dithering for s32 -> s24. */
+ (void)ditherMode;
}
-void ma_pcm_s32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode);
+ ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode);
}
#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode);
+ ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s32_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s32_to_s24__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_s24__avx2(dst, src, count, ditherMode);
+ ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode);
+ ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode);
}
#endif
-void ma_pcm_s32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_s16_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode);
+ ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode);
#else
- ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode);
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s16_to_s32__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s16_to_s32__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s16_to_s32__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode);
+ }
#endif
}
-void ma_pcm_s32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- (void)ditherMode;
-
- ma_copy_memory_64(dst, src, count * sizeof(ma_int32));
-}
-
-
-void ma_pcm_s32_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
float* dst_f32 = (float*)dst;
- const ma_int32* src_s32 = (const ma_int32*)src;
+ const ma_int16* src_s16 = (const ma_int16*)src;
ma_uint64 i;
for (i = 0; i < count; i += 1) {
- double x = src_s32[i];
+ float x = (float)src_s16[i];
#if 0
- x = x + 2147483648.0;
- x = x * 0.0000000004656612873077392578125;
- x = x - 1;
+ /* The accurate way. */
+ x = x + 32768.0f; /* -32768..32767 to 0..65535 */
+ x = x * 0.00003051804379339284f; /* 0..65535 to 0..2 */
+ x = x - 1; /* 0..2 to -1..1 */
#else
- x = x / 2147483648.0;
+ /* The fast way. */
+ x = x * 0.000030517578125f; /* -32768..32767 to -1..0.999969482421875 */
#endif
- dst_f32[i] = (float)x;
+ dst_f32[i] = x;
}
- (void)ditherMode; /* No dithering for s32 -> f32. */
+ (void)ditherMode;
}
-void ma_pcm_s32_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode);
+ ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode);
}
#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_s32_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode);
+ ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_s32_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_s32_to_f32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_f32__avx2(dst, src, count, ditherMode);
+ ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_s32_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s16_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode);
+ ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode);
}
#endif
-void ma_pcm_s32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_s16_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode);
+ ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode);
#else
- ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode);
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s16_to_f32__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s16_to_f32__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s16_to_f32__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode);
+ }
#endif
}
-void ma_pcm_interleave_s32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+static MA_INLINE void ma_pcm_interleave_s16__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
{
- ma_int32* dst_s32 = (ma_int32*)dst;
- const ma_int32** src_s32 = (const ma_int32**)src;
+ ma_int16* dst_s16 = (ma_int16*)dst;
+ const ma_int16** src_s16 = (const ma_int16**)src;
ma_uint64 iFrame;
for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
ma_uint32 iChannel;
for (iChannel = 0; iChannel < channels; iChannel += 1) {
- dst_s32[iFrame*channels + iChannel] = src_s32[iChannel][iFrame];
+ dst_s16[iFrame*channels + iChannel] = src_s16[iChannel][iFrame];
}
}
}
-void ma_pcm_interleave_s32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+static MA_INLINE void ma_pcm_interleave_s16__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
{
- ma_pcm_interleave_s32__reference(dst, src, frameCount, channels);
+ ma_pcm_interleave_s16__reference(dst, src, frameCount, channels);
}
-void ma_pcm_interleave_s32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+void ma_pcm_interleave_s16(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_interleave_s32__reference(dst, src, frameCount, channels);
+ ma_pcm_interleave_s16__reference(dst, src, frameCount, channels);
#else
- ma_pcm_interleave_s32__optimized(dst, src, frameCount, channels);
+ ma_pcm_interleave_s16__optimized(dst, src, frameCount, channels);
#endif
}
-void ma_pcm_deinterleave_s32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+static MA_INLINE void ma_pcm_deinterleave_s16__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
{
- ma_int32** dst_s32 = (ma_int32**)dst;
- const ma_int32* src_s32 = (const ma_int32*)src;
+ ma_int16** dst_s16 = (ma_int16**)dst;
+ const ma_int16* src_s16 = (const ma_int16*)src;
ma_uint64 iFrame;
for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
ma_uint32 iChannel;
for (iChannel = 0; iChannel < channels; iChannel += 1) {
- dst_s32[iChannel][iFrame] = src_s32[iFrame*channels + iChannel];
+ dst_s16[iChannel][iFrame] = src_s16[iFrame*channels + iChannel];
}
}
}
-void ma_pcm_deinterleave_s32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+static MA_INLINE void ma_pcm_deinterleave_s16__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
{
- ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels);
+ ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels);
}
-void ma_pcm_deinterleave_s32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+void ma_pcm_deinterleave_s16(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels);
+ ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels);
#else
- ma_pcm_deinterleave_s32__optimized(dst, src, frameCount, channels);
+ ma_pcm_deinterleave_s16__optimized(dst, src, frameCount, channels);
#endif
}
-/* f32 */
-void ma_pcm_f32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+/* s24 */
+static MA_INLINE void ma_pcm_s24_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_uint64 i;
-
ma_uint8* dst_u8 = (ma_uint8*)dst;
- const float* src_f32 = (const float*)src;
-
- float ditherMin = 0;
- float ditherMax = 0;
- if (ditherMode != ma_dither_mode_none) {
- ditherMin = 1.0f / -128;
- ditherMax = 1.0f / 127;
- }
+ const ma_uint8* src_s24 = (const ma_uint8*)src;
- for (i = 0; i < count; i += 1) {
- float x = src_f32[i];
- x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
- x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
- x = x + 1; /* -1..1 to 0..2 */
- x = x * 127.5f; /* 0..2 to 0..255 */
+ if (ditherMode == ma_dither_mode_none) {
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ ma_int8 x = (ma_int8)src_s24[i*3 + 2] + 128;
+ dst_u8[i] = (ma_uint8)x;
+ }
+ } else {
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24);
- dst_u8[i] = (ma_uint8)x;
+ /* Dither. Don't overflow. */
+ ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF);
+ if ((ma_int64)x + dither <= 0x7FFFFFFF) {
+ x = x + dither;
+ } else {
+ x = 0x7FFFFFFF;
+ }
+
+ x = x >> 24;
+ x = x + 128;
+ dst_u8[i] = (ma_uint8)x;
+ }
}
}
-void ma_pcm_f32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s24_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode);
+ ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode);
}
#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_f32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s24_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode);
+ ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_f32_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_f32_to_u8__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s24_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_f32_to_u8__avx2(dst, src, count, ditherMode);
+ ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_f32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode);
-}
-#endif
-
-void ma_pcm_f32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s24_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode);
-#else
- ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode);
-#endif
+ ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode);
}
-
-
-void ma_pcm_f32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_uint64 i;
-
- ma_int16* dst_s16 = (ma_int16*)dst;
- const float* src_f32 = (const float*)src;
-
- float ditherMin = 0;
- float ditherMax = 0;
- if (ditherMode != ma_dither_mode_none) {
- ditherMin = 1.0f / -32768;
- ditherMax = 1.0f / 32767;
- }
-
- for (i = 0; i < count; i += 1) {
- float x = src_f32[i];
- x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
- x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
-
-#if 0
- /* The accurate way. */
- x = x + 1; /* -1..1 to 0..2 */
- x = x * 32767.5f; /* 0..2 to 0..65535 */
- x = x - 32768.0f; /* 0...65535 to -32768..32767 */
-#else
- /* The fast way. */
- x = x * 32767.0f; /* -1..1 to -32767..32767 */
#endif
- dst_s16[i] = (ma_int16)x;
- }
-}
-
-void ma_pcm_f32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_uint64 i;
- ma_uint64 i4;
- ma_uint64 count4;
-
- ma_int16* dst_s16 = (ma_int16*)dst;
- const float* src_f32 = (const float*)src;
-
- float ditherMin = 0;
- float ditherMax = 0;
- if (ditherMode != ma_dither_mode_none) {
- ditherMin = 1.0f / -32768;
- ditherMax = 1.0f / 32767;
- }
-
- /* Unrolled. */
- i = 0;
- count4 = count >> 2;
- for (i4 = 0; i4 < count4; i4 += 1) {
- float d0 = ma_dither_f32(ditherMode, ditherMin, ditherMax);
- float d1 = ma_dither_f32(ditherMode, ditherMin, ditherMax);
- float d2 = ma_dither_f32(ditherMode, ditherMin, ditherMax);
- float d3 = ma_dither_f32(ditherMode, ditherMin, ditherMax);
-
- float x0 = src_f32[i+0];
- float x1 = src_f32[i+1];
- float x2 = src_f32[i+2];
- float x3 = src_f32[i+3];
-
- x0 = x0 + d0;
- x1 = x1 + d1;
- x2 = x2 + d2;
- x3 = x3 + d3;
-
- x0 = ((x0 < -1) ? -1 : ((x0 > 1) ? 1 : x0));
- x1 = ((x1 < -1) ? -1 : ((x1 > 1) ? 1 : x1));
- x2 = ((x2 < -1) ? -1 : ((x2 > 1) ? 1 : x2));
- x3 = ((x3 < -1) ? -1 : ((x3 > 1) ? 1 : x3));
-
- x0 = x0 * 32767.0f;
- x1 = x1 * 32767.0f;
- x2 = x2 * 32767.0f;
- x3 = x3 * 32767.0f;
-
- dst_s16[i+0] = (ma_int16)x0;
- dst_s16[i+1] = (ma_int16)x1;
- dst_s16[i+2] = (ma_int16)x2;
- dst_s16[i+3] = (ma_int16)x3;
-
- i += 4;
- }
-
- /* Leftover. */
- for (; i < count; i += 1) {
- float x = src_f32[i];
- x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
- x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
- x = x * 32767.0f; /* -1..1 to -32767..32767 */
-
- dst_s16[i] = (ma_int16)x;
- }
+void ma_pcm_s24_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+ ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode);
+#else
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s24_to_u8__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s24_to_u8__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s24_to_u8__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode);
+ }
+#endif
}
-#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_f32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+
+static MA_INLINE void ma_pcm_s24_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_uint64 i;
- ma_uint64 i8;
- ma_uint64 count8;
- ma_int16* dst_s16;
- const float* src_f32;
- float ditherMin;
- float ditherMax;
+ ma_int16* dst_s16 = (ma_int16*)dst;
+ const ma_uint8* src_s24 = (const ma_uint8*)src;
- /* Both the input and output buffers need to be aligned to 16 bytes. */
- if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) {
- ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode);
- return;
- }
+ if (ditherMode == ma_dither_mode_none) {
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ ma_uint16 dst_lo = ((ma_uint16)src_s24[i*3 + 1]);
+ ma_uint16 dst_hi = ((ma_uint16)src_s24[i*3 + 2]) << 8;
+ dst_s16[i] = (ma_int16)dst_lo | dst_hi;
+ }
+ } else {
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24);
- dst_s16 = (ma_int16*)dst;
- src_f32 = (const float*)src;
+ /* Dither. Don't overflow. */
+ ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF);
+ if ((ma_int64)x + dither <= 0x7FFFFFFF) {
+ x = x + dither;
+ } else {
+ x = 0x7FFFFFFF;
+ }
- ditherMin = 0;
- ditherMax = 0;
- if (ditherMode != ma_dither_mode_none) {
- ditherMin = 1.0f / -32768;
- ditherMax = 1.0f / 32767;
+ x = x >> 16;
+ dst_s16[i] = (ma_int16)x;
+ }
}
+}
- i = 0;
+static MA_INLINE void ma_pcm_s24_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode);
+}
- /* SSE2. SSE allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */
- count8 = count >> 3;
- for (i8 = 0; i8 < count8; i8 += 1) {
- __m128 d0;
- __m128 d1;
- __m128 x0;
- __m128 x1;
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_s24_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_AVX2)
+static MA_INLINE void ma_pcm_s24_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE void ma_pcm_s24_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode);
+}
+#endif
- if (ditherMode == ma_dither_mode_none) {
- d0 = _mm_set1_ps(0);
- d1 = _mm_set1_ps(0);
- } else if (ditherMode == ma_dither_mode_rectangle) {
- d0 = _mm_set_ps(
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax)
- );
- d1 = _mm_set_ps(
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax)
- );
- } else {
- d0 = _mm_set_ps(
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax)
- );
- d1 = _mm_set_ps(
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax)
- );
+void ma_pcm_s24_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+ ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode);
+#else
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s24_to_s16__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s24_to_s16__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s24_to_s16__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode);
}
+#endif
+}
- x0 = *((__m128*)(src_f32 + i) + 0);
- x1 = *((__m128*)(src_f32 + i) + 1);
- x0 = _mm_add_ps(x0, d0);
- x1 = _mm_add_ps(x1, d1);
+void ma_pcm_s24_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ (void)ditherMode;
- x0 = _mm_mul_ps(x0, _mm_set1_ps(32767.0f));
- x1 = _mm_mul_ps(x1, _mm_set1_ps(32767.0f));
+ ma_copy_memory_64(dst, src, count * 3);
+}
- _mm_stream_si128(((__m128i*)(dst_s16 + i)), _mm_packs_epi32(_mm_cvttps_epi32(x0), _mm_cvttps_epi32(x1)));
-
- i += 8;
+
+static MA_INLINE void ma_pcm_s24_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_int32* dst_s32 = (ma_int32*)dst;
+ const ma_uint8* src_s24 = (const ma_uint8*)src;
+
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ dst_s32[i] = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24);
}
+ (void)ditherMode;
+}
- /* Leftover. */
- for (; i < count; i += 1) {
- float x = src_f32[i];
- x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
- x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
- x = x * 32767.0f; /* -1..1 to -32767..32767 */
+static MA_INLINE void ma_pcm_s24_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode);
+}
- dst_s16[i] = (ma_int16)x;
- }
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_s24_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_f32_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s24_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_uint64 i;
- ma_uint64 i16;
- ma_uint64 count16;
- ma_int16* dst_s16;
- const float* src_f32;
- float ditherMin;
- float ditherMax;
-
- /* Both the input and output buffers need to be aligned to 32 bytes. */
- if ((((ma_uintptr)dst & 31) != 0) || (((ma_uintptr)src & 31) != 0)) {
- ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode);
- return;
- }
-
- dst_s16 = (ma_int16*)dst;
- src_f32 = (const float*)src;
-
- ditherMin = 0;
- ditherMax = 0;
- if (ditherMode != ma_dither_mode_none) {
- ditherMin = 1.0f / -32768;
- ditherMax = 1.0f / 32767;
- }
-
- i = 0;
-
- /* AVX2. AVX2 allows us to output 16 s16's at a time which means our loop is unrolled 16 times. */
- count16 = count >> 4;
- for (i16 = 0; i16 < count16; i16 += 1) {
- __m256 d0;
- __m256 d1;
- __m256 x0;
- __m256 x1;
- __m256i i0;
- __m256i i1;
- __m256i p0;
- __m256i p1;
- __m256i r;
+ ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE void ma_pcm_s24_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode);
+}
+#endif
- if (ditherMode == ma_dither_mode_none) {
- d0 = _mm256_set1_ps(0);
- d1 = _mm256_set1_ps(0);
- } else if (ditherMode == ma_dither_mode_rectangle) {
- d0 = _mm256_set_ps(
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax)
- );
- d1 = _mm256_set_ps(
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax),
- ma_dither_f32_rectangle(ditherMin, ditherMax)
- );
- } else {
- d0 = _mm256_set_ps(
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax)
- );
- d1 = _mm256_set_ps(
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax),
- ma_dither_f32_triangle(ditherMin, ditherMax)
- );
+void ma_pcm_s24_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+ ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode);
+#else
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s24_to_s32__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s24_to_s32__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s24_to_s32__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode);
}
+#endif
+}
- x0 = *((__m256*)(src_f32 + i) + 0);
- x1 = *((__m256*)(src_f32 + i) + 1);
-
- x0 = _mm256_add_ps(x0, d0);
- x1 = _mm256_add_ps(x1, d1);
- x0 = _mm256_mul_ps(x0, _mm256_set1_ps(32767.0f));
- x1 = _mm256_mul_ps(x1, _mm256_set1_ps(32767.0f));
+static MA_INLINE void ma_pcm_s24_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ float* dst_f32 = (float*)dst;
+ const ma_uint8* src_s24 = (const ma_uint8*)src;
- /* Computing the final result is a little more complicated for AVX2 than SSE2. */
- i0 = _mm256_cvttps_epi32(x0);
- i1 = _mm256_cvttps_epi32(x1);
- p0 = _mm256_permute2x128_si256(i0, i1, 0 | 32);
- p1 = _mm256_permute2x128_si256(i0, i1, 1 | 48);
- r = _mm256_packs_epi32(p0, p1);
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ float x = (float)(((ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24)) >> 8);
- _mm256_stream_si256(((__m256i*)(dst_s16 + i)), r);
+#if 0
+ /* The accurate way. */
+ x = x + 8388608.0f; /* -8388608..8388607 to 0..16777215 */
+ x = x * 0.00000011920929665621f; /* 0..16777215 to 0..2 */
+ x = x - 1; /* 0..2 to -1..1 */
+#else
+ /* The fast way. */
+ x = x * 0.00000011920928955078125f; /* -8388608..8388607 to -1..0.999969482421875 */
+#endif
- i += 16;
+ dst_f32[i] = x;
}
+ (void)ditherMode;
+}
- /* Leftover. */
- for (; i < count; i += 1) {
- float x = src_f32[i];
- x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
- x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
- x = x * 32767.0f; /* -1..1 to -32767..32767 */
+static MA_INLINE void ma_pcm_s24_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode);
+}
- dst_s16[i] = (ma_int16)x;
- }
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_s24_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode);
}
#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_f32_to_s16__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+#if defined(MA_SUPPORT_AVX2)
+static MA_INLINE void ma_pcm_s24_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- /* TODO: Convert this from AVX to AVX-512. */
- ma_pcm_f32_to_s16__avx2(dst, src, count, ditherMode);
+ ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_f32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s24_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_uint64 i;
- ma_uint64 i8;
- ma_uint64 count8;
- ma_int16* dst_s16;
- const float* src_f32;
- float ditherMin;
- float ditherMax;
+ ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode);
+}
+#endif
- /* Both the input and output buffers need to be aligned to 16 bytes. */
- if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) {
- ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode);
- return;
+void ma_pcm_s24_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+ ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode);
+#else
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s24_to_f32__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s24_to_f32__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s24_to_f32__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode);
+ }
+#endif
+}
+
+
+static MA_INLINE void ma_pcm_interleave_s24__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+{
+ ma_uint8* dst8 = (ma_uint8*)dst;
+ const ma_uint8** src8 = (const ma_uint8**)src;
+
+ ma_uint64 iFrame;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_uint32 iChannel;
+ for (iChannel = 0; iChannel < channels; iChannel += 1) {
+ dst8[iFrame*3*channels + iChannel*3 + 0] = src8[iChannel][iFrame*3 + 0];
+ dst8[iFrame*3*channels + iChannel*3 + 1] = src8[iChannel][iFrame*3 + 1];
+ dst8[iFrame*3*channels + iChannel*3 + 2] = src8[iChannel][iFrame*3 + 2];
+ }
}
+}
- dst_s16 = (ma_int16*)dst;
- src_f32 = (const float*)src;
+static MA_INLINE void ma_pcm_interleave_s24__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+{
+ ma_pcm_interleave_s24__reference(dst, src, frameCount, channels);
+}
- ditherMin = 0;
- ditherMax = 0;
- if (ditherMode != ma_dither_mode_none) {
- ditherMin = 1.0f / -32768;
- ditherMax = 1.0f / 32767;
+void ma_pcm_interleave_s24(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+ ma_pcm_interleave_s24__reference(dst, src, frameCount, channels);
+#else
+ ma_pcm_interleave_s24__optimized(dst, src, frameCount, channels);
+#endif
+}
+
+
+static MA_INLINE void ma_pcm_deinterleave_s24__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+{
+ ma_uint8** dst8 = (ma_uint8**)dst;
+ const ma_uint8* src8 = (const ma_uint8*)src;
+
+    ma_uint64 iFrame;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_uint32 iChannel;
+ for (iChannel = 0; iChannel < channels; iChannel += 1) {
+ dst8[iChannel][iFrame*3 + 0] = src8[iFrame*3*channels + iChannel*3 + 0];
+ dst8[iChannel][iFrame*3 + 1] = src8[iFrame*3*channels + iChannel*3 + 1];
+ dst8[iChannel][iFrame*3 + 2] = src8[iFrame*3*channels + iChannel*3 + 2];
+ }
}
+}
- i = 0;
+static MA_INLINE void ma_pcm_deinterleave_s24__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+{
+ ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels);
+}
- /* NEON. NEON allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */
- count8 = count >> 3;
- for (i8 = 0; i8 < count8; i8 += 1) {
- float32x4_t d0;
- float32x4_t d1;
- float32x4_t x0;
- float32x4_t x1;
- int32x4_t i0;
- int32x4_t i1;
+void ma_pcm_deinterleave_s24(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+ ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels);
+#else
+ ma_pcm_deinterleave_s24__optimized(dst, src, frameCount, channels);
+#endif
+}
- if (ditherMode == ma_dither_mode_none) {
- d0 = vmovq_n_f32(0);
- d1 = vmovq_n_f32(0);
- } else if (ditherMode == ma_dither_mode_rectangle) {
- float d0v[4];
- d0v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax);
- d0v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax);
- d0v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax);
- d0v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax);
- d0 = vld1q_f32(d0v);
- float d1v[4];
- d1v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax);
- d1v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax);
- d1v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax);
- d1v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax);
- d1 = vld1q_f32(d1v);
- } else {
- float d0v[4];
- d0v[0] = ma_dither_f32_triangle(ditherMin, ditherMax);
- d0v[1] = ma_dither_f32_triangle(ditherMin, ditherMax);
- d0v[2] = ma_dither_f32_triangle(ditherMin, ditherMax);
- d0v[3] = ma_dither_f32_triangle(ditherMin, ditherMax);
- d0 = vld1q_f32(d0v);
- float d1v[4];
- d1v[0] = ma_dither_f32_triangle(ditherMin, ditherMax);
- d1v[1] = ma_dither_f32_triangle(ditherMin, ditherMax);
- d1v[2] = ma_dither_f32_triangle(ditherMin, ditherMax);
- d1v[3] = ma_dither_f32_triangle(ditherMin, ditherMax);
- d1 = vld1q_f32(d1v);
+/* s32 */
+static MA_INLINE void ma_pcm_s32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_uint8* dst_u8 = (ma_uint8*)dst;
+ const ma_int32* src_s32 = (const ma_int32*)src;
+
+ if (ditherMode == ma_dither_mode_none) {
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ ma_int32 x = src_s32[i];
+ x = x >> 24;
+ x = x + 128;
+ dst_u8[i] = (ma_uint8)x;
}
+ } else {
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ ma_int32 x = src_s32[i];
- x0 = *((float32x4_t*)(src_f32 + i) + 0);
- x1 = *((float32x4_t*)(src_f32 + i) + 1);
+ /* Dither. Don't overflow. */
+ ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF);
+ if ((ma_int64)x + dither <= 0x7FFFFFFF) {
+ x = x + dither;
+ } else {
+ x = 0x7FFFFFFF;
+ }
+
+ x = x >> 24;
+ x = x + 128;
+ dst_u8[i] = (ma_uint8)x;
+ }
+ }
+}
- x0 = vaddq_f32(x0, d0);
- x1 = vaddq_f32(x1, d1);
+static MA_INLINE void ma_pcm_s32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode);
+}
- x0 = vmulq_n_f32(x0, 32767.0f);
- x1 = vmulq_n_f32(x1, 32767.0f);
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_s32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_AVX2)
+static MA_INLINE void ma_pcm_s32_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE void ma_pcm_s32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode);
+}
+#endif
- i0 = vcvtq_s32_f32(x0);
- i1 = vcvtq_s32_f32(x1);
- *((int16x8_t*)(dst_s16 + i)) = vcombine_s16(vqmovn_s32(i0), vqmovn_s32(i1));
+void ma_pcm_s32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+ ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode);
+#else
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s32_to_u8__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s32_to_u8__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s32_to_u8__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode);
+ }
+#endif
+}
+
+
+static MA_INLINE void ma_pcm_s32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_int16* dst_s16 = (ma_int16*)dst;
+ const ma_int32* src_s32 = (const ma_int32*)src;
+
+ if (ditherMode == ma_dither_mode_none) {
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ ma_int32 x = src_s32[i];
+ x = x >> 16;
+ dst_s16[i] = (ma_int16)x;
+ }
+ } else {
+ ma_uint64 i;
+ for (i = 0; i < count; i += 1) {
+ ma_int32 x = src_s32[i];
- i += 8;
+ /* Dither. Don't overflow. */
+ ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF);
+ if ((ma_int64)x + dither <= 0x7FFFFFFF) {
+ x = x + dither;
+ } else {
+ x = 0x7FFFFFFF;
+ }
+
+ x = x >> 16;
+ dst_s16[i] = (ma_int16)x;
+ }
}
+}
+static MA_INLINE void ma_pcm_s32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode);
+}
- /* Leftover. */
- for (; i < count; i += 1) {
- float x = src_f32[i];
- x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
- x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
- x = x * 32767.0f; /* -1..1 to -32767..32767 */
-
- dst_s16[i] = (ma_int16)x;
- }
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_s32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_AVX2)
+static MA_INLINE void ma_pcm_s32_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE void ma_pcm_s32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode);
}
#endif
-void ma_pcm_f32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_s32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_f32_to_s16__reference(dst, src, count, ditherMode);
+ ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode);
#else
- ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode);
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s32_to_s16__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s32_to_s16__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s32_to_s16__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode);
+ }
#endif
}
-void ma_pcm_f32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
ma_uint8* dst_s24 = (ma_uint8*)dst;
- const float* src_f32 = (const float*)src;
+ const ma_int32* src_s32 = (const ma_int32*)src;
ma_uint64 i;
for (i = 0; i < count; i += 1) {
- ma_int32 r;
- float x = src_f32[i];
- x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
-
-#if 0
- /* The accurate way. */
- x = x + 1; /* -1..1 to 0..2 */
- x = x * 8388607.5f; /* 0..2 to 0..16777215 */
- x = x - 8388608.0f; /* 0..16777215 to -8388608..8388607 */
-#else
- /* The fast way. */
- x = x * 8388607.0f; /* -1..1 to -8388607..8388607 */
-#endif
-
- r = (ma_int32)x;
- dst_s24[(i*3)+0] = (ma_uint8)((r & 0x0000FF) >> 0);
- dst_s24[(i*3)+1] = (ma_uint8)((r & 0x00FF00) >> 8);
- dst_s24[(i*3)+2] = (ma_uint8)((r & 0xFF0000) >> 16);
+ ma_uint32 x = (ma_uint32)src_s32[i];
+ dst_s24[i*3+0] = (ma_uint8)((x & 0x0000FF00) >> 8);
+ dst_s24[i*3+1] = (ma_uint8)((x & 0x00FF0000) >> 16);
+ dst_s24[i*3+2] = (ma_uint8)((x & 0xFF000000) >> 24);
}
- (void)ditherMode; /* No dithering for f32 -> s24. */
+ (void)ditherMode; /* No dithering for s32 -> s24. */
}
-void ma_pcm_f32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode);
+ ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode);
}
#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_f32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode);
+ ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_f32_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_f32_to_s24__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s32_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_f32_to_s24__avx2(dst, src, count, ditherMode);
+ ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_f32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode);
+ ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode);
}
#endif
-void ma_pcm_f32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_s32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode);
+ ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode);
#else
- ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode);
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s32_to_s24__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s32_to_s24__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s32_to_s24__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode);
+ }
#endif
}
-void ma_pcm_f32_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_s32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_int32* dst_s32 = (ma_int32*)dst;
- const float* src_f32 = (const float*)src;
+ (void)ditherMode;
- ma_uint32 i;
+ ma_copy_memory_64(dst, src, count * sizeof(ma_int32));
+}
+
+
+static MA_INLINE void ma_pcm_s32_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ float* dst_f32 = (float*)dst;
+ const ma_int32* src_s32 = (const ma_int32*)src;
+
+ ma_uint64 i;
for (i = 0; i < count; i += 1) {
- double x = src_f32[i];
- x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
+ double x = src_s32[i];
#if 0
- /* The accurate way. */
- x = x + 1; /* -1..1 to 0..2 */
- x = x * 2147483647.5; /* 0..2 to 0..4294967295 */
- x = x - 2147483648.0; /* 0...4294967295 to -2147483648..2147483647 */
+ x = x + 2147483648.0;
+ x = x * 0.0000000004656612873077392578125;
+ x = x - 1;
#else
- /* The fast way. */
- x = x * 2147483647.0; /* -1..1 to -2147483647..2147483647 */
+ x = x / 2147483648.0;
#endif
- dst_s32[i] = (ma_int32)x;
+ dst_f32[i] = (float)x;
}
- (void)ditherMode; /* No dithering for f32 -> s32. */
+ (void)ditherMode; /* No dithering for s32 -> f32. */
}
-void ma_pcm_f32_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s32_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode);
+ ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode);
}
#if defined(MA_SUPPORT_SSE2)
-void ma_pcm_f32_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s32_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode);
+ ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_AVX2)
-void ma_pcm_f32_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s32_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode);
-}
-#endif
-#if defined(MA_SUPPORT_AVX512)
-void ma_pcm_f32_to_s32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- ma_pcm_f32_to_s32__avx2(dst, src, count, ditherMode);
+ ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode);
}
#endif
#if defined(MA_SUPPORT_NEON)
-void ma_pcm_f32_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+static MA_INLINE void ma_pcm_s32_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode);
+ ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode);
}
#endif
-void ma_pcm_f32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+void ma_pcm_s32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode);
+ ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode);
#else
- ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode);
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_s32_to_f32__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_s32_to_f32__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_s32_to_f32__neon(dst, src, count, ditherMode);
+ } else
+ #endif
+ {
+ ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode);
+ }
#endif
}
-void ma_pcm_f32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
-{
- (void)ditherMode;
-
- ma_copy_memory_64(dst, src, count * sizeof(float));
-}
-
-
-void ma_pcm_interleave_f32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+static MA_INLINE void ma_pcm_interleave_s32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
{
- float* dst_f32 = (float*)dst;
- const float** src_f32 = (const float**)src;
+ ma_int32* dst_s32 = (ma_int32*)dst;
+ const ma_int32** src_s32 = (const ma_int32**)src;
ma_uint64 iFrame;
for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
ma_uint32 iChannel;
for (iChannel = 0; iChannel < channels; iChannel += 1) {
- dst_f32[iFrame*channels + iChannel] = src_f32[iChannel][iFrame];
+ dst_s32[iFrame*channels + iChannel] = src_s32[iChannel][iFrame];
}
}
}
-void ma_pcm_interleave_f32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+static MA_INLINE void ma_pcm_interleave_s32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
{
- ma_pcm_interleave_f32__reference(dst, src, frameCount, channels);
+ ma_pcm_interleave_s32__reference(dst, src, frameCount, channels);
}
-void ma_pcm_interleave_f32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+void ma_pcm_interleave_s32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_interleave_f32__reference(dst, src, frameCount, channels);
+ ma_pcm_interleave_s32__reference(dst, src, frameCount, channels);
#else
- ma_pcm_interleave_f32__optimized(dst, src, frameCount, channels);
+ ma_pcm_interleave_s32__optimized(dst, src, frameCount, channels);
#endif
}
-void ma_pcm_deinterleave_f32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+static MA_INLINE void ma_pcm_deinterleave_s32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
{
- float** dst_f32 = (float**)dst;
- const float* src_f32 = (const float*)src;
+ ma_int32** dst_s32 = (ma_int32**)dst;
+ const ma_int32* src_s32 = (const ma_int32*)src;
ma_uint64 iFrame;
for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
ma_uint32 iChannel;
for (iChannel = 0; iChannel < channels; iChannel += 1) {
- dst_f32[iChannel][iFrame] = src_f32[iFrame*channels + iChannel];
+ dst_s32[iChannel][iFrame] = src_s32[iFrame*channels + iChannel];
}
}
}
-void ma_pcm_deinterleave_f32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+static MA_INLINE void ma_pcm_deinterleave_s32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
{
- ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels);
+ ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels);
}
-void ma_pcm_deinterleave_f32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+void ma_pcm_deinterleave_s32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
{
#ifdef MA_USE_REFERENCE_CONVERSION_APIS
- ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels);
+ ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels);
#else
- ma_pcm_deinterleave_f32__optimized(dst, src, frameCount, channels);
+ ma_pcm_deinterleave_s32__optimized(dst, src, frameCount, channels);
#endif
}
-void ma_format_converter_init_callbacks__default(ma_format_converter* pConverter)
+/* f32 */
+static MA_INLINE void ma_pcm_f32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_assert(pConverter != NULL);
-
- switch (pConverter->config.formatIn)
- {
- case ma_format_u8:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_u8_to_u8;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s16;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s24;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s32;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_u8_to_f32;
- }
- } break;
+ ma_uint64 i;
- case ma_format_s16:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s16_to_u8;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s16;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s24;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s32;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s16_to_f32;
- }
- } break;
+ ma_uint8* dst_u8 = (ma_uint8*)dst;
+ const float* src_f32 = (const float*)src;
- case ma_format_s24:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s24_to_u8;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s16;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s24;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s32;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s24_to_f32;
- }
- } break;
+ float ditherMin = 0;
+ float ditherMax = 0;
+ if (ditherMode != ma_dither_mode_none) {
+ ditherMin = 1.0f / -128;
+ ditherMax = 1.0f / 127;
+ }
- case ma_format_s32:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s32_to_u8;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s16;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s24;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s32;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s32_to_f32;
- }
- } break;
+ for (i = 0; i < count; i += 1) {
+ float x = src_f32[i];
+ x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
+ x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
+ x = x + 1; /* -1..1 to 0..2 */
+ x = x * 127.5f; /* 0..2 to 0..255 */
- case ma_format_f32:
- default:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_f32_to_u8;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s16;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s24;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s32;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_f32_to_f32;
- }
- } break;
+ dst_u8[i] = (ma_uint8)x;
}
}
-#if defined(MA_SUPPORT_SSE2)
-void ma_format_converter_init_callbacks__sse2(ma_format_converter* pConverter)
+static MA_INLINE void ma_pcm_f32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_assert(pConverter != NULL);
-
- switch (pConverter->config.formatIn)
- {
- case ma_format_u8:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_u8_to_u8;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s16__sse2;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s24__sse2;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s32__sse2;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_u8_to_f32__sse2;
- }
- } break;
-
- case ma_format_s16:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s16_to_u8__sse2;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s16;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s24__sse2;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s32__sse2;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s16_to_f32__sse2;
- }
- } break;
-
- case ma_format_s24:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s24_to_u8__sse2;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s16__sse2;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s24;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s32__sse2;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s24_to_f32__sse2;
- }
- } break;
+ ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode);
+}
- case ma_format_s32:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s32_to_u8__sse2;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s16__sse2;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s24__sse2;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s32;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s32_to_f32__sse2;
- }
- } break;
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_f32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_AVX2)
+static MA_INLINE void ma_pcm_f32_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE void ma_pcm_f32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode);
+}
+#endif
- case ma_format_f32:
- default:
+void ma_pcm_f32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+ ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode);
+#else
+ # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+ if (ma_has_avx2()) {
+ ma_pcm_f32_to_u8__avx2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+ if (ma_has_sse2()) {
+ ma_pcm_f32_to_u8__sse2(dst, src, count, ditherMode);
+ } else
+ #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+ if (ma_has_neon()) {
+ ma_pcm_f32_to_u8__neon(dst, src, count, ditherMode);
+ } else
+ #endif
{
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_f32_to_u8__sse2;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s16__sse2;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s24__sse2;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s32__sse2;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_f32_to_f32;
- }
- } break;
- }
-}
+ ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode);
+ }
#endif
+}
-#if defined(MA_SUPPORT_AVX2)
-void ma_format_converter_init_callbacks__avx2(ma_format_converter* pConverter)
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+static MA_INLINE void ma_pcm_f32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_assert(pConverter != NULL);
+ ma_uint64 i;
- switch (pConverter->config.formatIn)
- {
- case ma_format_u8:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_u8_to_u8;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s16__avx2;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s24__avx2;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s32__avx2;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_u8_to_f32__avx2;
- }
- } break;
+ ma_int16* dst_s16 = (ma_int16*)dst;
+ const float* src_f32 = (const float*)src;
- case ma_format_s16:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s16_to_u8__avx2;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s16;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s24__avx2;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s32__avx2;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s16_to_f32__avx2;
- }
- } break;
+ float ditherMin = 0;
+ float ditherMax = 0;
+ if (ditherMode != ma_dither_mode_none) {
+ ditherMin = 1.0f / -32768;
+ ditherMax = 1.0f / 32767;
+ }
- case ma_format_s24:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s24_to_u8__avx2;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s16__avx2;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s24;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s32__avx2;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s24_to_f32__avx2;
- }
- } break;
+ for (i = 0; i < count; i += 1) {
+ float x = src_f32[i];
+ x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
+ x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
- case ma_format_s32:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s32_to_u8__avx2;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s16__avx2;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s24__avx2;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s32;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s32_to_f32__avx2;
- }
- } break;
+#if 0
+ /* The accurate way. */
+ x = x + 1; /* -1..1 to 0..2 */
+ x = x * 32767.5f; /* 0..2 to 0..65535 */
+ x = x - 32768.0f; /* 0...65535 to -32768..32767 */
+#else
+ /* The fast way. */
+ x = x * 32767.0f; /* -1..1 to -32767..32767 */
+#endif
- case ma_format_f32:
- default:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_f32_to_u8__avx2;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s16__avx2;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s24__avx2;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s32__avx2;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_f32_to_f32;
- }
- } break;
+ dst_s16[i] = (ma_int16)x;
}
}
-#endif
-
-#if defined(MA_SUPPORT_AVX512)
-void ma_format_converter_init_callbacks__avx512(ma_format_converter* pConverter)
+#else
+static MA_INLINE void ma_pcm_f32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_assert(pConverter != NULL);
-
- switch (pConverter->config.formatIn)
- {
- case ma_format_u8:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_u8_to_u8;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s16__avx512;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s24__avx512;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s32__avx512;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_u8_to_f32__avx512;
- }
- } break;
+ ma_uint64 i;
+ ma_uint64 i4;
+ ma_uint64 count4;
- case ma_format_s16:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s16_to_u8__avx512;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s16;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s24__avx512;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s32__avx512;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s16_to_f32__avx512;
- }
- } break;
+ ma_int16* dst_s16 = (ma_int16*)dst;
+ const float* src_f32 = (const float*)src;
- case ma_format_s24:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s24_to_u8__avx512;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s16__avx512;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s24;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s32__avx512;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s24_to_f32__avx512;
- }
- } break;
+ float ditherMin = 0;
+ float ditherMax = 0;
+ if (ditherMode != ma_dither_mode_none) {
+ ditherMin = 1.0f / -32768;
+ ditherMax = 1.0f / 32767;
+ }
- case ma_format_s32:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s32_to_u8__avx512;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s16__avx512;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s24__avx512;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s32;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s32_to_f32__avx512;
- }
- } break;
+ /* Unrolled. */
+ i = 0;
+ count4 = count >> 2;
+ for (i4 = 0; i4 < count4; i4 += 1) {
+ float d0 = ma_dither_f32(ditherMode, ditherMin, ditherMax);
+ float d1 = ma_dither_f32(ditherMode, ditherMin, ditherMax);
+ float d2 = ma_dither_f32(ditherMode, ditherMin, ditherMax);
+ float d3 = ma_dither_f32(ditherMode, ditherMin, ditherMax);
+
+ float x0 = src_f32[i+0];
+ float x1 = src_f32[i+1];
+ float x2 = src_f32[i+2];
+ float x3 = src_f32[i+3];
- case ma_format_f32:
- default:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_f32_to_u8__avx512;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s16__avx512;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s24__avx512;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s32__avx512;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_f32_to_f32;
- }
- } break;
- }
-}
-#endif
+ x0 = x0 + d0;
+ x1 = x1 + d1;
+ x2 = x2 + d2;
+ x3 = x3 + d3;
-#if defined(MA_SUPPORT_NEON)
-void ma_format_converter_init_callbacks__neon(ma_format_converter* pConverter)
-{
- ma_assert(pConverter != NULL);
+ x0 = ((x0 < -1) ? -1 : ((x0 > 1) ? 1 : x0));
+ x1 = ((x1 < -1) ? -1 : ((x1 > 1) ? 1 : x1));
+ x2 = ((x2 < -1) ? -1 : ((x2 > 1) ? 1 : x2));
+ x3 = ((x3 < -1) ? -1 : ((x3 > 1) ? 1 : x3));
- switch (pConverter->config.formatIn)
- {
- case ma_format_u8:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_u8_to_u8;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s16__neon;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s24__neon;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_u8_to_s32__neon;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_u8_to_f32__neon;
- }
- } break;
+ x0 = x0 * 32767.0f;
+ x1 = x1 * 32767.0f;
+ x2 = x2 * 32767.0f;
+ x3 = x3 * 32767.0f;
- case ma_format_s16:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s16_to_u8__neon;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s16;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s24__neon;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s16_to_s32__neon;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s16_to_f32__neon;
- }
- } break;
+ dst_s16[i+0] = (ma_int16)x0;
+ dst_s16[i+1] = (ma_int16)x1;
+ dst_s16[i+2] = (ma_int16)x2;
+ dst_s16[i+3] = (ma_int16)x3;
- case ma_format_s24:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s24_to_u8__neon;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s16__neon;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s24;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s24_to_s32__neon;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s24_to_f32__neon;
- }
- } break;
+ i += 4;
+ }
- case ma_format_s32:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_s32_to_u8__neon;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s16__neon;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s24__neon;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_s32_to_s32;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_s32_to_f32__neon;
- }
- } break;
+ /* Leftover. */
+ for (; i < count; i += 1) {
+ float x = src_f32[i];
+ x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
+ x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
+ x = x * 32767.0f; /* -1..1 to -32767..32767 */
- case ma_format_f32:
- default:
- {
- if (pConverter->config.formatOut == ma_format_u8) {
- pConverter->onConvertPCM = ma_pcm_f32_to_u8__neon;
- } else if (pConverter->config.formatOut == ma_format_s16) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s16__neon;
- } else if (pConverter->config.formatOut == ma_format_s24) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s24__neon;
- } else if (pConverter->config.formatOut == ma_format_s32) {
- pConverter->onConvertPCM = ma_pcm_f32_to_s32__neon;
- } else if (pConverter->config.formatOut == ma_format_f32) {
- pConverter->onConvertPCM = ma_pcm_f32_to_f32;
- }
- } break;
+ dst_s16[i] = (ma_int16)x;
}
}
-#endif
-ma_result ma_format_converter_init(const ma_format_converter_config* pConfig, ma_format_converter* pConverter)
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_f32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- if (pConverter == NULL) {
- return MA_INVALID_ARGS;
- }
- ma_zero_object(pConverter);
+ ma_uint64 i;
+ ma_uint64 i8;
+ ma_uint64 count8;
+ ma_int16* dst_s16;
+ const float* src_f32;
+ float ditherMin;
+ float ditherMax;
- if (pConfig == NULL) {
- return MA_INVALID_ARGS;
+ /* Both the input and output buffers need to be aligned to 16 bytes. */
+ if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) {
+ ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode);
+ return;
}
- pConverter->config = *pConfig;
-
- /* SIMD */
- pConverter->useSSE2 = ma_has_sse2() && !pConfig->noSSE2;
- pConverter->useAVX2 = ma_has_avx2() && !pConfig->noAVX2;
- pConverter->useAVX512 = ma_has_avx512f() && !pConfig->noAVX512;
- pConverter->useNEON = ma_has_neon() && !pConfig->noNEON;
+ dst_s16 = (ma_int16*)dst;
+ src_f32 = (const float*)src;
-#if defined(MA_SUPPORT_AVX512)
- if (pConverter->useAVX512) {
- ma_format_converter_init_callbacks__avx512(pConverter);
- } else
-#endif
-#if defined(MA_SUPPORT_AVX2)
- if (pConverter->useAVX2) {
- ma_format_converter_init_callbacks__avx2(pConverter);
- } else
-#endif
-#if defined(MA_SUPPORT_SSE2)
- if (pConverter->useSSE2) {
- ma_format_converter_init_callbacks__sse2(pConverter);
- } else
-#endif
-#if defined(MA_SUPPORT_NEON)
- if (pConverter->useNEON) {
- ma_format_converter_init_callbacks__neon(pConverter);
- } else
-#endif
- {
- ma_format_converter_init_callbacks__default(pConverter);
+ ditherMin = 0;
+ ditherMax = 0;
+ if (ditherMode != ma_dither_mode_none) {
+ ditherMin = 1.0f / -32768;
+ ditherMax = 1.0f / 32767;
}
- switch (pConfig->formatOut)
- {
- case ma_format_u8:
- {
- pConverter->onInterleavePCM = ma_pcm_interleave_u8;
- pConverter->onDeinterleavePCM = ma_pcm_deinterleave_u8;
- } break;
- case ma_format_s16:
- {
- pConverter->onInterleavePCM = ma_pcm_interleave_s16;
- pConverter->onDeinterleavePCM = ma_pcm_deinterleave_s16;
- } break;
- case ma_format_s24:
- {
- pConverter->onInterleavePCM = ma_pcm_interleave_s24;
- pConverter->onDeinterleavePCM = ma_pcm_deinterleave_s24;
- } break;
- case ma_format_s32:
- {
- pConverter->onInterleavePCM = ma_pcm_interleave_s32;
- pConverter->onDeinterleavePCM = ma_pcm_deinterleave_s32;
- } break;
- case ma_format_f32:
- default:
- {
- pConverter->onInterleavePCM = ma_pcm_interleave_f32;
- pConverter->onDeinterleavePCM = ma_pcm_deinterleave_f32;
- } break;
- }
+ i = 0;
+
+ /* SSE2. SSE allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */
+ count8 = count >> 3;
+ for (i8 = 0; i8 < count8; i8 += 1) {
+ __m128 d0;
+ __m128 d1;
+ __m128 x0;
+ __m128 x1;
+
+ if (ditherMode == ma_dither_mode_none) {
+ d0 = _mm_set1_ps(0);
+ d1 = _mm_set1_ps(0);
+ } else if (ditherMode == ma_dither_mode_rectangle) {
+ d0 = _mm_set_ps(
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax)
+ );
+ d1 = _mm_set_ps(
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax)
+ );
+ } else {
+ d0 = _mm_set_ps(
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax)
+ );
+ d1 = _mm_set_ps(
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax)
+ );
+ }
- return MA_SUCCESS;
-}
+ x0 = *((__m128*)(src_f32 + i) + 0);
+ x1 = *((__m128*)(src_f32 + i) + 1);
-ma_uint64 ma_format_converter_read(ma_format_converter* pConverter, ma_uint64 frameCount, void* pFramesOut, void* pUserData)
-{
- ma_uint64 totalFramesRead;
- ma_uint32 sampleSizeIn;
- ma_uint32 sampleSizeOut;
- ma_uint32 frameSizeOut;
- ma_uint8* pNextFramesOut;
+ x0 = _mm_add_ps(x0, d0);
+ x1 = _mm_add_ps(x1, d1);
- if (pConverter == NULL || pFramesOut == NULL) {
- return 0;
- }
+ x0 = _mm_mul_ps(x0, _mm_set1_ps(32767.0f));
+ x1 = _mm_mul_ps(x1, _mm_set1_ps(32767.0f));
- totalFramesRead = 0;
- sampleSizeIn = ma_get_bytes_per_sample(pConverter->config.formatIn);
- sampleSizeOut = ma_get_bytes_per_sample(pConverter->config.formatOut);
- /*frameSizeIn = sampleSizeIn * pConverter->config.channels;*/
- frameSizeOut = sampleSizeOut * pConverter->config.channels;
- pNextFramesOut = (ma_uint8*)pFramesOut;
-
- if (pConverter->config.onRead != NULL) {
- /* Input data is interleaved. */
- if (pConverter->config.formatIn == pConverter->config.formatOut) {
- /* Pass through. */
- while (totalFramesRead < frameCount) {
- ma_uint32 framesJustRead;
- ma_uint64 framesRemaining = (frameCount - totalFramesRead);
- ma_uint64 framesToReadRightNow = framesRemaining;
- if (framesToReadRightNow > 0xFFFFFFFF) {
- framesToReadRightNow = 0xFFFFFFFF;
- }
+ _mm_stream_si128(((__m128i*)(dst_s16 + i)), _mm_packs_epi32(_mm_cvttps_epi32(x0), _mm_cvttps_epi32(x1)));
+
+ i += 8;
+ }
- framesJustRead = (ma_uint32)pConverter->config.onRead(pConverter, (ma_uint32)framesToReadRightNow, pNextFramesOut, pUserData);
- if (framesJustRead == 0) {
- break;
- }
- totalFramesRead += framesJustRead;
- pNextFramesOut += framesJustRead * frameSizeOut;
+ /* Leftover. */
+ for (; i < count; i += 1) {
+ float x = src_f32[i];
+ x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
+ x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
+ x = x * 32767.0f; /* -1..1 to -32767..32767 */
- if (framesJustRead < framesToReadRightNow) {
- break;
- }
- }
- } else {
- /* Conversion required. */
- ma_uint32 maxFramesToReadAtATime;
+ dst_s16[i] = (ma_int16)x;
+ }
+}
+#endif /* SSE2 */
- MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 temp[MA_MAX_CHANNELS * MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES * 128];
- ma_assert(sizeof(temp) <= 0xFFFFFFFF);
+#if defined(MA_SUPPORT_AVX2)
+static MA_INLINE void ma_pcm_f32_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+ ma_uint64 i;
+ ma_uint64 i16;
+ ma_uint64 count16;
+ ma_int16* dst_s16;
+ const float* src_f32;
+ float ditherMin;
+ float ditherMax;
- maxFramesToReadAtATime = sizeof(temp) / sampleSizeIn / pConverter->config.channels;
+ /* Both the input and output buffers need to be aligned to 32 bytes. */
+ if ((((ma_uintptr)dst & 31) != 0) || (((ma_uintptr)src & 31) != 0)) {
+ ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode);
+ return;
+ }
- while (totalFramesRead < frameCount) {
- ma_uint32 framesJustRead;
- ma_uint64 framesRemaining = (frameCount - totalFramesRead);
- ma_uint64 framesToReadRightNow = framesRemaining;
- if (framesToReadRightNow > maxFramesToReadAtATime) {
- framesToReadRightNow = maxFramesToReadAtATime;
- }
+ dst_s16 = (ma_int16*)dst;
+ src_f32 = (const float*)src;
- framesJustRead = (ma_uint32)pConverter->config.onRead(pConverter, (ma_uint32)framesToReadRightNow, temp, pUserData);
- if (framesJustRead == 0) {
- break;
- }
+ ditherMin = 0;
+ ditherMax = 0;
+ if (ditherMode != ma_dither_mode_none) {
+ ditherMin = 1.0f / -32768;
+ ditherMax = 1.0f / 32767;
+ }
- pConverter->onConvertPCM(pNextFramesOut, temp, framesJustRead*pConverter->config.channels, pConverter->config.ditherMode);
+ i = 0;
- totalFramesRead += framesJustRead;
- pNextFramesOut += framesJustRead * frameSizeOut;
+ /* AVX2. AVX2 allows us to output 16 s16's at a time which means our loop is unrolled 16 times. */
+ count16 = count >> 4;
+ for (i16 = 0; i16 < count16; i16 += 1) {
+ __m256 d0;
+ __m256 d1;
+ __m256 x0;
+ __m256 x1;
+ __m256i i0;
+ __m256i i1;
+ __m256i p0;
+ __m256i p1;
+ __m256i r;
- if (framesJustRead < framesToReadRightNow) {
- break;
- }
- }
+ if (ditherMode == ma_dither_mode_none) {
+ d0 = _mm256_set1_ps(0);
+ d1 = _mm256_set1_ps(0);
+ } else if (ditherMode == ma_dither_mode_rectangle) {
+ d0 = _mm256_set_ps(
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax)
+ );
+ d1 = _mm256_set_ps(
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax),
+ ma_dither_f32_rectangle(ditherMin, ditherMax)
+ );
+ } else {
+ d0 = _mm256_set_ps(
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax)
+ );
+ d1 = _mm256_set_ps(
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax),
+ ma_dither_f32_triangle(ditherMin, ditherMax)
+ );
}
- } else {
- /* Input data is deinterleaved. If a conversion is required we need to do an intermediary step. */
- void* ppTempSamplesOfOutFormat[MA_MAX_CHANNELS];
- size_t splitBufferSizeOut;
- ma_uint32 maxFramesToReadAtATime;
- MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 tempSamplesOfOutFormat[MA_MAX_CHANNELS * MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES * 128];
- ma_assert(sizeof(tempSamplesOfOutFormat) <= 0xFFFFFFFF);
-
- ma_split_buffer(tempSamplesOfOutFormat, sizeof(tempSamplesOfOutFormat), pConverter->config.channels, MA_SIMD_ALIGNMENT, (void**)&ppTempSamplesOfOutFormat, &splitBufferSizeOut);
-
- maxFramesToReadAtATime = (ma_uint32)(splitBufferSizeOut / sampleSizeIn);
-
- while (totalFramesRead < frameCount) {
- ma_uint32 framesJustRead;
- ma_uint64 framesRemaining = (frameCount - totalFramesRead);
- ma_uint64 framesToReadRightNow = framesRemaining;
- if (framesToReadRightNow > maxFramesToReadAtATime) {
- framesToReadRightNow = maxFramesToReadAtATime;
- }
-
- if (pConverter->config.formatIn == pConverter->config.formatOut) {
- /* Only interleaving. */
- framesJustRead = (ma_uint32)pConverter->config.onReadDeinterleaved(pConverter, (ma_uint32)framesToReadRightNow, ppTempSamplesOfOutFormat, pUserData);
- if (framesJustRead == 0) {
- break;
- }
- } else {
- /* Interleaving + Conversion. Convert first, then interleave. */
- void* ppTempSamplesOfInFormat[MA_MAX_CHANNELS];
- size_t splitBufferSizeIn;
- ma_uint32 iChannel;
+ x0 = *((__m256*)(src_f32 + i) + 0);
+ x1 = *((__m256*)(src_f32 + i) + 1);
- MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 tempSamplesOfInFormat[MA_MAX_CHANNELS * MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES * 128];
+ x0 = _mm256_add_ps(x0, d0);
+ x1 = _mm256_add_ps(x1, d1);
- ma_split_buffer(tempSamplesOfInFormat, sizeof(tempSamplesOfInFormat), pConverter->config.channels, MA_SIMD_ALIGNMENT, (void**)&ppTempSamplesOfInFormat, &splitBufferSizeIn);
+ x0 = _mm256_mul_ps(x0, _mm256_set1_ps(32767.0f));
+ x1 = _mm256_mul_ps(x1, _mm256_set1_ps(32767.0f));
- if (framesToReadRightNow > (splitBufferSizeIn / sampleSizeIn)) {
- framesToReadRightNow = (splitBufferSizeIn / sampleSizeIn);
- }
+ /* Computing the final result is a little more complicated for AVX2 than SSE2. */
+ i0 = _mm256_cvttps_epi32(x0);
+ i1 = _mm256_cvttps_epi32(x1);
+ p0 = _mm256_permute2x128_si256(i0, i1, 0 | 32);
+ p1 = _mm256_permute2x128_si256(i0, i1, 1 | 48);
+ r = _mm256_packs_epi32(p0, p1);
- framesJustRead = (ma_uint32)pConverter->config.onReadDeinterleaved(pConverter, (ma_uint32)framesToReadRightNow, ppTempSamplesOfInFormat, pUserData);
- if (framesJustRead == 0) {
- break;
- }
+ _mm256_stream_si256(((__m256i*)(dst_s16 + i)), r);
- for (iChannel = 0; iChannel < pConverter->config.channels; iChannel += 1) {
- pConverter->onConvertPCM(ppTempSamplesOfOutFormat[iChannel], ppTempSamplesOfInFormat[iChannel], framesJustRead, pConverter->config.ditherMode);
- }
- }
+ i += 16;
+ }
- pConverter->onInterleavePCM(pNextFramesOut, (const void**)ppTempSamplesOfOutFormat, framesJustRead, pConverter->config.channels);
- totalFramesRead += framesJustRead;
- pNextFramesOut += framesJustRead * frameSizeOut;
+ /* Leftover. */
+ for (; i < count; i += 1) {
+ float x = src_f32[i];
+ x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
+ x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
+ x = x * 32767.0f; /* -1..1 to -32767..32767 */
- if (framesJustRead < framesToReadRightNow) {
- break;
- }
- }
+ dst_s16[i] = (ma_int16)x;
}
-
- return totalFramesRead;
}
+#endif /* AVX2 */
-ma_uint64 ma_format_converter_read_deinterleaved(ma_format_converter* pConverter, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData)
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE void ma_pcm_f32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
- ma_uint64 totalFramesRead;
- ma_uint32 sampleSizeIn;
- ma_uint32 sampleSizeOut;
- ma_uint8* ppNextSamplesOut[MA_MAX_CHANNELS];
+ ma_uint64 i;
+ ma_uint64 i8;
+ ma_uint64 count8;
+ ma_int16* dst_s16;
+ const float* src_f32;
+ float ditherMin;
+ float ditherMax;
- if (pConverter == NULL || ppSamplesOut == NULL) {
- return 0;
+ if (!ma_has_neon()) {
+        ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); return;  /* ISO C forbids returning a void expression */
}
- totalFramesRead = 0;
- sampleSizeIn = ma_get_bytes_per_sample(pConverter->config.formatIn);
- sampleSizeOut = ma_get_bytes_per_sample(pConverter->config.formatOut);
-
- ma_copy_memory(ppNextSamplesOut, ppSamplesOut, sizeof(void*) * pConverter->config.channels);
-
- if (pConverter->config.onRead != NULL) {
- /* Input data is interleaved. */
- ma_uint32 maxFramesToReadAtATime;
-
- MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 tempSamplesOfOutFormat[MA_MAX_CHANNELS * MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES * 128];
- ma_assert(sizeof(tempSamplesOfOutFormat) <= 0xFFFFFFFF);
-
- maxFramesToReadAtATime = sizeof(tempSamplesOfOutFormat) / sampleSizeIn / pConverter->config.channels;
-
- while (totalFramesRead < frameCount) {
- ma_uint32 iChannel;
- ma_uint32 framesJustRead;
- ma_uint64 framesRemaining = (frameCount - totalFramesRead);
- ma_uint64 framesToReadRightNow = framesRemaining;
- if (framesToReadRightNow > maxFramesToReadAtATime) {
- framesToReadRightNow = maxFramesToReadAtATime;
- }
-
- if (pConverter->config.formatIn == pConverter->config.formatOut) {
- /* Only de-interleaving. */
- framesJustRead = (ma_uint32)pConverter->config.onRead(pConverter, (ma_uint32)framesToReadRightNow, tempSamplesOfOutFormat, pUserData);
- if (framesJustRead == 0) {
- break;
- }
- } else {
- /* De-interleaving + Conversion. Convert first, then de-interleave. */
- MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 tempSamplesOfInFormat[sizeof(tempSamplesOfOutFormat)];
-
- framesJustRead = (ma_uint32)pConverter->config.onRead(pConverter, (ma_uint32)framesToReadRightNow, tempSamplesOfInFormat, pUserData);
- if (framesJustRead == 0) {
- break;
- }
-
- pConverter->onConvertPCM(tempSamplesOfOutFormat, tempSamplesOfInFormat, framesJustRead * pConverter->config.channels, pConverter->config.ditherMode);
- }
+ /* Both the input and output buffers need to be aligned to 16 bytes. */
+ if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) {
+ ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode);
+ return;
+ }
- pConverter->onDeinterleavePCM((void**)ppNextSamplesOut, tempSamplesOfOutFormat, framesJustRead, pConverter->config.channels);
+ dst_s16 = (ma_int16*)dst;
+ src_f32 = (const float*)src;
- totalFramesRead += framesJustRead;
- for (iChannel = 0; iChannel < pConverter->config.channels; ++iChannel) {
- ppNextSamplesOut[iChannel] += framesJustRead * sampleSizeOut;
- }
+ ditherMin = 0;
+ ditherMax = 0;
+ if (ditherMode != ma_dither_mode_none) {
+ ditherMin = 1.0f / -32768;
+ ditherMax = 1.0f / 32767;
+ }
- if (framesJustRead < framesToReadRightNow) {
- break;
- }
- }
- } else {
- /* Input data is deinterleaved. */
- if (pConverter->config.formatIn == pConverter->config.formatOut) {
- /* Pass through. */
- while (totalFramesRead < frameCount) {
- ma_uint32 iChannel;
- ma_uint32 framesJustRead;
- ma_uint64 framesRemaining = (frameCount - totalFramesRead);
- ma_uint64 framesToReadRightNow = framesRemaining;
- if (framesToReadRightNow > 0xFFFFFFFF) {
- framesToReadRightNow = 0xFFFFFFFF;
- }
+ i = 0;
- framesJustRead = (ma_uint32)pConverter->config.onReadDeinterleaved(pConverter, (ma_uint32)framesToReadRightNow, (void**)ppNextSamplesOut, pUserData);
- if (framesJustRead == 0) {
- break;
- }
+ /* NEON. NEON allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */
+ count8 = count >> 3;
+ for (i8 = 0; i8 < count8; i8 += 1) {
+ float32x4_t d0;
+ float32x4_t d1;
+ float32x4_t x0;
+ float32x4_t x1;
+ int32x4_t i0;
+ int32x4_t i1;
- totalFramesRead += framesJustRead;
- for (iChannel = 0; iChannel < pConverter->config.channels; ++iChannel) {
- ppNextSamplesOut[iChannel] += framesJustRead * sampleSizeOut;
- }
+ if (ditherMode == ma_dither_mode_none) {
+ d0 = vmovq_n_f32(0);
+ d1 = vmovq_n_f32(0);
+ } else if (ditherMode == ma_dither_mode_rectangle) {
+ float d0v[4];
+ d0v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax);
+ d0v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax);
+ d0v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax);
+ d0v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax);
+ d0 = vld1q_f32(d0v);
- if (framesJustRead < framesToReadRightNow) {
- break;
- }
- }
+            /* d0 is already loaded, so reuse d0v as scratch; avoids a mid-block declaration (invalid in C89). */
+            d0v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax);
+            d0v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax);
+            d0v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax);
+            d0v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax);
+            d1 = vld1q_f32(d0v);
} else {
- /* Conversion required. */
- void* ppTemp[MA_MAX_CHANNELS];
- size_t splitBufferSize;
- ma_uint32 maxFramesToReadAtATime;
-
- MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 temp[MA_MAX_CHANNELS][MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES * 128];
- ma_assert(sizeof(temp) <= 0xFFFFFFFF);
-
- ma_split_buffer(temp, sizeof(temp), pConverter->config.channels, MA_SIMD_ALIGNMENT, (void**)&ppTemp, &splitBufferSize);
+ float d0v[4];
+ d0v[0] = ma_dither_f32_triangle(ditherMin, ditherMax);
+ d0v[1] = ma_dither_f32_triangle(ditherMin, ditherMax);
+ d0v[2] = ma_dither_f32_triangle(ditherMin, ditherMax);
+ d0v[3] = ma_dither_f32_triangle(ditherMin, ditherMax);
+ d0 = vld1q_f32(d0v);
- maxFramesToReadAtATime = (ma_uint32)(splitBufferSize / sampleSizeIn);
+            /* d0 is already loaded, so reuse d0v as scratch; avoids a mid-block declaration (invalid in C89). */
+            d0v[0] = ma_dither_f32_triangle(ditherMin, ditherMax);
+            d0v[1] = ma_dither_f32_triangle(ditherMin, ditherMax);
+            d0v[2] = ma_dither_f32_triangle(ditherMin, ditherMax);
+            d0v[3] = ma_dither_f32_triangle(ditherMin, ditherMax);
+            d1 = vld1q_f32(d0v);
+ }
- while (totalFramesRead < frameCount) {
- ma_uint32 iChannel;
- ma_uint32 framesJustRead;
- ma_uint64 framesRemaining = (frameCount - totalFramesRead);
- ma_uint64 framesToReadRightNow = framesRemaining;
- if (framesToReadRightNow > maxFramesToReadAtATime) {
- framesToReadRightNow = maxFramesToReadAtATime;
- }
+ x0 = *((float32x4_t*)(src_f32 + i) + 0);
+ x1 = *((float32x4_t*)(src_f32 + i) + 1);
- framesJustRead = (ma_uint32)pConverter->config.onReadDeinterleaved(pConverter, (ma_uint32)framesToReadRightNow, ppTemp, pUserData);
- if (framesJustRead == 0) {
- break;
- }
+ x0 = vaddq_f32(x0, d0);
+ x1 = vaddq_f32(x1, d1);
- for (iChannel = 0; iChannel < pConverter->config.channels; iChannel += 1) {
- pConverter->onConvertPCM(ppNextSamplesOut[iChannel], ppTemp[iChannel], framesJustRead, pConverter->config.ditherMode);
- ppNextSamplesOut[iChannel] += framesJustRead * sampleSizeOut;
- }
+ x0 = vmulq_n_f32(x0, 32767.0f);
+ x1 = vmulq_n_f32(x1, 32767.0f);
- totalFramesRead += framesJustRead;
+ i0 = vcvtq_s32_f32(x0);
+ i1 = vcvtq_s32_f32(x1);
+ *((int16x8_t*)(dst_s16 + i)) = vcombine_s16(vqmovn_s32(i0), vqmovn_s32(i1));
- if (framesJustRead < framesToReadRightNow) {
- break;
- }
- }
- }
+ i += 8;
}
- return totalFramesRead;
-}
-
-
-ma_format_converter_config ma_format_converter_config_init_new()
-{
- ma_format_converter_config config;
- ma_zero_object(&config);
-
- return config;
-}
-ma_format_converter_config ma_format_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channels, ma_format_converter_read_proc onRead, void* pUserData)
-{
- ma_format_converter_config config = ma_format_converter_config_init_new();
- config.formatIn = formatIn;
- config.formatOut = formatOut;
- config.channels = channels;
- config.onRead = onRead;
- config.onReadDeinterleaved = NULL;
- config.pUserData = pUserData;
+ /* Leftover. */
+ for (; i < count; i += 1) {
+ float x = src_f32[i];
+ x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax);
+ x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */
+ x = x * 32767.0f; /* -1..1 to -32767..32767 */
- return config;
+ dst_s16[i] = (ma_int16)x;
+ }
}
+#endif /* Neon */
+#endif /* MA_USE_REFERENCE_CONVERSION_APIS */
-ma_format_converter_config ma_format_converter_config_init_deinterleaved(ma_format formatIn, ma_format formatOut, ma_uint32 channels, ma_format_converter_read_deinterleaved_proc onReadDeinterleaved, void* pUserData)
+/* f32 -> s16 conversion. Dispatches to the compile-time preferred SIMD path when the CPU supports it, otherwise the optimized scalar path. */
+void ma_pcm_f32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    ma_format_converter_config config = ma_format_converter_config_init(formatIn, formatOut, channels, NULL, pUserData);
-    config.onReadDeinterleaved = onReadDeinterleaved;
-
-    return config;
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_f32_to_s16__reference(dst, src, count, ditherMode);
+#else
+    /* Only one SIMD branch is compiled in; the trailing brace-block is the runtime fallback when the CPU check fails. */
+    # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+    if (ma_has_avx2()) {
+        ma_pcm_f32_to_s16__avx2(dst, src, count, ditherMode);
+    } else
+    #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+    if (ma_has_sse2()) {
+        ma_pcm_f32_to_s16__sse2(dst, src, count, ditherMode);
+    } else
+    #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+    if (ma_has_neon()) {
+        ma_pcm_f32_to_s16__neon(dst, src, count, ditherMode);
+    } else
+    #endif
+    {
+        ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode);
+    }
+#endif
}
+/* Reference f32 -> s24 conversion. Output is packed 3-bytes-per-sample, written low byte first (little-endian). Input is clipped to [-1, 1]. */
+static MA_INLINE void ma_pcm_f32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_uint8* dst_s24 = (ma_uint8*)dst;
+    const float* src_f32 = (const float*)src;
-/**************************************************************************************************************************************************************
+    ma_uint64 i;
+    for (i = 0; i < count; i += 1) {
+        ma_int32 r;
+        float x = src_f32[i];
+        x = ((x < -1) ? -1 : ((x > 1) ? 1 : x));    /* clip */
-Channel Routing
+#if 0
+        /* The accurate way. */
+        x = x + 1;                                  /* -1..1 to 0..2 */
+        x = x * 8388607.5f;                         /* 0..2 to 0..16777215 */
+        x = x - 8388608.0f;                         /* 0..16777215 to -8388608..8388607 */
+#else
+        /* The fast way. */
+        x = x * 8388607.0f;                         /* -1..1 to -8388607..8388607 */
+#endif
-**************************************************************************************************************************************************************/
+        r = (ma_int32)x;
+        dst_s24[(i*3)+0] = (ma_uint8)((r & 0x0000FF) >>  0);
+        dst_s24[(i*3)+1] = (ma_uint8)((r & 0x00FF00) >>  8);
+        dst_s24[(i*3)+2] = (ma_uint8)((r & 0xFF0000) >> 16);
+    }
-/*
--X = Left, +X = Right
--Y = Bottom, +Y = Top
--Z = Front, +Z = Back
-*/
-typedef struct
-{
-    float x;
-    float y;
-    float z;
-} ma_vec3;
+    (void)ditherMode;   /* No dithering for f32 -> s24. */
+}
-static MA_INLINE ma_vec3 ma_vec3f(float x, float y, float z)
+/* "Optimized" f32 -> s24 path; currently just forwards to the reference implementation (no SIMD variant written yet). */
+static MA_INLINE void ma_pcm_f32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    ma_vec3 r;
-    r.x = x;
-    r.y = y;
-    r.z = z;
-
-    return r;
+    ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode);
}
-static MA_INLINE ma_vec3 ma_vec3_add(ma_vec3 a, ma_vec3 b)
+#if defined(MA_SUPPORT_SSE2)
+/* SSE2 stub: no vectorized s24 packing implemented; falls through to the scalar path. */
+static MA_INLINE void ma_pcm_f32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    return ma_vec3f(
-        a.x + b.x,
-        a.y + b.y,
-        a.z + b.z
-    );
+    ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode);
}
-
-static MA_INLINE ma_vec3 ma_vec3_sub(ma_vec3 a, ma_vec3 b)
+#endif
+#if defined(MA_SUPPORT_AVX2)
+/* AVX2 stub: no vectorized s24 packing implemented; falls through to the scalar path. */
+static MA_INLINE void ma_pcm_f32_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    return ma_vec3f(
-        a.x - b.x,
-        a.y - b.y,
-        a.z - b.z
-    );
+    ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode);
}
-
-static MA_INLINE ma_vec3 ma_vec3_mul(ma_vec3 a, ma_vec3 b)
+#endif
+#if defined(MA_SUPPORT_NEON)
+/* NEON stub: no vectorized s24 packing implemented; falls through to the scalar path. */
+static MA_INLINE void ma_pcm_f32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    return ma_vec3f(
-        a.x * b.x,
-        a.y * b.y,
-        a.z * b.z
-    );
+    ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode);
}
+#endif
-static MA_INLINE ma_vec3 ma_vec3_div(ma_vec3 a, ma_vec3 b)
+/* f32 -> s24 conversion dispatcher. All branches currently resolve to the reference path (see the stubs above). */
+void ma_pcm_f32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    return ma_vec3f(
-        a.x / b.x,
-        a.y / b.y,
-        a.z / b.z
-    );
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode);
+#else
+    # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+    if (ma_has_avx2()) {
+        ma_pcm_f32_to_s24__avx2(dst, src, count, ditherMode);
+    } else
+    #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+    if (ma_has_sse2()) {
+        ma_pcm_f32_to_s24__sse2(dst, src, count, ditherMode);
+    } else
+    #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+    if (ma_has_neon()) {
+        ma_pcm_f32_to_s24__neon(dst, src, count, ditherMode);
+    } else
+    #endif
+    {
+        ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode);
+    }
+#endif
}
-static MA_INLINE float ma_vec3_dot(ma_vec3 a, ma_vec3 b)
+
+/* Reference f32 -> s32 conversion. Clips to [-1, 1], then scales via double to preserve 32-bit precision. */
+static MA_INLINE void ma_pcm_f32_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    return a.x*b.x + a.y*b.y + a.z*b.z;
+    ma_int32* dst_s32 = (ma_int32*)dst;
+    const float* src_f32 = (const float*)src;
+
+    ma_uint64 i;    /* Fix: was ma_uint32, which silently truncates the loop for counts > 0xFFFFFFFF; sibling reference converters use ma_uint64. */
+    for (i = 0; i < count; i += 1) {
+        double x = src_f32[i];
+        x = ((x < -1) ? -1 : ((x > 1) ? 1 : x));    /* clip */
+
+#if 0
+        /* The accurate way. */
+        x = x + 1;                                  /* -1..1 to 0..2 */
+        x = x * 2147483647.5;                       /* 0..2 to 0..4294967295 */
+        x = x - 2147483648.0;                       /* 0...4294967295 to -2147483648..2147483647 */
+#else
+        /* The fast way. */
+        x = x * 2147483647.0;                       /* -1..1 to -2147483647..2147483647 */
+#endif
+
+        dst_s32[i] = (ma_int32)x;
+    }
+
+    (void)ditherMode;   /* No dithering for f32 -> s32. */
+}
-static MA_INLINE float ma_vec3_length2(ma_vec3 a)
+/* "Optimized" f32 -> s32 path; currently just forwards to the reference implementation. */
+static MA_INLINE void ma_pcm_f32_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    return ma_vec3_dot(a, a);
+    ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode);
}
-static MA_INLINE float ma_vec3_length(ma_vec3 a)
+#if defined(MA_SUPPORT_SSE2)
+/* SSE2 stub: no vectorized f32 -> s32 implemented; falls through to the scalar path. */
+static MA_INLINE void ma_pcm_f32_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    return (float)sqrt(ma_vec3_length2(a));
+    ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode);
}
-
-static MA_INLINE ma_vec3 ma_vec3_normalize(ma_vec3 a)
+#endif
+#if defined(MA_SUPPORT_AVX2)
+/* AVX2 stub: no vectorized f32 -> s32 implemented; falls through to the scalar path. */
+static MA_INLINE void ma_pcm_f32_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    float len = 1 / ma_vec3_length(a);
-
-    ma_vec3 r;
-    r.x = a.x * len;
-    r.y = a.y * len;
-    r.z = a.z * len;
-
-    return r;
+    ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode);
}
-
-static MA_INLINE float ma_vec3_distance(ma_vec3 a, ma_vec3 b)
+#endif
+#if defined(MA_SUPPORT_NEON)
+/* NEON stub: no vectorized f32 -> s32 implemented; falls through to the scalar path. */
+static MA_INLINE void ma_pcm_f32_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    return ma_vec3_length(ma_vec3_sub(a, b));
+    ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode);
}
+#endif
-
-#define MA_PLANE_LEFT 0
-#define MA_PLANE_RIGHT 1
-#define MA_PLANE_FRONT 2
-#define MA_PLANE_BACK 3
-#define MA_PLANE_BOTTOM 4
-#define MA_PLANE_TOP 5
-
-float g_maChannelPlaneRatios[MA_CHANNEL_POSITION_COUNT][6] = {
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_NONE */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_MONO */
- { 0.5f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT */
- { 0.0f, 0.5f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT */
- { 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_CENTER */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_LFE */
- { 0.5f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_LEFT */
- { 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_RIGHT */
- { 0.25f, 0.0f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT_CENTER */
- { 0.0f, 0.25f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT_CENTER */
- { 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_CENTER */
- { 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_LEFT */
- { 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_RIGHT */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f}, /* MA_CHANNEL_TOP_CENTER */
- { 0.33f, 0.0f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_LEFT */
- { 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_FRONT_CENTER */
- { 0.0f, 0.33f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_RIGHT */
- { 0.33f, 0.0f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_LEFT */
- { 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_BACK_CENTER */
- { 0.0f, 0.33f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_RIGHT */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_0 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_1 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_2 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_3 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_4 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_5 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_6 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_7 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_8 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_9 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_10 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_11 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_12 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_13 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_14 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_15 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_16 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_17 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_18 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_19 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_20 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_21 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_22 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_23 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_24 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_25 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_26 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_27 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_28 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_29 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_30 */
- { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_31 */
-};
-
-float ma_calculate_channel_position_planar_weight(ma_channel channelPositionA, ma_channel channelPositionB)
+/* f32 -> s32 conversion dispatcher. All branches currently resolve to the reference path (see the stubs above). */
+void ma_pcm_f32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
{
-    /*
-    Imagine the following simplified example: You have a single input speaker which is the front/left speaker which you want to convert to
-    the following output configuration:
-
-    - front/left
-    - side/left
-    - back/left
-
-    The front/left output is easy - it the same speaker position so it receives the full contribution of the front/left input. The amount
-    of contribution to apply to the side/left and back/left speakers, however, is a bit more complicated.
-
-    Imagine the front/left speaker as emitting audio from two planes - the front plane and the left plane. You can think of the front/left
-    speaker emitting half of it's total volume from the front, and the other half from the left. Since part of it's volume is being emitted
-    from the left side, and the side/left and back/left channels also emit audio from the left plane, one would expect that they would
-    receive some amount of contribution from front/left speaker. The amount of contribution depends on how many planes are shared between
-    the two speakers. Note that in the examples below I've added a top/front/left speaker as an example just to show how the math works
-    across 3 spatial dimensions.
-
-    The first thing to do is figure out how each speaker's volume is spread over each of plane:
-    - front/left: 2 planes (front and left) = 1/2 = half it's total volume on each plane
-    - side/left: 1 plane (left only) = 1/1 = entire volume from left plane
-    - back/left: 2 planes (back and left) = 1/2 = half it's total volume on each plane
-    - top/front/left: 3 planes (top, front and left) = 1/3 = one third it's total volume on each plane
-
-    The amount of volume each channel contributes to each of it's planes is what controls how much it is willing to given and take to other
-    channels on the same plane. The volume that is willing to the given by one channel is multiplied by the volume that is willing to be
-    taken by the other to produce the final contribution.
-    */
-
-    /* Contribution = Sum(Volume to Give * Volume to Take) */
-    float contribution =
-        g_maChannelPlaneRatios[channelPositionA][0] * g_maChannelPlaneRatios[channelPositionB][0] +
-        g_maChannelPlaneRatios[channelPositionA][1] * g_maChannelPlaneRatios[channelPositionB][1] +
-        g_maChannelPlaneRatios[channelPositionA][2] * g_maChannelPlaneRatios[channelPositionB][2] +
-        g_maChannelPlaneRatios[channelPositionA][3] * g_maChannelPlaneRatios[channelPositionB][3] +
-        g_maChannelPlaneRatios[channelPositionA][4] * g_maChannelPlaneRatios[channelPositionB][4] +
-        g_maChannelPlaneRatios[channelPositionA][5] * g_maChannelPlaneRatios[channelPositionB][5];
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode);
+#else
+    # if MA_PREFERRED_SIMD == MA_SIMD_AVX2
+    if (ma_has_avx2()) {
+        ma_pcm_f32_to_s32__avx2(dst, src, count, ditherMode);
+    } else
+    #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2
+    if (ma_has_sse2()) {
+        ma_pcm_f32_to_s32__sse2(dst, src, count, ditherMode);
+    } else
+    #elif MA_PREFERRED_SIMD == MA_SIMD_NEON
+    if (ma_has_neon()) {
+        ma_pcm_f32_to_s32__neon(dst, src, count, ditherMode);
+    } else
+    #endif
+    {
+        ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode);
+    }
+#endif
+}
- return contribution;
+
+/* f32 -> f32 "conversion": a straight memory copy. Dithering is irrelevant for a lossless copy, hence the unused parameter. */
+void ma_pcm_f32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    (void)ditherMode;
+
+    ma_copy_memory_64(dst, src, count * sizeof(float));
}
-float ma_channel_router__calculate_input_channel_planar_weight(const ma_channel_router* pRouter, ma_channel channelPositionIn, ma_channel channelPositionOut)
+
+/* Interleaves per-channel f32 buffers (src[channel][frame]) into a single interleaved buffer (dst[frame*channels + channel]). */
+static void ma_pcm_interleave_f32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
{
-    ma_assert(pRouter != NULL);
-    (void)pRouter;
+    float* dst_f32 = (float*)dst;
+    const float** src_f32 = (const float**)src;
-    return ma_calculate_channel_position_planar_weight(channelPositionIn, channelPositionOut);
+    ma_uint64 iFrame;
+    for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+        ma_uint32 iChannel;
+        for (iChannel = 0; iChannel < channels; iChannel += 1) {
+            dst_f32[iFrame*channels + iChannel] = src_f32[iChannel][iFrame];
+        }
+    }
}
-ma_bool32 ma_channel_router__is_spatial_channel_position(const ma_channel_router* pRouter, ma_channel channelPosition)
+/* "Optimized" f32 interleave; currently identical to the reference implementation. */
+static void ma_pcm_interleave_f32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
{
-    int i;
+    ma_pcm_interleave_f32__reference(dst, src, frameCount, channels);
+}
- ma_assert(pRouter != NULL);
- (void)pRouter;
+/* Public f32 interleave entry point; selects reference vs optimized at compile time. */
+void ma_pcm_interleave_f32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_interleave_f32__reference(dst, src, frameCount, channels);
+#else
+    ma_pcm_interleave_f32__optimized(dst, src, frameCount, channels);
+#endif
+}
- if (channelPosition == MA_CHANNEL_NONE || channelPosition == MA_CHANNEL_MONO || channelPosition == MA_CHANNEL_LFE) {
- return MA_FALSE;
- }
- for (i = 0; i < 6; ++i) {
- if (g_maChannelPlaneRatios[channelPosition][i] != 0) {
- return MA_TRUE;
+/* Deinterleaves a single interleaved f32 buffer (src[frame*channels + channel]) into per-channel buffers (dst[channel][frame]). */
+static void ma_pcm_deinterleave_f32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+{
+    float** dst_f32 = (float**)dst;
+    const float* src_f32 = (const float*)src;
+
+    ma_uint64 iFrame;
+    for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+        ma_uint32 iChannel;
+        for (iChannel = 0; iChannel < channels; iChannel += 1) {
+            dst_f32[iChannel][iFrame] = src_f32[iFrame*channels + iChannel];
        }
    }
+}
- return MA_FALSE;
+/* "Optimized" f32 deinterleave; currently identical to the reference implementation. */
+static void ma_pcm_deinterleave_f32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+{
+    ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels);
}
-ma_result ma_channel_router_init(const ma_channel_router_config* pConfig, ma_channel_router* pRouter)
+/* Public f32 deinterleave entry point; selects reference vs optimized at compile time. */
+void ma_pcm_deinterleave_f32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
{
-    ma_uint32 iChannelIn;
-    ma_uint32 iChannelOut;
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels);
+#else
+    ma_pcm_deinterleave_f32__optimized(dst, src, frameCount, channels);
+#endif
+}
- if (pRouter == NULL) {
- return MA_INVALID_ARGS;
+
+/* Generic sample-format conversion. Same-format pairs are a plain copy; all other pairs dispatch to the per-pair converters. */
+/* Note: sampleCount counts individual samples, not frames — callers converting frames must pre-multiply by channel count. */
+void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode)
+{
+    if (formatOut == formatIn) {
+        ma_copy_memory_64(pOut, pIn, sampleCount * ma_get_bytes_per_sample(formatOut));
+        return;
    }
-    ma_zero_object(pRouter);
+    switch (formatIn)
+    {
+        case ma_format_u8:
+        {
+            switch (formatOut)
+            {
+                case ma_format_s16: ma_pcm_u8_to_s16(pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_s24: ma_pcm_u8_to_s24(pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_s32: ma_pcm_u8_to_s32(pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_f32: ma_pcm_u8_to_f32(pOut, pIn, sampleCount, ditherMode); return;
+                default: break;
+            }
+        } break;
-    if (pConfig == NULL) {
-        return MA_INVALID_ARGS;
-    }
-    if (pConfig->onReadDeinterleaved == NULL) {
-        return MA_INVALID_ARGS;
-    }
+        case ma_format_s16:
+        {
+            switch (formatOut)
+            {
+                case ma_format_u8:  ma_pcm_s16_to_u8( pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_s24: ma_pcm_s16_to_s24(pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_s32: ma_pcm_s16_to_s32(pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_f32: ma_pcm_s16_to_f32(pOut, pIn, sampleCount, ditherMode); return;
+                default: break;
+            }
+        } break;
-    if (!ma_channel_map_valid(pConfig->channelsIn, pConfig->channelMapIn)) {
-        return MA_INVALID_ARGS; /* Invalid input channel map. */
-    }
-    if (!ma_channel_map_valid(pConfig->channelsOut, pConfig->channelMapOut)) {
-        return MA_INVALID_ARGS; /* Invalid output channel map. */
-    }
+        case ma_format_s24:
+        {
+            switch (formatOut)
+            {
+                case ma_format_u8:  ma_pcm_s24_to_u8( pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_s16: ma_pcm_s24_to_s16(pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_s32: ma_pcm_s24_to_s32(pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_f32: ma_pcm_s24_to_f32(pOut, pIn, sampleCount, ditherMode); return;
+                default: break;
+            }
+        } break;
+
+        case ma_format_s32:
+        {
+            switch (formatOut)
+            {
+                case ma_format_u8:  ma_pcm_s32_to_u8( pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_s16: ma_pcm_s32_to_s16(pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_s24: ma_pcm_s32_to_s24(pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_f32: ma_pcm_s32_to_f32(pOut, pIn, sampleCount, ditherMode); return;
+                default: break;
+            }
+        } break;
+
+        case ma_format_f32:
+        {
+            switch (formatOut)
+            {
+                case ma_format_u8:  ma_pcm_f32_to_u8( pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_s16: ma_pcm_f32_to_s16(pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_s24: ma_pcm_f32_to_s24(pOut, pIn, sampleCount, ditherMode); return;
+                case ma_format_s32: ma_pcm_f32_to_s32(pOut, pIn, sampleCount, ditherMode); return;
+                default: break;
+            }
+        } break;
-    pRouter->config = *pConfig;
+        default: break;
+    }
+}
- /* SIMD */
- pRouter->useSSE2 = ma_has_sse2() && !pConfig->noSSE2;
- pRouter->useAVX2 = ma_has_avx2() && !pConfig->noAVX2;
- pRouter->useAVX512 = ma_has_avx512f() && !pConfig->noAVX512;
- pRouter->useNEON = ma_has_neon() && !pConfig->noNEON;
+/* Frame-based wrapper around ma_pcm_convert(): total sample count = frameCount * channels. */
+void ma_convert_pcm_frames_format(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 frameCount, ma_uint32 channels, ma_dither_mode ditherMode)
+{
+    ma_pcm_convert(pOut, formatOut, pIn, formatIn, frameCount * channels, ditherMode);
+}
- /* If the input and output channels and channel maps are the same we should use a passthrough. */
- if (pRouter->config.channelsIn == pRouter->config.channelsOut) {
- if (ma_channel_map_equal(pRouter->config.channelsIn, pRouter->config.channelMapIn, pRouter->config.channelMapOut)) {
- pRouter->isPassthrough = MA_TRUE;
- }
- if (ma_channel_map_blank(pRouter->config.channelsIn, pRouter->config.channelMapIn) || ma_channel_map_blank(pRouter->config.channelsOut, pRouter->config.channelMapOut)) {
- pRouter->isPassthrough = MA_TRUE;
- }
+/* Splits an interleaved buffer into per-channel buffers for any format. s16 and f32 get typed fast paths; other formats use a byte-wise memcpy per sample. Silently ignores NULL buffers. */
+void ma_deinterleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames)
+{
+    if (pInterleavedPCMFrames == NULL || ppDeinterleavedPCMFrames == NULL) {
+        return; /* Invalid args. */
    }
-    /*
-    Here is where we do a bit of pre-processing to know how each channel should be combined to make up the output. Rules:
-
-    1) If it's a passthrough, do nothing - it's just a simple memcpy().
-    2) If the channel counts are the same and every channel position in the input map is present in the output map, use a
-       simple shuffle. An example might be different 5.1 channel layouts.
-    3) Otherwise channels are blended based on spatial locality.
-    */
-    if (!pRouter->isPassthrough) {
-        if (pRouter->config.channelsIn == pRouter->config.channelsOut) {
-            ma_bool32 areAllChannelPositionsPresent = MA_TRUE;
-            for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) {
-                ma_bool32 isInputChannelPositionInOutput = MA_FALSE;
-                for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) {
-                    if (pRouter->config.channelMapIn[iChannelIn] == pRouter->config.channelMapOut[iChannelOut]) {
-                        isInputChannelPositionInOutput = MA_TRUE;
-                        break;
-                    }
+    /* For efficiency we do this per format. */
+    switch (format) {
+        case ma_format_s16:
+        {
+            const ma_int16* pSrcS16 = (const ma_int16*)pInterleavedPCMFrames;
+            ma_uint64 iPCMFrame;
+            for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
+                ma_uint32 iChannel;
+                for (iChannel = 0; iChannel < channels; ++iChannel) {
+                    ma_int16* pDstS16 = (ma_int16*)ppDeinterleavedPCMFrames[iChannel];
+                    pDstS16[iPCMFrame] = pSrcS16[iPCMFrame*channels+iChannel];
+                }
+            }
+        } break;
+
+        case ma_format_f32:
+        {
+            const float* pSrcF32 = (const float*)pInterleavedPCMFrames;
+            ma_uint64 iPCMFrame;
+            for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
+                ma_uint32 iChannel;
+                for (iChannel = 0; iChannel < channels; ++iChannel) {
+                    float* pDstF32 = (float*)ppDeinterleavedPCMFrames[iChannel];
+                    pDstF32[iPCMFrame] = pSrcF32[iPCMFrame*channels+iChannel];
+                }
+            }
+        } break;
+
+        default:
+        {
+            /* Generic fallback: copy sample-by-sample at the format's byte width. */
+            ma_uint32 sampleSizeInBytes = ma_get_bytes_per_sample(format);
+            ma_uint64 iPCMFrame;
+            for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
+                ma_uint32 iChannel;
+                for (iChannel = 0; iChannel < channels; ++iChannel) {
+                    void* pDst = ma_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes);
+                    const void* pSrc = ma_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes);
+                    memcpy(pDst, pSrc, sampleSizeInBytes);
                }
+            }
+        } break;
+    }
+}
- if (!isInputChannelPositionInOutput) {
- areAllChannelPositionsPresent = MA_FALSE;
- break;
+/* Merges per-channel buffers into one interleaved buffer for any format; mirror of ma_deinterleave_pcm_frames. */
+/* NOTE(review): unlike ma_deinterleave_pcm_frames, this performs no NULL-argument validation — confirm callers guarantee non-NULL buffers. */
+void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames)
+{
+    switch (format)
+    {
+        case ma_format_s16:
+        {
+            ma_int16* pDstS16 = (ma_int16*)pInterleavedPCMFrames;
+            ma_uint64 iPCMFrame;
+            for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
+                ma_uint32 iChannel;
+                for (iChannel = 0; iChannel < channels; ++iChannel) {
+                    const ma_int16* pSrcS16 = (const ma_int16*)ppDeinterleavedPCMFrames[iChannel];
+                    pDstS16[iPCMFrame*channels+iChannel] = pSrcS16[iPCMFrame];
+                }
+            }
+        } break;
+
+        case ma_format_f32:
+        {
+            float* pDstF32 = (float*)pInterleavedPCMFrames;
+            ma_uint64 iPCMFrame;
+            for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
+                ma_uint32 iChannel;
+                for (iChannel = 0; iChannel < channels; ++iChannel) {
+                    const float* pSrcF32 = (const float*)ppDeinterleavedPCMFrames[iChannel];
+                    pDstF32[iPCMFrame*channels+iChannel] = pSrcF32[iPCMFrame];
                }
            }
+        } break;
+
+        default:
+        {
+            /* Generic fallback: copy sample-by-sample at the format's byte width. */
+            ma_uint32 sampleSizeInBytes = ma_get_bytes_per_sample(format);
+            ma_uint64 iPCMFrame;
+            for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
+                ma_uint32 iChannel;
+                for (iChannel = 0; iChannel < channels; ++iChannel) {
+                    void* pDst = ma_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes);
+                    const void* pSrc = ma_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes);
+                    memcpy(pDst, pSrc, sampleSizeInBytes);
+                }
+            }
+        } break;
+    }
+}
+
+
+
+/**************************************************************************************************************************************************************
+
+Channel Maps
+
+**************************************************************************************************************************************************************/
+/* Fills channelMap[0..channels-1] with the standard Microsoft speaker layout for the given channel count; counts above 8 map to AUX channels. */
+static void ma_get_standard_channel_map_microsoft(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+{
+    /* Based off the speaker configurations mentioned here: https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/ksmedia/ns-ksmedia-ksaudio_channel_config */
+    switch (channels)
+    {
+        case 1:
+        {
+            channelMap[0] = MA_CHANNEL_MONO;
+        } break;
-            if (areAllChannelPositionsPresent) {
-                pRouter->isSimpleShuffle = MA_TRUE;
+        case 2:
+        {
+            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+        } break;
-                /*
-                All the router will be doing is rearranging channels which means all we need to do is use a shuffling table which is just
-                a mapping between the index of the input channel to the index of the output channel.
-                */
-                for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) {
-                    for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) {
-                        if (pRouter->config.channelMapIn[iChannelIn] == pRouter->config.channelMapOut[iChannelOut]) {
-                            pRouter->shuffleTable[iChannelIn] = (ma_uint8)iChannelOut;
-                            break;
-                        }
-                    }
-                }
-            }
-        }
-    }
+        case 3: /* Not defined, but best guess. */
+        {
+            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+        } break;
+        case 4:
+        {
+#ifndef MA_USE_QUAD_MICROSOFT_CHANNEL_MAP
+            /* Surround. Using the Surround profile has the advantage of the 3rd channel (MA_CHANNEL_FRONT_CENTER) mapping nicely with higher channel counts. */
+            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+            channelMap[3] = MA_CHANNEL_BACK_CENTER;
+#else
+            /* Quad. */
+            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+            channelMap[2] = MA_CHANNEL_BACK_LEFT;
+            channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+#endif
+        } break;
-    /*
-    Here is where weights are calculated. Note that we calculate the weights at all times, even when using a passthrough and simple
-    shuffling. We use different algorithms for calculating weights depending on our mixing mode.
-
-    In simple mode we don't do any blending (except for converting between mono, which is done in a later step). Instead we just
-    map 1:1 matching channels. In this mode, if no channels in the input channel map correspond to anything in the output channel
-    map, nothing will be heard!
-    */
+        case 5: /* Not defined, but best guess. */
+        {
+            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+            channelMap[3] = MA_CHANNEL_BACK_LEFT;
+            channelMap[4] = MA_CHANNEL_BACK_RIGHT;
+        } break;
-    /* In all cases we need to make sure all channels that are present in both channel maps have a 1:1 mapping. */
-    for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) {
-        ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn];
+        case 6:
+        {
+            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+            channelMap[3] = MA_CHANNEL_LFE;
+            channelMap[4] = MA_CHANNEL_SIDE_LEFT;
+            channelMap[5] = MA_CHANNEL_SIDE_RIGHT;
+        } break;
-        for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) {
-            ma_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut];
+        case 7: /* Not defined, but best guess. */
+        {
+            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+            channelMap[3] = MA_CHANNEL_LFE;
+            channelMap[4] = MA_CHANNEL_BACK_CENTER;
+            channelMap[5] = MA_CHANNEL_SIDE_LEFT;
+            channelMap[6] = MA_CHANNEL_SIDE_RIGHT;
+        } break;
-            if (channelPosIn == channelPosOut) {
-                pRouter->config.weights[iChannelIn][iChannelOut] = 1;
-            }
-        }
+        case 8:
+        default:
+        {
+            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+            channelMap[3] = MA_CHANNEL_LFE;
+            channelMap[4] = MA_CHANNEL_BACK_LEFT;
+            channelMap[5] = MA_CHANNEL_BACK_RIGHT;
+            channelMap[6] = MA_CHANNEL_SIDE_LEFT;
+            channelMap[7] = MA_CHANNEL_SIDE_RIGHT;
+        } break;
    }
-    /*
-    The mono channel is accumulated on all other channels, except LFE. Make sure in this loop we exclude output mono channels since
-    they were handled in the pass above.
-    */
-    for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) {
-        ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn];
-
-        if (channelPosIn == MA_CHANNEL_MONO) {
-            for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) {
-                ma_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut];
-
-                if (channelPosOut != MA_CHANNEL_NONE && channelPosOut != MA_CHANNEL_MONO && channelPosOut != MA_CHANNEL_LFE) {
-                    pRouter->config.weights[iChannelIn][iChannelOut] = 1;
-                }
-            }
+    /* Remainder. */
+    if (channels > 8) {
+        ma_uint32 iChannel;
+        for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) {
+            channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8));
        }
    }
+}
- /* The output mono channel is the average of all non-none, non-mono and non-lfe input channels. */
+static void ma_get_standard_channel_map_alsa(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+{
+ switch (channels)
{
- ma_uint32 len = 0;
- for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) {
- ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn];
-
- if (channelPosIn != MA_CHANNEL_NONE && channelPosIn != MA_CHANNEL_MONO && channelPosIn != MA_CHANNEL_LFE) {
- len += 1;
- }
- }
+ case 1:
+ {
+ channelMap[0] = MA_CHANNEL_MONO;
+ } break;
- if (len > 0) {
- float monoWeight = 1.0f / len;
+ case 2:
+ {
+ channelMap[0] = MA_CHANNEL_LEFT;
+ channelMap[1] = MA_CHANNEL_RIGHT;
+ } break;
- for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) {
- ma_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut];
+ case 3:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+ } break;
- if (channelPosOut == MA_CHANNEL_MONO) {
- for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) {
- ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn];
+ case 4:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ } break;
- if (channelPosIn != MA_CHANNEL_NONE && channelPosIn != MA_CHANNEL_MONO && channelPosIn != MA_CHANNEL_LFE) {
- pRouter->config.weights[iChannelIn][iChannelOut] += monoWeight;
- }
- }
- }
- }
- }
- }
+ case 5:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[4] = MA_CHANNEL_FRONT_CENTER;
+ } break;
+ case 6:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[4] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[5] = MA_CHANNEL_LFE;
+ } break;
- /* Input and output channels that are not present on the other side need to be blended in based on spatial locality. */
- switch (pRouter->config.mixingMode)
- {
- case ma_channel_mix_mode_rectangular:
+ case 7:
{
- /* Unmapped input channels. */
- for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) {
- ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn];
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[4] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[5] = MA_CHANNEL_LFE;
+ channelMap[6] = MA_CHANNEL_BACK_CENTER;
+ } break;
- if (ma_channel_router__is_spatial_channel_position(pRouter, channelPosIn)) {
- if (!ma_channel_map_contains_channel_position(pRouter->config.channelsOut, pRouter->config.channelMapOut, channelPosIn)) {
- for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) {
- ma_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut];
+ case 8:
+ default:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[4] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[5] = MA_CHANNEL_LFE;
+ channelMap[6] = MA_CHANNEL_SIDE_LEFT;
+ channelMap[7] = MA_CHANNEL_SIDE_RIGHT;
+ } break;
+ }
- if (ma_channel_router__is_spatial_channel_position(pRouter, channelPosOut)) {
- float weight = 0;
- if (pRouter->config.mixingMode == ma_channel_mix_mode_planar_blend) {
- weight = ma_channel_router__calculate_input_channel_planar_weight(pRouter, channelPosIn, channelPosOut);
- }
+ /* Remainder. */
+ if (channels > 8) {
+ ma_uint32 iChannel;
+ for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) {
+ channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8));
+ }
+ }
+}
- /* Only apply the weight if we haven't already got some contribution from the respective channels. */
- if (pRouter->config.weights[iChannelIn][iChannelOut] == 0) {
- pRouter->config.weights[iChannelIn][iChannelOut] = weight;
- }
- }
- }
- }
- }
- }
+static void ma_get_standard_channel_map_rfc3551(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+{
+ switch (channels)
+ {
+ case 1:
+ {
+ channelMap[0] = MA_CHANNEL_MONO;
+ } break;
- /* Unmapped output channels. */
- for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) {
- ma_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut];
+ case 2:
+ {
+ channelMap[0] = MA_CHANNEL_LEFT;
+ channelMap[1] = MA_CHANNEL_RIGHT;
+ } break;
- if (ma_channel_router__is_spatial_channel_position(pRouter, channelPosOut)) {
- if (!ma_channel_map_contains_channel_position(pRouter->config.channelsIn, pRouter->config.channelMapIn, channelPosOut)) {
- for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) {
- ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn];
+ case 3:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+ } break;
- if (ma_channel_router__is_spatial_channel_position(pRouter, channelPosIn)) {
- float weight = 0;
- if (pRouter->config.mixingMode == ma_channel_mix_mode_planar_blend) {
- weight = ma_channel_router__calculate_input_channel_planar_weight(pRouter, channelPosIn, channelPosOut);
- }
+ case 4:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[3] = MA_CHANNEL_BACK_CENTER;
+ } break;
- /* Only apply the weight if we haven't already got some contribution from the respective channels. */
- if (pRouter->config.weights[iChannelIn][iChannelOut] == 0) {
- pRouter->config.weights[iChannelIn][iChannelOut] = weight;
- }
- }
- }
- }
- }
- }
+ case 5:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[3] = MA_CHANNEL_BACK_LEFT;
+ channelMap[4] = MA_CHANNEL_BACK_RIGHT;
} break;
- case ma_channel_mix_mode_custom_weights:
- case ma_channel_mix_mode_simple:
- default:
+ case 6:
{
- /* Fallthrough. */
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_SIDE_LEFT;
+ channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[3] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[4] = MA_CHANNEL_SIDE_RIGHT;
+ channelMap[5] = MA_CHANNEL_BACK_CENTER;
} break;
}
- return MA_SUCCESS;
-}
-
-static MA_INLINE ma_bool32 ma_channel_router__can_use_sse2(ma_channel_router* pRouter, const float* pSamplesOut, const float* pSamplesIn)
-{
- return pRouter->useSSE2 && (((ma_uintptr)pSamplesOut & 15) == 0) && (((ma_uintptr)pSamplesIn & 15) == 0);
-}
-
-static MA_INLINE ma_bool32 ma_channel_router__can_use_avx2(ma_channel_router* pRouter, const float* pSamplesOut, const float* pSamplesIn)
-{
- return pRouter->useAVX2 && (((ma_uintptr)pSamplesOut & 31) == 0) && (((ma_uintptr)pSamplesIn & 31) == 0);
-}
-
-static MA_INLINE ma_bool32 ma_channel_router__can_use_avx512(ma_channel_router* pRouter, const float* pSamplesOut, const float* pSamplesIn)
-{
- return pRouter->useAVX512 && (((ma_uintptr)pSamplesOut & 63) == 0) && (((ma_uintptr)pSamplesIn & 63) == 0);
-}
-
-static MA_INLINE ma_bool32 ma_channel_router__can_use_neon(ma_channel_router* pRouter, const float* pSamplesOut, const float* pSamplesIn)
-{
- return pRouter->useNEON && (((ma_uintptr)pSamplesOut & 15) == 0) && (((ma_uintptr)pSamplesIn & 15) == 0);
+ /* Remainder. */
+ if (channels > 8) {
+ ma_uint32 iChannel;
+ for (iChannel = 6; iChannel < MA_MAX_CHANNELS; ++iChannel) {
+ channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-6));
+ }
+ }
}
-void ma_channel_router__do_routing(ma_channel_router* pRouter, ma_uint64 frameCount, float** ppSamplesOut, const float** ppSamplesIn)
+static void ma_get_standard_channel_map_flac(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
{
- ma_uint32 iChannelIn;
- ma_uint32 iChannelOut;
-
- ma_assert(pRouter != NULL);
- ma_assert(pRouter->isPassthrough == MA_FALSE);
+ switch (channels)
+ {
+ case 1:
+ {
+ channelMap[0] = MA_CHANNEL_MONO;
+ } break;
- if (pRouter->isSimpleShuffle) {
- /* A shuffle is just a re-arrangement of channels and does not require any arithmetic. */
- ma_assert(pRouter->config.channelsIn == pRouter->config.channelsOut);
- for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) {
- iChannelOut = pRouter->shuffleTable[iChannelIn];
- ma_copy_memory_64(ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn], frameCount * sizeof(float));
- }
- } else {
- /* This is the more complicated case. Each of the output channels is accumulated with 0 or more input channels. */
+ case 2:
+ {
+ channelMap[0] = MA_CHANNEL_LEFT;
+ channelMap[1] = MA_CHANNEL_RIGHT;
+ } break;
- /* Clear. */
- for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) {
- ma_zero_memory_64(ppSamplesOut[iChannelOut], frameCount * sizeof(float));
- }
+ case 3:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+ } break;
- /* Accumulate. */
- for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) {
- for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) {
- ma_uint64 iFrame = 0;
-#if defined(MA_SUPPORT_NEON)
- if (ma_channel_router__can_use_neon(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) {
- float32x4_t weight = vmovq_n_f32(pRouter->config.weights[iChannelIn][iChannelOut]);
- ma_uint64 frameCount4 = frameCount/4;
- ma_uint64 iFrame4;
-
- for (iFrame4 = 0; iFrame4 < frameCount4; iFrame4 += 1) {
- float32x4_t* pO = (float32x4_t*)ppSamplesOut[iChannelOut] + iFrame4;
- float32x4_t* pI = (float32x4_t*)ppSamplesIn [iChannelIn ] + iFrame4;
- *pO = vaddq_f32(*pO, vmulq_f32(*pI, weight));
- }
+ case 4:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ } break;
- iFrame += frameCount4*4;
- }
- else
-#endif
-#if defined(MA_SUPPORT_AVX512)
- if (ma_channel_router__can_use_avx512(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) {
- __m512 weight = _mm512_set1_ps(pRouter->config.weights[iChannelIn][iChannelOut]);
- ma_uint64 frameCount16 = frameCount/16;
- ma_uint64 iFrame16;
-
- for (iFrame16 = 0; iFrame16 < frameCount16; iFrame16 += 1) {
- __m512* pO = (__m512*)ppSamplesOut[iChannelOut] + iFrame16;
- __m512* pI = (__m512*)ppSamplesIn [iChannelIn ] + iFrame16;
- *pO = _mm512_add_ps(*pO, _mm512_mul_ps(*pI, weight));
- }
+ case 5:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[3] = MA_CHANNEL_BACK_LEFT;
+ channelMap[4] = MA_CHANNEL_BACK_RIGHT;
+ } break;
- iFrame += frameCount16*16;
- }
- else
-#endif
-#if defined(MA_SUPPORT_AVX2)
- if (ma_channel_router__can_use_avx2(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) {
- __m256 weight = _mm256_set1_ps(pRouter->config.weights[iChannelIn][iChannelOut]);
- ma_uint64 frameCount8 = frameCount/8;
- ma_uint64 iFrame8;
-
- for (iFrame8 = 0; iFrame8 < frameCount8; iFrame8 += 1) {
- __m256* pO = (__m256*)ppSamplesOut[iChannelOut] + iFrame8;
- __m256* pI = (__m256*)ppSamplesIn [iChannelIn ] + iFrame8;
- *pO = _mm256_add_ps(*pO, _mm256_mul_ps(*pI, weight));
- }
+ case 6:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[3] = MA_CHANNEL_LFE;
+ channelMap[4] = MA_CHANNEL_BACK_LEFT;
+ channelMap[5] = MA_CHANNEL_BACK_RIGHT;
+ } break;
- iFrame += frameCount8*8;
- }
- else
-#endif
-#if defined(MA_SUPPORT_SSE2)
- if (ma_channel_router__can_use_sse2(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) {
- __m128 weight = _mm_set1_ps(pRouter->config.weights[iChannelIn][iChannelOut]);
- ma_uint64 frameCount4 = frameCount/4;
- ma_uint64 iFrame4;
-
- for (iFrame4 = 0; iFrame4 < frameCount4; iFrame4 += 1) {
- __m128* pO = (__m128*)ppSamplesOut[iChannelOut] + iFrame4;
- __m128* pI = (__m128*)ppSamplesIn [iChannelIn ] + iFrame4;
- *pO = _mm_add_ps(*pO, _mm_mul_ps(*pI, weight));
- }
+ case 7:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[3] = MA_CHANNEL_LFE;
+ channelMap[4] = MA_CHANNEL_BACK_CENTER;
+ channelMap[5] = MA_CHANNEL_SIDE_LEFT;
+ channelMap[6] = MA_CHANNEL_SIDE_RIGHT;
+ } break;
- iFrame += frameCount4*4;
- } else
-#endif
- { /* Reference. */
- float weight0 = pRouter->config.weights[iChannelIn][iChannelOut];
- float weight1 = pRouter->config.weights[iChannelIn][iChannelOut];
- float weight2 = pRouter->config.weights[iChannelIn][iChannelOut];
- float weight3 = pRouter->config.weights[iChannelIn][iChannelOut];
- ma_uint64 frameCount4 = frameCount/4;
- ma_uint64 iFrame4;
-
- for (iFrame4 = 0; iFrame4 < frameCount4; iFrame4 += 1) {
- ppSamplesOut[iChannelOut][iFrame+0] += ppSamplesIn[iChannelIn][iFrame+0] * weight0;
- ppSamplesOut[iChannelOut][iFrame+1] += ppSamplesIn[iChannelIn][iFrame+1] * weight1;
- ppSamplesOut[iChannelOut][iFrame+2] += ppSamplesIn[iChannelIn][iFrame+2] * weight2;
- ppSamplesOut[iChannelOut][iFrame+3] += ppSamplesIn[iChannelIn][iFrame+3] * weight3;
- iFrame += 4;
- }
- }
+ case 8:
+ default:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[3] = MA_CHANNEL_LFE;
+ channelMap[4] = MA_CHANNEL_BACK_LEFT;
+ channelMap[5] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[6] = MA_CHANNEL_SIDE_LEFT;
+ channelMap[7] = MA_CHANNEL_SIDE_RIGHT;
+ } break;
+ }
- /* Leftover. */
- for (; iFrame < frameCount; ++iFrame) {
- ppSamplesOut[iChannelOut][iFrame] += ppSamplesIn[iChannelIn][iFrame] * pRouter->config.weights[iChannelIn][iChannelOut];
- }
- }
+ /* Remainder. */
+ if (channels > 8) {
+ ma_uint32 iChannel;
+ for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) {
+ channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8));
}
}
}
-ma_uint64 ma_channel_router_read_deinterleaved(ma_channel_router* pRouter, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData)
+static void ma_get_standard_channel_map_vorbis(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
{
- if (pRouter == NULL || ppSamplesOut == NULL) {
- return 0;
- }
+ /* In Vorbis' type 0 channel mapping, the first two channels are not always the standard left/right - it will have the center speaker where the right usually goes. Why?! */
+ switch (channels)
+ {
+ case 1:
+ {
+ channelMap[0] = MA_CHANNEL_MONO;
+ } break;
- /* Fast path for a passthrough. */
- if (pRouter->isPassthrough) {
- if (frameCount <= 0xFFFFFFFF) {
- return (ma_uint32)pRouter->config.onReadDeinterleaved(pRouter, (ma_uint32)frameCount, ppSamplesOut, pUserData);
- } else {
- float* ppNextSamplesOut[MA_MAX_CHANNELS];
- ma_uint64 totalFramesRead;
+ case 2:
+ {
+ channelMap[0] = MA_CHANNEL_LEFT;
+ channelMap[1] = MA_CHANNEL_RIGHT;
+ } break;
- ma_copy_memory(ppNextSamplesOut, ppSamplesOut, sizeof(float*) * pRouter->config.channelsOut);
+ case 3:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
+ } break;
- totalFramesRead = 0;
- while (totalFramesRead < frameCount) {
- ma_uint32 iChannel;
- ma_uint32 framesJustRead;
- ma_uint64 framesRemaining = (frameCount - totalFramesRead);
- ma_uint64 framesToReadRightNow = framesRemaining;
- if (framesToReadRightNow > 0xFFFFFFFF) {
- framesToReadRightNow = 0xFFFFFFFF;
- }
+ case 4:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ } break;
- framesJustRead = (ma_uint32)pRouter->config.onReadDeinterleaved(pRouter, (ma_uint32)framesToReadRightNow, (void**)ppNextSamplesOut, pUserData);
- if (framesJustRead == 0) {
- break;
- }
+ case 5:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[3] = MA_CHANNEL_BACK_LEFT;
+ channelMap[4] = MA_CHANNEL_BACK_RIGHT;
+ } break;
- totalFramesRead += framesJustRead;
+ case 6:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[3] = MA_CHANNEL_BACK_LEFT;
+ channelMap[4] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[5] = MA_CHANNEL_LFE;
+ } break;
- if (framesJustRead < framesToReadRightNow) {
- break;
- }
+ case 7:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[3] = MA_CHANNEL_SIDE_LEFT;
+ channelMap[4] = MA_CHANNEL_SIDE_RIGHT;
+ channelMap[5] = MA_CHANNEL_BACK_CENTER;
+ channelMap[6] = MA_CHANNEL_LFE;
+ } break;
- for (iChannel = 0; iChannel < pRouter->config.channelsOut; ++iChannel) {
- ppNextSamplesOut[iChannel] += framesJustRead;
- }
- }
+ case 8:
+ default:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[2] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[3] = MA_CHANNEL_SIDE_LEFT;
+ channelMap[4] = MA_CHANNEL_SIDE_RIGHT;
+ channelMap[5] = MA_CHANNEL_BACK_LEFT;
+ channelMap[6] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[7] = MA_CHANNEL_LFE;
+ } break;
+ }
- return totalFramesRead;
+ /* Remainder. */
+ if (channels > 8) {
+ ma_uint32 iChannel;
+ for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) {
+ channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8));
}
}
+}
- /* Slower path for a non-passthrough. */
+static void ma_get_standard_channel_map_sound4(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+{
+ switch (channels)
{
- float* ppNextSamplesOut[MA_MAX_CHANNELS];
- float* ppTemp[MA_MAX_CHANNELS];
- size_t maxBytesToReadPerFrameEachIteration;
- size_t maxFramesToReadEachIteration;
- ma_uint64 totalFramesRead;
- MA_ALIGN(MA_SIMD_ALIGNMENT) float temp[MA_MAX_CHANNELS * 256];
-
- ma_assert(sizeof(temp) <= 0xFFFFFFFF);
- ma_copy_memory(ppNextSamplesOut, ppSamplesOut, sizeof(float*) * pRouter->config.channelsOut);
-
-
- ma_split_buffer(temp, sizeof(temp), pRouter->config.channelsIn, MA_SIMD_ALIGNMENT, (void**)&ppTemp, &maxBytesToReadPerFrameEachIteration);
-
- maxFramesToReadEachIteration = maxBytesToReadPerFrameEachIteration/sizeof(float);
-
- totalFramesRead = 0;
- while (totalFramesRead < frameCount) {
- ma_uint32 iChannel;
- ma_uint32 framesJustRead;
- ma_uint64 framesRemaining = (frameCount - totalFramesRead);
- ma_uint64 framesToReadRightNow = framesRemaining;
- if (framesToReadRightNow > maxFramesToReadEachIteration) {
- framesToReadRightNow = maxFramesToReadEachIteration;
- }
-
- framesJustRead = pRouter->config.onReadDeinterleaved(pRouter, (ma_uint32)framesToReadRightNow, (void**)ppTemp, pUserData);
- if (framesJustRead == 0) {
- break;
- }
+ case 1:
+ {
+ channelMap[0] = MA_CHANNEL_MONO;
+ } break;
- ma_channel_router__do_routing(pRouter, framesJustRead, (float**)ppNextSamplesOut, (const float**)ppTemp); /* <-- Real work is done here. */
+ case 2:
+ {
+ channelMap[0] = MA_CHANNEL_LEFT;
+ channelMap[1] = MA_CHANNEL_RIGHT;
+ } break;
- totalFramesRead += framesJustRead;
- if (totalFramesRead < frameCount) {
- for (iChannel = 0; iChannel < pRouter->config.channelsIn; iChannel += 1) {
- ppNextSamplesOut[iChannel] += framesJustRead;
- }
- }
+ case 3:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_CENTER;
+ } break;
- if (framesJustRead < framesToReadRightNow) {
- break;
- }
- }
+ case 4:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ } break;
- return totalFramesRead;
- }
-}
+ case 5:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[4] = MA_CHANNEL_FRONT_CENTER;
+ } break;
-ma_channel_router_config ma_channel_router_config_init(ma_uint32 channelsIn, const ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint32 channelsOut, const ma_channel channelMapOut[MA_MAX_CHANNELS], ma_channel_mix_mode mixingMode, ma_channel_router_read_deinterleaved_proc onRead, void* pUserData)
-{
- ma_channel_router_config config;
- ma_uint32 iChannel;
+ case 6:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[4] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[5] = MA_CHANNEL_LFE;
+ } break;
- ma_zero_object(&config);
+ case 7:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[4] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[5] = MA_CHANNEL_BACK_CENTER;
+ channelMap[6] = MA_CHANNEL_LFE;
+ } break;
- config.channelsIn = channelsIn;
- for (iChannel = 0; iChannel < channelsIn; ++iChannel) {
- config.channelMapIn[iChannel] = channelMapIn[iChannel];
+ case 8:
+ default:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[4] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[5] = MA_CHANNEL_LFE;
+ channelMap[6] = MA_CHANNEL_SIDE_LEFT;
+ channelMap[7] = MA_CHANNEL_SIDE_RIGHT;
+ } break;
}
- config.channelsOut = channelsOut;
- for (iChannel = 0; iChannel < channelsOut; ++iChannel) {
- config.channelMapOut[iChannel] = channelMapOut[iChannel];
+ /* Remainder. */
+ if (channels > 8) {
+ ma_uint32 iChannel;
+ for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) {
+ channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8));
+ }
}
-
- config.mixingMode = mixingMode;
- config.onReadDeinterleaved = onRead;
- config.pUserData = pUserData;
-
- return config;
}
-
-
-/**************************************************************************************************************************************************************
-
-SRC
-
-**************************************************************************************************************************************************************/
-#define ma_floorf(x) ((float)floor((double)(x)))
-#define ma_sinf(x) ((float)sin((double)(x)))
-#define ma_cosf(x) ((float)cos((double)(x)))
-
-static MA_INLINE double ma_sinc(double x)
+static void ma_get_standard_channel_map_sndio(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
{
- if (x != 0) {
- return sin(MA_PI_D*x) / (MA_PI_D*x);
- } else {
- return 1;
- }
-}
-
-#define ma_sincf(x) ((float)ma_sinc((double)(x)))
+ switch (channels)
+ {
+ case 1:
+ {
+ channelMap[0] = MA_CHANNEL_MONO;
+ } break;
+ case 2:
+ {
+ channelMap[0] = MA_CHANNEL_LEFT;
+ channelMap[1] = MA_CHANNEL_RIGHT;
+ } break;
-ma_uint64 ma_src_read_deinterleaved__passthrough(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData);
-ma_uint64 ma_src_read_deinterleaved__linear(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData);
-ma_uint64 ma_src_read_deinterleaved__sinc(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData);
+ case 3:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_FRONT_CENTER;
+ } break;
-void ma_src__build_sinc_table__sinc(ma_src* pSRC)
-{
- ma_uint32 i;
+ case 4:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ } break;
- ma_assert(pSRC != NULL);
+ case 5:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[4] = MA_CHANNEL_FRONT_CENTER;
+ } break;
- pSRC->sinc.table[0] = 1.0f;
- for (i = 1; i < ma_countof(pSRC->sinc.table); i += 1) {
- double x = i*MA_PI_D / MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION;
- pSRC->sinc.table[i] = (float)(sin(x)/x);
+ case 6:
+ default:
+ {
+ channelMap[0] = MA_CHANNEL_FRONT_LEFT;
+ channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
+ channelMap[2] = MA_CHANNEL_BACK_LEFT;
+ channelMap[3] = MA_CHANNEL_BACK_RIGHT;
+ channelMap[4] = MA_CHANNEL_FRONT_CENTER;
+ channelMap[5] = MA_CHANNEL_LFE;
+ } break;
}
-}
-
-void ma_src__build_sinc_table__rectangular(ma_src* pSRC)
-{
- /* This is the same as the base sinc table. */
- ma_src__build_sinc_table__sinc(pSRC);
-}
-void ma_src__build_sinc_table__hann(ma_src* pSRC)
-{
- ma_uint32 i;
-
- ma_src__build_sinc_table__sinc(pSRC);
-
- for (i = 0; i < ma_countof(pSRC->sinc.table); i += 1) {
- double x = pSRC->sinc.table[i];
- double N = MA_SRC_SINC_MAX_WINDOW_WIDTH*2;
- double n = ((double)(i) / MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION) + MA_SRC_SINC_MAX_WINDOW_WIDTH;
- double w = 0.5 * (1 - cos((2*MA_PI_D*n) / (N)));
-
- pSRC->sinc.table[i] = (float)(x * w);
+ /* Remainder. */
+ if (channels > 6) {
+ ma_uint32 iChannel;
+ for (iChannel = 6; iChannel < MA_MAX_CHANNELS; ++iChannel) {
+ channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-6));
+ }
}
}
-ma_result ma_src_init(const ma_src_config* pConfig, ma_src* pSRC)
+void ma_get_standard_channel_map(ma_standard_channel_map standardChannelMap, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
{
- if (pSRC == NULL) {
- return MA_INVALID_ARGS;
- }
-
- ma_zero_object(pSRC);
+ switch (standardChannelMap)
+ {
+ case ma_standard_channel_map_alsa:
+ {
+ ma_get_standard_channel_map_alsa(channels, channelMap);
+ } break;
- if (pConfig == NULL || pConfig->onReadDeinterleaved == NULL) {
- return MA_INVALID_ARGS;
- }
- if (pConfig->channels == 0 || pConfig->channels > MA_MAX_CHANNELS) {
- return MA_INVALID_ARGS;
- }
+ case ma_standard_channel_map_rfc3551:
+ {
+ ma_get_standard_channel_map_rfc3551(channels, channelMap);
+ } break;
- pSRC->config = *pConfig;
+ case ma_standard_channel_map_flac:
+ {
+ ma_get_standard_channel_map_flac(channels, channelMap);
+ } break;
- /* SIMD */
- pSRC->useSSE2 = ma_has_sse2() && !pConfig->noSSE2;
- pSRC->useAVX2 = ma_has_avx2() && !pConfig->noAVX2;
- pSRC->useAVX512 = ma_has_avx512f() && !pConfig->noAVX512;
- pSRC->useNEON = ma_has_neon() && !pConfig->noNEON;
+ case ma_standard_channel_map_vorbis:
+ {
+ ma_get_standard_channel_map_vorbis(channels, channelMap);
+ } break;
- if (pSRC->config.algorithm == ma_src_algorithm_sinc) {
- /* Make sure the window width within bounds. */
- if (pSRC->config.sinc.windowWidth == 0) {
- pSRC->config.sinc.windowWidth = MA_SRC_SINC_DEFAULT_WINDOW_WIDTH;
- }
- if (pSRC->config.sinc.windowWidth < MA_SRC_SINC_MIN_WINDOW_WIDTH) {
- pSRC->config.sinc.windowWidth = MA_SRC_SINC_MIN_WINDOW_WIDTH;
- }
- if (pSRC->config.sinc.windowWidth > MA_SRC_SINC_MAX_WINDOW_WIDTH) {
- pSRC->config.sinc.windowWidth = MA_SRC_SINC_MAX_WINDOW_WIDTH;
- }
+ case ma_standard_channel_map_sound4:
+ {
+ ma_get_standard_channel_map_sound4(channels, channelMap);
+ } break;
+
+ case ma_standard_channel_map_sndio:
+ {
+ ma_get_standard_channel_map_sndio(channels, channelMap);
+ } break;
- /* Set up the lookup table. */
- switch (pSRC->config.sinc.windowFunction) {
- case ma_src_sinc_window_function_hann: ma_src__build_sinc_table__hann(pSRC); break;
- case ma_src_sinc_window_function_rectangular: ma_src__build_sinc_table__rectangular(pSRC); break;
- default: return MA_INVALID_ARGS; /* <-- Hitting this means the window function is unknown to miniaudio. */
- }
+ case ma_standard_channel_map_microsoft:
+ default:
+ {
+ ma_get_standard_channel_map_microsoft(channels, channelMap);
+ } break;
}
-
- return MA_SUCCESS;
}
-ma_result ma_src_set_sample_rate(ma_src* pSRC, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut)
+void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels)
{
- if (pSRC == NULL) {
- return MA_INVALID_ARGS;
- }
-
- /* Must have a sample rate of > 0. */
- if (sampleRateIn == 0 || sampleRateOut == 0) {
- return MA_INVALID_ARGS;
+ if (pOut != NULL && pIn != NULL && channels > 0) {
+ MA_COPY_MEMORY(pOut, pIn, sizeof(*pOut) * channels);
}
-
- ma_atomic_exchange_32(&pSRC->config.sampleRateIn, sampleRateIn);
- ma_atomic_exchange_32(&pSRC->config.sampleRateOut, sampleRateOut);
-
- return MA_SUCCESS;
}
-ma_uint64 ma_src_read_deinterleaved(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData)
+ma_bool32 ma_channel_map_valid(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS])
{
- ma_src_algorithm algorithm;
-
- if (pSRC == NULL || frameCount == 0 || ppSamplesOut == NULL) {
- return 0;
+ if (channelMap == NULL) {
+ return MA_FALSE;
}
- algorithm = pSRC->config.algorithm;
-
- /* Can use a function pointer for this. */
- switch (algorithm) {
- case ma_src_algorithm_none: return ma_src_read_deinterleaved__passthrough(pSRC, frameCount, ppSamplesOut, pUserData);
- case ma_src_algorithm_linear: return ma_src_read_deinterleaved__linear( pSRC, frameCount, ppSamplesOut, pUserData);
- case ma_src_algorithm_sinc: return ma_src_read_deinterleaved__sinc( pSRC, frameCount, ppSamplesOut, pUserData);
- default: break;
+ /* A channel count of 0 is invalid. */
+ if (channels == 0) {
+ return MA_FALSE;
}
- /* Should never get here. */
- return 0;
-}
-
-ma_uint64 ma_src_read_deinterleaved__passthrough(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData)
-{
- if (frameCount <= 0xFFFFFFFF) {
- return pSRC->config.onReadDeinterleaved(pSRC, (ma_uint32)frameCount, ppSamplesOut, pUserData);
- } else {
+ /* It does not make sense to have a mono channel when there is more than 1 channel. */
+ if (channels > 1) {
ma_uint32 iChannel;
- ma_uint64 totalFramesRead;
- float* ppNextSamplesOut[MA_MAX_CHANNELS];
-
- for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) {
- ppNextSamplesOut[iChannel] = (float*)ppSamplesOut[iChannel];
- }
-
- totalFramesRead = 0;
- while (totalFramesRead < frameCount) {
- ma_uint32 framesJustRead;
- ma_uint64 framesRemaining = frameCount - totalFramesRead;
- ma_uint64 framesToReadRightNow = framesRemaining;
- if (framesToReadRightNow > 0xFFFFFFFF) {
- framesToReadRightNow = 0xFFFFFFFF;
- }
-
- framesJustRead = (ma_uint32)pSRC->config.onReadDeinterleaved(pSRC, (ma_uint32)framesToReadRightNow, (void**)ppNextSamplesOut, pUserData);
- if (framesJustRead == 0) {
- break;
- }
-
- totalFramesRead += framesJustRead;
- for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) {
- ppNextSamplesOut[iChannel] += framesJustRead;
- }
-
- if (framesJustRead < framesToReadRightNow) {
- break;
+ for (iChannel = 0; iChannel < channels; ++iChannel) {
+ if (channelMap[iChannel] == MA_CHANNEL_MONO) {
+ return MA_FALSE;
}
}
-
- return totalFramesRead;
}
+
+ return MA_TRUE;
}
-ma_uint64 ma_src_read_deinterleaved__linear(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData)
+ma_bool32 ma_channel_map_equal(ma_uint32 channels, const ma_channel channelMapA[MA_MAX_CHANNELS], const ma_channel channelMapB[MA_MAX_CHANNELS])
{
- float* ppNextSamplesOut[MA_MAX_CHANNELS];
- float factor;
- ma_uint32 maxFrameCountPerChunkIn;
- ma_uint64 totalFramesRead;
-
- ma_assert(pSRC != NULL);
- ma_assert(frameCount > 0);
- ma_assert(ppSamplesOut != NULL);
-
- ma_copy_memory(ppNextSamplesOut, ppSamplesOut, sizeof(void*) * pSRC->config.channels);
-
- factor = (float)pSRC->config.sampleRateIn / pSRC->config.sampleRateOut;
- maxFrameCountPerChunkIn = ma_countof(pSRC->linear.input[0]);
-
- totalFramesRead = 0;
- while (totalFramesRead < frameCount) {
- ma_uint32 iChannel;
- float tBeg;
- float tEnd;
- float tAvailable;
- float tNext;
- float* ppSamplesFromClient[MA_MAX_CHANNELS];
- ma_uint32 iNextFrame;
- ma_uint32 maxOutputFramesToRead;
- ma_uint32 maxOutputFramesToRead4;
- ma_uint32 framesToReadFromClient;
- ma_uint32 framesReadFromClient;
- ma_uint64 framesRemaining = frameCount - totalFramesRead;
- ma_uint64 framesToRead = framesRemaining;
- if (framesToRead > 16384) {
- framesToRead = 16384; /* <-- Keep this small because we're using 32-bit floats for calculating sample positions and I don't want to run out of precision with huge sample counts. */
- }
-
-
- /* Read Input Data */
- tBeg = pSRC->linear.timeIn;
- tEnd = tBeg + ((ma_int64)framesToRead*factor); /* Cast to int64 required for VC6. */
-
- framesToReadFromClient = (ma_uint32)(tEnd) + 1 + 1; /* +1 to make tEnd 1-based and +1 because we always need to an extra sample for interpolation. */
- if (framesToReadFromClient >= maxFrameCountPerChunkIn) {
- framesToReadFromClient = maxFrameCountPerChunkIn;
- }
-
- for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) {
- ppSamplesFromClient[iChannel] = pSRC->linear.input[iChannel] + pSRC->linear.leftoverFrames;
- }
-
- framesReadFromClient = 0;
- if (framesToReadFromClient > pSRC->linear.leftoverFrames) {
- framesReadFromClient = (ma_uint32)pSRC->config.onReadDeinterleaved(pSRC, (ma_uint32)framesToReadFromClient - pSRC->linear.leftoverFrames, (void**)ppSamplesFromClient, pUserData);
- }
-
- framesReadFromClient += pSRC->linear.leftoverFrames; /* <-- You can sort of think of it as though we've re-read the leftover samples from the client. */
- if (framesReadFromClient < 2) {
- break;
- }
-
- for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) {
- ppSamplesFromClient[iChannel] = pSRC->linear.input[iChannel];
- }
-
-
- /* Write Output Data */
-
- /*
- At this point we have a bunch of frames that the client has given to us for processing. From this we can determine the maximum number of output frames
- that can be processed from this input. We want to output as many samples as possible from our input data.
- */
- tAvailable = framesReadFromClient - tBeg - 1; /* Subtract 1 because the last input sample is needed for interpolation and cannot be included in the output sample count calculation. */
-
- maxOutputFramesToRead = (ma_uint32)(tAvailable / factor);
- if (maxOutputFramesToRead == 0) {
- maxOutputFramesToRead = 1;
- }
- if (maxOutputFramesToRead > framesToRead) {
- maxOutputFramesToRead = (ma_uint32)framesToRead;
- }
-
- /* Output frames are always read in groups of 4 because I'm planning on using this as a reference for some SIMD-y stuff later. */
- maxOutputFramesToRead4 = maxOutputFramesToRead/4;
- for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) {
- ma_uint32 iFrameOut;
- float t0 = pSRC->linear.timeIn + factor*0;
- float t1 = pSRC->linear.timeIn + factor*1;
- float t2 = pSRC->linear.timeIn + factor*2;
- float t3 = pSRC->linear.timeIn + factor*3;
- float t;
-
- for (iFrameOut = 0; iFrameOut < maxOutputFramesToRead4; iFrameOut += 1) {
- float iPrevSample0 = (float)floor(t0);
- float iPrevSample1 = (float)floor(t1);
- float iPrevSample2 = (float)floor(t2);
- float iPrevSample3 = (float)floor(t3);
-
- float iNextSample0 = iPrevSample0 + 1;
- float iNextSample1 = iPrevSample1 + 1;
- float iNextSample2 = iPrevSample2 + 1;
- float iNextSample3 = iPrevSample3 + 1;
-
- float alpha0 = t0 - iPrevSample0;
- float alpha1 = t1 - iPrevSample1;
- float alpha2 = t2 - iPrevSample2;
- float alpha3 = t3 - iPrevSample3;
-
- float prevSample0 = ppSamplesFromClient[iChannel][(ma_uint32)iPrevSample0];
- float prevSample1 = ppSamplesFromClient[iChannel][(ma_uint32)iPrevSample1];
- float prevSample2 = ppSamplesFromClient[iChannel][(ma_uint32)iPrevSample2];
- float prevSample3 = ppSamplesFromClient[iChannel][(ma_uint32)iPrevSample3];
-
- float nextSample0 = ppSamplesFromClient[iChannel][(ma_uint32)iNextSample0];
- float nextSample1 = ppSamplesFromClient[iChannel][(ma_uint32)iNextSample1];
- float nextSample2 = ppSamplesFromClient[iChannel][(ma_uint32)iNextSample2];
- float nextSample3 = ppSamplesFromClient[iChannel][(ma_uint32)iNextSample3];
-
- ppNextSamplesOut[iChannel][iFrameOut*4 + 0] = ma_mix_f32_fast(prevSample0, nextSample0, alpha0);
- ppNextSamplesOut[iChannel][iFrameOut*4 + 1] = ma_mix_f32_fast(prevSample1, nextSample1, alpha1);
- ppNextSamplesOut[iChannel][iFrameOut*4 + 2] = ma_mix_f32_fast(prevSample2, nextSample2, alpha2);
- ppNextSamplesOut[iChannel][iFrameOut*4 + 3] = ma_mix_f32_fast(prevSample3, nextSample3, alpha3);
-
- t0 += factor*4;
- t1 += factor*4;
- t2 += factor*4;
- t3 += factor*4;
- }
-
- t = pSRC->linear.timeIn + (factor*maxOutputFramesToRead4*4);
- for (iFrameOut = (maxOutputFramesToRead4*4); iFrameOut < maxOutputFramesToRead; iFrameOut += 1) {
- float iPrevSample = (float)floor(t);
- float iNextSample = iPrevSample + 1;
- float alpha = t - iPrevSample;
- float prevSample;
- float nextSample;
-
- ma_assert(iPrevSample < ma_countof(pSRC->linear.input[iChannel]));
- ma_assert(iNextSample < ma_countof(pSRC->linear.input[iChannel]));
-
- prevSample = ppSamplesFromClient[iChannel][(ma_uint32)iPrevSample];
- nextSample = ppSamplesFromClient[iChannel][(ma_uint32)iNextSample];
-
- ppNextSamplesOut[iChannel][iFrameOut] = ma_mix_f32_fast(prevSample, nextSample, alpha);
-
- t += factor;
- }
-
- ppNextSamplesOut[iChannel] += maxOutputFramesToRead;
- }
-
- totalFramesRead += maxOutputFramesToRead;
-
-
- /* Residual */
- tNext = pSRC->linear.timeIn + (maxOutputFramesToRead*factor);
-
- pSRC->linear.timeIn = tNext;
- ma_assert(tNext <= framesReadFromClient+1);
+ ma_uint32 iChannel;
- iNextFrame = (ma_uint32)floor(tNext);
- pSRC->linear.leftoverFrames = framesReadFromClient - iNextFrame;
- pSRC->linear.timeIn = tNext - iNextFrame;
+ if (channelMapA == channelMapB) {
+ return MA_FALSE;
+ }
- for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) {
- ma_uint32 iFrame;
- for (iFrame = 0; iFrame < pSRC->linear.leftoverFrames; ++iFrame) {
- float sample = ppSamplesFromClient[iChannel][framesReadFromClient-pSRC->linear.leftoverFrames + iFrame];
- ppSamplesFromClient[iChannel][iFrame] = sample;
- }
- }
+ if (channels == 0 || channels > MA_MAX_CHANNELS) {
+ return MA_FALSE;
+ }
-
- /* Exit the loop if we've found everything from the client. */
- if (framesReadFromClient < framesToReadFromClient) {
- break;
+ for (iChannel = 0; iChannel < channels; ++iChannel) {
+ if (channelMapA[iChannel] != channelMapB[iChannel]) {
+ return MA_FALSE;
}
}
- return totalFramesRead;
+ return MA_TRUE;
}
-
-ma_src_config ma_src_config_init_new()
+ma_bool32 ma_channel_map_blank(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS])
{
- ma_src_config config;
- ma_zero_object(&config);
+ ma_uint32 iChannel;
- return config;
+ for (iChannel = 0; iChannel < channels; ++iChannel) {
+ if (channelMap[iChannel] != MA_CHANNEL_NONE) {
+ return MA_FALSE;
+ }
+ }
+
+ return MA_TRUE;
}
-ma_src_config ma_src_config_init(ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_uint32 channels, ma_src_read_deinterleaved_proc onReadDeinterleaved, void* pUserData)
+ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS], ma_channel channelPosition)
{
- ma_src_config config = ma_src_config_init_new();
- config.sampleRateIn = sampleRateIn;
- config.sampleRateOut = sampleRateOut;
- config.channels = channels;
- config.onReadDeinterleaved = onReadDeinterleaved;
- config.pUserData = pUserData;
+ ma_uint32 iChannel;
+ for (iChannel = 0; iChannel < channels; ++iChannel) {
+ if (channelMap[iChannel] == channelPosition) {
+ return MA_TRUE;
+ }
+ }
- return config;
+ return MA_FALSE;
}
-/**************************************************************************************************************************************************************
-
-Sinc Sample Rate Conversion
-===========================
-
-The sinc SRC algorithm uses a windowed sinc to perform interpolation of samples. Currently, miniaudio's implementation supports rectangular and Hann window
-methods.
-
-Whenever an output sample is being computed, it looks at a sub-section of the input samples. I've called this sub-section in the code below the "window",
-which I realize is a bit ambigous with the mathematical "window", but it works for me when I need to conceptualize things in my head. The window is made up
-of two halves. The first half contains past input samples (initialized to zero), and the second half contains future input samples. As time moves forward
-and input samples are consumed, the window moves forward. The larger the window, the better the quality at the expense of slower processing. The window is
-limited the range [MA_SRC_SINC_MIN_WINDOW_WIDTH, MA_SRC_SINC_MAX_WINDOW_WIDTH] and defaults to MA_SRC_SINC_DEFAULT_WINDOW_WIDTH.
-Input samples are cached for efficiency (to prevent frequently requesting tiny numbers of samples from the client). When the window gets to the end of the
-cache, it's moved back to the start, and more samples are read from the client. If the client has no more data to give, the cache is filled with zeros and
-the last of the input samples will be consumed. Once the last of the input samples have been consumed, no more samples will be output.
-
-
-When reading output samples, we always first read whatever is already in the input cache. Only when the cache has been fully consumed do we read more data
-from the client.
+/**************************************************************************************************************************************************************
-To access samples in the input buffer you do so relative to the window. When the window itself is at position 0, the first item in the buffer is accessed
-with "windowPos + windowWidth". Generally, to access any sample relative to the window you do "windowPos + windowWidth + sampleIndexRelativeToWindow".
+Conversion Helpers
**************************************************************************************************************************************************************/
-
-/* Comment this to disable interpolation of table lookups. Less accurate, but faster. */
-#define MA_USE_SINC_TABLE_INTERPOLATION
-
-/* Retrieves a sample from the input buffer's window. Values >= 0 retrieve future samples. Negative values return past samples. */
-static MA_INLINE float ma_src_sinc__get_input_sample_from_window(const ma_src* pSRC, ma_uint32 channel, ma_uint32 windowPosInSamples, ma_int32 sampleIndex)
+ma_uint64 ma_convert_frames(void* pOut, ma_uint64 frameCountOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_uint64 frameCountIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn)
{
- ma_assert(pSRC != NULL);
- ma_assert(channel < pSRC->config.channels);
- ma_assert(sampleIndex >= -(ma_int32)pSRC->config.sinc.windowWidth);
- ma_assert(sampleIndex < (ma_int32)pSRC->config.sinc.windowWidth);
+ ma_data_converter_config config;
- /* The window should always be contained within the input cache. */
- ma_assert(windowPosInSamples < ma_countof(pSRC->sinc.input[0]) - pSRC->config.sinc.windowWidth);
-
- return pSRC->sinc.input[channel][windowPosInSamples + pSRC->config.sinc.windowWidth + sampleIndex];
+ config = ma_data_converter_config_init(formatIn, formatOut, channelsIn, channelsOut, sampleRateIn, sampleRateOut);
+ ma_get_standard_channel_map(ma_standard_channel_map_default, channelsOut, config.channelMapOut);
+ ma_get_standard_channel_map(ma_standard_channel_map_default, channelsIn, config.channelMapIn);
+ config.resampling.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER);
+
+ return ma_convert_frames_ex(pOut, frameCountOut, pIn, frameCountIn, &config);
}
-static MA_INLINE float ma_src_sinc__interpolation_factor(const ma_src* pSRC, float x)
+ma_uint64 ma_convert_frames_ex(void* pOut, ma_uint64 frameCountOut, const void* pIn, ma_uint64 frameCountIn, const ma_data_converter_config* pConfig)
{
- float xabs;
- ma_int32 ixabs;
-
- ma_assert(pSRC != NULL);
+ ma_result result;
+ ma_data_converter converter;
- xabs = (float)fabs(x);
- xabs = xabs * MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION;
- ixabs = (ma_int32)xabs;
+ if (frameCountIn == 0 || pConfig == NULL) {
+ return 0;
+ }
-#if defined(MA_USE_SINC_TABLE_INTERPOLATION)
- {
- float a = xabs - ixabs;
- return ma_mix_f32_fast(pSRC->sinc.table[ixabs], pSRC->sinc.table[ixabs+1], a);
+ result = ma_data_converter_init(pConfig, &converter);
+ if (result != MA_SUCCESS) {
+ return 0; /* Failed to initialize the data converter. */
}
-#else
- return pSRC->sinc.table[ixabs];
-#endif
-}
-#if defined(MA_SUPPORT_SSE2)
-static MA_INLINE __m128 ma_fabsf_sse2(__m128 x)
-{
- return _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF)), x);
-}
+ if (pOut == NULL) {
+ frameCountOut = ma_data_converter_get_expected_output_frame_count(&converter, frameCountIn);
+ } else {
+ result = ma_data_converter_process_pcm_frames(&converter, pIn, &frameCountIn, pOut, &frameCountOut);
+ if (result != MA_SUCCESS) {
+ frameCountOut = 0;
+ }
+ }
-static MA_INLINE __m128 ma_truncf_sse2(__m128 x)
-{
- return _mm_cvtepi32_ps(_mm_cvttps_epi32(x));
+ ma_data_converter_uninit(&converter);
+ return frameCountOut;
}
-static MA_INLINE __m128 ma_src_sinc__interpolation_factor__sse2(const ma_src* pSRC, __m128 x)
-{
- __m128 resolution128;
- __m128 xabs;
- __m128i ixabs;
- __m128 lo;
- __m128 hi;
- __m128 a;
- __m128 r;
- int* ixabsv;
-
- resolution128 = _mm_set1_ps(MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION);
- xabs = ma_fabsf_sse2(x);
- xabs = _mm_mul_ps(xabs, resolution128);
- ixabs = _mm_cvttps_epi32(xabs);
- ixabsv = (int*)&ixabs;
-
- lo = _mm_set_ps(
- pSRC->sinc.table[ixabsv[3]],
- pSRC->sinc.table[ixabsv[2]],
- pSRC->sinc.table[ixabsv[1]],
- pSRC->sinc.table[ixabsv[0]]
- );
-
- hi = _mm_set_ps(
- pSRC->sinc.table[ixabsv[3]+1],
- pSRC->sinc.table[ixabsv[2]+1],
- pSRC->sinc.table[ixabsv[1]+1],
- pSRC->sinc.table[ixabsv[0]+1]
- );
+/**************************************************************************************************************************************************************
- a = _mm_sub_ps(xabs, _mm_cvtepi32_ps(ixabs));
- r = ma_mix_f32_fast__sse2(lo, hi, a);
+Ring Buffer
- return r;
+**************************************************************************************************************************************************************/
+MA_INLINE ma_uint32 ma_rb__extract_offset_in_bytes(ma_uint32 encodedOffset)
+{
+ return encodedOffset & 0x7FFFFFFF;
}
-#endif
-#if defined(MA_SUPPORT_AVX2)
-static MA_INLINE __m256 ma_fabsf_avx2(__m256 x)
+MA_INLINE ma_uint32 ma_rb__extract_offset_loop_flag(ma_uint32 encodedOffset)
{
- return _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x7FFFFFFF)), x);
+ return encodedOffset & 0x80000000;
}
-#if 0
-static MA_INLINE __m256 ma_src_sinc__interpolation_factor__avx2(const ma_src* pSRC, __m256 x)
+MA_INLINE void* ma_rb__get_read_ptr(ma_rb* pRB)
{
- __m256 resolution256 = _mm256_set1_ps(MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION);
- __m256 xabs = ma_fabsf_avx2(x);
-
- xabs = _mm256_mul_ps(xabs, resolution256);
-
- __m256i ixabs = _mm256_cvttps_epi32(xabs);
- __m256 a = _mm256_sub_ps(xabs, _mm256_cvtepi32_ps(ixabs));
-
-
- int* ixabsv = (int*)&ixabs;
-
- __m256 lo = _mm256_set_ps(
- pSRC->sinc.table[ixabsv[7]],
- pSRC->sinc.table[ixabsv[6]],
- pSRC->sinc.table[ixabsv[5]],
- pSRC->sinc.table[ixabsv[4]],
- pSRC->sinc.table[ixabsv[3]],
- pSRC->sinc.table[ixabsv[2]],
- pSRC->sinc.table[ixabsv[1]],
- pSRC->sinc.table[ixabsv[0]]
- );
-
- __m256 hi = _mm256_set_ps(
- pSRC->sinc.table[ixabsv[7]+1],
- pSRC->sinc.table[ixabsv[6]+1],
- pSRC->sinc.table[ixabsv[5]+1],
- pSRC->sinc.table[ixabsv[4]+1],
- pSRC->sinc.table[ixabsv[3]+1],
- pSRC->sinc.table[ixabsv[2]+1],
- pSRC->sinc.table[ixabsv[1]+1],
- pSRC->sinc.table[ixabsv[0]+1]
- );
-
- __m256 r = ma_mix_f32_fast__avx2(lo, hi, a);
-
- return r;
+ MA_ASSERT(pRB != NULL);
+ return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(pRB->encodedReadOffset));
}
-#endif
-#endif
-
-#if defined(MA_SUPPORT_NEON)
-static MA_INLINE float32x4_t ma_fabsf_neon(float32x4_t x)
+MA_INLINE void* ma_rb__get_write_ptr(ma_rb* pRB)
{
- return vabdq_f32(vmovq_n_f32(0), x);
+ MA_ASSERT(pRB != NULL);
+ return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(pRB->encodedWriteOffset));
}
-static MA_INLINE float32x4_t ma_src_sinc__interpolation_factor__neon(const ma_src* pSRC, float32x4_t x)
+MA_INLINE ma_uint32 ma_rb__construct_offset(ma_uint32 offsetInBytes, ma_uint32 offsetLoopFlag)
{
- float32x4_t xabs;
- int32x4_t ixabs;
- float32x4_t a
- float32x4_t r
- int* ixabsv;
- float lo[4];
- float hi[4];
-
- xabs = ma_fabsf_neon(x);
- xabs = vmulq_n_f32(xabs, MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION);
- ixabs = vcvtq_s32_f32(xabs);
-
- ixabsv = (int*)&ixabs;
-
- lo[0] = pSRC->sinc.table[ixabsv[0]];
- lo[1] = pSRC->sinc.table[ixabsv[1]];
- lo[2] = pSRC->sinc.table[ixabsv[2]];
- lo[3] = pSRC->sinc.table[ixabsv[3]];
-
- hi[0] = pSRC->sinc.table[ixabsv[0]+1];
- hi[1] = pSRC->sinc.table[ixabsv[1]+1];
- hi[2] = pSRC->sinc.table[ixabsv[2]+1];
- hi[3] = pSRC->sinc.table[ixabsv[3]+1];
+ return offsetLoopFlag | offsetInBytes;
+}
- a = vsubq_f32(xabs, vcvtq_f32_s32(ixabs));
- r = ma_mix_f32_fast__neon(vld1q_f32(lo), vld1q_f32(hi), a);
+MA_INLINE void ma_rb__deconstruct_offset(ma_uint32 encodedOffset, ma_uint32* pOffsetInBytes, ma_uint32* pOffsetLoopFlag)
+{
+ MA_ASSERT(pOffsetInBytes != NULL);
+ MA_ASSERT(pOffsetLoopFlag != NULL);
- return r;
+ *pOffsetInBytes = ma_rb__extract_offset_in_bytes(encodedOffset);
+ *pOffsetLoopFlag = ma_rb__extract_offset_loop_flag(encodedOffset);
}
-#endif
-ma_uint64 ma_src_read_deinterleaved__sinc(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData)
+
+ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB)
{
- float factor;
- float inverseFactor;
- ma_int32 windowWidth;
- ma_int32 windowWidth2;
- ma_int32 windowWidthSIMD;
- ma_int32 windowWidthSIMD2;
- float* ppNextSamplesOut[MA_MAX_CHANNELS];
- float _windowSamplesUnaligned[MA_SRC_SINC_MAX_WINDOW_WIDTH*2 + MA_SIMD_ALIGNMENT];
- float* windowSamples;
- float _iWindowFUnaligned[MA_SRC_SINC_MAX_WINDOW_WIDTH*2 + MA_SIMD_ALIGNMENT];
- float* iWindowF;
- ma_int32 i;
- ma_uint64 totalOutputFramesRead;
-
- ma_assert(pSRC != NULL);
- ma_assert(frameCount > 0);
- ma_assert(ppSamplesOut != NULL);
-
- factor = (float)pSRC->config.sampleRateIn / pSRC->config.sampleRateOut;
- inverseFactor = 1/factor;
-
- windowWidth = (ma_int32)pSRC->config.sinc.windowWidth;
- windowWidth2 = windowWidth*2;
+ ma_result result;
+ const ma_uint32 maxSubBufferSize = 0x7FFFFFFF - (MA_SIMD_ALIGNMENT-1);
- /*
- There are cases where it's actually more efficient to increase the window width so that it's aligned with the respective
- SIMD pipeline being used.
- */
- windowWidthSIMD = windowWidth;
- if (pSRC->useNEON) {
- windowWidthSIMD = (windowWidthSIMD + 1) & ~(1);
- } else if (pSRC->useAVX512) {
- windowWidthSIMD = (windowWidthSIMD + 7) & ~(7);
- } else if (pSRC->useAVX2) {
- windowWidthSIMD = (windowWidthSIMD + 3) & ~(3);
- } else if (pSRC->useSSE2) {
- windowWidthSIMD = (windowWidthSIMD + 1) & ~(1);
+ if (pRB == NULL) {
+ return MA_INVALID_ARGS;
}
- windowWidthSIMD2 = windowWidthSIMD*2;
- (void)windowWidthSIMD2; /* <-- Silence a warning when SIMD is disabled. */
+ if (subbufferSizeInBytes == 0 || subbufferCount == 0) {
+ return MA_INVALID_ARGS;
+ }
- ma_copy_memory(ppNextSamplesOut, ppSamplesOut, sizeof(void*) * pSRC->config.channels);
+ if (subbufferSizeInBytes > maxSubBufferSize) {
+ return MA_INVALID_ARGS; /* Maximum buffer size is ~2GB. The most significant bit is a flag for use internally. */
+ }
- windowSamples = (float*)(((ma_uintptr)_windowSamplesUnaligned + MA_SIMD_ALIGNMENT-1) & ~(MA_SIMD_ALIGNMENT-1));
- ma_zero_memory(windowSamples, MA_SRC_SINC_MAX_WINDOW_WIDTH*2 * sizeof(float));
- iWindowF = (float*)(((ma_uintptr)_iWindowFUnaligned + MA_SIMD_ALIGNMENT-1) & ~(MA_SIMD_ALIGNMENT-1));
- ma_zero_memory(iWindowF, MA_SRC_SINC_MAX_WINDOW_WIDTH*2 * sizeof(float));
+ MA_ZERO_OBJECT(pRB);
- for (i = 0; i < windowWidth2; ++i) {
- iWindowF[i] = (float)(i - windowWidth);
+ result = ma_allocation_callbacks_init_copy(&pRB->allocationCallbacks, pAllocationCallbacks);
+ if (result != MA_SUCCESS) {
+ return result;
}
- totalOutputFramesRead = 0;
- while (totalOutputFramesRead < frameCount) {
- ma_uint32 maxInputSamplesAvailableInCache;
- float timeInBeg;
- float timeInEnd;
- ma_uint64 maxOutputFramesToRead;
- ma_uint64 outputFramesRemaining;
- ma_uint64 outputFramesToRead;
- ma_uint32 iChannel;
- ma_uint32 prevWindowPosInSamples;
- ma_uint32 availableOutputFrames;
+ pRB->subbufferSizeInBytes = (ma_uint32)subbufferSizeInBytes;
+ pRB->subbufferCount = (ma_uint32)subbufferCount;
+
+ if (pOptionalPreallocatedBuffer != NULL) {
+ pRB->subbufferStrideInBytes = (ma_uint32)subbufferStrideInBytes;
+ pRB->pBuffer = pOptionalPreallocatedBuffer;
+ } else {
+ size_t bufferSizeInBytes;
/*
- The maximum number of frames we can read this iteration depends on how many input samples we have available to us. This is the number
- of input samples between the end of the window and the end of the cache.
+ Here is where we allocate our own buffer. We always want to align this to MA_SIMD_ALIGNMENT for future SIMD optimization opportunity. To do this
+ we need to make sure the stride is a multiple of MA_SIMD_ALIGNMENT.
*/
- maxInputSamplesAvailableInCache = ma_countof(pSRC->sinc.input[0]) - (pSRC->config.sinc.windowWidth*2) - pSRC->sinc.windowPosInSamples;
- if (maxInputSamplesAvailableInCache > pSRC->sinc.inputFrameCount) {
- maxInputSamplesAvailableInCache = pSRC->sinc.inputFrameCount;
- }
-
- /* Never consume the tail end of the input data if requested. */
- if (pSRC->config.neverConsumeEndOfInput) {
- if (maxInputSamplesAvailableInCache >= pSRC->config.sinc.windowWidth) {
- maxInputSamplesAvailableInCache -= pSRC->config.sinc.windowWidth;
- } else {
- maxInputSamplesAvailableInCache = 0;
- }
- }
-
- timeInBeg = pSRC->sinc.timeIn;
- timeInEnd = (float)(pSRC->sinc.windowPosInSamples + maxInputSamplesAvailableInCache);
-
- ma_assert(timeInBeg >= 0);
- ma_assert(timeInBeg <= timeInEnd);
-
- maxOutputFramesToRead = (ma_uint64)(((timeInEnd - timeInBeg) * inverseFactor));
-
- outputFramesRemaining = frameCount - totalOutputFramesRead;
- outputFramesToRead = outputFramesRemaining;
- if (outputFramesToRead > maxOutputFramesToRead) {
- outputFramesToRead = maxOutputFramesToRead;
- }
-
- for (iChannel = 0; iChannel < pSRC->config.channels; iChannel += 1) {
- /* Do SRC. */
- float timeIn = timeInBeg;
- ma_uint32 iSample;
- for (iSample = 0; iSample < outputFramesToRead; iSample += 1) {
- float sampleOut = 0;
- float iTimeInF = ma_floorf(timeIn);
- ma_uint32 iTimeIn = (ma_uint32)iTimeInF;
- ma_int32 iWindow = 0;
- float tScalar;
-
- /* Pre-load the window samples into an aligned buffer to begin with. Need to put these into an aligned buffer to make SIMD easier. */
- windowSamples[0] = 0; /* <-- The first sample is always zero. */
- for (i = 1; i < windowWidth2; ++i) {
- windowSamples[i] = pSRC->sinc.input[iChannel][iTimeIn + i];
- }
-
-#if defined(MA_SUPPORT_AVX2) || defined(MA_SUPPORT_AVX512)
- if (pSRC->useAVX2 || pSRC->useAVX512) {
- __m256i ixabs[MA_SRC_SINC_MAX_WINDOW_WIDTH*2/8];
- __m256 a[MA_SRC_SINC_MAX_WINDOW_WIDTH*2/8];
- __m256 resolution256;
- __m256 t;
- __m256 r;
- ma_int32 windowWidth8;
- ma_int32 iWindow8;
-
- resolution256 = _mm256_set1_ps(MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION);
-
- t = _mm256_set1_ps((timeIn - iTimeInF));
- r = _mm256_set1_ps(0);
-
- windowWidth8 = windowWidthSIMD2 >> 3;
- for (iWindow8 = 0; iWindow8 < windowWidth8; iWindow8 += 1) {
- __m256 w = *((__m256*)iWindowF + iWindow8);
-
- __m256 xabs = _mm256_sub_ps(t, w);
- xabs = ma_fabsf_avx2(xabs);
- xabs = _mm256_mul_ps(xabs, resolution256);
-
- ixabs[iWindow8] = _mm256_cvttps_epi32(xabs);
- a[iWindow8] = _mm256_sub_ps(xabs, _mm256_cvtepi32_ps(ixabs[iWindow8]));
- }
-
- for (iWindow8 = 0; iWindow8 < windowWidth8; iWindow8 += 1) {
- int* ixabsv = (int*)&ixabs[iWindow8];
-
- __m256 lo = _mm256_set_ps(
- pSRC->sinc.table[ixabsv[7]],
- pSRC->sinc.table[ixabsv[6]],
- pSRC->sinc.table[ixabsv[5]],
- pSRC->sinc.table[ixabsv[4]],
- pSRC->sinc.table[ixabsv[3]],
- pSRC->sinc.table[ixabsv[2]],
- pSRC->sinc.table[ixabsv[1]],
- pSRC->sinc.table[ixabsv[0]]
- );
-
- __m256 hi = _mm256_set_ps(
- pSRC->sinc.table[ixabsv[7]+1],
- pSRC->sinc.table[ixabsv[6]+1],
- pSRC->sinc.table[ixabsv[5]+1],
- pSRC->sinc.table[ixabsv[4]+1],
- pSRC->sinc.table[ixabsv[3]+1],
- pSRC->sinc.table[ixabsv[2]+1],
- pSRC->sinc.table[ixabsv[1]+1],
- pSRC->sinc.table[ixabsv[0]+1]
- );
-
- __m256 s = *((__m256*)windowSamples + iWindow8);
- r = _mm256_add_ps(r, _mm256_mul_ps(s, ma_mix_f32_fast__avx2(lo, hi, a[iWindow8])));
- }
-
- /* Horizontal add. */
- __m256 x = _mm256_hadd_ps(r, _mm256_permute2f128_ps(r, r, 1));
- x = _mm256_hadd_ps(x, x);
- x = _mm256_hadd_ps(x, x);
- sampleOut += _mm_cvtss_f32(_mm256_castps256_ps128(x));
-
- iWindow += windowWidth8 * 8;
- }
- else
-#endif
-#if defined(MA_SUPPORT_SSE2)
- if (pSRC->useSSE2) {
- __m128 t = _mm_set1_ps((timeIn - iTimeInF));
- __m128 r = _mm_set1_ps(0);
-
- ma_int32 windowWidth4 = windowWidthSIMD2 >> 2;
- ma_int32 iWindow4;
- for (iWindow4 = 0; iWindow4 < windowWidth4; iWindow4 += 1) {
- __m128* s = (__m128*)windowSamples + iWindow4;
- __m128* w = (__m128*)iWindowF + iWindow4;
-
- __m128 a = ma_src_sinc__interpolation_factor__sse2(pSRC, _mm_sub_ps(t, *w));
- r = _mm_add_ps(r, _mm_mul_ps(*s, a));
- }
-
- sampleOut += ((float*)(&r))[0];
- sampleOut += ((float*)(&r))[1];
- sampleOut += ((float*)(&r))[2];
- sampleOut += ((float*)(&r))[3];
-
- iWindow += windowWidth4 * 4;
- }
- else
-#endif
-#if defined(MA_SUPPORT_NEON)
- if (pSRC->useNEON) {
- float32x4_t t = vmovq_n_f32((timeIn - iTimeInF));
- float32x4_t r = vmovq_n_f32(0);
-
- ma_int32 windowWidth4 = windowWidthSIMD2 >> 2;
- ma_int32 iWindow4;
- for (iWindow4 = 0; iWindow4 < windowWidth4; iWindow4 += 1) {
- float32x4_t* s = (float32x4_t*)windowSamples + iWindow4;
- float32x4_t* w = (float32x4_t*)iWindowF + iWindow4;
-
- float32x4_t a = ma_src_sinc__interpolation_factor__neon(pSRC, vsubq_f32(t, *w));
- r = vaddq_f32(r, vmulq_f32(*s, a));
- }
-
- sampleOut += ((float*)(&r))[0];
- sampleOut += ((float*)(&r))[1];
- sampleOut += ((float*)(&r))[2];
- sampleOut += ((float*)(&r))[3];
-
- iWindow += windowWidth4 * 4;
- }
- else
-#endif
- {
- iWindow += 1; /* The first one is a dummy for SIMD alignment purposes. Skip it. */
- }
-
- /* Non-SIMD/Reference implementation. */
- tScalar = (timeIn - iTimeIn);
- for (; iWindow < windowWidth2; iWindow += 1) {
- float s = windowSamples[iWindow];
- float w = iWindowF[iWindow];
-
- float a = ma_src_sinc__interpolation_factor(pSRC, (tScalar - w));
- float r = s * a;
-
- sampleOut += r;
- }
-
- ppNextSamplesOut[iChannel][iSample] = (float)sampleOut;
-
- timeIn += factor;
- }
+ pRB->subbufferStrideInBytes = (pRB->subbufferSizeInBytes + (MA_SIMD_ALIGNMENT-1)) & ~MA_SIMD_ALIGNMENT;
- ppNextSamplesOut[iChannel] += outputFramesToRead;
+ bufferSizeInBytes = (size_t)pRB->subbufferCount*pRB->subbufferStrideInBytes;
+ pRB->pBuffer = ma_aligned_malloc(bufferSizeInBytes, MA_SIMD_ALIGNMENT, &pRB->allocationCallbacks);
+ if (pRB->pBuffer == NULL) {
+ return MA_OUT_OF_MEMORY;
}
- totalOutputFramesRead += outputFramesToRead;
-
- prevWindowPosInSamples = pSRC->sinc.windowPosInSamples;
-
- pSRC->sinc.timeIn += ((ma_int64)outputFramesToRead * factor); /* Cast to int64 required for VC6. */
- pSRC->sinc.windowPosInSamples = (ma_uint32)pSRC->sinc.timeIn;
- pSRC->sinc.inputFrameCount -= pSRC->sinc.windowPosInSamples - prevWindowPosInSamples;
-
- /* If the window has reached a point where we cannot read a whole output sample it needs to be moved back to the start. */
- availableOutputFrames = (ma_uint32)((timeInEnd - pSRC->sinc.timeIn) * inverseFactor);
+ MA_ZERO_MEMORY(pRB->pBuffer, bufferSizeInBytes);
+ pRB->ownsBuffer = MA_TRUE;
+ }
- if (availableOutputFrames == 0) {
- size_t samplesToMove = ma_countof(pSRC->sinc.input[0]) - pSRC->sinc.windowPosInSamples;
+ return MA_SUCCESS;
+}
- pSRC->sinc.timeIn -= ma_floorf(pSRC->sinc.timeIn);
- pSRC->sinc.windowPosInSamples = 0;
+ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB)
+{
+ return ma_rb_init_ex(bufferSizeInBytes, 1, 0, pOptionalPreallocatedBuffer, pAllocationCallbacks, pRB);
+}
- /* Move everything from the end of the cache up to the front. */
- for (iChannel = 0; iChannel < pSRC->config.channels; iChannel += 1) {
- memmove(pSRC->sinc.input[iChannel], pSRC->sinc.input[iChannel] + ma_countof(pSRC->sinc.input[iChannel]) - samplesToMove, samplesToMove * sizeof(*pSRC->sinc.input[iChannel]));
- }
- }
+void ma_rb_uninit(ma_rb* pRB)
+{
+ if (pRB == NULL) {
+ return;
+ }
- /* Read more data from the client if required. */
- if (pSRC->isEndOfInputLoaded) {
- pSRC->isEndOfInputLoaded = MA_FALSE;
- break;
- }
+ if (pRB->ownsBuffer) {
+ ma_aligned_free(pRB->pBuffer, &pRB->allocationCallbacks);
+ }
+}
- /*
- Everything beyond this point is reloading. If we're at the end of the input data we do _not_ want to try reading any more in this function call. If the
- caller wants to keep trying, they can reload their internal data sources and call this function again. We should never be
- */
- ma_assert(pSRC->isEndOfInputLoaded == MA_FALSE);
+void ma_rb_reset(ma_rb* pRB)
+{
+ if (pRB == NULL) {
+ return;
+ }
- if (pSRC->sinc.inputFrameCount <= pSRC->config.sinc.windowWidth || availableOutputFrames == 0) {
- float* ppInputDst[MA_MAX_CHANNELS] = {0};
- ma_uint32 framesToReadFromClient;
- ma_uint32 framesReadFromClient;
- ma_uint32 leftoverFrames;
+ pRB->encodedReadOffset = 0;
+ pRB->encodedWriteOffset = 0;
+}
- for (iChannel = 0; iChannel < pSRC->config.channels; iChannel += 1) {
- ppInputDst[iChannel] = pSRC->sinc.input[iChannel] + pSRC->config.sinc.windowWidth + pSRC->sinc.inputFrameCount;
- }
+ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut)
+{
+ ma_uint32 writeOffset;
+ ma_uint32 writeOffsetInBytes;
+ ma_uint32 writeOffsetLoopFlag;
+ ma_uint32 readOffset;
+ ma_uint32 readOffsetInBytes;
+ ma_uint32 readOffsetLoopFlag;
+ size_t bytesAvailable;
+ size_t bytesRequested;
- /* Now read data from the client. */
- framesToReadFromClient = ma_countof(pSRC->sinc.input[0]) - (pSRC->config.sinc.windowWidth + pSRC->sinc.inputFrameCount);
+ if (pRB == NULL || pSizeInBytes == NULL || ppBufferOut == NULL) {
+ return MA_INVALID_ARGS;
+ }
- framesReadFromClient = 0;
- if (framesToReadFromClient > 0) {
- framesReadFromClient = pSRC->config.onReadDeinterleaved(pSRC, framesToReadFromClient, (void**)ppInputDst, pUserData);
- }
+ /* The returned buffer should never move ahead of the write pointer. */
+ writeOffset = pRB->encodedWriteOffset;
+ ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
- if (framesReadFromClient != framesToReadFromClient) {
- pSRC->isEndOfInputLoaded = MA_TRUE;
- } else {
- pSRC->isEndOfInputLoaded = MA_FALSE;
- }
+ readOffset = pRB->encodedReadOffset;
+ ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
- if (framesReadFromClient != 0) {
- pSRC->sinc.inputFrameCount += framesReadFromClient;
- } else {
- /* We couldn't get anything more from the client. If no more output samples can be computed from the available input samples we need to return. */
- if (pSRC->config.neverConsumeEndOfInput) {
- if ((pSRC->sinc.inputFrameCount * inverseFactor) <= pSRC->config.sinc.windowWidth) {
- break;
- }
- } else {
- if ((pSRC->sinc.inputFrameCount * inverseFactor) < 1) {
- break;
- }
- }
- }
+ /*
+ The number of bytes available depends on whether or not the read and write pointers are on the same loop iteration. If so, we
+ can only read up to the write pointer. If not, we can only read up to the end of the buffer.
+ */
+ if (readOffsetLoopFlag == writeOffsetLoopFlag) {
+ bytesAvailable = writeOffsetInBytes - readOffsetInBytes;
+ } else {
+ bytesAvailable = pRB->subbufferSizeInBytes - readOffsetInBytes;
+ }
- /* Anything left over in the cache must be set to zero. */
- leftoverFrames = ma_countof(pSRC->sinc.input[0]) - (pSRC->config.sinc.windowWidth + pSRC->sinc.inputFrameCount);
- if (leftoverFrames > 0) {
- for (iChannel = 0; iChannel < pSRC->config.channels; iChannel += 1) {
- ma_zero_memory(pSRC->sinc.input[iChannel] + pSRC->config.sinc.windowWidth + pSRC->sinc.inputFrameCount, leftoverFrames * sizeof(float));
- }
- }
- }
+ bytesRequested = *pSizeInBytes;
+ if (bytesRequested > bytesAvailable) {
+ bytesRequested = bytesAvailable;
}
- return totalOutputFramesRead;
+ *pSizeInBytes = bytesRequested;
+ (*ppBufferOut) = ma_rb__get_read_ptr(pRB);
+
+ return MA_SUCCESS;
}
+ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut)
+{
+ ma_uint32 readOffset;
+ ma_uint32 readOffsetInBytes;
+ ma_uint32 readOffsetLoopFlag;
+ ma_uint32 newReadOffsetInBytes;
+ ma_uint32 newReadOffsetLoopFlag;
+ if (pRB == NULL) {
+ return MA_INVALID_ARGS;
+ }
-/**************************************************************************************************************************************************************
+ /* Validate the buffer. */
+ if (pBufferOut != ma_rb__get_read_ptr(pRB)) {
+ return MA_INVALID_ARGS;
+ }
-Format Conversion
+ readOffset = pRB->encodedReadOffset;
+ ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
-**************************************************************************************************************************************************************/
-void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode)
-{
- if (formatOut == formatIn) {
- ma_copy_memory_64(pOut, pIn, sampleCount * ma_get_bytes_per_sample(formatOut));
- return;
+ /* Check that sizeInBytes is correct. It should never go beyond the end of the buffer. */
+ newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + sizeInBytes);
+ if (newReadOffsetInBytes > pRB->subbufferSizeInBytes) {
+ return MA_INVALID_ARGS; /* <-- sizeInBytes will cause the read offset to overflow. */
}
- switch (formatIn)
- {
- case ma_format_u8:
- {
- switch (formatOut)
- {
- case ma_format_s16: ma_pcm_u8_to_s16(pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_s24: ma_pcm_u8_to_s24(pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_s32: ma_pcm_u8_to_s32(pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_f32: ma_pcm_u8_to_f32(pOut, pIn, sampleCount, ditherMode); return;
- default: break;
- }
- } break;
+ /* Move the read pointer back to the start if necessary. */
+ newReadOffsetLoopFlag = readOffsetLoopFlag;
+ if (newReadOffsetInBytes == pRB->subbufferSizeInBytes) {
+ newReadOffsetInBytes = 0;
+ newReadOffsetLoopFlag ^= 0x80000000;
+ }
- case ma_format_s16:
- {
- switch (formatOut)
- {
- case ma_format_u8: ma_pcm_s16_to_u8( pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_s24: ma_pcm_s16_to_s24(pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_s32: ma_pcm_s16_to_s32(pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_f32: ma_pcm_s16_to_f32(pOut, pIn, sampleCount, ditherMode); return;
- default: break;
- }
- } break;
+ ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetLoopFlag, newReadOffsetInBytes));
+ return MA_SUCCESS;
+}
- case ma_format_s24:
- {
- switch (formatOut)
- {
- case ma_format_u8: ma_pcm_s24_to_u8( pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_s16: ma_pcm_s24_to_s16(pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_s32: ma_pcm_s24_to_s32(pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_f32: ma_pcm_s24_to_f32(pOut, pIn, sampleCount, ditherMode); return;
- default: break;
- }
- } break;
+ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut)
+{
+ ma_uint32 readOffset;
+ ma_uint32 readOffsetInBytes;
+ ma_uint32 readOffsetLoopFlag;
+ ma_uint32 writeOffset;
+ ma_uint32 writeOffsetInBytes;
+ ma_uint32 writeOffsetLoopFlag;
+ size_t bytesAvailable;
+ size_t bytesRequested;
- case ma_format_s32:
- {
- switch (formatOut)
- {
- case ma_format_u8: ma_pcm_s32_to_u8( pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_s16: ma_pcm_s32_to_s16(pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_s24: ma_pcm_s32_to_s24(pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_f32: ma_pcm_s32_to_f32(pOut, pIn, sampleCount, ditherMode); return;
- default: break;
- }
- } break;
+ if (pRB == NULL || pSizeInBytes == NULL || ppBufferOut == NULL) {
+ return MA_INVALID_ARGS;
+ }
- case ma_format_f32:
- {
- switch (formatOut)
- {
- case ma_format_u8: ma_pcm_f32_to_u8( pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_s16: ma_pcm_f32_to_s16(pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_s24: ma_pcm_f32_to_s24(pOut, pIn, sampleCount, ditherMode); return;
- case ma_format_s32: ma_pcm_f32_to_s32(pOut, pIn, sampleCount, ditherMode); return;
- default: break;
- }
- } break;
+ /* The returned buffer should never overtake the read buffer. */
+ readOffset = pRB->encodedReadOffset;
+ ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
- default: break;
- }
-}
+ writeOffset = pRB->encodedWriteOffset;
+ ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
-void ma_deinterleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames)
-{
- if (pInterleavedPCMFrames == NULL || ppDeinterleavedPCMFrames == NULL) {
- return; /* Invalid args. */
+ /*
+ In the case of writing, if the write pointer and the read pointer are on the same loop iteration we can only
+ write up to the end of the buffer. Otherwise we can only write up to the read pointer. The write pointer should
+ never overtake the read pointer.
+ */
+ if (writeOffsetLoopFlag == readOffsetLoopFlag) {
+ bytesAvailable = pRB->subbufferSizeInBytes - writeOffsetInBytes;
+ } else {
+ bytesAvailable = readOffsetInBytes - writeOffsetInBytes;
}
- /* For efficiency we do this per format. */
- switch (format) {
- case ma_format_s16:
- {
- const ma_int16* pSrcS16 = (const ma_int16*)pInterleavedPCMFrames;
- ma_uint64 iPCMFrame;
- for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- ma_int16* pDstS16 = (ma_int16*)ppDeinterleavedPCMFrames[iChannel];
- pDstS16[iPCMFrame] = pSrcS16[iPCMFrame*channels+iChannel];
- }
- }
- } break;
-
- case ma_format_f32:
- {
- const float* pSrcF32 = (const float*)pInterleavedPCMFrames;
- ma_uint64 iPCMFrame;
- for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- float* pDstF32 = (float*)ppDeinterleavedPCMFrames[iChannel];
- pDstF32[iPCMFrame] = pSrcF32[iPCMFrame*channels+iChannel];
- }
- }
- } break;
-
- default:
- {
- ma_uint32 sampleSizeInBytes = ma_get_bytes_per_sample(format);
- ma_uint64 iPCMFrame;
- for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- void* pDst = ma_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes);
- const void* pSrc = ma_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes);
- memcpy(pDst, pSrc, sampleSizeInBytes);
- }
- }
- } break;
+ bytesRequested = *pSizeInBytes;
+ if (bytesRequested > bytesAvailable) {
+ bytesRequested = bytesAvailable;
}
-}
-void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames)
-{
- switch (format)
- {
- case ma_format_s16:
- {
- ma_int16* pDstS16 = (ma_int16*)pInterleavedPCMFrames;
- ma_uint64 iPCMFrame;
- for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- const ma_int16* pSrcS16 = (const ma_int16*)ppDeinterleavedPCMFrames[iChannel];
- pDstS16[iPCMFrame*channels+iChannel] = pSrcS16[iPCMFrame];
- }
- }
- } break;
-
- case ma_format_f32:
- {
- float* pDstF32 = (float*)pInterleavedPCMFrames;
- ma_uint64 iPCMFrame;
- for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- const float* pSrcF32 = (const float*)ppDeinterleavedPCMFrames[iChannel];
- pDstF32[iPCMFrame*channels+iChannel] = pSrcF32[iPCMFrame];
- }
- }
- } break;
-
- default:
- {
- ma_uint32 sampleSizeInBytes = ma_get_bytes_per_sample(format);
- ma_uint64 iPCMFrame;
- for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < channels; ++iChannel) {
- void* pDst = ma_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes);
- const void* pSrc = ma_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes);
- memcpy(pDst, pSrc, sampleSizeInBytes);
- }
- }
- } break;
- }
-}
+ *pSizeInBytes = bytesRequested;
+ *ppBufferOut = ma_rb__get_write_ptr(pRB);
+ /* Clear the buffer if desired. */
+ if (pRB->clearOnWriteAcquire) {
+ MA_ZERO_MEMORY(*ppBufferOut, *pSizeInBytes);
+ }
+ return MA_SUCCESS;
+}
-typedef struct
+ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut)
{
- ma_pcm_converter* pDSP;
- void* pUserDataForClient;
-} ma_pcm_converter_callback_data;
+ ma_uint32 writeOffset;
+ ma_uint32 writeOffsetInBytes;
+ ma_uint32 writeOffsetLoopFlag;
+ ma_uint32 newWriteOffsetInBytes;
+ ma_uint32 newWriteOffsetLoopFlag;
-ma_uint32 ma_pcm_converter__pre_format_converter_on_read(ma_format_converter* pConverter, ma_uint32 frameCount, void* pFramesOut, void* pUserData)
-{
- ma_pcm_converter_callback_data* pData;
- ma_pcm_converter* pDSP;
+ if (pRB == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ /* Validate the buffer. */
+ if (pBufferOut != ma_rb__get_write_ptr(pRB)) {
+ return MA_INVALID_ARGS;
+ }
- (void)pConverter;
+ writeOffset = pRB->encodedWriteOffset;
+ ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
- pData = (ma_pcm_converter_callback_data*)pUserData;
- ma_assert(pData != NULL);
+ /* Check that sizeInBytes is correct. It should never go beyond the end of the buffer. */
+ newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + sizeInBytes);
+ if (newWriteOffsetInBytes > pRB->subbufferSizeInBytes) {
+ return MA_INVALID_ARGS; /* <-- sizeInBytes will cause the write offset to overflow. */
+ }
- pDSP = pData->pDSP;
- ma_assert(pDSP != NULL);
+ /* Move the write pointer back to the start if necessary. */
+ newWriteOffsetLoopFlag = writeOffsetLoopFlag;
+ if (newWriteOffsetInBytes == pRB->subbufferSizeInBytes) {
+ newWriteOffsetInBytes = 0;
+ newWriteOffsetLoopFlag ^= 0x80000000;
+ }
- return pDSP->onRead(pDSP, pFramesOut, frameCount, pData->pUserDataForClient);
+ ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetLoopFlag, newWriteOffsetInBytes));
+ return MA_SUCCESS;
}
-ma_uint32 ma_pcm_converter__post_format_converter_on_read(ma_format_converter* pConverter, ma_uint32 frameCount, void* pFramesOut, void* pUserData)
+ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes)
{
- ma_pcm_converter_callback_data* pData;
- ma_pcm_converter* pDSP;
+ ma_uint32 readOffset;
+ ma_uint32 readOffsetInBytes;
+ ma_uint32 readOffsetLoopFlag;
+ ma_uint32 writeOffset;
+ ma_uint32 writeOffsetInBytes;
+ ma_uint32 writeOffsetLoopFlag;
+ ma_uint32 newReadOffsetInBytes;
+ ma_uint32 newReadOffsetLoopFlag;
- (void)pConverter;
+ if (pRB == NULL || offsetInBytes > pRB->subbufferSizeInBytes) {
+ return MA_INVALID_ARGS;
+ }
+
+ readOffset = pRB->encodedReadOffset;
+ ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
- pData = (ma_pcm_converter_callback_data*)pUserData;
- ma_assert(pData != NULL);
+ writeOffset = pRB->encodedWriteOffset;
+ ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
- pDSP = pData->pDSP;
- ma_assert(pDSP != NULL);
+ newReadOffsetInBytes = readOffsetInBytes;
+ newReadOffsetLoopFlag = readOffsetLoopFlag;
- /* When this version of this callback is used it means we're reading directly from the client. */
- ma_assert(pDSP->isPreFormatConversionRequired == MA_FALSE);
- ma_assert(pDSP->isChannelRoutingRequired == MA_FALSE);
- ma_assert(pDSP->isSRCRequired == MA_FALSE);
+ /* We cannot go past the write buffer. */
+ if (readOffsetLoopFlag == writeOffsetLoopFlag) {
+ if ((readOffsetInBytes + offsetInBytes) > writeOffsetInBytes) {
+ newReadOffsetInBytes = writeOffsetInBytes;
+ } else {
+ newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes);
+ }
+ } else {
+ /* May end up looping. */
+ if ((readOffsetInBytes + offsetInBytes) >= pRB->subbufferSizeInBytes) {
+ newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes) - pRB->subbufferSizeInBytes;
+ newReadOffsetLoopFlag ^= 0x80000000; /* <-- Looped. */
+ } else {
+ newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes);
+ }
+ }
- return pDSP->onRead(pDSP, pFramesOut, frameCount, pData->pUserDataForClient);
+ ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetInBytes, newReadOffsetLoopFlag));
+ return MA_SUCCESS;
}
-ma_uint32 ma_pcm_converter__post_format_converter_on_read_deinterleaved(ma_format_converter* pConverter, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData)
+ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes)
{
- ma_pcm_converter_callback_data* pData;
- ma_pcm_converter* pDSP;
+ ma_uint32 readOffset;
+ ma_uint32 readOffsetInBytes;
+ ma_uint32 readOffsetLoopFlag;
+ ma_uint32 writeOffset;
+ ma_uint32 writeOffsetInBytes;
+ ma_uint32 writeOffsetLoopFlag;
+ ma_uint32 newWriteOffsetInBytes;
+ ma_uint32 newWriteOffsetLoopFlag;
+
+ if (pRB == NULL) {
+ return MA_INVALID_ARGS;
+ }
- (void)pConverter;
+ readOffset = pRB->encodedReadOffset;
+ ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
- pData = (ma_pcm_converter_callback_data*)pUserData;
- ma_assert(pData != NULL);
+ writeOffset = pRB->encodedWriteOffset;
+ ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
- pDSP = pData->pDSP;
- ma_assert(pDSP != NULL);
+ newWriteOffsetInBytes = writeOffsetInBytes;
+ newWriteOffsetLoopFlag = writeOffsetLoopFlag;
- if (!pDSP->isChannelRoutingAtStart) {
- return (ma_uint32)ma_channel_router_read_deinterleaved(&pDSP->channelRouter, frameCount, ppSamplesOut, pUserData);
+ /* We cannot go past the read pointer. */
+ if (readOffsetLoopFlag == writeOffsetLoopFlag) {
+ /* May end up looping. */
+ if ((writeOffsetInBytes + offsetInBytes) >= pRB->subbufferSizeInBytes) {
+ newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes) - pRB->subbufferSizeInBytes;
+ newWriteOffsetLoopFlag ^= 0x80000000; /* <-- Looped. */
+ } else {
+ newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes);
+ }
} else {
- if (pDSP->isSRCRequired) {
- return (ma_uint32)ma_src_read_deinterleaved(&pDSP->src, frameCount, ppSamplesOut, pUserData);
+ if ((writeOffsetInBytes + offsetInBytes) > readOffsetInBytes) {
+ newWriteOffsetInBytes = readOffsetInBytes;
} else {
- return (ma_uint32)ma_channel_router_read_deinterleaved(&pDSP->channelRouter, frameCount, ppSamplesOut, pUserData);
+ newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes);
}
}
+
+ ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetInBytes, newWriteOffsetLoopFlag));
+ return MA_SUCCESS;
}
-ma_uint32 ma_pcm_converter__src_on_read_deinterleaved(ma_src* pSRC, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData)
+ma_int32 ma_rb_pointer_distance(ma_rb* pRB)
{
- ma_pcm_converter_callback_data* pData;
- ma_pcm_converter* pDSP;
+ ma_uint32 readOffset;
+ ma_uint32 readOffsetInBytes;
+ ma_uint32 readOffsetLoopFlag;
+ ma_uint32 writeOffset;
+ ma_uint32 writeOffsetInBytes;
+ ma_uint32 writeOffsetLoopFlag;
- (void)pSRC;
+ if (pRB == NULL) {
+ return 0;
+ }
- pData = (ma_pcm_converter_callback_data*)pUserData;
- ma_assert(pData != NULL);
+ readOffset = pRB->encodedReadOffset;
+ ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
- pDSP = pData->pDSP;
- ma_assert(pDSP != NULL);
+ writeOffset = pRB->encodedWriteOffset;
+ ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
- /* If the channel routing stage is at the front we need to read from that. Otherwise we read from the pre format converter. */
- if (pDSP->isChannelRoutingAtStart) {
- return (ma_uint32)ma_channel_router_read_deinterleaved(&pDSP->channelRouter, frameCount, ppSamplesOut, pUserData);
+ if (readOffsetLoopFlag == writeOffsetLoopFlag) {
+ return writeOffsetInBytes - readOffsetInBytes;
} else {
- return (ma_uint32)ma_format_converter_read_deinterleaved(&pDSP->formatConverterIn, frameCount, ppSamplesOut, pUserData);
+ return writeOffsetInBytes + (pRB->subbufferSizeInBytes - readOffsetInBytes);
}
}
-ma_uint32 ma_pcm_converter__channel_router_on_read_deinterleaved(ma_channel_router* pRouter, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData)
+ma_uint32 ma_rb_available_read(ma_rb* pRB)
{
- ma_pcm_converter_callback_data* pData;
- ma_pcm_converter* pDSP;
+ ma_int32 dist;
- (void)pRouter;
+ if (pRB == NULL) {
+ return 0;
+ }
- pData = (ma_pcm_converter_callback_data*)pUserData;
- ma_assert(pData != NULL);
+ dist = ma_rb_pointer_distance(pRB);
+ if (dist < 0) {
+ return 0;
+ }
- pDSP = pData->pDSP;
- ma_assert(pDSP != NULL);
+ return dist;
+}
- /* If the channel routing stage is at the front of the pipeline we read from the pre format converter. Otherwise we read from the sample rate converter. */
- if (pDSP->isChannelRoutingAtStart) {
- return (ma_uint32)ma_format_converter_read_deinterleaved(&pDSP->formatConverterIn, frameCount, ppSamplesOut, pUserData);
- } else {
- if (pDSP->isSRCRequired) {
- return (ma_uint32)ma_src_read_deinterleaved(&pDSP->src, frameCount, ppSamplesOut, pUserData);
- } else {
- return (ma_uint32)ma_format_converter_read_deinterleaved(&pDSP->formatConverterIn, frameCount, ppSamplesOut, pUserData);
- }
+ma_uint32 ma_rb_available_write(ma_rb* pRB)
+{
+ if (pRB == NULL) {
+ return 0;
}
+
+ return (ma_uint32)(ma_rb_get_subbuffer_size(pRB) - ma_rb_pointer_distance(pRB));
}
-ma_result ma_pcm_converter_init(const ma_pcm_converter_config* pConfig, ma_pcm_converter* pDSP)
+size_t ma_rb_get_subbuffer_size(ma_rb* pRB)
{
- ma_result result;
+ if (pRB == NULL) {
+ return 0;
+ }
- if (pDSP == NULL) {
- return MA_INVALID_ARGS;
+ return pRB->subbufferSizeInBytes;
+}
+
+size_t ma_rb_get_subbuffer_stride(ma_rb* pRB)
+{
+ if (pRB == NULL) {
+ return 0;
}
- ma_zero_object(pDSP);
- pDSP->onRead = pConfig->onRead;
- pDSP->pUserData = pConfig->pUserData;
- pDSP->isDynamicSampleRateAllowed = pConfig->allowDynamicSampleRate;
+ if (pRB->subbufferStrideInBytes == 0) {
+ return (size_t)pRB->subbufferSizeInBytes;
+ }
- /*
- In general, this is the pipeline used for data conversion. Note that this can actually change which is explained later.
-
- Pre Format Conversion -> Sample Rate Conversion -> Channel Routing -> Post Format Conversion
-
- Pre Format Conversion
- ---------------------
- This is where the sample data is converted to a format that's usable by the later stages in the pipeline. Input data
- is converted to deinterleaved floating-point.
-
- Channel Routing
- ---------------
- Channel routing is where stereo is converted to 5.1, mono is converted to stereo, etc. This stage depends on the
- pre format conversion stage.
-
- Sample Rate Conversion
- ----------------------
- Sample rate conversion depends on the pre format conversion stage and as the name implies performs sample rate conversion.
-
- Post Format Conversion
- ----------------------
- This stage is where our deinterleaved floating-point data from the previous stages are converted to the requested output
- format.
-
-
- Optimizations
- -------------
- Sometimes the conversion pipeline is rearranged for efficiency. The first obvious optimization is to eliminate unnecessary
- stages in the pipeline. When no channel routing nor sample rate conversion is necessary, the entire pipeline is optimized
- down to just this:
-
- Post Format Conversion
-
- When sample rate conversion is not unnecessary:
-
- Pre Format Conversion -> Channel Routing -> Post Format Conversion
-
- When channel routing is unnecessary:
-
- Pre Format Conversion -> Sample Rate Conversion -> Post Format Conversion
-
- A slightly less obvious optimization is used depending on whether or not we are increasing or decreasing the number of
- channels. Because everything in the pipeline works on a per-channel basis, the efficiency of the pipeline is directly
- proportionate to the number of channels that need to be processed. Therefore, it's can be more efficient to move the
- channel conversion stage to an earlier or later stage. When the channel count is being reduced, we move the channel
- conversion stage to the start of the pipeline so that later stages can work on a smaller number of channels at a time.
- Otherwise, we move the channel conversion stage to the end of the pipeline. When reducing the channel count, the pipeline
- will look like this:
-
- Pre Format Conversion -> Channel Routing -> Sample Rate Conversion -> Post Format Conversion
-
- Notice how the Channel Routing and Sample Rate Conversion stages are swapped so that the SRC stage has less data to process.
- */
+ return (size_t)pRB->subbufferStrideInBytes;
+}
- /* First we need to determine what's required and what's not. */
- if (pConfig->sampleRateIn != pConfig->sampleRateOut || pConfig->allowDynamicSampleRate) {
- pDSP->isSRCRequired = MA_TRUE;
+size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex)
+{
+ if (pRB == NULL) {
+ return 0;
}
- if (pConfig->channelsIn != pConfig->channelsOut || !ma_channel_map_equal(pConfig->channelsIn, pConfig->channelMapIn, pConfig->channelMapOut)) {
- pDSP->isChannelRoutingRequired = MA_TRUE;
+
+ return subbufferIndex * ma_rb_get_subbuffer_stride(pRB);
+}
+
+void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t subbufferIndex, void* pBuffer)
+{
+ if (pRB == NULL) {
+ return NULL;
}
- /* If neither a sample rate conversion nor channel conversion is necessary we can skip the pre format conversion. */
- if (!pDSP->isSRCRequired && !pDSP->isChannelRoutingRequired) {
- /* We don't need a pre format conversion stage, but we may still need a post format conversion stage. */
- if (pConfig->formatIn != pConfig->formatOut) {
- pDSP->isPostFormatConversionRequired = MA_TRUE;
- }
- } else {
- pDSP->isPreFormatConversionRequired = MA_TRUE;
- pDSP->isPostFormatConversionRequired = MA_TRUE;
+ return ma_offset_ptr(pBuffer, ma_rb_get_subbuffer_offset(pRB, subbufferIndex));
+}
+
+
+static MA_INLINE ma_uint32 ma_pcm_rb_get_bpf(ma_pcm_rb* pRB)
+{
+ MA_ASSERT(pRB != NULL);
+
+ return ma_get_bytes_per_frame(pRB->format, pRB->channels);
+}
+
+ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB)
+{
+ ma_uint32 bpf;
+ ma_result result;
+
+ if (pRB == NULL) {
+ return MA_INVALID_ARGS;
}
- /* Use a passthrough if none of the stages are being used. */
- if (!pDSP->isPreFormatConversionRequired && !pDSP->isPostFormatConversionRequired && !pDSP->isChannelRoutingRequired && !pDSP->isSRCRequired) {
- pDSP->isPassthrough = MA_TRUE;
+ MA_ZERO_OBJECT(pRB);
+
+ bpf = ma_get_bytes_per_frame(format, channels);
+ if (bpf == 0) {
+ return MA_INVALID_ARGS;
}
- /* Move the channel conversion stage to the start of the pipeline if we are reducing the channel count. */
- if (pConfig->channelsOut < pConfig->channelsIn) {
- pDSP->isChannelRoutingAtStart = MA_TRUE;
+ result = ma_rb_init_ex(subbufferSizeInFrames*bpf, subbufferCount, subbufferStrideInFrames*bpf, pOptionalPreallocatedBuffer, pAllocationCallbacks, &pRB->rb);
+ if (result != MA_SUCCESS) {
+ return result;
}
+ pRB->format = format;
+ pRB->channels = channels;
- /*
- We always initialize every stage of the pipeline regardless of whether or not the stage is used because it simplifies
- a few things when it comes to dynamically changing properties post-initialization.
- */
- result = MA_SUCCESS;
+ return MA_SUCCESS;
+}
- /* Pre format conversion. */
- {
- ma_format_converter_config preFormatConverterConfig = ma_format_converter_config_init(
- pConfig->formatIn,
- ma_format_f32,
- pConfig->channelsIn,
- ma_pcm_converter__pre_format_converter_on_read,
- pDSP
- );
- preFormatConverterConfig.ditherMode = pConfig->ditherMode;
- preFormatConverterConfig.noSSE2 = pConfig->noSSE2;
- preFormatConverterConfig.noAVX2 = pConfig->noAVX2;
- preFormatConverterConfig.noAVX512 = pConfig->noAVX512;
- preFormatConverterConfig.noNEON = pConfig->noNEON;
-
- result = ma_format_converter_init(&preFormatConverterConfig, &pDSP->formatConverterIn);
- if (result != MA_SUCCESS) {
- return result;
- }
+ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB)
+{
+ return ma_pcm_rb_init_ex(format, channels, bufferSizeInFrames, 1, 0, pOptionalPreallocatedBuffer, pAllocationCallbacks, pRB);
+}
+
+void ma_pcm_rb_uninit(ma_pcm_rb* pRB)
+{
+ if (pRB == NULL) {
+ return;
}
- /*
- Post format conversion. The exact configuration for this depends on whether or not we are reading data directly from the client
- or from an earlier stage in the pipeline.
- */
- {
- ma_format_converter_config postFormatConverterConfig = ma_format_converter_config_init_new();
- postFormatConverterConfig.formatIn = pConfig->formatIn;
- postFormatConverterConfig.formatOut = pConfig->formatOut;
- postFormatConverterConfig.channels = pConfig->channelsOut;
- postFormatConverterConfig.ditherMode = pConfig->ditherMode;
- postFormatConverterConfig.noSSE2 = pConfig->noSSE2;
- postFormatConverterConfig.noAVX2 = pConfig->noAVX2;
- postFormatConverterConfig.noAVX512 = pConfig->noAVX512;
- postFormatConverterConfig.noNEON = pConfig->noNEON;
- if (pDSP->isPreFormatConversionRequired) {
- postFormatConverterConfig.onReadDeinterleaved = ma_pcm_converter__post_format_converter_on_read_deinterleaved;
- postFormatConverterConfig.formatIn = ma_format_f32;
- } else {
- postFormatConverterConfig.onRead = ma_pcm_converter__post_format_converter_on_read;
- }
+ ma_rb_uninit(&pRB->rb);
+}
- result = ma_format_converter_init(&postFormatConverterConfig, &pDSP->formatConverterOut);
- if (result != MA_SUCCESS) {
- return result;
- }
+void ma_pcm_rb_reset(ma_pcm_rb* pRB)
+{
+ if (pRB == NULL) {
+ return;
}
- /* SRC */
- {
- ma_src_config srcConfig = ma_src_config_init(
- pConfig->sampleRateIn,
- pConfig->sampleRateOut,
- ((pConfig->channelsIn < pConfig->channelsOut) ? pConfig->channelsIn : pConfig->channelsOut),
- ma_pcm_converter__src_on_read_deinterleaved,
- pDSP
- );
- srcConfig.algorithm = pConfig->srcAlgorithm;
- srcConfig.neverConsumeEndOfInput = pConfig->neverConsumeEndOfInput;
- srcConfig.noSSE2 = pConfig->noSSE2;
- srcConfig.noAVX2 = pConfig->noAVX2;
- srcConfig.noAVX512 = pConfig->noAVX512;
- srcConfig.noNEON = pConfig->noNEON;
- ma_copy_memory(&srcConfig.sinc, &pConfig->sinc, sizeof(pConfig->sinc));
-
- result = ma_src_init(&srcConfig, &pDSP->src);
- if (result != MA_SUCCESS) {
- return result;
- }
+ ma_rb_reset(&pRB->rb);
+}
+
+ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut)
+{
+ size_t sizeInBytes;
+ ma_result result;
+
+ if (pRB == NULL || pSizeInFrames == NULL) {
+ return MA_INVALID_ARGS;
}
- /* Channel conversion */
- {
- ma_channel_router_config routerConfig = ma_channel_router_config_init(
- pConfig->channelsIn,
- pConfig->channelMapIn,
- pConfig->channelsOut,
- pConfig->channelMapOut,
- pConfig->channelMixMode,
- ma_pcm_converter__channel_router_on_read_deinterleaved,
- pDSP);
- routerConfig.noSSE2 = pConfig->noSSE2;
- routerConfig.noAVX2 = pConfig->noAVX2;
- routerConfig.noAVX512 = pConfig->noAVX512;
- routerConfig.noNEON = pConfig->noNEON;
-
- result = ma_channel_router_init(&routerConfig, &pDSP->channelRouter);
- if (result != MA_SUCCESS) {
- return result;
- }
+ sizeInBytes = *pSizeInFrames * ma_pcm_rb_get_bpf(pRB);
+
+ result = ma_rb_acquire_read(&pRB->rb, &sizeInBytes, ppBufferOut);
+ if (result != MA_SUCCESS) {
+ return result;
}
+ *pSizeInFrames = (ma_uint32)(sizeInBytes / (size_t)ma_pcm_rb_get_bpf(pRB));
return MA_SUCCESS;
}
+ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut)
+{
+ if (pRB == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ return ma_rb_commit_read(&pRB->rb, sizeInFrames * ma_pcm_rb_get_bpf(pRB), pBufferOut);
+}
-ma_result ma_pcm_converter_refresh_sample_rate(ma_pcm_converter* pDSP)
+ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut)
{
- /* The SRC stage will already have been initialized so we can just set it there. */
- ma_src_set_sample_rate(&pDSP->src, pDSP->src.config.sampleRateIn, pDSP->src.config.sampleRateOut);
+ size_t sizeInBytes;
+ ma_result result;
+
+ if (pRB == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ sizeInBytes = *pSizeInFrames * ma_pcm_rb_get_bpf(pRB);
+
+ result = ma_rb_acquire_write(&pRB->rb, &sizeInBytes, ppBufferOut);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ *pSizeInFrames = (ma_uint32)(sizeInBytes / ma_pcm_rb_get_bpf(pRB));
return MA_SUCCESS;
}
-ma_result ma_pcm_converter_set_input_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateIn)
+ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut)
{
- if (pDSP == NULL) {
- return MA_INVALID_ARGS;
- }
-
- /* Must have a sample rate of > 0. */
- if (sampleRateIn == 0) {
+ if (pRB == NULL) {
return MA_INVALID_ARGS;
}
- /* Must have been initialized with allowDynamicSampleRate. */
- if (!pDSP->isDynamicSampleRateAllowed) {
- return MA_INVALID_OPERATION;
- }
-
- ma_atomic_exchange_32(&pDSP->src.config.sampleRateIn, sampleRateIn);
- return ma_pcm_converter_refresh_sample_rate(pDSP);
+ return ma_rb_commit_write(&pRB->rb, sizeInFrames * ma_pcm_rb_get_bpf(pRB), pBufferOut);
}
-ma_result ma_pcm_converter_set_output_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateOut)
+ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames)
{
- if (pDSP == NULL) {
- return MA_INVALID_ARGS;
- }
-
- /* Must have a sample rate of > 0. */
- if (sampleRateOut == 0) {
+ if (pRB == NULL) {
return MA_INVALID_ARGS;
}
- /* Must have been initialized with allowDynamicSampleRate. */
- if (!pDSP->isDynamicSampleRateAllowed) {
- return MA_INVALID_OPERATION;
- }
-
- ma_atomic_exchange_32(&pDSP->src.config.sampleRateOut, sampleRateOut);
- return ma_pcm_converter_refresh_sample_rate(pDSP);
+ return ma_rb_seek_read(&pRB->rb, offsetInFrames * ma_pcm_rb_get_bpf(pRB));
}
-ma_result ma_pcm_converter_set_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut)
+ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames)
{
- if (pDSP == NULL) {
- return MA_INVALID_ARGS;
- }
-
- /* Must have a sample rate of > 0. */
- if (sampleRateIn == 0 || sampleRateOut == 0) {
+ if (pRB == NULL) {
return MA_INVALID_ARGS;
}
- /* Must have been initialized with allowDynamicSampleRate. */
- if (!pDSP->isDynamicSampleRateAllowed) {
- return MA_INVALID_OPERATION;
- }
-
- ma_atomic_exchange_32(&pDSP->src.config.sampleRateIn, sampleRateIn);
- ma_atomic_exchange_32(&pDSP->src.config.sampleRateOut, sampleRateOut);
-
- return ma_pcm_converter_refresh_sample_rate(pDSP);
+ return ma_rb_seek_write(&pRB->rb, offsetInFrames * ma_pcm_rb_get_bpf(pRB));
}
-ma_uint64 ma_pcm_converter_read(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint64 frameCount)
+ma_int32 ma_pcm_rb_pointer_distance(ma_pcm_rb* pRB)
{
- ma_pcm_converter_callback_data data;
-
- if (pDSP == NULL || pFramesOut == NULL) {
+ if (pRB == NULL) {
return 0;
}
- /* Fast path. */
- if (pDSP->isPassthrough) {
- if (frameCount <= 0xFFFFFFFF) {
- return (ma_uint32)pDSP->onRead(pDSP, pFramesOut, (ma_uint32)frameCount, pDSP->pUserData);
- } else {
- ma_uint8* pNextFramesOut = (ma_uint8*)pFramesOut;
-
- ma_uint64 totalFramesRead = 0;
- while (totalFramesRead < frameCount) {
- ma_uint32 framesRead;
- ma_uint64 framesRemaining = (frameCount - totalFramesRead);
- ma_uint64 framesToReadRightNow = framesRemaining;
- if (framesToReadRightNow > 0xFFFFFFFF) {
- framesToReadRightNow = 0xFFFFFFFF;
- }
-
- framesRead = pDSP->onRead(pDSP, pNextFramesOut, (ma_uint32)framesToReadRightNow, pDSP->pUserData);
- if (framesRead == 0) {
- break;
- }
-
- pNextFramesOut += framesRead * pDSP->channelRouter.config.channelsOut * ma_get_bytes_per_sample(pDSP->formatConverterOut.config.formatOut);
- totalFramesRead += framesRead;
- }
+ return ma_rb_pointer_distance(&pRB->rb) / ma_pcm_rb_get_bpf(pRB);
+}
- return totalFramesRead;
- }
+ma_uint32 ma_pcm_rb_available_read(ma_pcm_rb* pRB)
+{
+ if (pRB == NULL) {
+ return 0;
}
- /* Slower path. The real work is done here. To do this all we need to do is read from the last stage in the pipeline. */
- ma_assert(pDSP->isPostFormatConversionRequired == MA_TRUE);
-
- data.pDSP = pDSP;
- data.pUserDataForClient = pDSP->pUserData;
- return ma_format_converter_read(&pDSP->formatConverterOut, frameCount, pFramesOut, &data);
+ return ma_rb_available_read(&pRB->rb) / ma_pcm_rb_get_bpf(pRB);
}
-
-typedef struct
-{
- const void* pDataIn;
- ma_format formatIn;
- ma_uint32 channelsIn;
- ma_uint64 totalFrameCount;
- ma_uint64 iNextFrame;
- ma_bool32 isFeedingZeros; /* When set to true, feeds the DSP zero samples. */
-} ma_convert_frames__data;
-
-ma_uint32 ma_convert_frames__on_read(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint32 frameCount, void* pUserData)
+ma_uint32 ma_pcm_rb_available_write(ma_pcm_rb* pRB)
{
- ma_convert_frames__data* pData;
- ma_uint32 framesToRead;
- ma_uint64 framesRemaining;
- ma_uint32 frameSizeInBytes;
-
- (void)pDSP;
-
- pData = (ma_convert_frames__data*)pUserData;
- ma_assert(pData != NULL);
- ma_assert(pData->totalFrameCount >= pData->iNextFrame);
-
- framesToRead = frameCount;
- framesRemaining = (pData->totalFrameCount - pData->iNextFrame);
- if (framesToRead > framesRemaining) {
- framesToRead = (ma_uint32)framesRemaining;
+ if (pRB == NULL) {
+ return 0;
}
- frameSizeInBytes = ma_get_bytes_per_frame(pData->formatIn, pData->channelsIn);
+ return ma_rb_available_write(&pRB->rb) / ma_pcm_rb_get_bpf(pRB);
+}
- if (!pData->isFeedingZeros) {
- ma_copy_memory(pFramesOut, (const ma_uint8*)pData->pDataIn + (frameSizeInBytes * pData->iNextFrame), frameSizeInBytes * framesToRead);
- } else {
- ma_zero_memory(pFramesOut, frameSizeInBytes * framesToRead);
+ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB)
+{
+ if (pRB == NULL) {
+ return 0;
}
- pData->iNextFrame += framesToRead;
- return framesToRead;
+ return (ma_uint32)(ma_rb_get_subbuffer_size(&pRB->rb) / ma_pcm_rb_get_bpf(pRB));
}
-ma_pcm_converter_config ma_pcm_converter_config_init_new()
+ma_uint32 ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB)
{
- ma_pcm_converter_config config;
- ma_zero_object(&config);
+ if (pRB == NULL) {
+ return 0;
+ }
- return config;
+ return (ma_uint32)(ma_rb_get_subbuffer_stride(&pRB->rb) / ma_pcm_rb_get_bpf(pRB));
}
-ma_pcm_converter_config ma_pcm_converter_config_init(ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_pcm_converter_read_proc onRead, void* pUserData)
+ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex)
{
- return ma_pcm_converter_config_init_ex(formatIn, channelsIn, sampleRateIn, NULL, formatOut, channelsOut, sampleRateOut, NULL, onRead, pUserData);
+ if (pRB == NULL) {
+ return 0;
+ }
+
+ return (ma_uint32)(ma_rb_get_subbuffer_offset(&pRB->rb, subbufferIndex) / ma_pcm_rb_get_bpf(pRB));
}
-ma_pcm_converter_config ma_pcm_converter_config_init_ex(ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_channel channelMapIn[MA_MAX_CHANNELS], ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_channel channelMapOut[MA_MAX_CHANNELS], ma_pcm_converter_read_proc onRead, void* pUserData)
+void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer)
{
- ma_pcm_converter_config config;
- ma_zero_object(&config);
- config.formatIn = formatIn;
- config.channelsIn = channelsIn;
- config.sampleRateIn = sampleRateIn;
- config.formatOut = formatOut;
- config.channelsOut = channelsOut;
- config.sampleRateOut = sampleRateOut;
- if (channelMapIn != NULL) {
- ma_copy_memory(config.channelMapIn, channelMapIn, sizeof(config.channelMapIn));
- }
- if (channelMapOut != NULL) {
- ma_copy_memory(config.channelMapOut, channelMapOut, sizeof(config.channelMapOut));
+ if (pRB == NULL) {
+ return NULL;
}
- config.onRead = onRead;
- config.pUserData = pUserData;
- return config;
+ return ma_rb_get_subbuffer_ptr(&pRB->rb, subbufferIndex, pBuffer);
}
-ma_uint64 ma_convert_frames(void* pOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_uint64 frameCount)
-{
- ma_channel channelMapOut[MA_MAX_CHANNELS];
- ma_channel channelMapIn[MA_MAX_CHANNELS];
-
- ma_get_standard_channel_map(ma_standard_channel_map_default, channelsOut, channelMapOut);
- ma_get_standard_channel_map(ma_standard_channel_map_default, channelsIn, channelMapIn);
+/**************************************************************************************************************************************************************
- return ma_convert_frames_ex(pOut, formatOut, channelsOut, sampleRateOut, channelMapOut, pIn, formatIn, channelsIn, sampleRateIn, channelMapIn, frameCount);
-}
+Miscellaneous Helpers
-ma_uint64 ma_convert_frames_ex(void* pOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_channel channelMapOut[MA_MAX_CHANNELS], const void* pIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint64 frameCount)
+**************************************************************************************************************************************************************/
+const char* ma_result_description(ma_result result)
{
- ma_uint64 frameCountOut;
- ma_convert_frames__data data;
- ma_pcm_converter_config converterConfig;
- ma_pcm_converter converter;
- ma_uint64 totalFramesRead;
-
- if (frameCount == 0) {
- return 0;
- }
-
- frameCountOut = ma_calculate_frame_count_after_src(sampleRateOut, sampleRateIn, frameCount);
- if (pOut == NULL) {
- return frameCountOut;
+ switch (result)
+ {
+ case MA_SUCCESS: return "No error";
+ case MA_ERROR: return "Unknown error";
+ case MA_INVALID_ARGS: return "Invalid argument";
+ case MA_INVALID_OPERATION: return "Invalid operation";
+ case MA_OUT_OF_MEMORY: return "Out of memory";
+ case MA_OUT_OF_RANGE: return "Out of range";
+ case MA_ACCESS_DENIED: return "Permission denied";
+ case MA_DOES_NOT_EXIST: return "Resource does not exist";
+ case MA_ALREADY_EXISTS: return "Resource already exists";
+ case MA_TOO_MANY_OPEN_FILES: return "Too many open files";
+ case MA_INVALID_FILE: return "Invalid file";
+ case MA_TOO_BIG: return "Too large";
+ case MA_PATH_TOO_LONG: return "Path too long";
+ case MA_NAME_TOO_LONG: return "Name too long";
+ case MA_NOT_DIRECTORY: return "Not a directory";
+ case MA_IS_DIRECTORY: return "Is a directory";
+ case MA_DIRECTORY_NOT_EMPTY: return "Directory not empty";
+ case MA_END_OF_FILE: return "End of file";
+ case MA_NO_SPACE: return "No space available";
+ case MA_BUSY: return "Device or resource busy";
+ case MA_IO_ERROR: return "Input/output error";
+ case MA_INTERRUPT: return "Interrupted";
+ case MA_UNAVAILABLE: return "Resource unavailable";
+ case MA_ALREADY_IN_USE: return "Resource already in use";
+ case MA_BAD_ADDRESS: return "Bad address";
+ case MA_BAD_SEEK: return "Illegal seek";
+ case MA_BAD_PIPE: return "Broken pipe";
+ case MA_DEADLOCK: return "Deadlock";
+ case MA_TOO_MANY_LINKS: return "Too many links";
+ case MA_NOT_IMPLEMENTED: return "Not implemented";
+ case MA_NO_MESSAGE: return "No message of desired type";
+ case MA_BAD_MESSAGE: return "Invalid message";
+ case MA_NO_DATA_AVAILABLE: return "No data available";
+ case MA_INVALID_DATA: return "Invalid data";
+ case MA_TIMEOUT: return "Timeout";
+ case MA_NO_NETWORK: return "Network unavailable";
+ case MA_NOT_UNIQUE: return "Not unique";
+ case MA_NOT_SOCKET: return "Socket operation on non-socket";
+ case MA_NO_ADDRESS: return "Destination address required";
+ case MA_BAD_PROTOCOL: return "Protocol wrong type for socket";
+ case MA_PROTOCOL_UNAVAILABLE: return "Protocol not available";
+ case MA_PROTOCOL_NOT_SUPPORTED: return "Protocol not supported";
+ case MA_PROTOCOL_FAMILY_NOT_SUPPORTED: return "Protocol family not supported";
+ case MA_ADDRESS_FAMILY_NOT_SUPPORTED: return "Address family not supported";
+ case MA_SOCKET_NOT_SUPPORTED: return "Socket type not supported";
+ case MA_CONNECTION_RESET: return "Connection reset";
+ case MA_ALREADY_CONNECTED: return "Already connected";
+ case MA_NOT_CONNECTED: return "Not connected";
+ case MA_CONNECTION_REFUSED: return "Connection refused";
+ case MA_NO_HOST: return "No host";
+ case MA_IN_PROGRESS: return "Operation in progress";
+ case MA_CANCELLED: return "Operation cancelled";
+ case MA_MEMORY_ALREADY_MAPPED: return "Memory already mapped";
+ case MA_AT_END: return "Reached end of collection";
+
+ case MA_FORMAT_NOT_SUPPORTED: return "Format not supported";
+ case MA_DEVICE_TYPE_NOT_SUPPORTED: return "Device type not supported";
+ case MA_SHARE_MODE_NOT_SUPPORTED: return "Share mode not supported";
+ case MA_NO_BACKEND: return "No backend";
+ case MA_NO_DEVICE: return "No device";
+ case MA_API_NOT_FOUND: return "API not found";
+ case MA_INVALID_DEVICE_CONFIG: return "Invalid device config";
+
+ case MA_DEVICE_NOT_INITIALIZED: return "Device not initialized";
+ case MA_DEVICE_NOT_STARTED: return "Device not started";
+
+ case MA_FAILED_TO_INIT_BACKEND: return "Failed to initialize backend";
+ case MA_FAILED_TO_OPEN_BACKEND_DEVICE: return "Failed to open backend device";
+ case MA_FAILED_TO_START_BACKEND_DEVICE: return "Failed to start backend device";
+ case MA_FAILED_TO_STOP_BACKEND_DEVICE: return "Failed to stop backend device";
+
+ default: return "Unknown error";
+ }
+}
+
+void* ma_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pAllocationCallbacks != NULL) {
+ return ma__malloc_from_callbacks(sz, pAllocationCallbacks);
+ } else {
+ return ma__malloc_default(sz, NULL);
}
+}
- data.pDataIn = pIn;
- data.formatIn = formatIn;
- data.channelsIn = channelsIn;
- data.totalFrameCount = frameCount;
- data.iNextFrame = 0;
- data.isFeedingZeros = MA_FALSE;
-
- ma_zero_object(&converterConfig);
-
- converterConfig.formatIn = formatIn;
- converterConfig.channelsIn = channelsIn;
- converterConfig.sampleRateIn = sampleRateIn;
- if (channelMapIn != NULL) {
- ma_channel_map_copy(converterConfig.channelMapIn, channelMapIn, channelsIn);
+void* ma_realloc(void* p, size_t sz, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pAllocationCallbacks != NULL) {
+ if (pAllocationCallbacks->onRealloc != NULL) {
+ return pAllocationCallbacks->onRealloc(p, sz, pAllocationCallbacks->pUserData);
+ } else {
+ return NULL; /* This requires a native implementation of realloc(). */
+ }
} else {
- ma_get_standard_channel_map(ma_standard_channel_map_default, converterConfig.channelsIn, converterConfig.channelMapIn);
+ return ma__realloc_default(p, sz, NULL);
}
-
- converterConfig.formatOut = formatOut;
- converterConfig.channelsOut = channelsOut;
- converterConfig.sampleRateOut = sampleRateOut;
- if (channelMapOut != NULL) {
- ma_channel_map_copy(converterConfig.channelMapOut, channelMapOut, channelsOut);
+}
+
+void ma_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pAllocationCallbacks != NULL) {
+ ma__free_from_callbacks(p, pAllocationCallbacks);
} else {
- ma_get_standard_channel_map(ma_standard_channel_map_default, converterConfig.channelsOut, converterConfig.channelMapOut);
+ ma__free_default(p, NULL);
}
+}
- converterConfig.onRead = ma_convert_frames__on_read;
- converterConfig.pUserData = &data;
+void* ma_aligned_malloc(size_t sz, size_t alignment, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+ size_t extraBytes;
+ void* pUnaligned;
+ void* pAligned;
- if (ma_pcm_converter_init(&converterConfig, &converter) != MA_SUCCESS) {
+ if (alignment == 0) {
return 0;
}
- /*
- Always output our computed frame count. There is a chance the sample rate conversion routine may not output the last sample
- due to precision issues with 32-bit floats, in which case we should feed the DSP zero samples so it can generate that last
- frame.
- */
- totalFramesRead = ma_pcm_converter_read(&converter, pOut, frameCountOut);
- if (totalFramesRead < frameCountOut) {
- ma_uint32 bpf = ma_get_bytes_per_frame(formatIn, channelsIn);
-
- data.isFeedingZeros = MA_TRUE;
- data.totalFrameCount = ((ma_uint64)0xFFFFFFFF << 32) | 0xFFFFFFFF; /* C89 does not support 64-bit constants so need to instead construct it like this. Annoying... */ /*data.totalFrameCount = 0xFFFFFFFFFFFFFFFF;*/
- data.pDataIn = NULL;
-
- while (totalFramesRead < frameCountOut) {
- ma_uint64 framesToRead;
- ma_uint64 framesJustRead;
-
- framesToRead = (frameCountOut - totalFramesRead);
- ma_assert(framesToRead > 0);
-
- framesJustRead = ma_pcm_converter_read(&converter, ma_offset_ptr(pOut, totalFramesRead * bpf), framesToRead);
- totalFramesRead += framesJustRead;
-
- if (framesJustRead < framesToRead) {
- break;
- }
- }
+ extraBytes = alignment-1 + sizeof(void*);
- /* At this point we should have output every sample, but just to be super duper sure, just fill the rest with zeros. */
- if (totalFramesRead < frameCountOut) {
- ma_zero_memory_64(ma_offset_ptr(pOut, totalFramesRead * bpf), ((frameCountOut - totalFramesRead) * bpf));
- totalFramesRead = frameCountOut;
- }
+ pUnaligned = ma_malloc(sz + extraBytes, pAllocationCallbacks);
+ if (pUnaligned == NULL) {
+ return NULL;
}
- ma_assert(totalFramesRead == frameCountOut);
- return totalFramesRead;
-}
-
-
-/**************************************************************************************************************************************************************
-
-Ring Buffer
+ pAligned = (void*)(((ma_uintptr)pUnaligned + extraBytes) & ~((ma_uintptr)(alignment-1)));
+ ((void**)pAligned)[-1] = pUnaligned;
-**************************************************************************************************************************************************************/
-MA_INLINE ma_uint32 ma_rb__extract_offset_in_bytes(ma_uint32 encodedOffset)
-{
- return encodedOffset & 0x7FFFFFFF;
+ return pAligned;
}
-MA_INLINE ma_uint32 ma_rb__extract_offset_loop_flag(ma_uint32 encodedOffset)
+void ma_aligned_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks)
{
- return encodedOffset & 0x80000000;
+ ma_free(((void**)p)[-1], pAllocationCallbacks);
}
-MA_INLINE void* ma_rb__get_read_ptr(ma_rb* pRB)
+const char* ma_get_format_name(ma_format format)
{
- ma_assert(pRB != NULL);
- return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(pRB->encodedReadOffset));
+ switch (format)
+ {
+ case ma_format_unknown: return "Unknown";
+ case ma_format_u8: return "8-bit Unsigned Integer";
+ case ma_format_s16: return "16-bit Signed Integer";
+ case ma_format_s24: return "24-bit Signed Integer (Tightly Packed)";
+ case ma_format_s32: return "32-bit Signed Integer";
+ case ma_format_f32: return "32-bit IEEE Floating Point";
+ default: return "Invalid";
+ }
}
-MA_INLINE void* ma_rb__get_write_ptr(ma_rb* pRB)
+void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels)
{
- ma_assert(pRB != NULL);
- return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(pRB->encodedWriteOffset));
+ ma_uint32 i;
+ for (i = 0; i < channels; ++i) {
+ pOut[i] = ma_mix_f32(pInA[i], pInB[i], factor);
+ }
}
-MA_INLINE ma_uint32 ma_rb__construct_offset(ma_uint32 offsetInBytes, ma_uint32 offsetLoopFlag)
-{
- return offsetLoopFlag | offsetInBytes;
-}
-MA_INLINE void ma_rb__deconstruct_offset(ma_uint32 encodedOffset, ma_uint32* pOffsetInBytes, ma_uint32* pOffsetLoopFlag)
+ma_uint32 ma_get_bytes_per_sample(ma_format format)
{
- ma_assert(pOffsetInBytes != NULL);
- ma_assert(pOffsetLoopFlag != NULL);
-
- *pOffsetInBytes = ma_rb__extract_offset_in_bytes(encodedOffset);
- *pOffsetLoopFlag = ma_rb__extract_offset_loop_flag(encodedOffset);
+ ma_uint32 sizes[] = {
+ 0, /* unknown */
+ 1, /* u8 */
+ 2, /* s16 */
+ 3, /* s24 */
+ 4, /* s32 */
+ 4, /* f32 */
+ };
+ return sizes[format];
}
-ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, ma_rb* pRB)
-{
- const ma_uint32 maxSubBufferSize = 0x7FFFFFFF - (MA_SIMD_ALIGNMENT-1);
+/**************************************************************************************************************************************************************
- if (pRB == NULL) {
- return MA_INVALID_ARGS;
- }
+Decoding
- if (subbufferSizeInBytes == 0 || subbufferCount == 0) {
- return MA_INVALID_ARGS;
- }
+**************************************************************************************************************************************************************/
+#ifndef MA_NO_DECODING
- if (subbufferSizeInBytes > maxSubBufferSize) {
- return MA_INVALID_ARGS; /* Maximum buffer size is ~2GB. The most significant bit is a flag for use internally. */
- }
+static size_t ma_decoder_read_bytes(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead)
+{
+ size_t bytesRead;
+ MA_ASSERT(pDecoder != NULL);
+ MA_ASSERT(pBufferOut != NULL);
- ma_zero_object(pRB);
- pRB->subbufferSizeInBytes = (ma_uint32)subbufferSizeInBytes;
- pRB->subbufferCount = (ma_uint32)subbufferCount;
+ bytesRead = pDecoder->onRead(pDecoder, pBufferOut, bytesToRead);
+ pDecoder->readPointer += bytesRead;
- if (pOptionalPreallocatedBuffer != NULL) {
- pRB->subbufferStrideInBytes = (ma_uint32)subbufferStrideInBytes;
- pRB->pBuffer = pOptionalPreallocatedBuffer;
- } else {
- size_t bufferSizeInBytes;
+ return bytesRead;
+}
- /*
- Here is where we allocate our own buffer. We always want to align this to MA_SIMD_ALIGNMENT for future SIMD optimization opportunity. To do this
- we need to make sure the stride is a multiple of MA_SIMD_ALIGNMENT.
- */
- pRB->subbufferStrideInBytes = (pRB->subbufferSizeInBytes + (MA_SIMD_ALIGNMENT-1)) & ~MA_SIMD_ALIGNMENT;
+static ma_bool32 ma_decoder_seek_bytes(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin)
+{
+ ma_bool32 wasSuccessful;
- bufferSizeInBytes = (size_t)pRB->subbufferCount*pRB->subbufferStrideInBytes;
- pRB->pBuffer = ma_aligned_malloc(bufferSizeInBytes, MA_SIMD_ALIGNMENT);
- if (pRB->pBuffer == NULL) {
- return MA_OUT_OF_MEMORY;
- }
+ MA_ASSERT(pDecoder != NULL);
- ma_zero_memory(pRB->pBuffer, bufferSizeInBytes);
- pRB->ownsBuffer = MA_TRUE;
+ wasSuccessful = pDecoder->onSeek(pDecoder, byteOffset, origin);
+ if (wasSuccessful) {
+ if (origin == ma_seek_origin_start) {
+ pDecoder->readPointer = (ma_uint64)byteOffset;
+ } else {
+ pDecoder->readPointer += byteOffset;
+ }
}
- return MA_SUCCESS;
+ return wasSuccessful;
}
-ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, ma_rb* pRB)
+
+ma_decoder_config ma_decoder_config_init(ma_format outputFormat, ma_uint32 outputChannels, ma_uint32 outputSampleRate)
{
- return ma_rb_init_ex(bufferSizeInBytes, 1, 0, pOptionalPreallocatedBuffer, pRB);
+ ma_decoder_config config;
+ MA_ZERO_OBJECT(&config);
+ config.format = outputFormat;
+ config.channels = outputChannels;
+ config.sampleRate = outputSampleRate;
+ config.resampling.algorithm = ma_resample_algorithm_linear;
+ config.resampling.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER);
+ config.resampling.speex.quality = 3;
+
+ /* Note that we are intentionally leaving the channel map empty here which will cause the default channel map to be used. */
+
+ return config;
}
-void ma_rb_uninit(ma_rb* pRB)
+ma_decoder_config ma_decoder_config_init_copy(const ma_decoder_config* pConfig)
{
- if (pRB == NULL) {
- return;
+ ma_decoder_config config;
+ if (pConfig != NULL) {
+ config = *pConfig;
+ } else {
+ MA_ZERO_OBJECT(&config);
}
- if (pRB->ownsBuffer) {
- ma_aligned_free(pRB->pBuffer);
- }
+ return config;
}
-ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut)
+static ma_result ma_decoder__init_data_converter(ma_decoder* pDecoder, const ma_decoder_config* pConfig)
{
- ma_uint32 writeOffset;
- ma_uint32 writeOffsetInBytes;
- ma_uint32 writeOffsetLoopFlag;
- ma_uint32 readOffset;
- ma_uint32 readOffsetInBytes;
- ma_uint32 readOffsetLoopFlag;
- size_t bytesAvailable;
- size_t bytesRequested;
+ ma_data_converter_config converterConfig;
- if (pRB == NULL || pSizeInBytes == NULL || ppBufferOut == NULL) {
- return MA_INVALID_ARGS;
- }
+ MA_ASSERT(pDecoder != NULL);
- /* The returned buffer should never move ahead of the write pointer. */
- writeOffset = pRB->encodedWriteOffset;
- ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
+ /* Output format. */
+ if (pConfig->format == ma_format_unknown) {
+ pDecoder->outputFormat = pDecoder->internalFormat;
+ } else {
+ pDecoder->outputFormat = pConfig->format;
+ }
- readOffset = pRB->encodedReadOffset;
- ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
+ if (pConfig->channels == 0) {
+ pDecoder->outputChannels = pDecoder->internalChannels;
+ } else {
+ pDecoder->outputChannels = pConfig->channels;
+ }
- /*
- The number of bytes available depends on whether or not the read and write pointers are on the same loop iteration. If so, we
- can only read up to the write pointer. If not, we can only read up to the end of the buffer.
- */
- if (readOffsetLoopFlag == writeOffsetLoopFlag) {
- bytesAvailable = writeOffsetInBytes - readOffsetInBytes;
+ if (pConfig->sampleRate == 0) {
+ pDecoder->outputSampleRate = pDecoder->internalSampleRate;
} else {
- bytesAvailable = pRB->subbufferSizeInBytes - readOffsetInBytes;
+ pDecoder->outputSampleRate = pConfig->sampleRate;
}
- bytesRequested = *pSizeInBytes;
- if (bytesRequested > bytesAvailable) {
- bytesRequested = bytesAvailable;
+ if (ma_channel_map_blank(pDecoder->outputChannels, pConfig->channelMap)) {
+ ma_get_standard_channel_map(ma_standard_channel_map_default, pDecoder->outputChannels, pDecoder->outputChannelMap);
+ } else {
+ MA_COPY_MEMORY(pDecoder->outputChannelMap, pConfig->channelMap, sizeof(pConfig->channelMap));
}
- *pSizeInBytes = bytesRequested;
- (*ppBufferOut) = ma_rb__get_read_ptr(pRB);
+
+ converterConfig = ma_data_converter_config_init(
+ pDecoder->internalFormat, pDecoder->outputFormat,
+ pDecoder->internalChannels, pDecoder->outputChannels,
+ pDecoder->internalSampleRate, pDecoder->outputSampleRate
+ );
+ ma_channel_map_copy(converterConfig.channelMapIn, pDecoder->internalChannelMap, pDecoder->internalChannels);
+ ma_channel_map_copy(converterConfig.channelMapOut, pDecoder->outputChannelMap, pDecoder->outputChannels);
+ converterConfig.channelMixMode = pConfig->channelMixMode;
+ converterConfig.ditherMode = pConfig->ditherMode;
+ converterConfig.resampling.allowDynamicSampleRate = MA_FALSE; /* Never allow dynamic sample rate conversion. Setting this to true will disable passthrough optimizations. */
+ converterConfig.resampling.algorithm = pConfig->resampling.algorithm;
+ converterConfig.resampling.linear.lpfOrder = pConfig->resampling.linear.lpfOrder;
+ converterConfig.resampling.speex.quality = pConfig->resampling.speex.quality;
- return MA_SUCCESS;
+ return ma_data_converter_init(&converterConfig, &pDecoder->converter);
}
-ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut)
-{
- ma_uint32 readOffset;
- ma_uint32 readOffsetInBytes;
- ma_uint32 readOffsetLoopFlag;
- ma_uint32 newReadOffsetInBytes;
- ma_uint32 newReadOffsetLoopFlag;
-
- if (pRB == NULL) {
- return MA_INVALID_ARGS;
- }
-
- /* Validate the buffer. */
- if (pBufferOut != ma_rb__get_read_ptr(pRB)) {
- return MA_INVALID_ARGS;
- }
+/* WAV */
+#ifdef dr_wav_h
+#define MA_HAS_WAV
- readOffset = pRB->encodedReadOffset;
- ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
+static size_t ma_decoder_internal_on_read__wav(void* pUserData, void* pBufferOut, size_t bytesToRead)
+{
+ ma_decoder* pDecoder = (ma_decoder*)pUserData;
+ MA_ASSERT(pDecoder != NULL);
- /* Check that sizeInBytes is correct. It should never go beyond the end of the buffer. */
- newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + sizeInBytes);
- if (newReadOffsetInBytes > pRB->subbufferSizeInBytes) {
- return MA_INVALID_ARGS; /* <-- sizeInBytes will cause the read offset to overflow. */
- }
+ return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead);
+}
- /* Move the read pointer back to the start if necessary. */
- newReadOffsetLoopFlag = readOffsetLoopFlag;
- if (newReadOffsetInBytes == pRB->subbufferSizeInBytes) {
- newReadOffsetInBytes = 0;
- newReadOffsetLoopFlag ^= 0x80000000;
- }
+static drwav_bool32 ma_decoder_internal_on_seek__wav(void* pUserData, int offset, drwav_seek_origin origin)
+{
+ ma_decoder* pDecoder = (ma_decoder*)pUserData;
+ MA_ASSERT(pDecoder != NULL);
- ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetLoopFlag, newReadOffsetInBytes));
- return MA_SUCCESS;
+ return ma_decoder_seek_bytes(pDecoder, offset, (origin == drwav_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current);
}
-ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut)
+static ma_uint64 ma_decoder_internal_on_read_pcm_frames__wav(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount)
{
- ma_uint32 readOffset;
- ma_uint32 readOffsetInBytes;
- ma_uint32 readOffsetLoopFlag;
- ma_uint32 writeOffset;
- ma_uint32 writeOffsetInBytes;
- ma_uint32 writeOffsetLoopFlag;
- size_t bytesAvailable;
- size_t bytesRequested;
-
- if (pRB == NULL || pSizeInBytes == NULL || ppBufferOut == NULL) {
- return MA_INVALID_ARGS;
- }
+ drwav* pWav;
- /* The returned buffer should never overtake the read buffer. */
- readOffset = pRB->encodedReadOffset;
- ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
+ MA_ASSERT(pDecoder != NULL);
+ MA_ASSERT(pFramesOut != NULL);
- writeOffset = pRB->encodedWriteOffset;
- ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
+ pWav = (drwav*)pDecoder->pInternalDecoder;
+ MA_ASSERT(pWav != NULL);
- /*
- In the case of writing, if the write pointer and the read pointer are on the same loop iteration we can only
- write up to the end of the buffer. Otherwise we can only write up to the read pointer. The write pointer should
- never overtake the read pointer.
- */
- if (writeOffsetLoopFlag == readOffsetLoopFlag) {
- bytesAvailable = pRB->subbufferSizeInBytes - writeOffsetInBytes;
- } else {
- bytesAvailable = readOffsetInBytes - writeOffsetInBytes;
+ switch (pDecoder->internalFormat) {
+ case ma_format_s16: return drwav_read_pcm_frames_s16(pWav, frameCount, (drwav_int16*)pFramesOut);
+ case ma_format_s32: return drwav_read_pcm_frames_s32(pWav, frameCount, (drwav_int32*)pFramesOut);
+ case ma_format_f32: return drwav_read_pcm_frames_f32(pWav, frameCount, (float*)pFramesOut);
+ default: break;
}
- bytesRequested = *pSizeInBytes;
- if (bytesRequested > bytesAvailable) {
- bytesRequested = bytesAvailable;
- }
+ /* Should never get here. If we do, it means the internal format was not set correctly at initialization time. */
+ MA_ASSERT(MA_FALSE);
+ return 0;
+}
- *pSizeInBytes = bytesRequested;
- *ppBufferOut = ma_rb__get_write_ptr(pRB);
+static ma_result ma_decoder_internal_on_seek_to_pcm_frame__wav(ma_decoder* pDecoder, ma_uint64 frameIndex)
+{
+ drwav* pWav;
+ drwav_bool32 result;
- /* Clear the buffer if desired. */
- if (pRB->clearOnWriteAcquire) {
- ma_zero_memory(*ppBufferOut, *pSizeInBytes);
+ pWav = (drwav*)pDecoder->pInternalDecoder;
+ MA_ASSERT(pWav != NULL);
+
+ result = drwav_seek_to_pcm_frame(pWav, frameIndex);
+ if (result) {
+ return MA_SUCCESS;
+ } else {
+ return MA_ERROR;
}
+}
+static ma_result ma_decoder_internal_on_uninit__wav(ma_decoder* pDecoder)
+{
+ drwav_uninit((drwav*)pDecoder->pInternalDecoder);
+ ma__free_from_callbacks(pDecoder->pInternalDecoder, &pDecoder->allocationCallbacks);
return MA_SUCCESS;
}
-ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut)
+static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__wav(ma_decoder* pDecoder)
{
- ma_uint32 writeOffset;
- ma_uint32 writeOffsetInBytes;
- ma_uint32 writeOffsetLoopFlag;
- ma_uint32 newWriteOffsetInBytes;
- ma_uint32 newWriteOffsetLoopFlag;
+ return ((drwav*)pDecoder->pInternalDecoder)->totalPCMFrameCount;
+}
- if (pRB == NULL) {
- return MA_INVALID_ARGS;
- }
+static ma_result ma_decoder_init_wav__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+ drwav* pWav;
+ drwav_allocation_callbacks allocationCallbacks;
- /* Validate the buffer. */
- if (pBufferOut != ma_rb__get_write_ptr(pRB)) {
- return MA_INVALID_ARGS;
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(pDecoder != NULL);
+
+ pWav = (drwav*)ma__malloc_from_callbacks(sizeof(*pWav), &pDecoder->allocationCallbacks);
+ if (pWav == NULL) {
+ return MA_OUT_OF_MEMORY;
}
- writeOffset = pRB->encodedWriteOffset;
- ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
+ allocationCallbacks.pUserData = pDecoder->allocationCallbacks.pUserData;
+ allocationCallbacks.onMalloc = pDecoder->allocationCallbacks.onMalloc;
+ allocationCallbacks.onRealloc = pDecoder->allocationCallbacks.onRealloc;
+ allocationCallbacks.onFree = pDecoder->allocationCallbacks.onFree;
- /* Check that sizeInBytes is correct. It should never go beyond the end of the buffer. */
- newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + sizeInBytes);
- if (newWriteOffsetInBytes > pRB->subbufferSizeInBytes) {
- return MA_INVALID_ARGS; /* <-- sizeInBytes will cause the read offset to overflow. */
+ /* Try opening the decoder first. */
+ if (!drwav_init(pWav, ma_decoder_internal_on_read__wav, ma_decoder_internal_on_seek__wav, pDecoder, &allocationCallbacks)) {
+ ma__free_from_callbacks(pWav, &pDecoder->allocationCallbacks);
+ return MA_ERROR;
}
- /* Move the read pointer back to the start if necessary. */
- newWriteOffsetLoopFlag = writeOffsetLoopFlag;
- if (newWriteOffsetInBytes == pRB->subbufferSizeInBytes) {
- newWriteOffsetInBytes = 0;
- newWriteOffsetLoopFlag ^= 0x80000000;
- }
+ /* If we get here it means we successfully initialized the WAV decoder. We can now initialize the rest of the ma_decoder. */
+ pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__wav;
+ pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__wav;
+ pDecoder->onUninit = ma_decoder_internal_on_uninit__wav;
+ pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__wav;
+ pDecoder->pInternalDecoder = pWav;
- ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetLoopFlag, newWriteOffsetInBytes));
- return MA_SUCCESS;
-}
+ /* Try to be as optimal as possible for the internal format. If miniaudio does not support a format we will fall back to f32. */
+ pDecoder->internalFormat = ma_format_unknown;
+ switch (pWav->translatedFormatTag) {
+ case DR_WAVE_FORMAT_PCM:
+ {
+ if (pWav->bitsPerSample == 8) {
+ pDecoder->internalFormat = ma_format_s16;
+ } else if (pWav->bitsPerSample == 16) {
+ pDecoder->internalFormat = ma_format_s16;
+ } else if (pWav->bitsPerSample == 32) {
+ pDecoder->internalFormat = ma_format_s32;
+ }
+ } break;
-ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes)
-{
- ma_uint32 readOffset;
- ma_uint32 readOffsetInBytes;
- ma_uint32 readOffsetLoopFlag;
- ma_uint32 writeOffset;
- ma_uint32 writeOffsetInBytes;
- ma_uint32 writeOffsetLoopFlag;
- ma_uint32 newReadOffsetInBytes;
- ma_uint32 newReadOffsetLoopFlag;
+ case DR_WAVE_FORMAT_IEEE_FLOAT:
+ {
+ if (pWav->bitsPerSample == 32) {
+ pDecoder->internalFormat = ma_format_f32;
+ }
+ } break;
- if (pRB == NULL || offsetInBytes > pRB->subbufferSizeInBytes) {
- return MA_INVALID_ARGS;
+ case DR_WAVE_FORMAT_ALAW:
+ case DR_WAVE_FORMAT_MULAW:
+ case DR_WAVE_FORMAT_ADPCM:
+ case DR_WAVE_FORMAT_DVI_ADPCM:
+ {
+ pDecoder->internalFormat = ma_format_s16;
+ } break;
}
- readOffset = pRB->encodedReadOffset;
- ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
+ if (pDecoder->internalFormat == ma_format_unknown) {
+ pDecoder->internalFormat = ma_format_f32;
+ }
- writeOffset = pRB->encodedWriteOffset;
- ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
+ pDecoder->internalChannels = pWav->channels;
+ pDecoder->internalSampleRate = pWav->sampleRate;
+ ma_get_standard_channel_map(ma_standard_channel_map_microsoft, pDecoder->internalChannels, pDecoder->internalChannelMap);
- newReadOffsetInBytes = readOffsetInBytes;
- newReadOffsetLoopFlag = readOffsetLoopFlag;
+ return MA_SUCCESS;
+}
+#endif /* dr_wav_h */
- /* We cannot go past the write buffer. */
- if (readOffsetLoopFlag == writeOffsetLoopFlag) {
- if ((readOffsetInBytes + offsetInBytes) > writeOffsetInBytes) {
- newReadOffsetInBytes = writeOffsetInBytes;
- } else {
- newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes);
- }
- } else {
- /* May end up looping. */
- if ((readOffsetInBytes + offsetInBytes) >= pRB->subbufferSizeInBytes) {
- newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes) - pRB->subbufferSizeInBytes;
- newReadOffsetLoopFlag ^= 0x80000000; /* <-- Looped. */
- } else {
- newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes);
- }
- }
+/* FLAC */
+#ifdef dr_flac_h
+#define MA_HAS_FLAC
- ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetInBytes, newReadOffsetLoopFlag));
- return MA_SUCCESS;
+static size_t ma_decoder_internal_on_read__flac(void* pUserData, void* pBufferOut, size_t bytesToRead)
+{
+ ma_decoder* pDecoder = (ma_decoder*)pUserData;
+ MA_ASSERT(pDecoder != NULL);
+
+ return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead);
}
-ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes)
+static drflac_bool32 ma_decoder_internal_on_seek__flac(void* pUserData, int offset, drflac_seek_origin origin)
{
- ma_uint32 readOffset;
- ma_uint32 readOffsetInBytes;
- ma_uint32 readOffsetLoopFlag;
- ma_uint32 writeOffset;
- ma_uint32 writeOffsetInBytes;
- ma_uint32 writeOffsetLoopFlag;
- ma_uint32 newWriteOffsetInBytes;
- ma_uint32 newWriteOffsetLoopFlag;
+ ma_decoder* pDecoder = (ma_decoder*)pUserData;
+ MA_ASSERT(pDecoder != NULL);
- if (pRB == NULL) {
- return MA_INVALID_ARGS;
- }
+ return ma_decoder_seek_bytes(pDecoder, offset, (origin == drflac_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current);
+}
- readOffset = pRB->encodedReadOffset;
- ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
+static ma_uint64 ma_decoder_internal_on_read_pcm_frames__flac(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount)
+{
+ drflac* pFlac;
- writeOffset = pRB->encodedWriteOffset;
- ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
+ MA_ASSERT(pDecoder != NULL);
+ MA_ASSERT(pFramesOut != NULL);
- newWriteOffsetInBytes = writeOffsetInBytes;
- newWriteOffsetLoopFlag = writeOffsetLoopFlag;
+ pFlac = (drflac*)pDecoder->pInternalDecoder;
+ MA_ASSERT(pFlac != NULL);
- /* We cannot go past the write buffer. */
- if (readOffsetLoopFlag == writeOffsetLoopFlag) {
- /* May end up looping. */
- if ((writeOffsetInBytes + offsetInBytes) >= pRB->subbufferSizeInBytes) {
- newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes) - pRB->subbufferSizeInBytes;
- newWriteOffsetLoopFlag ^= 0x80000000; /* <-- Looped. */
- } else {
- newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes);
- }
- } else {
- if ((writeOffsetInBytes + offsetInBytes) > readOffsetInBytes) {
- newWriteOffsetInBytes = readOffsetInBytes;
- } else {
- newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes);
- }
+ switch (pDecoder->internalFormat) {
+ case ma_format_s16: return drflac_read_pcm_frames_s16(pFlac, frameCount, (drflac_int16*)pFramesOut);
+ case ma_format_s32: return drflac_read_pcm_frames_s32(pFlac, frameCount, (drflac_int32*)pFramesOut);
+ case ma_format_f32: return drflac_read_pcm_frames_f32(pFlac, frameCount, (float*)pFramesOut);
+ default: break;
}
- ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetInBytes, newWriteOffsetLoopFlag));
- return MA_SUCCESS;
+ /* Should never get here. If we do, it means the internal format was not set correctly at initialization time. */
+ MA_ASSERT(MA_FALSE);
+ return 0;
}
-ma_int32 ma_rb_pointer_distance(ma_rb* pRB)
+static ma_result ma_decoder_internal_on_seek_to_pcm_frame__flac(ma_decoder* pDecoder, ma_uint64 frameIndex)
{
- ma_uint32 readOffset;
- ma_uint32 readOffsetInBytes;
- ma_uint32 readOffsetLoopFlag;
- ma_uint32 writeOffset;
- ma_uint32 writeOffsetInBytes;
- ma_uint32 writeOffsetLoopFlag;
-
- if (pRB == NULL) {
- return 0;
- }
-
- readOffset = pRB->encodedReadOffset;
- ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
+ drflac* pFlac;
+ drflac_bool32 result;
- writeOffset = pRB->encodedWriteOffset;
- ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
+ pFlac = (drflac*)pDecoder->pInternalDecoder;
+ MA_ASSERT(pFlac != NULL);
- if (readOffsetLoopFlag == writeOffsetLoopFlag) {
- return writeOffsetInBytes - readOffsetInBytes;
+ result = drflac_seek_to_pcm_frame(pFlac, frameIndex);
+ if (result) {
+ return MA_SUCCESS;
} else {
- return writeOffsetInBytes + (pRB->subbufferSizeInBytes - readOffsetInBytes);
+ return MA_ERROR;
}
}
-size_t ma_rb_get_subbuffer_size(ma_rb* pRB)
+static ma_result ma_decoder_internal_on_uninit__flac(ma_decoder* pDecoder)
{
- if (pRB == NULL) {
- return 0;
- }
+ drflac_close((drflac*)pDecoder->pInternalDecoder);
+ return MA_SUCCESS;
+}
- return pRB->subbufferSizeInBytes;
+static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__flac(ma_decoder* pDecoder)
+{
+ return ((drflac*)pDecoder->pInternalDecoder)->totalPCMFrameCount;
}
-size_t ma_rb_get_subbuffer_stride(ma_rb* pRB)
+static ma_result ma_decoder_init_flac__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- if (pRB == NULL) {
- return 0;
- }
+ drflac* pFlac;
+ drflac_allocation_callbacks allocationCallbacks;
- if (pRB->subbufferStrideInBytes == 0) {
- return (size_t)pRB->subbufferSizeInBytes;
- }
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(pDecoder != NULL);
- return (size_t)pRB->subbufferStrideInBytes;
-}
+ allocationCallbacks.pUserData = pDecoder->allocationCallbacks.pUserData;
+ allocationCallbacks.onMalloc = pDecoder->allocationCallbacks.onMalloc;
+ allocationCallbacks.onRealloc = pDecoder->allocationCallbacks.onRealloc;
+ allocationCallbacks.onFree = pDecoder->allocationCallbacks.onFree;
-size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex)
-{
- if (pRB == NULL) {
- return 0;
+ /* Try opening the decoder first. */
+ pFlac = drflac_open(ma_decoder_internal_on_read__flac, ma_decoder_internal_on_seek__flac, pDecoder, &allocationCallbacks);
+ if (pFlac == NULL) {
+ return MA_ERROR;
}
- return subbufferIndex * ma_rb_get_subbuffer_stride(pRB);
-}
+ /* If we get here it means we successfully initialized the FLAC decoder. We can now initialize the rest of the ma_decoder. */
+ pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__flac;
+ pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__flac;
+ pDecoder->onUninit = ma_decoder_internal_on_uninit__flac;
+ pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__flac;
+ pDecoder->pInternalDecoder = pFlac;
-void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t subbufferIndex, void* pBuffer)
-{
- if (pRB == NULL) {
- return NULL;
+ /*
+ dr_flac supports reading as s32, s16 and f32. Try to do a one-to-one mapping if possible, but fall back to s32 if not. s32 is the "native" FLAC format
+ since it's the only one that's truly lossless.
+ */
+ pDecoder->internalFormat = ma_format_s32;
+ if (pConfig->format == ma_format_s16) {
+ pDecoder->internalFormat = ma_format_s16;
+ } else if (pConfig->format == ma_format_f32) {
+ pDecoder->internalFormat = ma_format_f32;
}
- return ma_offset_ptr(pBuffer, ma_rb_get_subbuffer_offset(pRB, subbufferIndex));
+ pDecoder->internalChannels = pFlac->channels;
+ pDecoder->internalSampleRate = pFlac->sampleRate;
+ ma_get_standard_channel_map(ma_standard_channel_map_flac, pDecoder->internalChannels, pDecoder->internalChannelMap);
+
+ return MA_SUCCESS;
}
+#endif /* dr_flac_h */
+/* Vorbis */
+#ifdef STB_VORBIS_INCLUDE_STB_VORBIS_H
+#define MA_HAS_VORBIS
-static MA_INLINE ma_uint32 ma_pcm_rb_get_bpf(ma_pcm_rb* pRB)
-{
- ma_assert(pRB != NULL);
+/* The size in bytes of each chunk of data to read from the Vorbis stream. */
+#define MA_VORBIS_DATA_CHUNK_SIZE 4096
- return ma_get_bytes_per_frame(pRB->format, pRB->channels);
-}
+typedef struct
+{
+ stb_vorbis* pInternalVorbis;
+ ma_uint8* pData;
+ size_t dataSize;
+ size_t dataCapacity;
+ ma_uint32 framesConsumed; /* The number of frames consumed in ppPacketData. */
+ ma_uint32 framesRemaining; /* The number of frames remaining in ppPacketData. */
+ float** ppPacketData;
+} ma_vorbis_decoder;
-ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, ma_pcm_rb* pRB)
+static ma_uint64 ma_vorbis_decoder_read_pcm_frames(ma_vorbis_decoder* pVorbis, ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount)
{
- ma_uint32 bpf;
- ma_result result;
+ float* pFramesOutF;
+ ma_uint64 totalFramesRead;
- if (pRB == NULL) {
- return MA_INVALID_ARGS;
- }
+ MA_ASSERT(pVorbis != NULL);
+ MA_ASSERT(pDecoder != NULL);
- ma_zero_object(pRB);
+ pFramesOutF = (float*)pFramesOut;
- bpf = ma_get_bytes_per_frame(format, channels);
- if (bpf == 0) {
- return MA_INVALID_ARGS;
- }
+ totalFramesRead = 0;
+ while (frameCount > 0) {
+ /* Read from the in-memory buffer first. */
+ while (pVorbis->framesRemaining > 0 && frameCount > 0) {
+ ma_uint32 iChannel;
+ for (iChannel = 0; iChannel < pDecoder->internalChannels; ++iChannel) {
+ pFramesOutF[0] = pVorbis->ppPacketData[iChannel][pVorbis->framesConsumed];
+ pFramesOutF += 1;
+ }
- result = ma_rb_init_ex(subbufferSizeInFrames*bpf, subbufferCount, subbufferStrideInFrames*bpf, pOptionalPreallocatedBuffer, &pRB->rb);
- if (result != MA_SUCCESS) {
- return result;
- }
+ pVorbis->framesConsumed += 1;
+ pVorbis->framesRemaining -= 1;
+ frameCount -= 1;
+ totalFramesRead += 1;
+ }
- pRB->format = format;
- pRB->channels = channels;
+ if (frameCount == 0) {
+ break;
+ }
- return MA_SUCCESS;
-}
+ MA_ASSERT(pVorbis->framesRemaining == 0);
-ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, ma_pcm_rb* pRB)
-{
- return ma_pcm_rb_init_ex(format, channels, bufferSizeInFrames, 1, 0, pOptionalPreallocatedBuffer, pRB);
-}
+ /* We've run out of cached frames, so decode the next packet and continue iteration. */
+ do
+ {
+ int samplesRead;
+ int consumedDataSize;
-void ma_pcm_rb_uninit(ma_pcm_rb* pRB)
-{
- if (pRB == NULL) {
- return;
+ if (pVorbis->dataSize > INT_MAX) {
+ break; /* Too big. */
+ }
+
+ samplesRead = 0;
+ consumedDataSize = stb_vorbis_decode_frame_pushdata(pVorbis->pInternalVorbis, pVorbis->pData, (int)pVorbis->dataSize, NULL, (float***)&pVorbis->ppPacketData, &samplesRead);
+ if (consumedDataSize != 0) {
+ size_t leftoverDataSize = (pVorbis->dataSize - (size_t)consumedDataSize);
+ size_t i;
+ for (i = 0; i < leftoverDataSize; ++i) {
+ pVorbis->pData[i] = pVorbis->pData[i + consumedDataSize];
+ }
+
+ pVorbis->dataSize = leftoverDataSize;
+ pVorbis->framesConsumed = 0;
+ pVorbis->framesRemaining = samplesRead;
+ break;
+ } else {
+ /* Need more data. If there's any room in the existing buffer allocation fill that first. Otherwise expand. */
+ size_t bytesRead;
+ if (pVorbis->dataCapacity == pVorbis->dataSize) {
+ /* No room. Expand. */
+ size_t oldCap = pVorbis->dataCapacity;
+ size_t newCap = pVorbis->dataCapacity + MA_VORBIS_DATA_CHUNK_SIZE;
+ ma_uint8* pNewData;
+
+ pNewData = (ma_uint8*)ma__realloc_from_callbacks(pVorbis->pData, newCap, oldCap, &pDecoder->allocationCallbacks);
+ if (pNewData == NULL) {
+ return totalFramesRead; /* Out of memory. */
+ }
+
+ pVorbis->pData = pNewData;
+ pVorbis->dataCapacity = newCap;
+ }
+
+ /* Fill in a chunk. */
+ bytesRead = ma_decoder_read_bytes(pDecoder, pVorbis->pData + pVorbis->dataSize, (pVorbis->dataCapacity - pVorbis->dataSize));
+ if (bytesRead == 0) {
+ return totalFramesRead; /* Error reading more data. */
+ }
+
+ pVorbis->dataSize += bytesRead;
+ }
+ } while (MA_TRUE);
}
- ma_rb_uninit(&pRB->rb);
+ return totalFramesRead;
}
-ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut)
+static ma_result ma_vorbis_decoder_seek_to_pcm_frame(ma_vorbis_decoder* pVorbis, ma_decoder* pDecoder, ma_uint64 frameIndex)
{
- size_t sizeInBytes;
- ma_result result;
+ float buffer[4096];
- if (pRB == NULL || pSizeInFrames == NULL) {
- return MA_INVALID_ARGS;
+ MA_ASSERT(pVorbis != NULL);
+ MA_ASSERT(pDecoder != NULL);
+
+ /*
+    This is terribly inefficient because stb_vorbis does not have a good seeking solution with its push API. Currently this just performs
+ a full decode right from the start of the stream. Later on I'll need to write a layer that goes through all of the Ogg pages until we
+ find the one containing the sample we need. Then we know exactly where to seek for stb_vorbis.
+ */
+ if (!ma_decoder_seek_bytes(pDecoder, 0, ma_seek_origin_start)) {
+ return MA_ERROR;
}
- sizeInBytes = *pSizeInFrames * ma_pcm_rb_get_bpf(pRB);
+ stb_vorbis_flush_pushdata(pVorbis->pInternalVorbis);
+ pVorbis->framesConsumed = 0;
+ pVorbis->framesRemaining = 0;
+ pVorbis->dataSize = 0;
+
+ while (frameIndex > 0) {
+ ma_uint32 framesRead;
+ ma_uint32 framesToRead = ma_countof(buffer)/pDecoder->internalChannels;
+ if (framesToRead > frameIndex) {
+ framesToRead = (ma_uint32)frameIndex;
+ }
+
+ framesRead = (ma_uint32)ma_vorbis_decoder_read_pcm_frames(pVorbis, pDecoder, buffer, framesToRead);
+ if (framesRead == 0) {
+ return MA_ERROR;
+ }
- result = ma_rb_acquire_read(&pRB->rb, &sizeInBytes, ppBufferOut);
- if (result != MA_SUCCESS) {
- return result;
+ frameIndex -= framesRead;
}
- *pSizeInFrames = (ma_uint32)(sizeInBytes / (size_t)ma_pcm_rb_get_bpf(pRB));
return MA_SUCCESS;
}
-ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut)
+
+static ma_result ma_decoder_internal_on_seek_to_pcm_frame__vorbis(ma_decoder* pDecoder, ma_uint64 frameIndex)
{
- if (pRB == NULL) {
- return MA_INVALID_ARGS;
- }
+ ma_vorbis_decoder* pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder;
+ MA_ASSERT(pVorbis != NULL);
- return ma_rb_commit_read(&pRB->rb, sizeInFrames * ma_pcm_rb_get_bpf(pRB), pBufferOut);
+ return ma_vorbis_decoder_seek_to_pcm_frame(pVorbis, pDecoder, frameIndex);
}
-ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut)
+static ma_result ma_decoder_internal_on_uninit__vorbis(ma_decoder* pDecoder)
{
- size_t sizeInBytes;
- ma_result result;
-
- if (pRB == NULL) {
- return MA_INVALID_ARGS;
- }
-
- sizeInBytes = *pSizeInFrames * ma_pcm_rb_get_bpf(pRB);
+ ma_vorbis_decoder* pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder;
+ MA_ASSERT(pVorbis != NULL);
- result = ma_rb_acquire_write(&pRB->rb, &sizeInBytes, ppBufferOut);
- if (result != MA_SUCCESS) {
- return result;
- }
+ stb_vorbis_close(pVorbis->pInternalVorbis);
+ ma__free_from_callbacks(pVorbis->pData, &pDecoder->allocationCallbacks);
+ ma__free_from_callbacks(pVorbis, &pDecoder->allocationCallbacks);
- *pSizeInFrames = (ma_uint32)(sizeInBytes / ma_pcm_rb_get_bpf(pRB));
return MA_SUCCESS;
}
-ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut)
+static ma_uint64 ma_decoder_internal_on_read_pcm_frames__vorbis(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount)
{
- if (pRB == NULL) {
- return MA_INVALID_ARGS;
- }
+ ma_vorbis_decoder* pVorbis;
- return ma_rb_commit_write(&pRB->rb, sizeInFrames * ma_pcm_rb_get_bpf(pRB), pBufferOut);
-}
+ MA_ASSERT(pDecoder != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+ MA_ASSERT(pDecoder->internalFormat == ma_format_f32);
-ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames)
-{
- if (pRB == NULL) {
- return MA_INVALID_ARGS;
- }
+ pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder;
+ MA_ASSERT(pVorbis != NULL);
- return ma_rb_seek_read(&pRB->rb, offsetInFrames * ma_pcm_rb_get_bpf(pRB));
+ return ma_vorbis_decoder_read_pcm_frames(pVorbis, pDecoder, pFramesOut, frameCount);
}
-ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames)
+static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__vorbis(ma_decoder* pDecoder)
{
- if (pRB == NULL) {
- return MA_INVALID_ARGS;
- }
-
- return ma_rb_seek_write(&pRB->rb, offsetInFrames * ma_pcm_rb_get_bpf(pRB));
+ /* No good way to do this with Vorbis. */
+ (void)pDecoder;
+ return 0;
}
-ma_int32 ma_pcm_rb_pointer_disance(ma_pcm_rb* pRB)
+static ma_result ma_decoder_init_vorbis__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- if (pRB == NULL) {
- return MA_INVALID_ARGS;
- }
+ stb_vorbis* pInternalVorbis = NULL;
+ size_t dataSize = 0;
+ size_t dataCapacity = 0;
+ ma_uint8* pData = NULL;
+ stb_vorbis_info vorbisInfo;
+ size_t vorbisDataSize;
+ ma_vorbis_decoder* pVorbis;
- return ma_rb_pointer_distance(&pRB->rb) / ma_pcm_rb_get_bpf(pRB);
-}
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(pDecoder != NULL);
-ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB)
-{
- if (pRB == NULL) {
- return 0;
- }
+ /* We grow the buffer in chunks. */
+ do
+ {
+ /* Allocate memory for a new chunk. */
+ ma_uint8* pNewData;
+ size_t bytesRead;
+ int vorbisError = 0;
+ int consumedDataSize = 0;
+ size_t oldCapacity = dataCapacity;
- return (ma_uint32)(ma_rb_get_subbuffer_size(&pRB->rb) / ma_pcm_rb_get_bpf(pRB));
-}
+ dataCapacity += MA_VORBIS_DATA_CHUNK_SIZE;
+ pNewData = (ma_uint8*)ma__realloc_from_callbacks(pData, dataCapacity, oldCapacity, &pDecoder->allocationCallbacks);
+ if (pNewData == NULL) {
+ ma__free_from_callbacks(pData, &pDecoder->allocationCallbacks);
+ return MA_OUT_OF_MEMORY;
+ }
-ma_uint32 ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB)
-{
- if (pRB == NULL) {
- return 0;
- }
+ pData = pNewData;
- return (ma_uint32)(ma_rb_get_subbuffer_stride(&pRB->rb) / ma_pcm_rb_get_bpf(pRB));
-}
+ /* Fill in a chunk. */
+ bytesRead = ma_decoder_read_bytes(pDecoder, pData + dataSize, (dataCapacity - dataSize));
+ if (bytesRead == 0) {
+ return MA_ERROR;
+ }
-ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex)
-{
- if (pRB == NULL) {
- return 0;
- }
+ dataSize += bytesRead;
+ if (dataSize > INT_MAX) {
+ return MA_ERROR; /* Too big. */
+ }
- return (ma_uint32)(ma_rb_get_subbuffer_offset(&pRB->rb, subbufferIndex) / ma_pcm_rb_get_bpf(pRB));
-}
+ pInternalVorbis = stb_vorbis_open_pushdata(pData, (int)dataSize, &consumedDataSize, &vorbisError, NULL);
+ if (pInternalVorbis != NULL) {
+ /*
+ If we get here it means we were able to open the stb_vorbis decoder. There may be some leftover bytes in our buffer, so
+ we need to move those bytes down to the front of the buffer since they'll be needed for future decoding.
+ */
+ size_t leftoverDataSize = (dataSize - (size_t)consumedDataSize);
+ size_t i;
+ for (i = 0; i < leftoverDataSize; ++i) {
+ pData[i] = pData[i + consumedDataSize];
+ }
-void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer)
-{
- if (pRB == NULL) {
- return NULL;
- }
+ dataSize = leftoverDataSize;
+ break; /* Success. */
+ } else {
+ if (vorbisError == VORBIS_need_more_data) {
+ continue;
+ } else {
+ return MA_ERROR; /* Failed to open the stb_vorbis decoder. */
+ }
+ }
+ } while (MA_TRUE);
- return ma_rb_get_subbuffer_ptr(&pRB->rb, subbufferIndex, pBuffer);
-}
+ /* If we get here it means we successfully opened the Vorbis decoder. */
+ vorbisInfo = stb_vorbis_get_info(pInternalVorbis);
+ /* Don't allow more than MA_MAX_CHANNELS channels. */
+ if (vorbisInfo.channels > MA_MAX_CHANNELS) {
+ stb_vorbis_close(pInternalVorbis);
+ ma__free_from_callbacks(pData, &pDecoder->allocationCallbacks);
+ return MA_ERROR; /* Too many channels. */
+ }
-/**************************************************************************************************************************************************************
+ vorbisDataSize = sizeof(ma_vorbis_decoder) + sizeof(float)*vorbisInfo.max_frame_size;
+ pVorbis = (ma_vorbis_decoder*)ma__malloc_from_callbacks(vorbisDataSize, &pDecoder->allocationCallbacks);
+ if (pVorbis == NULL) {
+ stb_vorbis_close(pInternalVorbis);
+ ma__free_from_callbacks(pData, &pDecoder->allocationCallbacks);
+ return MA_OUT_OF_MEMORY;
+ }
-Miscellaneous Helpers
+ MA_ZERO_MEMORY(pVorbis, vorbisDataSize);
+ pVorbis->pInternalVorbis = pInternalVorbis;
+ pVorbis->pData = pData;
+ pVorbis->dataSize = dataSize;
+ pVorbis->dataCapacity = dataCapacity;
-**************************************************************************************************************************************************************/
-void* ma_malloc(size_t sz)
-{
- return MA_MALLOC(sz);
+ pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__vorbis;
+ pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__vorbis;
+ pDecoder->onUninit = ma_decoder_internal_on_uninit__vorbis;
+ pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__vorbis;
+ pDecoder->pInternalDecoder = pVorbis;
+
+ /* The internal format is always f32. */
+ pDecoder->internalFormat = ma_format_f32;
+ pDecoder->internalChannels = vorbisInfo.channels;
+ pDecoder->internalSampleRate = vorbisInfo.sample_rate;
+ ma_get_standard_channel_map(ma_standard_channel_map_vorbis, pDecoder->internalChannels, pDecoder->internalChannelMap);
+
+ return MA_SUCCESS;
}
+#endif /* STB_VORBIS_INCLUDE_STB_VORBIS_H */
+
+/* MP3 */
+#ifdef dr_mp3_h
+#define MA_HAS_MP3
-void* ma_realloc(void* p, size_t sz)
+static size_t ma_decoder_internal_on_read__mp3(void* pUserData, void* pBufferOut, size_t bytesToRead)
{
- return MA_REALLOC(p, sz);
+ ma_decoder* pDecoder = (ma_decoder*)pUserData;
+ MA_ASSERT(pDecoder != NULL);
+
+ return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead);
}
-void ma_free(void* p)
+static drmp3_bool32 ma_decoder_internal_on_seek__mp3(void* pUserData, int offset, drmp3_seek_origin origin)
{
- MA_FREE(p);
+ ma_decoder* pDecoder = (ma_decoder*)pUserData;
+ MA_ASSERT(pDecoder != NULL);
+
+ return ma_decoder_seek_bytes(pDecoder, offset, (origin == drmp3_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current);
}
-void* ma_aligned_malloc(size_t sz, size_t alignment)
+static ma_uint64 ma_decoder_internal_on_read_pcm_frames__mp3(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount)
{
- size_t extraBytes;
- void* pUnaligned;
- void* pAligned;
+ drmp3* pMP3;
- if (alignment == 0) {
- return 0;
- }
+ MA_ASSERT(pDecoder != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+ MA_ASSERT(pDecoder->internalFormat == ma_format_f32);
- extraBytes = alignment-1 + sizeof(void*);
+ pMP3 = (drmp3*)pDecoder->pInternalDecoder;
+ MA_ASSERT(pMP3 != NULL);
- pUnaligned = ma_malloc(sz + extraBytes);
- if (pUnaligned == NULL) {
- return NULL;
- }
+ return drmp3_read_pcm_frames_f32(pMP3, frameCount, (float*)pFramesOut);
+}
- pAligned = (void*)(((ma_uintptr)pUnaligned + extraBytes) & ~((ma_uintptr)(alignment-1)));
- ((void**)pAligned)[-1] = pUnaligned;
+static ma_result ma_decoder_internal_on_seek_to_pcm_frame__mp3(ma_decoder* pDecoder, ma_uint64 frameIndex)
+{
+ drmp3* pMP3;
+ drmp3_bool32 result;
- return pAligned;
+ pMP3 = (drmp3*)pDecoder->pInternalDecoder;
+ MA_ASSERT(pMP3 != NULL);
+
+ result = drmp3_seek_to_pcm_frame(pMP3, frameIndex);
+ if (result) {
+ return MA_SUCCESS;
+ } else {
+ return MA_ERROR;
+ }
}
-void ma_aligned_free(void* p)
+static ma_result ma_decoder_internal_on_uninit__mp3(ma_decoder* pDecoder)
{
- ma_free(((void**)p)[-1]);
+ drmp3_uninit((drmp3*)pDecoder->pInternalDecoder);
+ ma__free_from_callbacks(pDecoder->pInternalDecoder, &pDecoder->allocationCallbacks);
+ return MA_SUCCESS;
}
-const char* ma_get_format_name(ma_format format)
+static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__mp3(ma_decoder* pDecoder)
{
- switch (format)
- {
- case ma_format_unknown: return "Unknown";
- case ma_format_u8: return "8-bit Unsigned Integer";
- case ma_format_s16: return "16-bit Signed Integer";
- case ma_format_s24: return "24-bit Signed Integer (Tightly Packed)";
- case ma_format_s32: return "32-bit Signed Integer";
- case ma_format_f32: return "32-bit IEEE Floating Point";
- default: return "Invalid";
- }
+ return drmp3_get_pcm_frame_count((drmp3*)pDecoder->pInternalDecoder);
}
-void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels)
+static ma_result ma_decoder_init_mp3__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_uint32 i;
- for (i = 0; i < channels; ++i) {
- pOut[i] = ma_mix_f32(pInA[i], pInB[i], factor);
- }
-}
+ drmp3* pMP3;
+ drmp3_config mp3Config;
+ drmp3_allocation_callbacks allocationCallbacks;
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(pDecoder != NULL);
-ma_uint32 ma_get_bytes_per_sample(ma_format format)
-{
- ma_uint32 sizes[] = {
- 0, /* unknown */
- 1, /* u8 */
- 2, /* s16 */
- 3, /* s24 */
- 4, /* s32 */
- 4, /* f32 */
- };
- return sizes[format];
-}
+ pMP3 = (drmp3*)ma__malloc_from_callbacks(sizeof(*pMP3), &pDecoder->allocationCallbacks);
+ if (pMP3 == NULL) {
+ return MA_OUT_OF_MEMORY;
+ }
+ allocationCallbacks.pUserData = pDecoder->allocationCallbacks.pUserData;
+ allocationCallbacks.onMalloc = pDecoder->allocationCallbacks.onMalloc;
+ allocationCallbacks.onRealloc = pDecoder->allocationCallbacks.onRealloc;
+ allocationCallbacks.onFree = pDecoder->allocationCallbacks.onFree;
+
+ /*
+ Try opening the decoder first. MP3 can have variable sample rates (it's per frame/packet). We therefore need
+ to use some smarts to determine the most appropriate internal sample rate. These are the rules we're going
+ to use:
+
+ Sample Rates
+ 1) If an output sample rate is specified in pConfig we just use that. Otherwise;
+ 2) Fall back to 44100.
+
+ The internal channel count is always stereo, and the internal format is always f32.
+ */
+ MA_ZERO_OBJECT(&mp3Config);
+ mp3Config.outputChannels = 2;
+ mp3Config.outputSampleRate = (pConfig->sampleRate != 0) ? pConfig->sampleRate : 44100;
+ if (!drmp3_init(pMP3, ma_decoder_internal_on_read__mp3, ma_decoder_internal_on_seek__mp3, pDecoder, &mp3Config, &allocationCallbacks)) {
+ ma__free_from_callbacks(pMP3, &pDecoder->allocationCallbacks);
+ return MA_ERROR;
+ }
-/**************************************************************************************************************************************************************
+ /* If we get here it means we successfully initialized the MP3 decoder. We can now initialize the rest of the ma_decoder. */
+ pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__mp3;
+ pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__mp3;
+ pDecoder->onUninit = ma_decoder_internal_on_uninit__mp3;
+ pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__mp3;
+ pDecoder->pInternalDecoder = pMP3;
-Decoding
+ /* Internal format. */
+ pDecoder->internalFormat = ma_format_f32;
+ pDecoder->internalChannels = pMP3->channels;
+ pDecoder->internalSampleRate = pMP3->sampleRate;
+ ma_get_standard_channel_map(ma_standard_channel_map_default, pDecoder->internalChannels, pDecoder->internalChannelMap);
-**************************************************************************************************************************************************************/
-#ifndef MA_NO_DECODING
+ return MA_SUCCESS;
+}
+#endif /* dr_mp3_h */
-size_t ma_decoder_read_bytes(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead)
+/* Raw */
+static ma_uint64 ma_decoder_internal_on_read_pcm_frames__raw(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount)
{
- size_t bytesRead;
+ ma_uint32 bpf;
+ ma_uint64 totalFramesRead;
+ void* pRunningFramesOut;
- ma_assert(pDecoder != NULL);
- ma_assert(pBufferOut != NULL);
- bytesRead = pDecoder->onRead(pDecoder, pBufferOut, bytesToRead);
- pDecoder->readPointer += bytesRead;
+ MA_ASSERT(pDecoder != NULL);
+ MA_ASSERT(pFramesOut != NULL);
- return bytesRead;
-}
+ /* For raw decoding we just read directly from the decoder's callbacks. */
+ bpf = ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels);
-ma_bool32 ma_decoder_seek_bytes(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin)
-{
- ma_bool32 wasSuccessful;
+ totalFramesRead = 0;
+ pRunningFramesOut = pFramesOut;
- ma_assert(pDecoder != NULL);
+ while (totalFramesRead < frameCount) {
+ ma_uint64 framesReadThisIteration;
+ ma_uint64 framesToReadThisIteration = (frameCount - totalFramesRead);
+ if (framesToReadThisIteration > MA_SIZE_MAX) {
+ framesToReadThisIteration = MA_SIZE_MAX;
+ }
- wasSuccessful = pDecoder->onSeek(pDecoder, byteOffset, origin);
- if (wasSuccessful) {
- if (origin == ma_seek_origin_start) {
- pDecoder->readPointer = (ma_uint64)byteOffset;
- } else {
- pDecoder->readPointer += byteOffset;
+ framesReadThisIteration = ma_decoder_read_bytes(pDecoder, pRunningFramesOut, (size_t)framesToReadThisIteration * bpf) / bpf; /* Safe cast to size_t. */
+
+ totalFramesRead += framesReadThisIteration;
+ pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesReadThisIteration * bpf);
+
+ if (framesReadThisIteration < framesToReadThisIteration) {
+ break; /* Done. */
}
}
- return wasSuccessful;
+ return totalFramesRead;
}
-ma_bool32 ma_decoder_seek_bytes_64(ma_decoder* pDecoder, ma_uint64 byteOffset, ma_seek_origin origin)
+static ma_result ma_decoder_internal_on_seek_to_pcm_frame__raw(ma_decoder* pDecoder, ma_uint64 frameIndex)
{
- ma_assert(pDecoder != NULL);
-
- if (origin == ma_seek_origin_start) {
- ma_uint64 bytesToSeekThisIteration = 0x7FFFFFFF;
- if (bytesToSeekThisIteration > byteOffset) {
- bytesToSeekThisIteration = byteOffset;
- }
+ ma_bool32 result = MA_FALSE;
+ ma_uint64 totalBytesToSeek;
- if (!ma_decoder_seek_bytes(pDecoder, (int)bytesToSeekThisIteration, ma_seek_origin_start)) {
- return MA_FALSE;
- }
+ MA_ASSERT(pDecoder != NULL);
- byteOffset -= bytesToSeekThisIteration;
+ if (pDecoder->onSeek == NULL) {
+ return MA_ERROR;
}
- /* Getting here means we need to seek relative to the current position. */
- while (byteOffset > 0) {
- ma_uint64 bytesToSeekThisIteration = 0x7FFFFFFF;
- if (bytesToSeekThisIteration > byteOffset) {
- bytesToSeekThisIteration = byteOffset;
- }
+ /* The callback uses a 32 bit integer whereas we use a 64 bit unsigned integer. We just need to continuously seek until we're at the correct position. */
+ totalBytesToSeek = frameIndex * ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels);
+ if (totalBytesToSeek < 0x7FFFFFFF) {
+ /* Simple case. */
+ result = ma_decoder_seek_bytes(pDecoder, (int)(frameIndex * ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels)), ma_seek_origin_start);
+ } else {
+ /* Complex case. Start by doing a seek relative to the start. Then keep looping using offset seeking. */
+ result = ma_decoder_seek_bytes(pDecoder, 0x7FFFFFFF, ma_seek_origin_start);
+ if (result == MA_TRUE) {
+ totalBytesToSeek -= 0x7FFFFFFF;
- if (!ma_decoder_seek_bytes(pDecoder, (int)bytesToSeekThisIteration, ma_seek_origin_current)) {
- return MA_FALSE;
+ while (totalBytesToSeek > 0) {
+ ma_uint64 bytesToSeekThisIteration = totalBytesToSeek;
+ if (bytesToSeekThisIteration > 0x7FFFFFFF) {
+ bytesToSeekThisIteration = 0x7FFFFFFF;
+ }
+
+ result = ma_decoder_seek_bytes(pDecoder, (int)bytesToSeekThisIteration, ma_seek_origin_current);
+ if (result != MA_TRUE) {
+ break;
+ }
+
+ totalBytesToSeek -= bytesToSeekThisIteration;
+ }
}
+ }
- byteOffset -= bytesToSeekThisIteration;
+ if (result) {
+ return MA_SUCCESS;
+ } else {
+ return MA_ERROR;
}
+}
- return MA_TRUE;
+static ma_result ma_decoder_internal_on_uninit__raw(ma_decoder* pDecoder)
+{
+ (void)pDecoder;
+ return MA_SUCCESS;
}
+static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__raw(ma_decoder* pDecoder)
+{
+ (void)pDecoder;
+ return 0;
+}
-ma_decoder_config ma_decoder_config_init(ma_format outputFormat, ma_uint32 outputChannels, ma_uint32 outputSampleRate)
+static ma_result ma_decoder_init_raw__internal(const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder)
{
- ma_decoder_config config;
- ma_zero_object(&config);
- config.format = outputFormat;
- config.channels = outputChannels;
- config.sampleRate = outputSampleRate;
- ma_get_standard_channel_map(ma_standard_channel_map_default, config.channels, config.channelMap);
+ MA_ASSERT(pConfigIn != NULL);
+ MA_ASSERT(pConfigOut != NULL);
+ MA_ASSERT(pDecoder != NULL);
- return config;
+ pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__raw;
+ pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__raw;
+ pDecoder->onUninit = ma_decoder_internal_on_uninit__raw;
+ pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__raw;
+
+ /* Internal format. */
+ pDecoder->internalFormat = pConfigIn->format;
+ pDecoder->internalChannels = pConfigIn->channels;
+ pDecoder->internalSampleRate = pConfigIn->sampleRate;
+ ma_channel_map_copy(pDecoder->internalChannelMap, pConfigIn->channelMap, pConfigIn->channels);
+
+ return MA_SUCCESS;
}
-ma_decoder_config ma_decoder_config_init_copy(const ma_decoder_config* pConfig)
+static ma_result ma_decoder__init_allocation_callbacks(const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_decoder_config config;
+ MA_ASSERT(pDecoder != NULL);
+
if (pConfig != NULL) {
- config = *pConfig;
+ return ma_allocation_callbacks_init_copy(&pDecoder->allocationCallbacks, &pConfig->allocationCallbacks);
} else {
- ma_zero_object(&config);
+ pDecoder->allocationCallbacks = ma_allocation_callbacks_init_default();
+ return MA_SUCCESS;
}
-
- return config;
}
-ma_result ma_decoder__init_dsp(ma_decoder* pDecoder, const ma_decoder_config* pConfig, ma_pcm_converter_read_proc onRead)
+static ma_result ma_decoder__preinit(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_pcm_converter_config dspConfig;
-
- ma_assert(pDecoder != NULL);
+ ma_result result;
- /* Output format. */
- if (pConfig->format == ma_format_unknown) {
- pDecoder->outputFormat = pDecoder->internalFormat;
- } else {
- pDecoder->outputFormat = pConfig->format;
- }
+ MA_ASSERT(pConfig != NULL);
- if (pConfig->channels == 0) {
- pDecoder->outputChannels = pDecoder->internalChannels;
- } else {
- pDecoder->outputChannels = pConfig->channels;
+ if (pDecoder == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pConfig->sampleRate == 0) {
- pDecoder->outputSampleRate = pDecoder->internalSampleRate;
- } else {
- pDecoder->outputSampleRate = pConfig->sampleRate;
- }
+ MA_ZERO_OBJECT(pDecoder);
- if (ma_channel_map_blank(pDecoder->outputChannels, pConfig->channelMap)) {
- ma_get_standard_channel_map(ma_standard_channel_map_default, pDecoder->outputChannels, pDecoder->outputChannelMap);
- } else {
- ma_copy_memory(pDecoder->outputChannelMap, pConfig->channelMap, sizeof(pConfig->channelMap));
+ if (onRead == NULL || onSeek == NULL) {
+ return MA_INVALID_ARGS;
}
+ pDecoder->onRead = onRead;
+ pDecoder->onSeek = onSeek;
+ pDecoder->pUserData = pUserData;
- /* DSP. */
- dspConfig = ma_pcm_converter_config_init_ex(
- pDecoder->internalFormat, pDecoder->internalChannels, pDecoder->internalSampleRate, pDecoder->internalChannelMap,
- pDecoder->outputFormat, pDecoder->outputChannels, pDecoder->outputSampleRate, pDecoder->outputChannelMap,
- onRead, pDecoder);
- dspConfig.channelMixMode = pConfig->channelMixMode;
- dspConfig.ditherMode = pConfig->ditherMode;
- dspConfig.srcAlgorithm = pConfig->srcAlgorithm;
- dspConfig.sinc = pConfig->src.sinc;
+ result = ma_decoder__init_allocation_callbacks(pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- return ma_pcm_converter_init(&dspConfig, &pDecoder->dsp);
+ return MA_SUCCESS;
}
-/* WAV */
-#ifdef dr_wav_h
-#define MA_HAS_WAV
-
-size_t ma_decoder_internal_on_read__wav(void* pUserData, void* pBufferOut, size_t bytesToRead)
+static ma_result ma_decoder__postinit(const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_decoder* pDecoder = (ma_decoder*)pUserData;
- ma_assert(pDecoder != NULL);
-
- return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead);
-}
+ ma_result result;
-drwav_bool32 ma_decoder_internal_on_seek__wav(void* pUserData, int offset, drwav_seek_origin origin)
-{
- ma_decoder* pDecoder = (ma_decoder*)pUserData;
- ma_assert(pDecoder != NULL);
+ result = ma_decoder__init_data_converter(pDecoder, pConfig);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- return ma_decoder_seek_bytes(pDecoder, offset, (origin == drwav_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current);
+ return result;
}
-ma_uint32 ma_decoder_internal_on_read_pcm_frames__wav(ma_pcm_converter* pDSP, void* pSamplesOut, ma_uint32 frameCount, void* pUserData)
+ma_result ma_decoder_init_wav(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_decoder* pDecoder;
- drwav* pWav;
-
- (void)pDSP;
+ ma_decoder_config config;
+ ma_result result;
- pDecoder = (ma_decoder*)pUserData;
- ma_assert(pDecoder != NULL);
+ config = ma_decoder_config_init_copy(pConfig);
- pWav = (drwav*)pDecoder->pInternalDecoder;
- ma_assert(pWav != NULL);
+ result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- switch (pDecoder->internalFormat) {
- case ma_format_s16: return (ma_uint32)drwav_read_pcm_frames_s16(pWav, frameCount, (drwav_int16*)pSamplesOut);
- case ma_format_s32: return (ma_uint32)drwav_read_pcm_frames_s32(pWav, frameCount, (drwav_int32*)pSamplesOut);
- case ma_format_f32: return (ma_uint32)drwav_read_pcm_frames_f32(pWav, frameCount, (float*)pSamplesOut);
- default: break;
+#ifdef MA_HAS_WAV
+ result = ma_decoder_init_wav__internal(&config, pDecoder);
+#else
+ result = MA_NO_BACKEND;
+#endif
+ if (result != MA_SUCCESS) {
+ return result;
}
- /* Should never get here. If we do, it means the internal format was not set correctly at initialization time. */
- ma_assert(MA_FALSE);
- return 0;
+ return ma_decoder__postinit(&config, pDecoder);
}
-ma_result ma_decoder_internal_on_seek_to_pcm_frame__wav(ma_decoder* pDecoder, ma_uint64 frameIndex)
+ma_result ma_decoder_init_flac(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- drwav* pWav;
- drwav_bool32 result;
+ ma_decoder_config config;
+ ma_result result;
- pWav = (drwav*)pDecoder->pInternalDecoder;
- ma_assert(pWav != NULL);
+ config = ma_decoder_config_init_copy(pConfig);
- result = drwav_seek_to_pcm_frame(pWav, frameIndex);
- if (result) {
- return MA_SUCCESS;
- } else {
- return MA_ERROR;
+ result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
}
-}
-ma_result ma_decoder_internal_on_uninit__wav(ma_decoder* pDecoder)
-{
- drwav_close((drwav*)pDecoder->pInternalDecoder);
- return MA_SUCCESS;
-}
+#ifdef MA_HAS_FLAC
+ result = ma_decoder_init_flac__internal(&config, pDecoder);
+#else
+ result = MA_NO_BACKEND;
+#endif
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__wav(ma_decoder* pDecoder)
-{
- return ((drwav*)pDecoder->pInternalDecoder)->totalPCMFrameCount;
+ return ma_decoder__postinit(&config, pDecoder);
}
-ma_result ma_decoder_init_wav__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_decoder_init_vorbis(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- drwav* pWav;
+ ma_decoder_config config;
ma_result result;
- ma_assert(pConfig != NULL);
- ma_assert(pDecoder != NULL);
-
- /* Try opening the decoder first. */
- pWav = drwav_open(ma_decoder_internal_on_read__wav, ma_decoder_internal_on_seek__wav, pDecoder);
- if (pWav == NULL) {
- return MA_ERROR;
- }
-
- /* If we get here it means we successfully initialized the WAV decoder. We can now initialize the rest of the ma_decoder. */
- pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__wav;
- pDecoder->onUninit = ma_decoder_internal_on_uninit__wav;
- pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__wav;
- pDecoder->pInternalDecoder = pWav;
-
- /* Try to be as optimal as possible for the internal format. If miniaudio does not support a format we will fall back to f32. */
- pDecoder->internalFormat = ma_format_unknown;
- switch (pWav->translatedFormatTag) {
- case DR_WAVE_FORMAT_PCM:
- {
- if (pWav->bitsPerSample == 8) {
- pDecoder->internalFormat = ma_format_s16;
- } else if (pWav->bitsPerSample == 16) {
- pDecoder->internalFormat = ma_format_s16;
- } else if (pWav->bitsPerSample == 32) {
- pDecoder->internalFormat = ma_format_s32;
- }
- } break;
-
- case DR_WAVE_FORMAT_IEEE_FLOAT:
- {
- if (pWav->bitsPerSample == 32) {
- pDecoder->internalFormat = ma_format_f32;
- }
- } break;
-
- case DR_WAVE_FORMAT_ALAW:
- case DR_WAVE_FORMAT_MULAW:
- case DR_WAVE_FORMAT_ADPCM:
- case DR_WAVE_FORMAT_DVI_ADPCM:
- {
- pDecoder->internalFormat = ma_format_s16;
- } break;
- }
+ config = ma_decoder_config_init_copy(pConfig);
- if (pDecoder->internalFormat == ma_format_unknown) {
- pDecoder->internalFormat = ma_format_f32;
+ result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
}
- pDecoder->internalChannels = pWav->channels;
- pDecoder->internalSampleRate = pWav->sampleRate;
- ma_get_standard_channel_map(ma_standard_channel_map_microsoft, pDecoder->internalChannels, pDecoder->internalChannelMap);
-
- result = ma_decoder__init_dsp(pDecoder, pConfig, ma_decoder_internal_on_read_pcm_frames__wav);
+#ifdef MA_HAS_VORBIS
+ result = ma_decoder_init_vorbis__internal(&config, pDecoder);
+#else
+ result = MA_NO_BACKEND;
+#endif
if (result != MA_SUCCESS) {
- drwav_close(pWav);
return result;
}
- return MA_SUCCESS;
+ return ma_decoder__postinit(&config, pDecoder);
}
-#endif
-
-/* FLAC */
-#ifdef dr_flac_h
-#define MA_HAS_FLAC
-size_t ma_decoder_internal_on_read__flac(void* pUserData, void* pBufferOut, size_t bytesToRead)
+ma_result ma_decoder_init_mp3(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_decoder* pDecoder = (ma_decoder*)pUserData;
- ma_assert(pDecoder != NULL);
+ ma_decoder_config config;
+ ma_result result;
- return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead);
-}
+ config = ma_decoder_config_init_copy(pConfig);
-drflac_bool32 ma_decoder_internal_on_seek__flac(void* pUserData, int offset, drflac_seek_origin origin)
-{
- ma_decoder* pDecoder = (ma_decoder*)pUserData;
- ma_assert(pDecoder != NULL);
+ result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- return ma_decoder_seek_bytes(pDecoder, offset, (origin == drflac_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current);
+#ifdef MA_HAS_MP3
+ result = ma_decoder_init_mp3__internal(&config, pDecoder);
+#else
+ result = MA_NO_BACKEND;
+#endif
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ return ma_decoder__postinit(&config, pDecoder);
}
-ma_uint32 ma_decoder_internal_on_read_pcm_frames__flac(ma_pcm_converter* pDSP, void* pSamplesOut, ma_uint32 frameCount, void* pUserData)
+ma_result ma_decoder_init_raw(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder)
{
- ma_decoder* pDecoder;
- drflac* pFlac;
-
- (void)pDSP;
+ ma_decoder_config config;
+ ma_result result;
- pDecoder = (ma_decoder*)pUserData;
- ma_assert(pDecoder != NULL);
+ config = ma_decoder_config_init_copy(pConfigOut);
- pFlac = (drflac*)pDecoder->pInternalDecoder;
- ma_assert(pFlac != NULL);
+ result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- switch (pDecoder->internalFormat) {
- case ma_format_s16: return (ma_uint32)drflac_read_pcm_frames_s16(pFlac, frameCount, (drflac_int16*)pSamplesOut);
- case ma_format_s32: return (ma_uint32)drflac_read_pcm_frames_s32(pFlac, frameCount, (drflac_int32*)pSamplesOut);
- case ma_format_f32: return (ma_uint32)drflac_read_pcm_frames_f32(pFlac, frameCount, (float*)pSamplesOut);
- default: break;
+ result = ma_decoder_init_raw__internal(pConfigIn, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
}
- /* Should never get here. If we do, it means the internal format was not set correctly at initialization time. */
- ma_assert(MA_FALSE);
- return 0;
+ return ma_decoder__postinit(&config, pDecoder);
}
-ma_result ma_decoder_internal_on_seek_to_pcm_frame__flac(ma_decoder* pDecoder, ma_uint64 frameIndex)
+static ma_result ma_decoder_init__internal(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- drflac* pFlac;
- drflac_bool32 result;
+ ma_result result = MA_NO_BACKEND;
- pFlac = (drflac*)pDecoder->pInternalDecoder;
- ma_assert(pFlac != NULL);
+ MA_ASSERT(pConfig != NULL);
+ MA_ASSERT(pDecoder != NULL);
- result = drflac_seek_to_pcm_frame(pFlac, frameIndex);
- if (result) {
- return MA_SUCCESS;
- } else {
- return MA_ERROR;
+ /* Silence some warnings in the case that we don't have any decoder backends enabled. */
+ (void)onRead;
+ (void)onSeek;
+ (void)pUserData;
+ (void)pConfig;
+ (void)pDecoder;
+
+ /* We use trial and error to open a decoder. */
+
+#ifdef MA_HAS_WAV
+ if (result != MA_SUCCESS) {
+ result = ma_decoder_init_wav__internal(pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ onSeek(pDecoder, 0, ma_seek_origin_start);
+ }
}
-}
+#endif
+#ifdef MA_HAS_FLAC
+ if (result != MA_SUCCESS) {
+ result = ma_decoder_init_flac__internal(pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ onSeek(pDecoder, 0, ma_seek_origin_start);
+ }
+ }
+#endif
+#ifdef MA_HAS_VORBIS
+ if (result != MA_SUCCESS) {
+ result = ma_decoder_init_vorbis__internal(pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ onSeek(pDecoder, 0, ma_seek_origin_start);
+ }
+ }
+#endif
+#ifdef MA_HAS_MP3
+ if (result != MA_SUCCESS) {
+ result = ma_decoder_init_mp3__internal(pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ onSeek(pDecoder, 0, ma_seek_origin_start);
+ }
+ }
+#endif
-ma_result ma_decoder_internal_on_uninit__flac(ma_decoder* pDecoder)
-{
- drflac_close((drflac*)pDecoder->pInternalDecoder);
- return MA_SUCCESS;
-}
+ if (result != MA_SUCCESS) {
+ return result;
+ }
-ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__flac(ma_decoder* pDecoder)
-{
- return ((drflac*)pDecoder->pInternalDecoder)->totalPCMFrameCount;
+ return ma_decoder__postinit(pConfig, pDecoder);
}
-ma_result ma_decoder_init_flac__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- drflac* pFlac;
+ ma_decoder_config config;
ma_result result;
- ma_assert(pConfig != NULL);
- ma_assert(pDecoder != NULL);
-
- /* Try opening the decoder first. */
- pFlac = drflac_open(ma_decoder_internal_on_read__flac, ma_decoder_internal_on_seek__flac, pDecoder);
- if (pFlac == NULL) {
- return MA_ERROR;
- }
-
- /* If we get here it means we successfully initialized the FLAC decoder. We can now initialize the rest of the ma_decoder. */
- pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__flac;
- pDecoder->onUninit = ma_decoder_internal_on_uninit__flac;
- pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__flac;
- pDecoder->pInternalDecoder = pFlac;
-
- /*
- dr_flac supports reading as s32, s16 and f32. Try to do a one-to-one mapping if possible, but fall back to s32 if not. s32 is the "native" FLAC format
- since it's the only one that's truly lossless.
- */
- pDecoder->internalFormat = ma_format_s32;
- if (pConfig->format == ma_format_s16) {
- pDecoder->internalFormat = ma_format_s16;
- } else if (pConfig->format == ma_format_f32) {
- pDecoder->internalFormat = ma_format_f32;
- }
-
- pDecoder->internalChannels = pFlac->channels;
- pDecoder->internalSampleRate = pFlac->sampleRate;
- ma_get_standard_channel_map(ma_standard_channel_map_flac, pDecoder->internalChannels, pDecoder->internalChannelMap);
+ config = ma_decoder_config_init_copy(pConfig);
- result = ma_decoder__init_dsp(pDecoder, pConfig, ma_decoder_internal_on_read_pcm_frames__flac);
+ result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
if (result != MA_SUCCESS) {
- drflac_close(pFlac);
return result;
}
- return MA_SUCCESS;
+ return ma_decoder_init__internal(onRead, onSeek, pUserData, &config, pDecoder);
}
-#endif
-
-/* Vorbis */
-#ifdef STB_VORBIS_INCLUDE_STB_VORBIS_H
-#define MA_HAS_VORBIS
-/* The size in bytes of each chunk of data to read from the Vorbis stream. */
-#define MA_VORBIS_DATA_CHUNK_SIZE 4096
-typedef struct
+static size_t ma_decoder__on_read_memory(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead)
{
- stb_vorbis* pInternalVorbis;
- ma_uint8* pData;
- size_t dataSize;
- size_t dataCapacity;
- ma_uint32 framesConsumed; /* The number of frames consumed in ppPacketData. */
- ma_uint32 framesRemaining; /* The number of frames remaining in ppPacketData. */
- float** ppPacketData;
-} ma_vorbis_decoder;
+ size_t bytesRemaining;
-ma_uint32 ma_vorbis_decoder_read_pcm_frames(ma_vorbis_decoder* pVorbis, ma_decoder* pDecoder, void* pSamplesOut, ma_uint32 frameCount)
-{
- float* pSamplesOutF;
- ma_uint32 totalFramesRead;
+ MA_ASSERT(pDecoder->memory.dataSize >= pDecoder->memory.currentReadPos);
+
+ bytesRemaining = pDecoder->memory.dataSize - pDecoder->memory.currentReadPos;
+ if (bytesToRead > bytesRemaining) {
+ bytesToRead = bytesRemaining;
+ }
- ma_assert(pVorbis != NULL);
- ma_assert(pDecoder != NULL);
+ if (bytesToRead > 0) {
+ MA_COPY_MEMORY(pBufferOut, pDecoder->memory.pData + pDecoder->memory.currentReadPos, bytesToRead);
+ pDecoder->memory.currentReadPos += bytesToRead;
+ }
- pSamplesOutF = (float*)pSamplesOut;
+ return bytesToRead;
+}
- totalFramesRead = 0;
- while (frameCount > 0) {
- /* Read from the in-memory buffer first. */
- while (pVorbis->framesRemaining > 0 && frameCount > 0) {
- ma_uint32 iChannel;
- for (iChannel = 0; iChannel < pDecoder->internalChannels; ++iChannel) {
- pSamplesOutF[0] = pVorbis->ppPacketData[iChannel][pVorbis->framesConsumed];
- pSamplesOutF += 1;
+static ma_bool32 ma_decoder__on_seek_memory(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin)
+{
+ if (origin == ma_seek_origin_current) {
+ if (byteOffset > 0) {
+ if (pDecoder->memory.currentReadPos + byteOffset > pDecoder->memory.dataSize) {
+ byteOffset = (int)(pDecoder->memory.dataSize - pDecoder->memory.currentReadPos); /* Trying to seek too far forward. */
+ }
+ } else {
+ if (pDecoder->memory.currentReadPos < (size_t)-byteOffset) {
+ byteOffset = -(int)pDecoder->memory.currentReadPos; /* Trying to seek too far backwards. */
}
-
- pVorbis->framesConsumed += 1;
- pVorbis->framesRemaining -= 1;
- frameCount -= 1;
- totalFramesRead += 1;
}
- if (frameCount == 0) {
- break;
+ /* This will never underflow thanks to the clamps above. */
+ pDecoder->memory.currentReadPos += byteOffset;
+ } else {
+ if ((ma_uint32)byteOffset <= pDecoder->memory.dataSize) {
+ pDecoder->memory.currentReadPos = byteOffset;
+ } else {
+ pDecoder->memory.currentReadPos = pDecoder->memory.dataSize; /* Trying to seek too far forward. */
}
+ }
- ma_assert(pVorbis->framesRemaining == 0);
-
- /* We've run out of cached frames, so decode the next packet and continue iteration. */
- do
- {
- int samplesRead;
- int consumedDataSize;
+ return MA_TRUE;
+}
- if (pVorbis->dataSize > INT_MAX) {
- break; /* Too big. */
- }
+static ma_result ma_decoder__preinit_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+ ma_result result = ma_decoder__preinit(ma_decoder__on_read_memory, ma_decoder__on_seek_memory, NULL, pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- samplesRead = 0;
- consumedDataSize = stb_vorbis_decode_frame_pushdata(pVorbis->pInternalVorbis, pVorbis->pData, (int)pVorbis->dataSize, NULL, (float***)&pVorbis->ppPacketData, &samplesRead);
- if (consumedDataSize != 0) {
- size_t leftoverDataSize = (pVorbis->dataSize - (size_t)consumedDataSize);
- size_t i;
- for (i = 0; i < leftoverDataSize; ++i) {
- pVorbis->pData[i] = pVorbis->pData[i + consumedDataSize];
- }
+ if (pData == NULL || dataSize == 0) {
+ return MA_INVALID_ARGS;
+ }
- pVorbis->dataSize = leftoverDataSize;
- pVorbis->framesConsumed = 0;
- pVorbis->framesRemaining = samplesRead;
- break;
- } else {
- /* Need more data. If there's any room in the existing buffer allocation fill that first. Otherwise expand. */
- size_t bytesRead;
- if (pVorbis->dataCapacity == pVorbis->dataSize) {
- /* No room. Expand. */
- size_t newCap = pVorbis->dataCapacity + MA_VORBIS_DATA_CHUNK_SIZE;
- ma_uint8* pNewData;
+ pDecoder->memory.pData = (const ma_uint8*)pData;
+ pDecoder->memory.dataSize = dataSize;
+ pDecoder->memory.currentReadPos = 0;
- pNewData = (ma_uint8*)ma_realloc(pVorbis->pData, newCap);
- if (pNewData == NULL) {
- return totalFramesRead; /* Out of memory. */
- }
+ (void)pConfig;
+ return MA_SUCCESS;
+}
- pVorbis->pData = pNewData;
- pVorbis->dataCapacity = newCap;
- }
+ma_result ma_decoder_init_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+ ma_decoder_config config;
+ ma_result result;
- /* Fill in a chunk. */
- bytesRead = ma_decoder_read_bytes(pDecoder, pVorbis->pData + pVorbis->dataSize, (pVorbis->dataCapacity - pVorbis->dataSize));
- if (bytesRead == 0) {
- return totalFramesRead; /* Error reading more data. */
- }
+ config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */
- pVorbis->dataSize += bytesRead;
- }
- } while (MA_TRUE);
+ result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
}
- return totalFramesRead;
+ return ma_decoder_init__internal(ma_decoder__on_read_memory, ma_decoder__on_seek_memory, NULL, &config, pDecoder);
}
-ma_result ma_vorbis_decoder_seek_to_pcm_frame(ma_vorbis_decoder* pVorbis, ma_decoder* pDecoder, ma_uint64 frameIndex)
+ma_result ma_decoder_init_memory_wav(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- float buffer[4096];
+ ma_decoder_config config;
+ ma_result result;
- ma_assert(pVorbis != NULL);
- ma_assert(pDecoder != NULL);
+ config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */
- /*
- This is terribly inefficient because stb_vorbis does not have a good seeking solution with it's push API. Currently this just performs
- a full decode right from the start of the stream. Later on I'll need to write a layer that goes through all of the Ogg pages until we
- find the one containing the sample we need. Then we know exactly where to seek for stb_vorbis.
- */
- if (!ma_decoder_seek_bytes(pDecoder, 0, ma_seek_origin_start)) {
- return MA_ERROR;
+ result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
}
- stb_vorbis_flush_pushdata(pVorbis->pInternalVorbis);
- pVorbis->framesConsumed = 0;
- pVorbis->framesRemaining = 0;
- pVorbis->dataSize = 0;
+#ifdef MA_HAS_WAV
+ result = ma_decoder_init_wav__internal(&config, pDecoder);
+#else
+ result = MA_NO_BACKEND;
+#endif
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- while (frameIndex > 0) {
- ma_uint32 framesRead;
- ma_uint32 framesToRead = ma_countof(buffer)/pDecoder->internalChannels;
- if (framesToRead > frameIndex) {
- framesToRead = (ma_uint32)frameIndex;
- }
+ return ma_decoder__postinit(&config, pDecoder);
+}
- framesRead = ma_vorbis_decoder_read_pcm_frames(pVorbis, pDecoder, buffer, framesToRead);
- if (framesRead == 0) {
- return MA_ERROR;
- }
+ma_result ma_decoder_init_memory_flac(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+ ma_decoder_config config;
+ ma_result result;
- frameIndex -= framesRead;
+ config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */
+
+ result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
}
- return MA_SUCCESS;
-}
+#ifdef MA_HAS_FLAC
+ result = ma_decoder_init_flac__internal(&config, pDecoder);
+#else
+ result = MA_NO_BACKEND;
+#endif
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ return ma_decoder__postinit(&config, pDecoder);
+}
-ma_result ma_decoder_internal_on_seek_to_pcm_frame__vorbis(ma_decoder* pDecoder, ma_uint64 frameIndex)
+ma_result ma_decoder_init_memory_vorbis(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_vorbis_decoder* pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder;
- ma_assert(pVorbis != NULL);
+ ma_decoder_config config;
+ ma_result result;
- return ma_vorbis_decoder_seek_to_pcm_frame(pVorbis, pDecoder, frameIndex);
-}
+ config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */
-ma_result ma_decoder_internal_on_uninit__vorbis(ma_decoder* pDecoder)
-{
- ma_vorbis_decoder* pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder;
- ma_assert(pVorbis != NULL);
+ result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- stb_vorbis_close(pVorbis->pInternalVorbis);
- ma_free(pVorbis->pData);
- ma_free(pVorbis);
+#ifdef MA_HAS_VORBIS
+ result = ma_decoder_init_vorbis__internal(&config, pDecoder);
+#else
+ result = MA_NO_BACKEND;
+#endif
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- return MA_SUCCESS;
+ return ma_decoder__postinit(&config, pDecoder);
}
-ma_uint32 ma_decoder_internal_on_read_pcm_frames__vorbis(ma_pcm_converter* pDSP, void* pSamplesOut, ma_uint32 frameCount, void* pUserData)
+ma_result ma_decoder_init_memory_mp3(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_decoder* pDecoder;
- ma_vorbis_decoder* pVorbis;
+ ma_decoder_config config;
+ ma_result result;
- (void)pDSP;
+ config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */
- pDecoder = (ma_decoder*)pUserData;
- ma_assert(pDecoder != NULL);
- ma_assert(pDecoder->internalFormat == ma_format_f32);
+ result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder;
- ma_assert(pVorbis != NULL);
+#ifdef MA_HAS_MP3
+ result = ma_decoder_init_mp3__internal(&config, pDecoder);
+#else
+ result = MA_NO_BACKEND;
+#endif
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- return ma_vorbis_decoder_read_pcm_frames(pVorbis, pDecoder, pSamplesOut, frameCount);
+ return ma_decoder__postinit(&config, pDecoder);
}
-ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__vorbis(ma_decoder* pDecoder)
+ma_result ma_decoder_init_memory_raw(const void* pData, size_t dataSize, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder)
{
- /* No good way to do this with Vorbis. */
- (void)pDecoder;
- return 0;
+ ma_decoder_config config;
+ ma_result result;
+
+ config = ma_decoder_config_init_copy(pConfigOut); /* Make sure the config is not NULL. */
+
+ result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ result = ma_decoder_init_raw__internal(pConfigIn, &config, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ return ma_decoder__postinit(&config, pDecoder);
}
-ma_result ma_decoder_init_vorbis__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+#ifndef MA_NO_STDIO
+static const char* ma_path_file_name(const char* path)
{
- ma_result result;
- stb_vorbis* pInternalVorbis = NULL;
- size_t dataSize = 0;
- size_t dataCapacity = 0;
- ma_uint8* pData = NULL;
- stb_vorbis_info vorbisInfo;
- size_t vorbisDataSize;
- ma_vorbis_decoder* pVorbis;
+ const char* fileName;
- ma_assert(pConfig != NULL);
- ma_assert(pDecoder != NULL);
+ if (path == NULL) {
+ return NULL;
+ }
- /* We grow the buffer in chunks. */
- do
- {
- /* Allocate memory for a new chunk. */
- ma_uint8* pNewData;
- size_t bytesRead;
- int vorbisError = 0;
- int consumedDataSize = 0;
+ fileName = path;
- dataCapacity += MA_VORBIS_DATA_CHUNK_SIZE;
- pNewData = (ma_uint8*)ma_realloc(pData, dataCapacity);
- if (pNewData == NULL) {
- ma_free(pData);
- return MA_OUT_OF_MEMORY;
+ /* We just loop through the path until we find the last slash. */
+ while (path[0] != '\0') {
+ if (path[0] == '/' || path[0] == '\\') {
+ fileName = path;
}
- pData = pNewData;
+ path += 1;
+ }
- /* Fill in a chunk. */
- bytesRead = ma_decoder_read_bytes(pDecoder, pData + dataSize, (dataCapacity - dataSize));
- if (bytesRead == 0) {
- return MA_ERROR;
- }
+ /* At this point the file name is sitting on a slash, so just move forward. */
+ while (fileName[0] != '\0' && (fileName[0] == '/' || fileName[0] == '\\')) {
+ fileName += 1;
+ }
- dataSize += bytesRead;
- if (dataSize > INT_MAX) {
- return MA_ERROR; /* Too big. */
- }
+ return fileName;
+}
- pInternalVorbis = stb_vorbis_open_pushdata(pData, (int)dataSize, &consumedDataSize, &vorbisError, NULL);
- if (pInternalVorbis != NULL) {
- /*
- If we get here it means we were able to open the stb_vorbis decoder. There may be some leftover bytes in our buffer, so
- we need to move those bytes down to the front of the buffer since they'll be needed for future decoding.
- */
- size_t leftoverDataSize = (dataSize - (size_t)consumedDataSize);
- size_t i;
- for (i = 0; i < leftoverDataSize; ++i) {
- pData[i] = pData[i + consumedDataSize];
- }
+static const wchar_t* ma_path_file_name_w(const wchar_t* path)
+{
+ const wchar_t* fileName;
- dataSize = leftoverDataSize;
- break; /* Success. */
- } else {
- if (vorbisError == VORBIS_need_more_data) {
- continue;
- } else {
- return MA_ERROR; /* Failed to open the stb_vorbis decoder. */
- }
+ if (path == NULL) {
+ return NULL;
+ }
+
+ fileName = path;
+
+ /* We just loop through the path until we find the last slash. */
+ while (path[0] != '\0') {
+ if (path[0] == '/' || path[0] == '\\') {
+ fileName = path;
}
- } while (MA_TRUE);
+ path += 1;
+ }
+
+ /* At this point the file name is sitting on a slash, so just move forward. */
+ while (fileName[0] != '\0' && (fileName[0] == '/' || fileName[0] == '\\')) {
+ fileName += 1;
+ }
- /* If we get here it means we successfully opened the Vorbis decoder. */
- vorbisInfo = stb_vorbis_get_info(pInternalVorbis);
+ return fileName;
+}
- /* Don't allow more than MA_MAX_CHANNELS channels. */
- if (vorbisInfo.channels > MA_MAX_CHANNELS) {
- stb_vorbis_close(pInternalVorbis);
- ma_free(pData);
- return MA_ERROR; /* Too many channels. */
+
+static const char* ma_path_extension(const char* path)
+{
+ const char* extension;
+ const char* lastOccurance;
+
+ if (path == NULL) {
+ path = "";
}
- vorbisDataSize = sizeof(ma_vorbis_decoder) + sizeof(float)*vorbisInfo.max_frame_size;
- pVorbis = (ma_vorbis_decoder*)ma_malloc(vorbisDataSize);
- if (pVorbis == NULL) {
- stb_vorbis_close(pInternalVorbis);
- ma_free(pData);
- return MA_OUT_OF_MEMORY;
+ extension = ma_path_file_name(path);
+ lastOccurance = NULL;
+
+ /* Just find the last '.' and return. */
+ while (extension[0] != '\0') {
+ if (extension[0] == '.') {
+ extension += 1;
+ lastOccurance = extension;
+ }
+
+ extension += 1;
}
- ma_zero_memory(pVorbis, vorbisDataSize);
- pVorbis->pInternalVorbis = pInternalVorbis;
- pVorbis->pData = pData;
- pVorbis->dataSize = dataSize;
- pVorbis->dataCapacity = dataCapacity;
+ return (lastOccurance != NULL) ? lastOccurance : extension;
+}
- pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__vorbis;
- pDecoder->onUninit = ma_decoder_internal_on_uninit__vorbis;
- pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__vorbis;
- pDecoder->pInternalDecoder = pVorbis;
+static const wchar_t* ma_path_extension_w(const wchar_t* path)
+{
+ const wchar_t* extension;
+ const wchar_t* lastOccurance;
- /* The internal format is always f32. */
- pDecoder->internalFormat = ma_format_f32;
- pDecoder->internalChannels = vorbisInfo.channels;
- pDecoder->internalSampleRate = vorbisInfo.sample_rate;
- ma_get_standard_channel_map(ma_standard_channel_map_vorbis, pDecoder->internalChannels, pDecoder->internalChannelMap);
+ if (path == NULL) {
+ path = L"";
+ }
- result = ma_decoder__init_dsp(pDecoder, pConfig, ma_decoder_internal_on_read_pcm_frames__vorbis);
- if (result != MA_SUCCESS) {
- stb_vorbis_close(pVorbis->pInternalVorbis);
- ma_free(pVorbis->pData);
- ma_free(pVorbis);
- return result;
+ extension = ma_path_file_name_w(path);
+ lastOccurance = NULL;
+
+ /* Just find the last '.' and return. */
+ while (extension[0] != '\0') {
+ if (extension[0] == '.') {
+ extension += 1;
+ lastOccurance = extension;
+ }
+
+ extension += 1;
}
- return MA_SUCCESS;
+ return (lastOccurance != NULL) ? lastOccurance : extension;
}
-#endif
-/* MP3 */
-#ifdef dr_mp3_h
-#define MA_HAS_MP3
-size_t ma_decoder_internal_on_read__mp3(void* pUserData, void* pBufferOut, size_t bytesToRead)
+static ma_bool32 ma_path_extension_equal(const char* path, const char* extension)
{
- ma_decoder* pDecoder = (ma_decoder*)pUserData;
- ma_assert(pDecoder != NULL);
+ const char* ext1;
+ const char* ext2;
- return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead);
-}
+ if (path == NULL || extension == NULL) {
+ return MA_FALSE;
+ }
-drmp3_bool32 ma_decoder_internal_on_seek__mp3(void* pUserData, int offset, drmp3_seek_origin origin)
-{
- ma_decoder* pDecoder = (ma_decoder*)pUserData;
- ma_assert(pDecoder != NULL);
+ ext1 = extension;
+ ext2 = ma_path_extension(path);
- return ma_decoder_seek_bytes(pDecoder, offset, (origin == drmp3_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current);
+#if defined(_MSC_VER) || defined(__DMC__)
+ return _stricmp(ext1, ext2) == 0;
+#else
+ return strcasecmp(ext1, ext2) == 0;
+#endif
}
-ma_uint32 ma_decoder_internal_on_read_pcm_frames__mp3(ma_pcm_converter* pDSP, void* pSamplesOut, ma_uint32 frameCount, void* pUserData)
+static ma_bool32 ma_path_extension_equal_w(const wchar_t* path, const wchar_t* extension)
{
- ma_decoder* pDecoder;
- drmp3* pMP3;
+ const wchar_t* ext1;
+ const wchar_t* ext2;
- (void)pDSP;
-
- pDecoder = (ma_decoder*)pUserData;
- ma_assert(pDecoder != NULL);
- ma_assert(pDecoder->internalFormat == ma_format_f32);
+ if (path == NULL || extension == NULL) {
+ return MA_FALSE;
+ }
- pMP3 = (drmp3*)pDecoder->pInternalDecoder;
- ma_assert(pMP3 != NULL);
+ ext1 = extension;
+ ext2 = ma_path_extension_w(path);
- return (ma_uint32)drmp3_read_pcm_frames_f32(pMP3, frameCount, (float*)pSamplesOut);
-}
+#if defined(_MSC_VER) || defined(__DMC__)
+ return _wcsicmp(ext1, ext2) == 0;
+#else
+ /*
+ I'm not aware of a wide character version of strcasecmp(). I'm therefore converting the extensions to multibyte strings and comparing those. This
+ isn't the most efficient way to do it, but it should work OK.
+ */
+ {
+ char ext1MB[4096];
+ char ext2MB[4096];
+ const wchar_t* pext1 = ext1;
+ const wchar_t* pext2 = ext2;
+ mbstate_t mbs1;
+ mbstate_t mbs2;
-ma_result ma_decoder_internal_on_seek_to_pcm_frame__mp3(ma_decoder* pDecoder, ma_uint64 frameIndex)
-{
- drmp3* pMP3;
- drmp3_bool32 result;
+ MA_ZERO_OBJECT(&mbs1);
+ MA_ZERO_OBJECT(&mbs2);
- pMP3 = (drmp3*)pDecoder->pInternalDecoder;
- ma_assert(pMP3 != NULL);
+ if (wcsrtombs(ext1MB, &pext1, sizeof(ext1MB), &mbs1) == (size_t)-1) {
+ return MA_FALSE;
+ }
+ if (wcsrtombs(ext2MB, &pext2, sizeof(ext2MB), &mbs2) == (size_t)-1) {
+ return MA_FALSE;
+ }
- result = drmp3_seek_to_pcm_frame(pMP3, frameIndex);
- if (result) {
- return MA_SUCCESS;
- } else {
- return MA_ERROR;
+ return strcasecmp(ext1MB, ext2MB) == 0;
}
+#endif
}
-ma_result ma_decoder_internal_on_uninit__mp3(ma_decoder* pDecoder)
+
+static size_t ma_decoder__on_read_stdio(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead)
{
- drmp3_uninit((drmp3*)pDecoder->pInternalDecoder);
- ma_free(pDecoder->pInternalDecoder);
- return MA_SUCCESS;
+ return fread(pBufferOut, 1, bytesToRead, (FILE*)pDecoder->pUserData);
}
-ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__mp3(ma_decoder* pDecoder)
+static ma_bool32 ma_decoder__on_seek_stdio(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin)
{
- return drmp3_get_pcm_frame_count((drmp3*)pDecoder->pInternalDecoder);
+ return fseek((FILE*)pDecoder->pUserData, byteOffset, (origin == ma_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0;
}
-ma_result ma_decoder_init_mp3__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+static ma_result ma_decoder__preinit_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- drmp3* pMP3;
- drmp3_config mp3Config;
ma_result result;
+ FILE* pFile;
- ma_assert(pConfig != NULL);
- ma_assert(pDecoder != NULL);
-
- pMP3 = (drmp3*)ma_malloc(sizeof(*pMP3));
- if (pMP3 == NULL) {
- return MA_OUT_OF_MEMORY;
- }
-
- /*
- Try opening the decoder first. MP3 can have variable sample rates (it's per frame/packet). We therefore need
- to use some smarts to determine the most appropriate internal sample rate. These are the rules we're going
- to use:
-
- Sample Rates
- 1) If an output sample rate is specified in pConfig we just use that. Otherwise;
- 2) Fall back to 44100.
-
- The internal channel count is always stereo, and the internal format is always f32.
- */
- ma_zero_object(&mp3Config);
- mp3Config.outputChannels = 2;
- mp3Config.outputSampleRate = (pConfig->sampleRate != 0) ? pConfig->sampleRate : 44100;
- if (!drmp3_init(pMP3, ma_decoder_internal_on_read__mp3, ma_decoder_internal_on_seek__mp3, pDecoder, &mp3Config)) {
- return MA_ERROR;
+ if (pDecoder == NULL) {
+ return MA_INVALID_ARGS;
}
- /* If we get here it means we successfully initialized the MP3 decoder. We can now initialize the rest of the ma_decoder. */
- pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__mp3;
- pDecoder->onUninit = ma_decoder_internal_on_uninit__mp3;
- pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__mp3;
- pDecoder->pInternalDecoder = pMP3;
+ MA_ZERO_OBJECT(pDecoder);
- /* Internal format. */
- pDecoder->internalFormat = ma_format_f32;
- pDecoder->internalChannels = pMP3->channels;
- pDecoder->internalSampleRate = pMP3->sampleRate;
- ma_get_standard_channel_map(ma_standard_channel_map_default, pDecoder->internalChannels, pDecoder->internalChannelMap);
+ if (pFilePath == NULL || pFilePath[0] == '\0') {
+ return MA_INVALID_ARGS;
+ }
- result = ma_decoder__init_dsp(pDecoder, pConfig, ma_decoder_internal_on_read_pcm_frames__mp3);
+ result = ma_decoder__init_allocation_callbacks(pConfig, pDecoder);
if (result != MA_SUCCESS) {
- ma_free(pMP3);
return result;
}
+ result = ma_fopen(&pFile, pFilePath, "rb");
+ if (pFile == NULL) {
+ return result;
+ }
+
+ /* We need to manually set the user data so the calls to ma_decoder__on_seek_stdio() succeed. */
+ pDecoder->pUserData = pFile;
+
return MA_SUCCESS;
}
-#endif
-/* Raw */
-ma_uint32 ma_decoder_internal_on_read_pcm_frames__raw(ma_pcm_converter* pDSP, void* pSamplesOut, ma_uint32 frameCount, void* pUserData)
+static ma_result ma_decoder__preinit_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_decoder* pDecoder;
- ma_uint32 bpf;
+ ma_result result;
+ FILE* pFile;
- (void)pDSP;
+ if (pDecoder == NULL) {
+ return MA_INVALID_ARGS;
+ }
- pDecoder = (ma_decoder*)pUserData;
- ma_assert(pDecoder != NULL);
+ MA_ZERO_OBJECT(pDecoder);
- /* For raw decoding we just read directly from the decoder's callbacks. */
- bpf = ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels);
- return (ma_uint32)ma_decoder_read_bytes(pDecoder, pSamplesOut, frameCount * bpf) / bpf;
+ if (pFilePath == NULL || pFilePath[0] == '\0') {
+ return MA_INVALID_ARGS;
+ }
+
+ result = ma_decoder__init_allocation_callbacks(pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ result = ma_wfopen(&pFile, pFilePath, L"rb", &pDecoder->allocationCallbacks);
+ if (pFile == NULL) {
+ return result;
+ }
+
+ /* We need to manually set the user data so the calls to ma_decoder__on_seek_stdio() succeed. */
+ pDecoder->pUserData = pFile;
+
+ (void)pConfig;
+ return MA_SUCCESS;
}
-ma_result ma_decoder_internal_on_seek_to_pcm_frame__raw(ma_decoder* pDecoder, ma_uint64 frameIndex)
+ma_result ma_decoder_init_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_bool32 result = MA_FALSE;
- ma_uint64 totalBytesToSeek;
+ ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder); /* This sets pDecoder->pUserData to a FILE*. */
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- ma_assert(pDecoder != NULL);
+ /* WAV */
+ if (ma_path_extension_equal(pFilePath, "wav")) {
+ result = ma_decoder_init_wav(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+ if (result == MA_SUCCESS) {
+ return MA_SUCCESS;
+ }
- if (pDecoder->onSeek == NULL) {
- return MA_ERROR;
+ ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start);
}
- /* The callback uses a 32 bit integer whereas we use a 64 bit unsigned integer. We just need to continuously seek until we're at the correct position. */
- totalBytesToSeek = frameIndex * ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels);
- if (totalBytesToSeek < 0x7FFFFFFF) {
- /* Simple case. */
- result = ma_decoder_seek_bytes(pDecoder, (int)(frameIndex * ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels)), ma_seek_origin_start);
- } else {
- /* Complex case. Start by doing a seek relative to the start. Then keep looping using offset seeking. */
- result = ma_decoder_seek_bytes(pDecoder, 0x7FFFFFFF, ma_seek_origin_start);
- if (result == MA_TRUE) {
- totalBytesToSeek -= 0x7FFFFFFF;
-
- while (totalBytesToSeek > 0) {
- ma_uint64 bytesToSeekThisIteration = totalBytesToSeek;
- if (bytesToSeekThisIteration > 0x7FFFFFFF) {
- bytesToSeekThisIteration = 0x7FFFFFFF;
- }
+ /* FLAC */
+ if (ma_path_extension_equal(pFilePath, "flac")) {
+ result = ma_decoder_init_flac(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+ if (result == MA_SUCCESS) {
+ return MA_SUCCESS;
+ }
- result = ma_decoder_seek_bytes(pDecoder, (int)bytesToSeekThisIteration, ma_seek_origin_current);
- if (result != MA_TRUE) {
- break;
- }
+ ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start);
+ }
- totalBytesToSeek -= bytesToSeekThisIteration;
- }
+ /* MP3 */
+ if (ma_path_extension_equal(pFilePath, "mp3")) {
+ result = ma_decoder_init_mp3(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+ if (result == MA_SUCCESS) {
+ return MA_SUCCESS;
}
- }
- if (result) {
- return MA_SUCCESS;
- } else {
- return MA_ERROR;
+ ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start);
}
-}
-ma_result ma_decoder_internal_on_uninit__raw(ma_decoder* pDecoder)
-{
- (void)pDecoder;
- return MA_SUCCESS;
+ /* Trial and error. */
+ return ma_decoder_init(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
}
-ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__raw(ma_decoder* pDecoder)
+ma_result ma_decoder_init_file_wav(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- (void)pDecoder;
- return 0;
+ ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ return ma_decoder_init_wav(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
}
-ma_result ma_decoder_init_raw__internal(const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder)
+ma_result ma_decoder_init_file_flac(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_result result;
+ ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- ma_assert(pConfigIn != NULL);
- ma_assert(pConfigOut != NULL);
- ma_assert(pDecoder != NULL);
+ return ma_decoder_init_flac(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+}
- pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__raw;
- pDecoder->onUninit = ma_decoder_internal_on_uninit__raw;
- pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__raw;
+ma_result ma_decoder_init_file_vorbis(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+ ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- /* Internal format. */
- pDecoder->internalFormat = pConfigIn->format;
- pDecoder->internalChannels = pConfigIn->channels;
- pDecoder->internalSampleRate = pConfigIn->sampleRate;
- ma_channel_map_copy(pDecoder->internalChannelMap, pConfigIn->channelMap, pConfigIn->channels);
+ return ma_decoder_init_vorbis(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+}
- result = ma_decoder__init_dsp(pDecoder, pConfigOut, ma_decoder_internal_on_read_pcm_frames__raw);
+ma_result ma_decoder_init_file_mp3(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+ ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder);
if (result != MA_SUCCESS) {
return result;
}
- return MA_SUCCESS;
+ return ma_decoder_init_mp3(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
}
-ma_result ma_decoder__preinit(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
-{
- ma_assert(pConfig != NULL);
- if (pDecoder == NULL) {
- return MA_INVALID_ARGS;
+ma_result ma_decoder_init_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+ ma_result result = ma_decoder__preinit_file_w(pFilePath, pConfig, pDecoder); /* This sets pDecoder->pUserData to a FILE*. */
+ if (result != MA_SUCCESS) {
+ return result;
}
- ma_zero_object(pDecoder);
+ /* WAV */
+ if (ma_path_extension_equal_w(pFilePath, L"wav")) {
+ result = ma_decoder_init_wav(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+ if (result == MA_SUCCESS) {
+ return MA_SUCCESS;
+ }
- if (onRead == NULL || onSeek == NULL) {
- return MA_INVALID_ARGS;
+ ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start);
}
- pDecoder->onRead = onRead;
- pDecoder->onSeek = onSeek;
- pDecoder->pUserData = pUserData;
+ /* FLAC */
+ if (ma_path_extension_equal_w(pFilePath, L"flac")) {
+ result = ma_decoder_init_flac(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+ if (result == MA_SUCCESS) {
+ return MA_SUCCESS;
+ }
- (void)pConfig;
- return MA_SUCCESS;
-}
+ ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start);
+ }
-ma_result ma_decoder_init_wav(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
-{
- ma_decoder_config config;
- ma_result result;
+ /* MP3 */
+ if (ma_path_extension_equal_w(pFilePath, L"mp3")) {
+ result = ma_decoder_init_mp3(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+ if (result == MA_SUCCESS) {
+ return MA_SUCCESS;
+ }
+
+ ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start);
+ }
- config = ma_decoder_config_init_copy(pConfig);
+ /* Trial and error. */
+ return ma_decoder_init(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+}
- result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
+ma_result ma_decoder_init_file_wav_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+ ma_result result = ma_decoder__preinit_file_w(pFilePath, pConfig, pDecoder);
if (result != MA_SUCCESS) {
return result;
}
-#ifdef MA_HAS_WAV
- return ma_decoder_init_wav__internal(&config, pDecoder);
-#else
- return MA_NO_BACKEND;
-#endif
+ return ma_decoder_init_wav(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
}
-ma_result ma_decoder_init_flac(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_decoder_init_file_flac_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_decoder_config config;
- ma_result result;
-
- config = ma_decoder_config_init_copy(pConfig);
-
- result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
+ ma_result result = ma_decoder__preinit_file_w(pFilePath, pConfig, pDecoder);
if (result != MA_SUCCESS) {
return result;
}
-#ifdef MA_HAS_FLAC
- return ma_decoder_init_flac__internal(&config, pDecoder);
-#else
- return MA_NO_BACKEND;
-#endif
+ return ma_decoder_init_flac(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
}
-ma_result ma_decoder_init_vorbis(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_decoder_init_file_vorbis_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
{
- ma_decoder_config config;
- ma_result result;
+ ma_result result = ma_decoder__preinit_file_w(pFilePath, pConfig, pDecoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
- config = ma_decoder_config_init_copy(pConfig);
+ return ma_decoder_init_vorbis(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+}
- result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
+ma_result ma_decoder_init_file_mp3_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+ ma_result result = ma_decoder__preinit_file_w(pFilePath, pConfig, pDecoder);
if (result != MA_SUCCESS) {
return result;
}
-#ifdef MA_HAS_VORBIS
- return ma_decoder_init_vorbis__internal(&config, pDecoder);
-#else
- return MA_NO_BACKEND;
-#endif
+ return ma_decoder_init_mp3(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
}
+#endif /* MA_NO_STDIO */
-ma_result ma_decoder_init_mp3(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_decoder_uninit(ma_decoder* pDecoder)
{
- ma_decoder_config config;
- ma_result result;
-
- config = ma_decoder_config_init_copy(pConfig);
+ if (pDecoder == NULL) {
+ return MA_INVALID_ARGS;
+ }
- result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
+ if (pDecoder->onUninit) {
+ pDecoder->onUninit(pDecoder);
}
-#ifdef MA_HAS_MP3
- return ma_decoder_init_mp3__internal(&config, pDecoder);
-#else
- return MA_NO_BACKEND;
+#ifndef MA_NO_STDIO
+ /* If we have a file handle, close it. */
+ if (pDecoder->onRead == ma_decoder__on_read_stdio) {
+ fclose((FILE*)pDecoder->pUserData);
+ }
#endif
+
+ ma_data_converter_uninit(&pDecoder->converter);
+
+ return MA_SUCCESS;
}
-ma_result ma_decoder_init_raw(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder)
+ma_uint64 ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder)
{
- ma_decoder_config config;
- ma_result result;
-
- config = ma_decoder_config_init_copy(pConfigOut);
+ if (pDecoder == NULL) {
+ return 0;
+ }
- result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
+ if (pDecoder->onGetLengthInPCMFrames) {
+ ma_uint64 nativeLengthInPCMFrames = pDecoder->onGetLengthInPCMFrames(pDecoder);
+ if (pDecoder->internalSampleRate == pDecoder->outputSampleRate) {
+ return nativeLengthInPCMFrames;
+ } else {
+ return ma_calculate_frame_count_after_resampling(pDecoder->outputSampleRate, pDecoder->internalSampleRate, nativeLengthInPCMFrames);
+ }
}
- return ma_decoder_init_raw__internal(pConfigIn, &config, pDecoder);
+ return 0;
}
-ma_result ma_decoder_init__internal(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_uint64 ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount)
{
- ma_result result = MA_NO_BACKEND;
+ ma_result result;
+ ma_uint64 totalFramesReadOut;
+ ma_uint64 totalFramesReadIn;
+ void* pRunningFramesOut;
+
+ if (pDecoder == NULL) {
+ return 0;
+ }
- ma_assert(pConfig != NULL);
- ma_assert(pDecoder != NULL);
+ if (pDecoder->onReadPCMFrames == NULL) {
+ return 0;
+ }
- /* Silence some warnings in the case that we don't have any decoder backends enabled. */
- (void)onRead;
- (void)onSeek;
- (void)pUserData;
- (void)pConfig;
- (void)pDecoder;
+ /* Fast path. */
+ if (pDecoder->converter.isPassthrough) {
+ return pDecoder->onReadPCMFrames(pDecoder, pFramesOut, frameCount);
+ }
- /* We use trial and error to open a decoder. */
+ /* Getting here means we need to do data conversion. */
+ totalFramesReadOut = 0;
+ totalFramesReadIn = 0;
+ pRunningFramesOut = pFramesOut;
+
+ while (totalFramesReadOut < frameCount) {
+ ma_uint8 pIntermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In internal format. */
+ ma_uint64 intermediaryBufferCap = sizeof(pIntermediaryBuffer) / ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels);
+ ma_uint64 framesToReadThisIterationIn;
+ ma_uint64 framesReadThisIterationIn;
+ ma_uint64 framesToReadThisIterationOut;
+ ma_uint64 framesReadThisIterationOut;
+ ma_uint64 requiredInputFrameCount;
-#ifdef MA_HAS_WAV
- if (result != MA_SUCCESS) {
- result = ma_decoder_init_wav__internal(pConfig, pDecoder);
- if (result != MA_SUCCESS) {
- onSeek(pDecoder, 0, ma_seek_origin_start);
+ framesToReadThisIterationOut = (frameCount - totalFramesReadOut);
+ framesToReadThisIterationIn = framesToReadThisIterationOut;
+ if (framesToReadThisIterationIn > intermediaryBufferCap) {
+ framesToReadThisIterationIn = intermediaryBufferCap;
}
- }
-#endif
-#ifdef MA_HAS_FLAC
- if (result != MA_SUCCESS) {
- result = ma_decoder_init_flac__internal(pConfig, pDecoder);
- if (result != MA_SUCCESS) {
- onSeek(pDecoder, 0, ma_seek_origin_start);
+
+ requiredInputFrameCount = ma_data_converter_get_required_input_frame_count(&pDecoder->converter, framesToReadThisIterationOut);
+ if (framesToReadThisIterationIn > requiredInputFrameCount) {
+ framesToReadThisIterationIn = requiredInputFrameCount;
}
- }
-#endif
-#ifdef MA_HAS_VORBIS
- if (result != MA_SUCCESS) {
- result = ma_decoder_init_vorbis__internal(pConfig, pDecoder);
- if (result != MA_SUCCESS) {
- onSeek(pDecoder, 0, ma_seek_origin_start);
+
+ if (requiredInputFrameCount > 0) {
+ framesReadThisIterationIn = pDecoder->onReadPCMFrames(pDecoder, pIntermediaryBuffer, framesToReadThisIterationIn);
+ totalFramesReadIn += framesReadThisIterationIn;
}
- }
-#endif
-#ifdef MA_HAS_MP3
- if (result != MA_SUCCESS) {
- result = ma_decoder_init_mp3__internal(pConfig, pDecoder);
+
+ /*
+ At this point we have our decoded data in input format and now we need to convert to output format. Note that even if we didn't read any
+ input frames, we still want to try processing frames because there may be some output frames generated from cached input data.
+ */
+ framesReadThisIterationOut = framesToReadThisIterationOut;
+ result = ma_data_converter_process_pcm_frames(&pDecoder->converter, pIntermediaryBuffer, &framesReadThisIterationIn, pRunningFramesOut, &framesReadThisIterationOut);
if (result != MA_SUCCESS) {
- onSeek(pDecoder, 0, ma_seek_origin_start);
+ break;
}
- }
-#endif
- if (result != MA_SUCCESS) {
- return result;
+ totalFramesReadOut += framesReadThisIterationOut;
+ pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesReadThisIterationOut * ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels));
+
+ if (framesReadThisIterationIn == 0 && framesReadThisIterationOut == 0) {
+ break; /* We're done. */
+ }
}
- return result;
+ return totalFramesReadOut;
}
-ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex)
{
- ma_decoder_config config;
- ma_result result;
-
- config = ma_decoder_config_init_copy(pConfig);
+ if (pDecoder == NULL) {
+ return 0;
+ }
- result = ma_decoder__preinit(onRead, onSeek, pUserData, &config, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
+ if (pDecoder->onSeekToPCMFrame) {
+ return pDecoder->onSeekToPCMFrame(pDecoder, frameIndex);
}
- return ma_decoder_init__internal(onRead, onSeek, pUserData, &config, pDecoder);
+ /* Should never get here, but if we do it means onSeekToPCMFrame was not set by the backend. */
+ return MA_INVALID_ARGS;
}
-size_t ma_decoder__on_read_memory(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead)
+static ma_result ma_decoder__full_decode_and_uninit(ma_decoder* pDecoder, ma_decoder_config* pConfigOut, ma_uint64* pFrameCountOut, void** ppPCMFramesOut)
{
- size_t bytesRemaining;
-
- ma_assert(pDecoder->memory.dataSize >= pDecoder->memory.currentReadPos);
+ ma_uint64 totalFrameCount;
+ ma_uint64 bpf;
+ ma_uint64 dataCapInFrames;
+ void* pPCMFramesOut;
- bytesRemaining = pDecoder->memory.dataSize - pDecoder->memory.currentReadPos;
- if (bytesToRead > bytesRemaining) {
- bytesToRead = bytesRemaining;
- }
+ MA_ASSERT(pDecoder != NULL);
+
+ totalFrameCount = 0;
+ bpf = ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels);
- if (bytesToRead > 0) {
- ma_copy_memory(pBufferOut, pDecoder->memory.pData + pDecoder->memory.currentReadPos, bytesToRead);
- pDecoder->memory.currentReadPos += bytesToRead;
- }
+ /* The frame count is unknown until we try reading. Thus, we just run in a loop. */
+ dataCapInFrames = 0;
+ pPCMFramesOut = NULL;
+ for (;;) {
+ ma_uint64 frameCountToTryReading;
+ ma_uint64 framesJustRead;
- return bytesToRead;
-}
+ /* Make room if there's not enough. */
+ if (totalFrameCount == dataCapInFrames) {
+ void* pNewPCMFramesOut;
+ ma_uint64 oldDataCapInFrames = dataCapInFrames;
+ ma_uint64 newDataCapInFrames = dataCapInFrames*2;
+ if (newDataCapInFrames == 0) {
+ newDataCapInFrames = 4096;
+ }
-ma_bool32 ma_decoder__on_seek_memory(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin)
-{
- if (origin == ma_seek_origin_current) {
- if (byteOffset > 0) {
- if (pDecoder->memory.currentReadPos + byteOffset > pDecoder->memory.dataSize) {
- byteOffset = (int)(pDecoder->memory.dataSize - pDecoder->memory.currentReadPos); /* Trying to seek too far forward. */
+ if ((newDataCapInFrames * bpf) > MA_SIZE_MAX) {
+ ma__free_from_callbacks(pPCMFramesOut, &pDecoder->allocationCallbacks);
+ return MA_TOO_BIG;
}
- } else {
- if (pDecoder->memory.currentReadPos < (size_t)-byteOffset) {
- byteOffset = -(int)pDecoder->memory.currentReadPos; /* Trying to seek too far backwards. */
+
+
+ pNewPCMFramesOut = (void*)ma__realloc_from_callbacks(pPCMFramesOut, (size_t)(newDataCapInFrames * bpf), (size_t)(oldDataCapInFrames * bpf), &pDecoder->allocationCallbacks);
+ if (pNewPCMFramesOut == NULL) {
+ ma__free_from_callbacks(pPCMFramesOut, &pDecoder->allocationCallbacks);
+ return MA_OUT_OF_MEMORY;
}
- }
- /* This will never underflow thanks to the clamps above. */
- pDecoder->memory.currentReadPos += byteOffset;
- } else {
- if ((ma_uint32)byteOffset <= pDecoder->memory.dataSize) {
- pDecoder->memory.currentReadPos = byteOffset;
- } else {
- pDecoder->memory.currentReadPos = pDecoder->memory.dataSize; /* Trying to seek too far forward. */
+ dataCapInFrames = newDataCapInFrames;
+ pPCMFramesOut = pNewPCMFramesOut;
}
- }
- return MA_TRUE;
-}
+ frameCountToTryReading = dataCapInFrames - totalFrameCount;
+ MA_ASSERT(frameCountToTryReading > 0);
-ma_result ma_decoder__preinit_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
-{
- ma_result result = ma_decoder__preinit(ma_decoder__on_read_memory, ma_decoder__on_seek_memory, NULL, pConfig, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
- }
+ framesJustRead = ma_decoder_read_pcm_frames(pDecoder, (ma_uint8*)pPCMFramesOut + (totalFrameCount * bpf), frameCountToTryReading);
+ totalFrameCount += framesJustRead;
- if (pData == NULL || dataSize == 0) {
- return MA_INVALID_ARGS;
+ if (framesJustRead < frameCountToTryReading) {
+ break;
+ }
}
- pDecoder->memory.pData = (const ma_uint8*)pData;
- pDecoder->memory.dataSize = dataSize;
- pDecoder->memory.currentReadPos = 0;
-
- (void)pConfig;
- return MA_SUCCESS;
-}
-
-ma_result ma_decoder_init_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
-{
- ma_decoder_config config;
- ma_result result;
+
+ if (pConfigOut != NULL) {
+ pConfigOut->format = pDecoder->outputFormat;
+ pConfigOut->channels = pDecoder->outputChannels;
+ pConfigOut->sampleRate = pDecoder->outputSampleRate;
+ ma_channel_map_copy(pConfigOut->channelMap, pDecoder->outputChannelMap, pDecoder->outputChannels);
+ }
- config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */
+ if (ppPCMFramesOut != NULL) {
+ *ppPCMFramesOut = pPCMFramesOut;
+ } else {
+ ma__free_from_callbacks(pPCMFramesOut, &pDecoder->allocationCallbacks);
+ }
- result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
+ if (pFrameCountOut != NULL) {
+ *pFrameCountOut = totalFrameCount;
}
- return ma_decoder_init__internal(ma_decoder__on_read_memory, ma_decoder__on_seek_memory, NULL, &config, pDecoder);
+ ma_decoder_uninit(pDecoder);
+ return MA_SUCCESS;
}
-ma_result ma_decoder_init_memory_wav(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+#ifndef MA_NO_STDIO
+ma_result ma_decode_file(const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut)
{
ma_decoder_config config;
+ ma_decoder decoder;
ma_result result;
- config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */
+ if (pFrameCountOut != NULL) {
+ *pFrameCountOut = 0;
+ }
+ if (ppPCMFramesOut != NULL) {
+ *ppPCMFramesOut = NULL;
+ }
- result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
+ if (pFilePath == NULL) {
+ return MA_INVALID_ARGS;
+ }
+
+ config = ma_decoder_config_init_copy(pConfig);
+
+ result = ma_decoder_init_file(pFilePath, &config, &decoder);
if (result != MA_SUCCESS) {
return result;
}
-#ifdef MA_HAS_WAV
- return ma_decoder_init_wav__internal(&config, pDecoder);
-#else
- return MA_NO_BACKEND;
-#endif
+ return ma_decoder__full_decode_and_uninit(&decoder, pConfig, pFrameCountOut, ppPCMFramesOut);
}
+#endif
-ma_result ma_decoder_init_memory_flac(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_decode_memory(const void* pData, size_t dataSize, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut)
{
ma_decoder_config config;
+ ma_decoder decoder;
ma_result result;
- config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */
+ if (pFrameCountOut != NULL) {
+ *pFrameCountOut = 0;
+ }
+ if (ppPCMFramesOut != NULL) {
+ *ppPCMFramesOut = NULL;
+ }
- result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
+ if (pData == NULL || dataSize == 0) {
+ return MA_INVALID_ARGS;
+ }
+
+ config = ma_decoder_config_init_copy(pConfig);
+
+ result = ma_decoder_init_memory(pData, dataSize, &config, &decoder);
if (result != MA_SUCCESS) {
return result;
}
-#ifdef MA_HAS_FLAC
- return ma_decoder_init_flac__internal(&config, pDecoder);
-#else
- return MA_NO_BACKEND;
-#endif
+ return ma_decoder__full_decode_and_uninit(&decoder, pConfig, pFrameCountOut, ppPCMFramesOut);
}
+#endif /* MA_NO_DECODING */
-ma_result ma_decoder_init_memory_vorbis(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+
+#ifndef MA_NO_ENCODING
+
+#if defined(MA_HAS_WAV)
+size_t ma_encoder__internal_on_write_wav(void* pUserData, const void* pData, size_t bytesToWrite)
{
- ma_decoder_config config;
- ma_result result;
+ ma_encoder* pEncoder = (ma_encoder*)pUserData;
+ MA_ASSERT(pEncoder != NULL);
- config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */
+ return pEncoder->onWrite(pEncoder, pData, bytesToWrite);
+}
- result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
- }
+drwav_bool32 ma_encoder__internal_on_seek_wav(void* pUserData, int offset, drwav_seek_origin origin)
+{
+ ma_encoder* pEncoder = (ma_encoder*)pUserData;
+ MA_ASSERT(pEncoder != NULL);
-#ifdef MA_HAS_VORBIS
- return ma_decoder_init_vorbis__internal(&config, pDecoder);
-#else
- return MA_NO_BACKEND;
-#endif
+ return pEncoder->onSeek(pEncoder, offset, (origin == drwav_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current);
}
-ma_result ma_decoder_init_memory_mp3(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_encoder__on_init_wav(ma_encoder* pEncoder)
{
- ma_decoder_config config;
- ma_result result;
+ drwav_data_format wavFormat;
+ drwav_allocation_callbacks allocationCallbacks;
+ drwav* pWav;
- config = ma_decoder_config_init_copy(pConfig); /* Make sure the config is not NULL. */
+ MA_ASSERT(pEncoder != NULL);
- result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
+ pWav = (drwav*)ma__malloc_from_callbacks(sizeof(*pWav), &pEncoder->config.allocationCallbacks);
+ if (pWav == NULL) {
+ return MA_OUT_OF_MEMORY;
}
-#ifdef MA_HAS_MP3
- return ma_decoder_init_mp3__internal(&config, pDecoder);
-#else
- return MA_NO_BACKEND;
-#endif
+ wavFormat.container = drwav_container_riff;
+ wavFormat.channels = pEncoder->config.channels;
+ wavFormat.sampleRate = pEncoder->config.sampleRate;
+ wavFormat.bitsPerSample = ma_get_bytes_per_sample(pEncoder->config.format) * 8;
+ if (pEncoder->config.format == ma_format_f32) {
+ wavFormat.format = DR_WAVE_FORMAT_IEEE_FLOAT;
+ } else {
+ wavFormat.format = DR_WAVE_FORMAT_PCM;
+ }
+
+ allocationCallbacks.pUserData = pEncoder->config.allocationCallbacks.pUserData;
+ allocationCallbacks.onMalloc = pEncoder->config.allocationCallbacks.onMalloc;
+ allocationCallbacks.onRealloc = pEncoder->config.allocationCallbacks.onRealloc;
+ allocationCallbacks.onFree = pEncoder->config.allocationCallbacks.onFree;
+
+ if (!drwav_init_write(pWav, &wavFormat, ma_encoder__internal_on_write_wav, ma_encoder__internal_on_seek_wav, pEncoder, &allocationCallbacks)) {
+ return MA_ERROR;
+ }
+
+ pEncoder->pInternalEncoder = pWav;
+
+ return MA_SUCCESS;
}
-ma_result ma_decoder_init_memory_raw(const void* pData, size_t dataSize, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder)
+void ma_encoder__on_uninit_wav(ma_encoder* pEncoder)
{
- ma_decoder_config config;
- ma_result result;
+ drwav* pWav;
- config = ma_decoder_config_init_copy(pConfigOut); /* Make sure the config is not NULL. */
+ MA_ASSERT(pEncoder != NULL);
- result = ma_decoder__preinit_memory(pData, dataSize, &config, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
- }
+ pWav = (drwav*)pEncoder->pInternalEncoder;
+ MA_ASSERT(pWav != NULL);
- return ma_decoder_init_raw__internal(pConfigIn, &config, pDecoder);
+ drwav_uninit(pWav);
+ ma__free_from_callbacks(pWav, &pEncoder->config.allocationCallbacks);
}
-#ifndef MA_NO_STDIO
-#include
-#if !defined(_MSC_VER) && !defined(__DMC__)
-#include /* For strcasecmp(). */
-#endif
-
-const char* ma_path_file_name(const char* path)
+ma_uint64 ma_encoder__on_write_pcm_frames_wav(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount)
{
- const char* fileName;
+ drwav* pWav;
- if (path == NULL) {
- return NULL;
- }
+ MA_ASSERT(pEncoder != NULL);
- fileName = path;
+ pWav = (drwav*)pEncoder->pInternalEncoder;
+ MA_ASSERT(pWav != NULL);
- /* We just loop through the path until we find the last slash. */
- while (path[0] != '\0') {
- if (path[0] == '/' || path[0] == '\\') {
- fileName = path;
- }
+ return drwav_write_pcm_frames(pWav, frameCount, pFramesIn);
+}
+#endif
- path += 1;
- }
+ma_encoder_config ma_encoder_config_init(ma_resource_format resourceFormat, ma_format format, ma_uint32 channels, ma_uint32 sampleRate)
+{
+ ma_encoder_config config;
- /* At this point the file name is sitting on a slash, so just move forward. */
- while (fileName[0] != '\0' && (fileName[0] == '/' || fileName[0] == '\\')) {
- fileName += 1;
- }
+ MA_ZERO_OBJECT(&config);
+ config.resourceFormat = resourceFormat;
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
- return fileName;
+ return config;
}
-const char* ma_path_extension(const char* path)
+ma_result ma_encoder_preinit(const ma_encoder_config* pConfig, ma_encoder* pEncoder)
{
- const char* extension;
- const char* lastOccurance;
+ ma_result result;
- if (path == NULL) {
- path = "";
+ if (pEncoder == NULL) {
+ return MA_INVALID_ARGS;
}
- extension = ma_path_file_name(path);
- lastOccurance = NULL;
+ MA_ZERO_OBJECT(pEncoder);
- /* Just find the last '.' and return. */
- while (extension[0] != '\0') {
- if (extension[0] == '.') {
- extension += 1;
- lastOccurance = extension;
- }
+ if (pConfig == NULL) {
+ return MA_INVALID_ARGS;
+ }
- extension += 1;
+ if (pConfig->format == ma_format_unknown || pConfig->channels == 0 || pConfig->sampleRate == 0) {
+ return MA_INVALID_ARGS;
}
- return (lastOccurance != NULL) ? lastOccurance : extension;
+ pEncoder->config = *pConfig;
+
+ result = ma_allocation_callbacks_init_copy(&pEncoder->config.allocationCallbacks, &pConfig->allocationCallbacks);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+
+ return MA_SUCCESS;
}
-ma_bool32 ma_path_extension_equal(const char* path, const char* extension)
+ma_result ma_encoder_init__internal(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* pUserData, ma_encoder* pEncoder)
{
- const char* ext1;
- const char* ext2;
+ ma_result result = MA_SUCCESS;
- if (path == NULL || extension == NULL) {
- return MA_FALSE;
+ /* This assumes ma_encoder_preinit() has been called prior. */
+ MA_ASSERT(pEncoder != NULL);
+
+ if (onWrite == NULL || onSeek == NULL) {
+ return MA_INVALID_ARGS;
}
- ext1 = extension;
- ext2 = ma_path_extension(path);
+ pEncoder->onWrite = onWrite;
+ pEncoder->onSeek = onSeek;
+ pEncoder->pUserData = pUserData;
-#if defined(_MSC_VER) || defined(__DMC__)
- return _stricmp(ext1, ext2) == 0;
-#else
- return strcasecmp(ext1, ext2) == 0;
-#endif
+ switch (pEncoder->config.resourceFormat)
+ {
+ case ma_resource_format_wav:
+ {
+ #if defined(MA_HAS_WAV)
+ pEncoder->onInit = ma_encoder__on_init_wav;
+ pEncoder->onUninit = ma_encoder__on_uninit_wav;
+ pEncoder->onWritePCMFrames = ma_encoder__on_write_pcm_frames_wav;
+ #else
+ result = MA_NO_BACKEND;
+ #endif
+ } break;
+
+ default:
+ {
+ result = MA_INVALID_ARGS;
+ } break;
+ }
+
+ /* Getting here means we should have our backend callbacks set up. */
+ if (result == MA_SUCCESS) {
+ result = pEncoder->onInit(pEncoder);
+ if (result != MA_SUCCESS) {
+ return result;
+ }
+ }
+
+ return MA_SUCCESS;
}
-size_t ma_decoder__on_read_stdio(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead)
+#ifndef MA_NO_STDIO
+size_t ma_encoder__on_write_stdio(ma_encoder* pEncoder, const void* pBufferIn, size_t bytesToWrite)
{
- return fread(pBufferOut, 1, bytesToRead, (FILE*)pDecoder->pUserData);
+ return fwrite(pBufferIn, 1, bytesToWrite, (FILE*)pEncoder->pFile);
}
-ma_bool32 ma_decoder__on_seek_stdio(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin)
+ma_bool32 ma_encoder__on_seek_stdio(ma_encoder* pEncoder, int byteOffset, ma_seek_origin origin)
{
- return fseek((FILE*)pDecoder->pUserData, byteOffset, (origin == ma_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0;
+ return fseek((FILE*)pEncoder->pFile, byteOffset, (origin == ma_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0;
}
-ma_result ma_decoder__preinit_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_encoder_init_file(const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder)
{
+ ma_result result;
FILE* pFile;
- if (pDecoder == NULL) {
- return MA_INVALID_ARGS;
+ result = ma_encoder_preinit(pConfig, pEncoder);
+ if (result != MA_SUCCESS) {
+ return result;
}
- ma_zero_object(pDecoder);
-
- if (pFilePath == NULL || pFilePath[0] == '\0') {
- return MA_INVALID_ARGS;
+ /* Now open the file. If this fails we don't need to uninitialize the encoder. */
+ result = ma_fopen(&pFile, pFilePath, "wb");
+ if (pFile == NULL) {
+ return result;
}
-#if defined(_MSC_VER) && _MSC_VER >= 1400
- if (fopen_s(&pFile, pFilePath, "rb") != 0) {
- return MA_ERROR;
+ pEncoder->pFile = pFile;
+
+ return ma_encoder_init__internal(ma_encoder__on_write_stdio, ma_encoder__on_seek_stdio, NULL, pEncoder);
+}
+
+ma_result ma_encoder_init_file_w(const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder)
+{
+ ma_result result;
+ FILE* pFile;
+
+ result = ma_encoder_preinit(pConfig, pEncoder);
+ if (result != MA_SUCCESS) {
+ return result;
}
-#else
- pFile = fopen(pFilePath, "rb");
- if (pFile == NULL) {
- return MA_ERROR;
+
+ /* Now open the file. If this fails we don't need to uninitialize the encoder. */
+ result = ma_wfopen(&pFile, pFilePath, L"wb", &pEncoder->config.allocationCallbacks);
+ if (pFile != NULL) {
+ return result;
}
-#endif
- /* We need to manually set the user data so the calls to ma_decoder__on_seek_stdio() succeed. */
- pDecoder->pUserData = pFile;
+ pEncoder->pFile = pFile;
- (void)pConfig;
- return MA_SUCCESS;
+ return ma_encoder_init__internal(ma_encoder__on_write_stdio, ma_encoder__on_seek_stdio, NULL, pEncoder);
}
+#endif
-ma_result ma_decoder_init_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_encoder_init(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* pUserData, const ma_encoder_config* pConfig, ma_encoder* pEncoder)
{
- ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder); /* This sets pDecoder->pUserData to a FILE*. */
+ ma_result result;
+
+ result = ma_encoder_preinit(pConfig, pEncoder);
if (result != MA_SUCCESS) {
return result;
}
- /* WAV */
- if (ma_path_extension_equal(pFilePath, "wav")) {
- result = ma_decoder_init_wav(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
- if (result == MA_SUCCESS) {
- return MA_SUCCESS;
- }
+ return ma_encoder_init__internal(onWrite, onSeek, pUserData, pEncoder);
+}
- ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start);
+
+void ma_encoder_uninit(ma_encoder* pEncoder)
+{
+ if (pEncoder == NULL) {
+ return;
}
- /* FLAC */
- if (ma_path_extension_equal(pFilePath, "flac")) {
- result = ma_decoder_init_flac(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
- if (result == MA_SUCCESS) {
- return MA_SUCCESS;
- }
+ if (pEncoder->onUninit) {
+ pEncoder->onUninit(pEncoder);
+ }
- ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start);
+#ifndef MA_NO_STDIO
+ /* If we have a file handle, close it. */
+ if (pEncoder->onWrite == ma_encoder__on_write_stdio) {
+ fclose((FILE*)pEncoder->pFile);
}
+#endif
+}
+
+
+ma_uint64 ma_encoder_write_pcm_frames(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount)
+{
+ if (pEncoder == NULL || pFramesIn == NULL) {
+ return 0;
+ }
+
+ return pEncoder->onWritePCMFrames(pEncoder, pFramesIn, frameCount);
+}
+#endif /* MA_NO_ENCODING */
+
- /* MP3 */
- if (ma_path_extension_equal(pFilePath, "mp3")) {
- result = ma_decoder_init_mp3(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
- if (result == MA_SUCCESS) {
- return MA_SUCCESS;
- }
- ma_decoder__on_seek_stdio(pDecoder, 0, ma_seek_origin_start);
- }
+/**************************************************************************************************************************************************************
- /* Trial and error. */
- return ma_decoder_init(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
-}
+Generation
-ma_result ma_decoder_init_file_wav(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+**************************************************************************************************************************************************************/
+ma_waveform_config ma_waveform_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_waveform_type type, double amplitude, double frequency)
{
- ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
- }
+ ma_waveform_config config;
- return ma_decoder_init_wav(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+ MA_ZERO_OBJECT(&config);
+ config.format = format;
+ config.channels = channels;
+ config.sampleRate = sampleRate;
+ config.type = type;
+ config.amplitude = amplitude;
+ config.frequency = frequency;
+
+ return config;
}
-ma_result ma_decoder_init_file_flac(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_waveform_init(const ma_waveform_config* pConfig, ma_waveform* pWaveform)
{
- ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
+ if (pWaveform == NULL) {
+ return MA_INVALID_ARGS;
}
- return ma_decoder_init_flac(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+ MA_ZERO_OBJECT(pWaveform);
+ pWaveform->config = *pConfig;
+ pWaveform->advance = 1.0 / pWaveform->config.sampleRate;
+ pWaveform->time = 0;
+
+ return MA_SUCCESS;
}
-ma_result ma_decoder_init_file_vorbis(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_waveform_set_amplitude(ma_waveform* pWaveform, double amplitude)
{
- ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
+ if (pWaveform == NULL) {
+ return MA_INVALID_ARGS;
}
- return ma_decoder_init_vorbis(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+ pWaveform->config.amplitude = amplitude;
+ return MA_SUCCESS;
}
-ma_result ma_decoder_init_file_mp3(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+ma_result ma_waveform_set_frequency(ma_waveform* pWaveform, double frequency)
{
- ma_result result = ma_decoder__preinit_file(pFilePath, pConfig, pDecoder);
- if (result != MA_SUCCESS) {
- return result;
+ if (pWaveform == NULL) {
+ return MA_INVALID_ARGS;
}
- return ma_decoder_init_mp3(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder);
+ pWaveform->config.frequency = frequency;
+ return MA_SUCCESS;
}
-#endif
-ma_result ma_decoder_uninit(ma_decoder* pDecoder)
+ma_result ma_waveform_set_sample_rate(ma_waveform* pWaveform, ma_uint32 sampleRate)
{
- if (pDecoder == NULL) {
+ if (pWaveform == NULL) {
return MA_INVALID_ARGS;
}
- if (pDecoder->onUninit) {
- pDecoder->onUninit(pDecoder);
- }
+ pWaveform->advance = 1.0 / sampleRate;
+ return MA_SUCCESS;
+}
-#ifndef MA_NO_STDIO
- /* If we have a file handle, close it. */
- if (pDecoder->onRead == ma_decoder__on_read_stdio) {
- fclose((FILE*)pDecoder->pUserData);
- }
-#endif
+static float ma_waveform_sine_f32(double time, double frequency, double amplitude)
+{
+ return (float)(ma_sin(MA_TAU_D * time * frequency) * amplitude);
+}
- return MA_SUCCESS;
+static ma_int16 ma_waveform_sine_s16(double time, double frequency, double amplitude)
+{
+ return ma_pcm_sample_f32_to_s16(ma_waveform_sine_f32(time, frequency, amplitude));
}
-ma_uint64 ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder)
+static float ma_waveform_square_f32(double time, double frequency, double amplitude)
{
- if (pDecoder == NULL) {
- return 0;
+ double t = time * frequency;
+ double f = t - (ma_uint64)t;
+ double r;
+
+ if (f < 0.5) {
+ r = amplitude;
+ } else {
+ r = -amplitude;
}
- if (pDecoder->onGetLengthInPCMFrames) {
- return pDecoder->onGetLengthInPCMFrames(pDecoder);
- }
+ return (float)r;
+}
- return 0;
+static ma_int16 ma_waveform_square_s16(double time, double frequency, double amplitude)
+{
+ return ma_pcm_sample_f32_to_s16(ma_waveform_square_f32(time, frequency, amplitude));
}
-ma_uint64 ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount)
+static float ma_waveform_triangle_f32(double time, double frequency, double amplitude)
{
- if (pDecoder == NULL) {
- return 0;
- }
+ double t = time * frequency;
+ double f = t - (ma_uint64)t;
+ double r;
- return ma_pcm_converter_read(&pDecoder->dsp, pFramesOut, frameCount);
+ r = 2 * ma_abs(2 * (f - 0.5)) - 1;
+
+ return (float)(r * amplitude);
}
-ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex)
+static ma_int16 ma_waveform_triangle_s16(double time, double frequency, double amplitude)
{
- if (pDecoder == NULL) {
- return 0;
- }
+ return ma_pcm_sample_f32_to_s16(ma_waveform_triangle_f32(time, frequency, amplitude));
+}
- if (pDecoder->onSeekToPCMFrame) {
- return pDecoder->onSeekToPCMFrame(pDecoder, frameIndex);
- }
+static float ma_waveform_sawtooth_f32(double time, double frequency, double amplitude)
+{
+ double t = time * frequency;
+ double f = t - (ma_uint64)t;
+ double r;
- /* Should never get here, but if we do it means onSeekToPCMFrame was not set by the backend. */
- return MA_INVALID_ARGS;
+ r = 2 * (f - 0.5);
+
+ return (float)(r * amplitude);
}
+static ma_int16 ma_waveform_sawtooth_s16(double time, double frequency, double amplitude)
+{
+ return ma_pcm_sample_f32_to_s16(ma_waveform_sawtooth_f32(time, frequency, amplitude));
+}
-ma_result ma_decoder__full_decode_and_uninit(ma_decoder* pDecoder, ma_decoder_config* pConfigOut, ma_uint64* pFrameCountOut, void** ppPCMFramesOut)
+static void ma_waveform_read_pcm_frames__sine(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount)
{
- ma_uint64 totalFrameCount;
- ma_uint64 bpf;
- ma_uint64 dataCapInFrames;
- void* pPCMFramesOut;
+ ma_uint64 iFrame;
+ ma_uint64 iChannel;
+ ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format);
+ ma_uint32 bpf = bps * pWaveform->config.channels;
- ma_assert(pDecoder != NULL);
-
- totalFrameCount = 0;
- bpf = ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels);
+ MA_ASSERT(pWaveform != NULL);
+ MA_ASSERT(pFramesOut != NULL);
- /* The frame count is unknown until we try reading. Thus, we just run in a loop. */
- dataCapInFrames = 0;
- pPCMFramesOut = NULL;
- for (;;) {
- ma_uint64 frameCountToTryReading;
- ma_uint64 framesJustRead;
+ if (pWaveform->config.format == ma_format_f32) {
+ float* pFramesOutF32 = (float*)pFramesOut;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_waveform_sine_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
- /* Make room if there's not enough. */
- if (totalFrameCount == dataCapInFrames) {
- void* pNewPCMFramesOut;
- ma_uint64 newDataCapInFrames = dataCapInFrames*2;
- if (newDataCapInFrames == 0) {
- newDataCapInFrames = 4096;
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s;
}
+ }
+ } else if (pWaveform->config.format == ma_format_s16) {
+ ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_int16 s = ma_waveform_sine_s16(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
- if ((newDataCapInFrames * bpf) > MA_SIZE_MAX) {
- ma_free(pPCMFramesOut);
- return MA_TOO_LARGE;
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s;
+ }
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_waveform_sine_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
+
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none);
}
+ }
+ }
+}
+static void ma_waveform_read_pcm_frames__square(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount)
+{
+ ma_uint64 iFrame;
+ ma_uint64 iChannel;
+ ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format);
+ ma_uint32 bpf = bps * pWaveform->config.channels;
- pNewPCMFramesOut = (void*)ma_realloc(pPCMFramesOut, (size_t)(newDataCapInFrames * bpf));
- if (pNewPCMFramesOut == NULL) {
- ma_free(pPCMFramesOut);
- return MA_OUT_OF_MEMORY;
+ MA_ASSERT(pWaveform != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+
+ if (pWaveform->config.format == ma_format_f32) {
+ float* pFramesOutF32 = (float*)pFramesOut;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_waveform_square_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
+
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s;
}
+ }
+ } else if (pWaveform->config.format == ma_format_s16) {
+ ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_int16 s = ma_waveform_square_s16(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
- dataCapInFrames = newDataCapInFrames;
- pPCMFramesOut = pNewPCMFramesOut;
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s;
+ }
}
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_waveform_square_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
- frameCountToTryReading = dataCapInFrames - totalFrameCount;
- ma_assert(frameCountToTryReading > 0);
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none);
+ }
+ }
+ }
+}
- framesJustRead = ma_decoder_read_pcm_frames(pDecoder, (ma_uint8*)pPCMFramesOut + (totalFrameCount * bpf), frameCountToTryReading);
- totalFrameCount += framesJustRead;
+static void ma_waveform_read_pcm_frames__triangle(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount)
+{
+ ma_uint64 iFrame;
+ ma_uint64 iChannel;
+ ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format);
+ ma_uint32 bpf = bps * pWaveform->config.channels;
- if (framesJustRead < frameCountToTryReading) {
- break;
+ MA_ASSERT(pWaveform != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+
+ if (pWaveform->config.format == ma_format_f32) {
+ float* pFramesOutF32 = (float*)pFramesOut;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_waveform_triangle_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
+
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s;
+ }
}
- }
+ } else if (pWaveform->config.format == ma_format_s16) {
+ ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_int16 s = ma_waveform_triangle_s16(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
-
- if (pConfigOut != NULL) {
- pConfigOut->format = pDecoder->outputFormat;
- pConfigOut->channels = pDecoder->outputChannels;
- pConfigOut->sampleRate = pDecoder->outputSampleRate;
- ma_channel_map_copy(pConfigOut->channelMap, pDecoder->outputChannelMap, pDecoder->outputChannels);
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s;
+ }
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_waveform_triangle_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
+
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none);
+ }
+ }
}
+}
- if (ppPCMFramesOut != NULL) {
- *ppPCMFramesOut = pPCMFramesOut;
+static void ma_waveform_read_pcm_frames__sawtooth(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount)
+{
+ ma_uint64 iFrame;
+ ma_uint64 iChannel;
+ ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format);
+ ma_uint32 bpf = bps * pWaveform->config.channels;
+
+ MA_ASSERT(pWaveform != NULL);
+ MA_ASSERT(pFramesOut != NULL);
+
+ if (pWaveform->config.format == ma_format_f32) {
+ float* pFramesOutF32 = (float*)pFramesOut;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_waveform_sawtooth_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
+
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s;
+ }
+ }
+ } else if (pWaveform->config.format == ma_format_s16) {
+ ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut;
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_int16 s = ma_waveform_sawtooth_s16(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
+
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s;
+ }
+ }
} else {
- ma_free(pPCMFramesOut);
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_waveform_sawtooth_f32(pWaveform->time, pWaveform->config.frequency, pWaveform->config.amplitude);
+ pWaveform->time += pWaveform->advance;
+
+ for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) {
+ ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none);
+ }
+ }
}
+}
- if (pFrameCountOut != NULL) {
- *pFrameCountOut = totalFrameCount;
+ma_uint64 ma_waveform_read_pcm_frames(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount)
+{
+ if (pWaveform == NULL) {
+ return 0;
}
- ma_decoder_uninit(pDecoder);
- return MA_SUCCESS;
+ if (pFramesOut != NULL) {
+ switch (pWaveform->config.type)
+ {
+ case ma_waveform_type_sine:
+ {
+ ma_waveform_read_pcm_frames__sine(pWaveform, pFramesOut, frameCount);
+ } break;
+
+ case ma_waveform_type_square:
+ {
+ ma_waveform_read_pcm_frames__square(pWaveform, pFramesOut, frameCount);
+ } break;
+
+ case ma_waveform_type_triangle:
+ {
+ ma_waveform_read_pcm_frames__triangle(pWaveform, pFramesOut, frameCount);
+ } break;
+
+ case ma_waveform_type_sawtooth:
+ {
+ ma_waveform_read_pcm_frames__sawtooth(pWaveform, pFramesOut, frameCount);
+ } break;
+
+ default: return 0;
+ }
+ } else {
+ pWaveform->time += pWaveform->advance * (ma_int64)frameCount; /* Cast to int64 required for VC6. Won't affect anything in practice. */
+ }
+
+ return frameCount;
}
-#ifndef MA_NO_STDIO
-ma_result ma_decode_file(const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut)
+
+ma_noise_config ma_noise_config_init(ma_format format, ma_uint32 channels, ma_noise_type type, ma_int32 seed, double amplitude)
{
- ma_decoder_config config;
- ma_decoder decoder;
- ma_result result;
+ ma_noise_config config;
+ MA_ZERO_OBJECT(&config);
- if (pFrameCountOut != NULL) {
- *pFrameCountOut = 0;
+ config.format = format;
+ config.channels = channels;
+ config.type = type;
+ config.seed = seed;
+ config.amplitude = amplitude;
+
+ if (config.seed == 0) {
+ config.seed = MA_DEFAULT_LCG_SEED;
}
- if (ppPCMFramesOut != NULL) {
- *ppPCMFramesOut = NULL;
+
+ return config;
+}
+
+ma_result ma_noise_init(const ma_noise_config* pConfig, ma_noise* pNoise)
+{
+ if (pNoise == NULL) {
+ return MA_INVALID_ARGS;
}
- if (pFilePath == NULL) {
+ MA_ZERO_OBJECT(pNoise);
+
+ if (pConfig == NULL) {
return MA_INVALID_ARGS;
}
- config = ma_decoder_config_init_copy(pConfig);
-
- result = ma_decoder_init_file(pFilePath, &config, &decoder);
- if (result != MA_SUCCESS) {
- return result;
+ pNoise->config = *pConfig;
+ ma_lcg_seed(&pNoise->lcg, pConfig->seed);
+
+ if (pNoise->config.type == ma_noise_type_pink) {
+ ma_uint32 iChannel;
+ for (iChannel = 0; iChannel < pConfig->channels; iChannel += 1) {
+ pNoise->state.pink.accumulation[iChannel] = 0;
+ pNoise->state.pink.counter[iChannel] = 1;
+ }
+ }
+
+ if (pNoise->config.type == ma_noise_type_brownian) {
+ ma_uint32 iChannel;
+ for (iChannel = 0; iChannel < pConfig->channels; iChannel += 1) {
+ pNoise->state.brownian.accumulation[iChannel] = 0;
+ }
+ }
+
+ return MA_SUCCESS;
+}
+
+static MA_INLINE float ma_noise_f32_white(ma_noise* pNoise)
+{
+ return (float)(ma_lcg_rand_f64(&pNoise->lcg) * pNoise->config.amplitude);
+}
+
+static MA_INLINE ma_int16 ma_noise_s16_white(ma_noise* pNoise)
+{
+ return ma_pcm_sample_f32_to_s16(ma_noise_f32_white(pNoise));
+}
+
+static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__white(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount)
+{
+ ma_uint64 iFrame;
+ ma_uint32 iChannel;
+
+ if (pNoise->config.format == ma_format_f32) {
+ float* pFramesOutF32 = (float*)pFramesOut;
+ if (pNoise->config.duplicateChannels) {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_noise_f32_white(pNoise);
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = s;
+ }
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = ma_noise_f32_white(pNoise);
+ }
+ }
+ }
+ } else if (pNoise->config.format == ma_format_s16) {
+ ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut;
+ if (pNoise->config.duplicateChannels) {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_int16 s = ma_noise_s16_white(pNoise);
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = s;
+ }
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = ma_noise_s16_white(pNoise);
+ }
+ }
+ }
+ } else {
+ ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format);
+ ma_uint32 bpf = bps * pNoise->config.channels;
+
+ if (pNoise->config.duplicateChannels) {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_noise_f32_white(pNoise);
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none);
+ }
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ float s = ma_noise_f32_white(pNoise);
+ ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none);
+ }
+ }
+ }
}
- return ma_decoder__full_decode_and_uninit(&decoder, pConfig, pFrameCountOut, ppPCMFramesOut);
+ return frameCount;
}
-#endif
-ma_result ma_decode_memory(const void* pData, size_t dataSize, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut)
+
+static MA_INLINE unsigned int ma_tzcnt32(unsigned int x)
{
- ma_decoder_config config;
- ma_decoder decoder;
- ma_result result;
+ unsigned int n;
- if (pFrameCountOut != NULL) {
- *pFrameCountOut = 0;
- }
- if (ppPCMFramesOut != NULL) {
- *ppPCMFramesOut = NULL;
+ /* Special case for odd numbers since they should happen about half the time. */
+ if (x & 0x1) {
+ return 0;
}
- if (pData == NULL || dataSize == 0) {
- return MA_INVALID_ARGS;
+ if (x == 0) {
+ return sizeof(x) << 3;
}
- config = ma_decoder_config_init_copy(pConfig);
-
- result = ma_decoder_init_memory(pData, dataSize, &config, &decoder);
- if (result != MA_SUCCESS) {
- return result;
- }
+ n = 1;
+ if ((x & 0x0000FFFF) == 0) { x >>= 16; n += 16; }
+ if ((x & 0x000000FF) == 0) { x >>= 8; n += 8; }
+ if ((x & 0x0000000F) == 0) { x >>= 4; n += 4; }
+ if ((x & 0x00000003) == 0) { x >>= 2; n += 2; }
+ n -= x & 0x00000001;
- return ma_decoder__full_decode_and_uninit(&decoder, pConfig, pFrameCountOut, ppPCMFramesOut);
+ return n;
}
-#endif /* MA_NO_DECODING */
+/*
+Pink noise generation based on Tonic (public domain) with modifications. https://github.com/TonicAudio/Tonic/blob/master/src/Tonic/Noise.h
+This is basically _the_ reference for pink noise from what I've found: http://www.firstpr.com.au/dsp/pink-noise/
+*/
+static MA_INLINE float ma_noise_f32_pink(ma_noise* pNoise, ma_uint32 iChannel)
+{
+ double result;
+ double binPrev;
+ double binNext;
+ unsigned int ibin;
+ ibin = ma_tzcnt32(pNoise->state.pink.counter[iChannel]) & (ma_countof(pNoise->state.pink.bin[0]) - 1);
+ binPrev = pNoise->state.pink.bin[iChannel][ibin];
+ binNext = ma_lcg_rand_f64(&pNoise->lcg);
+ pNoise->state.pink.bin[iChannel][ibin] = binNext;
-/**************************************************************************************************************************************************************
+ pNoise->state.pink.accumulation[iChannel] += (binNext - binPrev);
+ pNoise->state.pink.counter[iChannel] += 1;
-Generation
+ result = (ma_lcg_rand_f64(&pNoise->lcg) + pNoise->state.pink.accumulation[iChannel]);
+ result /= 10;
-**************************************************************************************************************************************************************/
-ma_result ma_sine_wave_init(double amplitude, double periodsPerSecond, ma_uint32 sampleRate, ma_sine_wave* pSineWave)
+ return (float)(result * pNoise->config.amplitude);
+}
+
+static MA_INLINE ma_int16 ma_noise_s16_pink(ma_noise* pNoise, ma_uint32 iChannel)
{
- if (pSineWave == NULL) {
- return MA_INVALID_ARGS;
- }
- ma_zero_object(pSineWave);
+ return ma_pcm_sample_f32_to_s16(ma_noise_f32_pink(pNoise, iChannel));
+}
- if (amplitude == 0 || periodsPerSecond == 0) {
- return MA_INVALID_ARGS;
- }
+static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__pink(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount)
+{
+ ma_uint64 iFrame;
+ ma_uint32 iChannel;
- if (amplitude > 1) {
- amplitude = 1;
- }
- if (amplitude < -1) {
- amplitude = -1;
+ if (pNoise->config.format == ma_format_f32) {
+ float* pFramesOutF32 = (float*)pFramesOut;
+ if (pNoise->config.duplicateChannels) {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_noise_f32_pink(pNoise, 0);
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = s;
+ }
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = ma_noise_f32_pink(pNoise, iChannel);
+ }
+ }
+ }
+ } else if (pNoise->config.format == ma_format_s16) {
+ ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut;
+ if (pNoise->config.duplicateChannels) {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_int16 s = ma_noise_s16_pink(pNoise, 0);
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = s;
+ }
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = ma_noise_s16_pink(pNoise, iChannel);
+ }
+ }
+ }
+ } else {
+ ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format);
+ ma_uint32 bpf = bps * pNoise->config.channels;
+
+ if (pNoise->config.duplicateChannels) {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_noise_f32_pink(pNoise, 0);
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none);
+ }
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ float s = ma_noise_f32_pink(pNoise, iChannel);
+ ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none);
+ }
+ }
+ }
}
- pSineWave->amplitude = amplitude;
- pSineWave->periodsPerSecond = periodsPerSecond;
- pSineWave->delta = MA_TAU_D / sampleRate;
- pSineWave->time = 0;
-
- return MA_SUCCESS;
+ return frameCount;
}
-ma_uint64 ma_sine_wave_read_f32(ma_sine_wave* pSineWave, ma_uint64 count, float* pSamples)
+
+static MA_INLINE float ma_noise_f32_brownian(ma_noise* pNoise, ma_uint32 iChannel)
{
- return ma_sine_wave_read_f32_ex(pSineWave, count, 1, ma_stream_layout_interleaved, &pSamples);
+ double result;
+
+ result = (ma_lcg_rand_f64(&pNoise->lcg) + pNoise->state.brownian.accumulation[iChannel]);
+ result /= 1.005; /* Don't escape the -1..1 range on average. */
+
+ pNoise->state.brownian.accumulation[iChannel] = result;
+ result /= 20;
+
+ return (float)(result * pNoise->config.amplitude);
}
-ma_uint64 ma_sine_wave_read_f32_ex(ma_sine_wave* pSineWave, ma_uint64 frameCount, ma_uint32 channels, ma_stream_layout layout, float** ppFrames)
+static MA_INLINE ma_int16 ma_noise_s16_brownian(ma_noise* pNoise, ma_uint32 iChannel)
{
- if (pSineWave == NULL) {
- return 0;
- }
-
- if (ppFrames != NULL) {
- ma_uint64 iFrame;
- for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
- ma_uint32 iChannel;
+ return ma_pcm_sample_f32_to_s16(ma_noise_f32_brownian(pNoise, iChannel));
+}
- float s = (float)(sin(pSineWave->time * pSineWave->periodsPerSecond) * pSineWave->amplitude);
- pSineWave->time += pSineWave->delta;
+static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__brownian(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount)
+{
+ ma_uint64 iFrame;
+ ma_uint32 iChannel;
- if (layout == ma_stream_layout_interleaved) {
- for (iChannel = 0; iChannel < channels; iChannel += 1) {
- ppFrames[0][iFrame*channels + iChannel] = s;
+ if (pNoise->config.format == ma_format_f32) {
+ float* pFramesOutF32 = (float*)pFramesOut;
+ if (pNoise->config.duplicateChannels) {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_noise_f32_brownian(pNoise, 0);
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = s;
}
- } else {
- for (iChannel = 0; iChannel < channels; iChannel += 1) {
- ppFrames[iChannel][iFrame] = s;
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutF32[iFrame*pNoise->config.channels + iChannel] = ma_noise_f32_brownian(pNoise, iChannel);
+ }
+ }
+ }
+ } else if (pNoise->config.format == ma_format_s16) {
+ ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut;
+ if (pNoise->config.duplicateChannels) {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ ma_int16 s = ma_noise_s16_brownian(pNoise, 0);
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = s;
+ }
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ pFramesOutS16[iFrame*pNoise->config.channels + iChannel] = ma_noise_s16_brownian(pNoise, iChannel);
}
}
}
} else {
- pSineWave->time += pSineWave->delta * (ma_int64)frameCount; /* Cast to int64 required for VC6. */
+ ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format);
+ ma_uint32 bpf = bps * pNoise->config.channels;
+
+ if (pNoise->config.duplicateChannels) {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ float s = ma_noise_f32_brownian(pNoise, 0);
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none);
+ }
+ }
+ } else {
+ for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+ for (iChannel = 0; iChannel < pNoise->config.channels; iChannel += 1) {
+ float s = ma_noise_f32_brownian(pNoise, iChannel);
+ ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none);
+ }
+ }
+ }
}
return frameCount;
}
+ma_uint64 ma_noise_read_pcm_frames(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount)
+{
+ if (pNoise == NULL) {
+ return 0;
+ }
+
+ if (pNoise->config.type == ma_noise_type_white) {
+ return ma_noise_read_pcm_frames__white(pNoise, pFramesOut, frameCount);
+ }
+
+ if (pNoise->config.type == ma_noise_type_pink) {
+ return ma_noise_read_pcm_frames__pink(pNoise, pFramesOut, frameCount);
+ }
+
+ if (pNoise->config.type == ma_noise_type_brownian) {
+ return ma_noise_read_pcm_frames__brownian(pNoise, pFramesOut, frameCount);
+ }
+
+ /* Should never get here. */
+ MA_ASSERT(MA_FALSE);
+ return 0;
+}
+
+/* End globally disabled warnings. */
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
@@ -33005,36 +42065,253 @@ ma_uint64 ma_sine_wave_read_f32_ex(ma_sine_wave* pSineWave, ma_uint64 frameCount
#endif /* MINIAUDIO_IMPLEMENTATION */
/*
-BACKEND IMPLEMENTATION GUIDLINES
-================================
-Context
--------
-- Run-time linking if possible.
-- Set whether or not it's an asynchronous backend
+MAJOR CHANGES IN VERSION 0.9
+============================
+Version 0.9 includes major API changes, centered mostly around full-duplex and the rebrand to "miniaudio". Before I go into
+detail about the major changes I would like to apologize. I know it's annoying dealing with breaking API changes, but I think
+it's best to get these changes out of the way now while the library is still relatively young and unknown.
-Device
-------
-- If a full-duplex device is requested and the backend does not support full duplex devices, have ma_device_init__[backend]()
- return MA_DEVICE_TYPE_NOT_SUPPORTED.
-- If exclusive mode is requested, but the backend does not support it, return MA_SHARE_MODE_NOT_SUPPORTED. If practical, try
- not to fall back to a different share mode - give the client exactly what they asked for. Some backends, such as ALSA, may
- not have a practical way to distinguish between the two.
-- If pDevice->usingDefault* is set, prefer the device's native value if the backend supports it. Otherwise use the relevant
- value from the config.
-- If the configs buffer size in frames is 0, set it based on the buffer size in milliseconds, keeping in mind to handle the
- case when the default sample rate is being used where practical.
-- Backends must set the following members of pDevice before returning successfully from ma_device_init__[backend]():
- - internalFormat
- - internalChannels
- - internalSampleRate
- - internalChannelMap
- - bufferSizeInFrames
- - periods
+There's been a lot of refactoring with this release so there's a good chance a few bugs have been introduced. I apologize in
+advance for this. You may want to hold off on upgrading for the short term if you're worried. If mini_al v0.8.14 works for
+you, and you don't need full-duplex support, you can avoid upgrading (though you won't be getting future bug fixes).
+
+
+Rebranding to "miniaudio"
+-------------------------
+The decision was made to rename mini_al to miniaudio. Don't worry, it's the same project. The reason for this is simple:
+
+1) Having the word "audio" in the title makes it immediately clear that the library is related to audio; and
+2) I don't like the look of the underscore.
+
+This rebrand has necessitated a change in namespace from "mal" to "ma". I know this is annoying, and I apologize, but it's
+better to get this out of the road now rather than later. Also, since there are necessary API changes for full-duplex support
+I think it's better to just get the namespace change over and done with at the same time as the full-duplex changes. I'm hoping
+this will be the last of the major API changes. Fingers crossed!
+
+The implementation define is now "#define MINIAUDIO_IMPLEMENTATION". You can also use "#define MA_IMPLEMENTATION" if that's
+your preference.
+
+
+Full-Duplex Support
+-------------------
+The major feature added to version 0.9 is full-duplex. This has necessitated a few API changes.
+
+1) The data callback has now changed. Previously there was one type of callback for playback and another for capture. I wanted
+ to avoid a third callback just for full-duplex so the decision was made to break this API and unify the callbacks. Now,
+ there is just one callback which is the same for all three modes (playback, capture, duplex). The new callback looks like
+ the following:
+
+ void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount);
+
+ This callback allows you to move data straight out of the input buffer and into the output buffer in full-duplex mode. In
+ playback-only mode, pInput will be null. Likewise, pOutput will be null in capture-only mode. The sample count is no longer
+ returned from the callback since it's not necessary for miniaudio anymore.
+
+2) The device config needed to change in order to support full-duplex. Full-duplex requires the ability to allow the client
+ to choose a different PCM format for the playback and capture sides. The old ma_device_config object simply did not allow
+ this and needed to change. With these changes you now specify the device ID, format, channels, channel map and share mode
+ on a per-playback and per-capture basis (see example below). The sample rate must be the same for playback and capture.
+
+ Since the device config API has changed I have also decided to take the opportunity to simplify device initialization. Now,
+ the device ID, device type and callback user data are set in the config. ma_device_init() is now simplified down to taking
+ just the context, device config and a pointer to the device object being initialized. The rationale for this change is that
+ it just makes more sense to me that these are set as part of the config like everything else.
+
+ Example device initialization:
+
+ ma_device_config config = ma_device_config_init(ma_device_type_duplex); // Or ma_device_type_playback or ma_device_type_capture.
+ config.playback.pDeviceID = &myPlaybackDeviceID; // Or NULL for the default playback device.
+ config.playback.format = ma_format_f32;
+ config.playback.channels = 2;
+ config.capture.pDeviceID = &myCaptureDeviceID; // Or NULL for the default capture device.
+ config.capture.format = ma_format_s16;
+ config.capture.channels = 1;
+ config.sampleRate = 44100;
+ config.dataCallback = data_callback;
+ config.pUserData = &myUserData;
+
+ result = ma_device_init(&myContext, &config, &device);
+ if (result != MA_SUCCESS) {
+ ... handle error ...
+ }
+
+ Note that the "onDataCallback" member of ma_device_config has been renamed to "dataCallback". Also, "onStopCallback" has
+ been renamed to "stopCallback".
+
+This is the first pass for full-duplex and there is a known bug. You will hear crackling on the following backends when sample
+rate conversion is required for the playback device:
+ - Core Audio
+ - JACK
+ - AAudio
+ - OpenSL
+ - WebAudio
+
+In addition to the above, not all platforms have been absolutely thoroughly tested simply because I lack the hardware for such
+thorough testing. If you experience a bug, an issue report on GitHub or an email would be greatly appreciated (and a sample
+program that reproduces the issue if possible).
+
+
+Other API Changes
+-----------------
+In addition to the above, the following API changes have been made:
+
+- The log callback is no longer passed to ma_context_config_init(). Instead you need to set it manually after initialization.
+- The onLogCallback member of ma_context_config has been renamed to "logCallback".
+- The log callback now takes a logLevel parameter. The new callback looks like: void log_callback(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message)
+ - You can use ma_log_level_to_string() to convert the logLevel to human readable text if you want to log it.
+- Some APIs have been renamed:
+ - mal_decoder_read() -> ma_decoder_read_pcm_frames()
+ - mal_decoder_seek_to_frame() -> ma_decoder_seek_to_pcm_frame()
+ - mal_sine_wave_read() -> ma_sine_wave_read_f32()
+ - mal_sine_wave_read_ex() -> ma_sine_wave_read_f32_ex()
+- Some APIs have been removed:
+ - mal_device_get_buffer_size_in_bytes()
+ - mal_device_set_recv_callback()
+ - mal_device_set_send_callback()
+ - mal_src_set_input_sample_rate()
+ - mal_src_set_output_sample_rate()
+- Error codes have been rearranged. If you're a binding maintainer you will need to update.
+- The ma_backend enums have been rearranged to priority order. The rationale for this is to simplify automatic backend selection
+ and to make it easier to see the priority. If you're a binding maintainer you will need to update.
+- ma_dsp has been renamed to ma_pcm_converter. The rationale for this change is that I'm expecting "ma_dsp" to conflict with
+ some future planned high-level APIs.
+- For functions that take a pointer/count combo, such as ma_decoder_read_pcm_frames(), the parameter order has changed so that
+ the pointer comes before the count. The rationale for this is to keep it consistent with things like memcpy().
+
+
+Miscellaneous Changes
+---------------------
+The following miscellaneous changes have also been made.
+
+- The AAudio backend has been added for Android 8 and above. This is Android's new "High-Performance Audio" API. (For the
+ record, this is one of the nicest audio APIs out there, just behind the BSD audio APIs).
+- The WebAudio backend has been added. This is based on ScriptProcessorNode. This removes the need for SDL.
+- The SDL and OpenAL backends have been removed. These were originally implemented to add support for platforms for which miniaudio
+ was not explicitly supported. These are no longer needed and have therefore been removed.
+- Device initialization now fails if the requested share mode is not supported. If you ask for exclusive mode, you either get an
+ exclusive mode device, or an error. The rationale for this change is to give the client more control over how to handle cases
+ when the desired shared mode is unavailable.
+- A lock-free ring buffer API has been added. There are two variants of this. "ma_rb" operates on bytes, whereas "ma_pcm_rb"
+  operates on PCM frames.
+- The library is now licensed as a choice of Public Domain (Unlicense) _or_ MIT-0 (No Attribution) which is the same as MIT, but
+ removes the attribution requirement. The rationale for this is to support countries that don't recognize public domain.
*/
/*
REVISION HISTORY
================
+v0.10.0 - 2020-03-07
+ - API CHANGE: Refactor data conversion APIs
+ - ma_format_converter has been removed. Use ma_convert_pcm_frames_format() instead.
+ - ma_channel_router has been replaced with ma_channel_converter.
+ - ma_src has been replaced with ma_resampler
+ - ma_pcm_converter has been replaced with ma_data_converter
+ - API CHANGE: Add support for custom memory allocation callbacks. The following APIs have been updated to take an extra parameter for the allocation
+ callbacks:
+ - ma_malloc()
+ - ma_realloc()
+ - ma_free()
+ - ma_aligned_malloc()
+ - ma_aligned_free()
+ - ma_rb_init() / ma_rb_init_ex()
+ - ma_pcm_rb_init() / ma_pcm_rb_init_ex()
+ - API CHANGE: Simplify latency specification in device configurations. The bufferSizeInFrames and bufferSizeInMilliseconds parameters have been replaced with
+ periodSizeInFrames and periodSizeInMilliseconds respectively. The previous variables defined the size of the entire buffer, whereas the new ones define the
+ size of a period. The following APIs have been removed since they are no longer relevant:
+ - ma_get_default_buffer_size_in_milliseconds()
+ - ma_get_default_buffer_size_in_frames()
+ - API CHANGE: ma_device_set_stop_callback() has been removed. If you require a stop callback, you must now set it via the device config just like the data
+ callback.
+ - API CHANGE: The ma_sine_wave API has been replaced with ma_waveform. The following APIs have been removed:
+ - ma_sine_wave_init()
+ - ma_sine_wave_read_f32()
+ - ma_sine_wave_read_f32_ex()
+ - API CHANGE: ma_convert_frames() has been updated to take an extra parameter which is the size of the output buffer in PCM frames. Parameters have also been
+ reordered.
+ - API CHANGE: ma_convert_frames_ex() has been changed to take a pointer to a ma_data_converter_config object to specify the input and output formats to
+ convert between.
+ - API CHANGE: ma_calculate_frame_count_after_src() has been renamed to ma_calculate_frame_count_after_resampling().
+ - Add support for the following filters:
+ - Biquad (ma_biquad)
+ - First order low-pass (ma_lpf1)
+ - Second order low-pass (ma_lpf2)
+ - Low-pass with configurable order (ma_lpf)
+ - First order high-pass (ma_hpf1)
+ - Second order high-pass (ma_hpf2)
+ - High-pass with configurable order (ma_hpf)
+ - Second order band-pass (ma_bpf2)
+ - Band-pass with configurable order (ma_bpf)
+ - Second order peaking EQ (ma_peak2)
+ - Second order notching (ma_notch2)
+ - Second order low shelf (ma_loshelf2)
+ - Second order high shelf (ma_hishelf2)
+ - Add waveform generation API (ma_waveform) with support for the following:
+ - Sine
+ - Square
+ - Triangle
+ - Sawtooth
+ - Add noise generation API (ma_noise) with support for the following:
+ - White
+ - Pink
+ - Brownian
+ - Add encoding API (ma_encoder). This only supports outputting to WAV files via dr_wav.
+ - Add ma_result_description() which is used to retrieve a human readable description of a given result code.
+ - Result codes have been changed. Binding maintainers will need to update their result code constants.
+ - More meaningful result codes are now returned when a file fails to open.
+ - Internal functions have all been made static where possible.
+ - Fix potential crash when ma_device object's are not aligned to MA_SIMD_ALIGNMENT.
+ - Fix a bug in ma_decoder_get_length_in_pcm_frames() where it was returning the length based on the internal sample rate rather than the output sample rate.
+ - Fix bugs in some backends where the device is not drained properly in ma_device_stop().
+ - Improvements to documentation.
+
+v0.9.10 - 2020-01-15
+ - Fix compilation errors due to #if/#endif mismatches.
+ - WASAPI: Fix a bug where automatic stream routing is being performed for devices that are initialized with an explicit device ID.
+ - iOS: Fix a crash on device uninitialization.
+
+v0.9.9 - 2020-01-09
+ - Fix compilation errors with MinGW.
+ - Fix compilation errors when compiling on Apple platforms.
+ - WASAPI: Add support for disabling hardware offloading.
+ - WASAPI: Add support for disabling automatic stream routing.
+ - Core Audio: Fix bugs in the case where the internal device uses deinterleaved buffers.
+ - Core Audio: Add support for controlling the session category (AVAudioSessionCategory) and options (AVAudioSessionCategoryOptions).
+ - JACK: Fix bug where incorrect ports are connected.
+
+v0.9.8 - 2019-10-07
+ - WASAPI: Fix a potential deadlock when starting a full-duplex device.
+ - WASAPI: Enable automatic resampling by default. Disable with config.wasapi.noAutoConvertSRC.
+ - Core Audio: Fix bugs with automatic stream routing.
+ - Add support for controlling whether or not the content of the output buffer passed in to the data callback is pre-initialized
+ to zero. By default it will be initialized to zero, but this can be changed by setting noPreZeroedOutputBuffer in the device
+ config. Setting noPreZeroedOutputBuffer to true will leave the contents undefined.
+ - Add support for clipping samples after the data callback has returned. This only applies when the playback sample format is
+ configured as ma_format_f32. If you are doing clipping yourself, you can disable this overhead by setting noClip to true in
+ the device config.
+ - Add support for master volume control for devices.
+ - Use ma_device_set_master_volume() to set the volume to a factor between 0 and 1, where 0 is silence and 1 is full volume.
+ - Use ma_device_set_master_gain_db() to set the volume in decibels where 0 is full volume and < 0 reduces the volume.
+ - Fix warnings emitted by GCC when `__inline__` is undefined or defined as nothing.
+
+v0.9.7 - 2019-08-28
+ - Add support for loopback mode (WASAPI only).
+ - To use this, set the device type to ma_device_type_loopback, and then fill out the capture section of the device config.
+ - If you need to capture from a specific output device, set the capture device ID to that of a playback device.
+ - Fix a crash when an error is posted in ma_device_init().
+ - Fix a compilation error when compiling for ARM architectures.
+ - Fix a bug with the audio(4) backend where the device is incorrectly being opened in non-blocking mode.
+ - Fix memory leaks in the Core Audio backend.
+ - Minor refactoring to the WinMM, ALSA, PulseAudio, OSS, audio(4), sndio and null backends.
+
+v0.9.6 - 2019-08-04
+ - Add support for loading decoders using a wchar_t string for file paths.
+ - Don't trigger an assert when ma_device_start() is called on a device that is already started. This will now log a warning
+ and return MA_INVALID_OPERATION. The same applies for ma_device_stop().
+ - Try fixing an issue with PulseAudio taking a long time to start playback.
+ - Fix a bug in ma_convert_frames() and ma_convert_frames_ex().
+ - Fix memory leaks in the WASAPI backend.
+ - Fix a compilation error with Visual Studio 2010.
+
v0.9.5 - 2019-05-21
- Add logging to ma_dlopen() and ma_dlsym().
- Add ma_decoder_get_length_in_pcm_frames().
@@ -33043,7 +42320,7 @@ v0.9.5 - 2019-05-21
v0.9.4 - 2019-05-06
- Add support for C89. With this change, miniaudio should compile clean with GCC/Clang with "-std=c89 -ansi -pedantic" and
- Microsoft copmilers back to VC6. Other compilers should also work, but have not been tested.
+ Microsoft compilers back to VC6. Other compilers should also work, but have not been tested.
v0.9.3 - 2019-04-19
- Fix compiler errors on GCC when compiling with -std=c99.
@@ -33138,7 +42415,7 @@ v0.8.11 - 2018-11-21
v0.8.10 - 2018-10-21
- Core Audio: Fix a hang when uninitializing a device.
- - Fix a bug where an incorrect value is returned from ma_device_stop().
+ - Fix a bug where an incorrect value is returned from mal_device_stop().
v0.8.9 - 2018-09-28
- Fix a bug with the SDL backend where device initialization fails.
@@ -33201,17 +42478,17 @@ v0.8.1 - 2018-07-06
- Fix compilation errors and warnings.
v0.8 - 2018-07-05
- - Changed MA_IMPLEMENTATION to MINI_AL_IMPLEMENTATION for consistency with other libraries. The old
+ - Changed MAL_IMPLEMENTATION to MINI_AL_IMPLEMENTATION for consistency with other libraries. The old
way is still supported for now, but you should update as it may be removed in the future.
- API CHANGE: Replace device enumeration APIs. mal_enumerate_devices() has been replaced with
mal_context_get_devices(). An additional low-level device enumration API has been introduced called
mal_context_enumerate_devices() which uses a callback to report devices.
- API CHANGE: Rename mal_get_sample_size_in_bytes() to mal_get_bytes_per_sample() and add
mal_get_bytes_per_frame().
- - API CHANGE: Replace mal_device_config.preferExclusiveMode with ma_device_config.shareMode.
- - This new config can be set to mal_share_mode_shared (default) or ma_share_mode_exclusive.
+ - API CHANGE: Replace mal_device_config.preferExclusiveMode with mal_device_config.shareMode.
+ - This new config can be set to mal_share_mode_shared (default) or mal_share_mode_exclusive.
- API CHANGE: Remove excludeNullDevice from mal_context_config.alsa.
- - API CHANGE: Rename MA_MAX_SAMPLE_SIZE_IN_BYTES to MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES.
+ - API CHANGE: Rename MAL_MAX_SAMPLE_SIZE_IN_BYTES to MAL_MAX_PCM_SAMPLE_SIZE_IN_BYTES.
- API CHANGE: Change the default channel mapping to the standard Microsoft mapping.
- API CHANGE: Remove backend-specific result codes.
- API CHANGE: Changes to the format conversion APIs (mal_pcm_f32_to_s16(), etc.)
@@ -33271,7 +42548,7 @@ v0.6a - 2018-01-26
v0.6 - 2017-12-08
- API CHANGE: Expose and improve mutex APIs. If you were using the mutex APIs before this version you'll
need to update.
- - API CHANGE: SRC and DSP callbacks now take a pointer to a mal_src and ma_dsp object respectively.
+ - API CHANGE: SRC and DSP callbacks now take a pointer to a mal_src and mal_dsp object respectively.
- API CHANGE: Improvements to event and thread APIs. These changes make these APIs more consistent.
- Add support for SDL and Emscripten.
- Simplify the build system further for when development packages for various backends are not installed.
@@ -33286,7 +42563,7 @@ v0.6 - 2017-12-08
- Warning fixes.
v0.5 - 2017-11-11
- - API CHANGE: The mal_context_init() function now takes a pointer to a ma_context_config object for
+ - API CHANGE: The mal_context_init() function now takes a pointer to a mal_context_config object for
configuring the context. The works in the same kind of way as the device config. The rationale for this
change is to give applications better control over context-level properties, add support for backend-
specific configurations, and support extensibility without breaking the API.
@@ -33390,7 +42667,7 @@ For more information, please refer to
===============================================================================
ALTERNATIVE 2 - MIT No Attribution
===============================================================================
-Copyright 2019 David Reid
+Copyright 2020 David Reid
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
diff --git a/client/miniaudio/dr_flac.h b/client/miniaudio/dr_flac.h
index bc6f9243..38af9876 100644
--- a/client/miniaudio/dr_flac.h
+++ b/client/miniaudio/dr_flac.h
@@ -1,21 +1,131 @@
/*
FLAC audio decoder. Choice of public domain or MIT-0. See license statements at the end of this file.
-dr_flac - v0.11.8 - 2019-05-21
+dr_flac - v0.12.6 - 2020-03-07
David Reid - mackron@gmail.com
*/
+/*
+RELEASE NOTES - v0.12.0
+=======================
+Version 0.12.0 has breaking API changes including changes to the existing API and the removal of deprecated APIs.
+
+
+Improved Client-Defined Memory Allocation
+-----------------------------------------
+The main change with this release is the addition of a more flexible way of implementing custom memory allocation routines. The
+existing system of DRFLAC_MALLOC, DRFLAC_REALLOC and DRFLAC_FREE are still in place and will be used by default when no custom
+allocation callbacks are specified.
+
+To use the new system, you pass in a pointer to a drflac_allocation_callbacks object to drflac_open() and family, like this:
+
+ void* my_malloc(size_t sz, void* pUserData)
+ {
+ return malloc(sz);
+ }
+ void* my_realloc(void* p, size_t sz, void* pUserData)
+ {
+ return realloc(p, sz);
+ }
+ void my_free(void* p, void* pUserData)
+ {
+ free(p);
+ }
+
+ ...
+
+ drflac_allocation_callbacks allocationCallbacks;
+ allocationCallbacks.pUserData = &myData;
+ allocationCallbacks.onMalloc = my_malloc;
+ allocationCallbacks.onRealloc = my_realloc;
+ allocationCallbacks.onFree = my_free;
+ drflac* pFlac = drflac_open_file("my_file.flac", &allocationCallbacks);
+
+The advantage of this new system is that it allows you to specify user data which will be passed in to the allocation routines.
+
+Passing in null for the allocation callbacks object will cause dr_flac to use defaults which is the same as DRFLAC_MALLOC,
+DRFLAC_REALLOC and DRFLAC_FREE and the equivalent of how it worked in previous versions.
+
+Every API that opens a drflac object now takes this extra parameter. These include the following:
+
+ drflac_open()
+ drflac_open_relaxed()
+ drflac_open_with_metadata()
+ drflac_open_with_metadata_relaxed()
+ drflac_open_file()
+ drflac_open_file_with_metadata()
+ drflac_open_memory()
+ drflac_open_memory_with_metadata()
+ drflac_open_and_read_pcm_frames_s32()
+ drflac_open_and_read_pcm_frames_s16()
+ drflac_open_and_read_pcm_frames_f32()
+ drflac_open_file_and_read_pcm_frames_s32()
+ drflac_open_file_and_read_pcm_frames_s16()
+ drflac_open_file_and_read_pcm_frames_f32()
+ drflac_open_memory_and_read_pcm_frames_s32()
+ drflac_open_memory_and_read_pcm_frames_s16()
+ drflac_open_memory_and_read_pcm_frames_f32()
+
+
+
+Optimizations
+-------------
+Seeking performance has been greatly improved. A new binary search based seeking algorithm has been introduced which significantly
+improves performance over the brute force method which was used when no seek table was present. Seek table based seeking also takes
+advantage of the new binary search seeking system to further improve performance there as well. Note that this depends on CRC which
+means it will be disabled when DR_FLAC_NO_CRC is used.
+
+The SSE4.1 pipeline has been cleaned up and optimized. You should see some improvements with decoding speed of 24-bit files in
+particular. 16-bit streams should also see some improvement.
+
+drflac_read_pcm_frames_s16() has been optimized. Previously this sat on top of drflac_read_pcm_frames_s32() and performed its s32
+to s16 conversion in a second pass. This is now all done in a single pass. This includes SSE2 and ARM NEON optimized paths.
+
+A minor optimization has been implemented for drflac_read_pcm_frames_s32(). This will now use an SSE2 optimized pipeline for stereo
+channel reconstruction which is the last part of the decoding process.
+
+The ARM build has seen a few improvements. The CLZ (count leading zeroes) and REV (byte swap) instructions are now used when
+compiling with GCC and Clang which is achieved using inline assembly. The CLZ instruction requires ARM architecture version 5 at
+compile time and the REV instruction requires ARM architecture version 6.
+
+An ARM NEON optimized pipeline has been implemented. To enable this you'll need to add -mfpu=neon to the command line when compiling.
+
+
+Removed APIs
+------------
+The following APIs were deprecated in version 0.11.0 and have been completely removed in version 0.12.0:
+
+ drflac_read_s32() -> drflac_read_pcm_frames_s32()
+ drflac_read_s16() -> drflac_read_pcm_frames_s16()
+ drflac_read_f32() -> drflac_read_pcm_frames_f32()
+ drflac_seek_to_sample() -> drflac_seek_to_pcm_frame()
+ drflac_open_and_decode_s32() -> drflac_open_and_read_pcm_frames_s32()
+ drflac_open_and_decode_s16() -> drflac_open_and_read_pcm_frames_s16()
+ drflac_open_and_decode_f32() -> drflac_open_and_read_pcm_frames_f32()
+ drflac_open_and_decode_file_s32() -> drflac_open_file_and_read_pcm_frames_s32()
+ drflac_open_and_decode_file_s16() -> drflac_open_file_and_read_pcm_frames_s16()
+ drflac_open_and_decode_file_f32() -> drflac_open_file_and_read_pcm_frames_f32()
+ drflac_open_and_decode_memory_s32() -> drflac_open_memory_and_read_pcm_frames_s32()
+ drflac_open_and_decode_memory_s16() -> drflac_open_memory_and_read_pcm_frames_s16()
+    drflac_open_and_decode_memory_f32()      -> drflac_open_memory_and_read_pcm_frames_f32()
+
+Prior versions of dr_flac operated on a per-sample basis whereas now it operates on PCM frames. The removed APIs all relate
+to the old per-sample APIs. You now need to use the "pcm_frame" versions.
+*/
+
+
/*
USAGE
=====
dr_flac is a single-file library. To use it, do something like the following in one .c file.
+
#define DR_FLAC_IMPLEMENTATION
#include "dr_flac.h"
You can then #include this file in other parts of the program as you would with any other header file. To decode audio data,
do something like the following:
- drflac* pFlac = drflac_open_file("MySong.flac");
+ drflac* pFlac = drflac_open_file("MySong.flac", NULL);
if (pFlac == NULL) {
// Failed to open FLAC file
}
@@ -47,7 +157,7 @@ If you just want to quickly decode an entire FLAC file in one go you can do some
unsigned int channels;
unsigned int sampleRate;
drflac_uint64 totalPCMFrameCount;
- drflac_int32* pSampleData = drflac_open_file_and_read_pcm_frames_s32("MySong.flac", &channels, &sampleRate, &totalPCMFrameCount);
+ drflac_int32* pSampleData = drflac_open_file_and_read_pcm_frames_s32("MySong.flac", &channels, &sampleRate, &totalPCMFrameCount, NULL);
if (pSampleData == NULL) {
// Failed to open and decode FLAC file.
}
@@ -70,11 +180,11 @@ drflac_open_with_metdata() returns.
The main opening APIs (drflac_open(), etc.) will fail if the header is not present. The presents a problem in certain
-scenarios such as broadcast style streams like internet radio where the header may not be present because the user has
+scenarios such as broadcast style streams or internet radio where the header may not be present because the user has
started playback mid-stream. To handle this, use the relaxed APIs: drflac_open_relaxed() and drflac_open_with_metadata_relaxed().
It is not recommended to use these APIs for file based streams because a missing header would usually indicate a
-corrupted or perverse file. In addition, these APIs can take a long time to initialize because they may need to spend
+corrupt or perverse file. In addition, these APIs can take a long time to initialize because they may need to spend
a lot of time finding the first frame.
@@ -96,18 +206,18 @@ OPTIONS
onRead(), or increase it if it's very inefficient. Must be a multiple of 8.
#define DR_FLAC_NO_CRC
- Disables CRC checks. This will offer a performance boost when CRC is unnecessary.
+ Disables CRC checks. This will offer a performance boost when CRC is unnecessary. This will disable binary search seeking.
+ When seeking, the seek table will be used if available. Otherwise the seek will be performed using brute force.
#define DR_FLAC_NO_SIMD
- Disables SIMD optimizations (SSE on x86/x64 architectures). Use this if you are having compatibility issues with your
- compiler.
+ Disables SIMD optimizations (SSE on x86/x64 architectures, NEON on ARM architectures). Use this if you are having
+ compatibility issues with your compiler.
QUICK NOTES
===========
- dr_flac does not currently support changing the sample rate nor channel count mid stream.
-- Audio data is output as signed 32-bit PCM, regardless of the bits per sample the FLAC stream is encoded as.
- This has not been tested on big-endian architectures.
- dr_flac is not thread-safe, but its APIs can be called from any thread so long as you do your own synchronization.
- When using Ogg encapsulation, a corrupted metadata block will result in drflac_open_with_metadata() and drflac_open()
@@ -149,7 +259,7 @@ typedef drflac_uint32 drflac_bool32;
#elif (defined(__GNUC__) && __GNUC__ >= 4) /* GCC 4 */
#define DRFLAC_DEPRECATED __attribute__((deprecated))
#elif defined(__has_feature) /* Clang */
- #if defined(__has_feature(attribute_deprecated))
+ #if __has_feature(attribute_deprecated)
#define DRFLAC_DEPRECATED __attribute__((deprecated))
#else
#define DRFLAC_DEPRECATED
@@ -232,22 +342,22 @@ typedef enum
#pragma pack(2)
typedef struct
{
- drflac_uint64 firstSample;
- drflac_uint64 frameOffset; /* The offset from the first byte of the header of the first frame. */
- drflac_uint16 sampleCount;
+ drflac_uint64 firstPCMFrame;
+ drflac_uint64 flacFrameOffset; /* The offset from the first byte of the header of the first frame. */
+ drflac_uint16 pcmFrameCount;
} drflac_seekpoint;
#pragma pack()
typedef struct
{
- drflac_uint16 minBlockSize;
- drflac_uint16 maxBlockSize;
- drflac_uint32 minFrameSize;
- drflac_uint32 maxFrameSize;
+ drflac_uint16 minBlockSizeInPCMFrames;
+ drflac_uint16 maxBlockSizeInPCMFrames;
+ drflac_uint32 minFrameSizeInPCMFrames;
+ drflac_uint32 maxFrameSizeInPCMFrames;
drflac_uint32 sampleRate;
drflac_uint8 channels;
drflac_uint8 bitsPerSample;
- drflac_uint64 totalSampleCount;
+ drflac_uint64 totalPCMFrameCount;
drflac_uint8 md5[16];
} drflac_streaminfo;
@@ -348,6 +458,9 @@ Returns whether or not the seek was successful.
The offset will never be negative. Whether or not it is relative to the beginning or current position is determined
by the "origin" parameter which will be either drflac_seek_origin_start or drflac_seek_origin_current.
+
+When seeking to a PCM frame using drflac_seek_to_pcm_frame(), dr_flac may call this with an offset beyond the end of
+the FLAC stream. This needs to be detected and handled by returning DRFLAC_FALSE.
*/
typedef drflac_bool32 (* drflac_seek_proc)(void* pUserData, int offset, drflac_seek_origin origin);
@@ -362,6 +475,14 @@ Use pMetadata->type to determine which metadata block is being handled and how t
typedef void (* drflac_meta_proc)(void* pUserData, drflac_metadata* pMetadata);
+typedef struct
+{
+ void* pUserData;
+ void* (* onMalloc)(size_t sz, void* pUserData);
+ void* (* onRealloc)(void* p, size_t sz, void* pUserData);
+ void (* onFree)(void* p, void* pUserData);
+} drflac_allocation_callbacks;
+
/* Structure for internal use. Only used for decoders opened with drflac_open_memory. */
typedef struct
{
@@ -426,35 +547,29 @@ typedef struct
/* The order to use for the prediction stage for SUBFRAME_FIXED and SUBFRAME_LPC. */
drflac_uint8 lpcOrder;
- /*
- The number of bits per sample for this subframe. This is not always equal to the current frame's bit per sample because
- an extra bit is required for side channels when interchannel decorrelation is being used.
- */
- drflac_uint32 bitsPerSample;
-
- /*
- A pointer to the buffer containing the decoded samples in the subframe. This pointer is an offset from drflac::pExtraData. Note that
- it's a signed 32-bit integer for each value.
- */
- drflac_int32* pDecodedSamples;
+ /* A pointer to the buffer containing the decoded samples in the subframe. This pointer is an offset from drflac::pExtraData. */
+ drflac_int32* pSamplesS32;
} drflac_subframe;
typedef struct
{
/*
- If the stream uses variable block sizes, this will be set to the index of the first sample. If fixed block sizes are used, this will
- always be set to 0.
+ If the stream uses variable block sizes, this will be set to the index of the first PCM frame. If fixed block sizes are used, this will
+ always be set to 0. This is 64-bit because the decoded PCM frame number will be 36 bits.
*/
- drflac_uint64 sampleNumber;
+ drflac_uint64 pcmFrameNumber;
- /* If the stream uses fixed block sizes, this will be set to the frame number. If variable block sizes are used, this will always be 0. */
- drflac_uint32 frameNumber;
+ /*
+ If the stream uses fixed block sizes, this will be set to the frame number. If variable block sizes are used, this will always be 0. This
+ is 32-bit because in fixed block sizes, the maximum frame number will be 31 bits.
+ */
+ drflac_uint32 flacFrameNumber;
/* The sample rate of this frame. */
drflac_uint32 sampleRate;
- /* The number of samples in each sub-frame within this frame. */
- drflac_uint16 blockSize;
+ /* The number of PCM frames in each sub-frame within this frame. */
+ drflac_uint16 blockSizeInPCMFrames;
/*
The channel assignment of this frame. This is not always set to the channel count. If interchannel decorrelation is being used this
@@ -475,10 +590,10 @@ typedef struct
drflac_frame_header header;
/*
- The number of samples left to be read in this frame. This is initially set to the block size multiplied by the channel count. As samples
- are read, this will be decremented. When it reaches 0, the decoder will see this frame as fully consumed and load the next frame.
+ The number of PCM frames left to be read in this FLAC frame. This is initially set to the block size. As PCM frames are read,
+ this will be decremented. When it reaches 0, the decoder will see this frame as fully consumed and load the next frame.
*/
- drflac_uint32 samplesRemaining;
+ drflac_uint32 pcmFramesRemaining;
/* The list of sub-frames within the frame. There is one sub-frame for each channel, and there's a maximum of 8 channels. */
drflac_subframe subframes[8];
@@ -492,6 +607,9 @@ typedef struct
/* The user data posted to the metadata callback function. */
void* pUserDataMD;
+ /* Memory allocation callbacks. */
+ drflac_allocation_callbacks allocationCallbacks;
+
/* The sample rate. Will be set to something like 44100. */
drflac_uint32 sampleRate;
@@ -506,15 +624,13 @@ typedef struct
drflac_uint8 bitsPerSample;
/* The maximum block size, in samples. This number represents the number of samples in each channel (not combined). */
- drflac_uint16 maxBlockSize;
+ drflac_uint16 maxBlockSizeInPCMFrames;
/*
- The total number of samples making up the stream. This includes every channel. For example, if the stream has 2 channels,
- with each channel having a total of 4096, this value will be set to 2*4096 = 8192. Can be 0 in which case it's still a
- valid stream, but just means the total sample count is unknown. Likely the case with streams like internet radio.
+ The total number of PCM Frames making up the stream. Can be 0 in which case it's still a valid stream, but just means
+ the total PCM frame count is unknown. Likely the case with streams like internet radio.
*/
- drflac_uint64 totalSampleCount;
- drflac_uint64 totalPCMFrameCount; /* <-- Equal to totalSampleCount / channels. */
+ drflac_uint64 totalPCMFrameCount;
/* The container type. This is set based on whether or not the decoder was opened from a native or Ogg stream. */
@@ -525,13 +641,14 @@ typedef struct
/* Information about the frame the decoder is currently sitting on. */
- drflac_frame currentFrame;
+ drflac_frame currentFLACFrame;
- /* The index of the sample the decoder is currently sitting on. This is only used for seeking. */
- drflac_uint64 currentSample;
- /* The position of the first frame in the stream. This is only ever used for seeking. */
- drflac_uint64 firstFramePos;
+ /* The index of the PCM frame the decoder is currently sitting on. This is only used for seeking. */
+ drflac_uint64 currentPCMFrame;
+
+ /* The position of the first FLAC frame in the stream. This is only ever used for seeking. */
+ drflac_uint64 firstFLACFramePosInBytes;
/* A hack to avoid a malloc() when opening a decoder with drflac_open_memory(). */
@@ -547,6 +664,11 @@ typedef struct
/* Internal use only. Only used with Ogg containers. Points to a drflac_oggbs object. This is an offset of pExtraData. */
void* _oggbs;
+ /* Internal use only. Used for profiling and testing different seeking modes. */
+ drflac_bool32 _noSeekTableSeek : 1;
+ drflac_bool32 _noBinarySearchSeek : 1;
+ drflac_bool32 _noBruteForceSeek : 1;
+
/* The bit streamer. The raw FLAC data is fed through this object. */
drflac_bs bs;
@@ -554,18 +676,20 @@ typedef struct
drflac_uint8 pExtraData[1];
} drflac;
-
/*
Opens a FLAC decoder.
-onRead [in] The function to call when data needs to be read from the client.
-onSeek [in] The function to call when the read position of the client data needs to move.
-pUserData [in, optional] A pointer to application defined data that will be passed to onRead and onSeek.
+onRead [in] The function to call when data needs to be read from the client.
+onSeek [in] The function to call when the read position of the client data needs to move.
+pUserData [in, optional] A pointer to application defined data that will be passed to onRead and onSeek.
+pAllocationCallbacks [in, optional] A pointer to application defined callbacks for managing memory allocations.
Returns a pointer to an object representing the decoder.
Close the decoder with drflac_close().
+pAllocationCallbacks can be NULL in which case it will use DRFLAC_MALLOC, DRFLAC_REALLOC and DRFLAC_FREE.
+
This function will automatically detect whether or not you are attempting to open a native or Ogg encapsulated
FLAC, both of which should work seamlessly without any manual intervention. Ogg encapsulation also works with
multiplexed streams which basically means it can play FLAC encoded audio tracks in videos.
@@ -578,7 +702,7 @@ the header may not be present.
See also: drflac_open_file(), drflac_open_memory(), drflac_open_with_metadata(), drflac_close()
*/
-drflac* drflac_open(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData);
+drflac* drflac_open(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks);
/*
The same as drflac_open(), except attempts to open the stream even when a header block is not present.
@@ -590,22 +714,25 @@ Opening in relaxed mode will continue reading data from onRead until it finds a
found it will continue forever. To abort, force your onRead callback to return 0, which dr_flac will use as an
indicator that the end of the stream was found.
*/
-drflac* drflac_open_relaxed(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_container container, void* pUserData);
+drflac* drflac_open_relaxed(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_container container, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks);
/*
Opens a FLAC decoder and notifies the caller of the metadata chunks (album art, etc.).
-onRead [in] The function to call when data needs to be read from the client.
-onSeek [in] The function to call when the read position of the client data needs to move.
-onMeta [in] The function to call for every metadata block.
-pUserData [in, optional] A pointer to application defined data that will be passed to onRead, onSeek and onMeta.
+onRead [in] The function to call when data needs to be read from the client.
+onSeek [in] The function to call when the read position of the client data needs to move.
+onMeta [in] The function to call for every metadata block.
+pUserData [in, optional] A pointer to application defined data that will be passed to onRead, onSeek and onMeta.
+pAllocationCallbacks [in, optional] A pointer to application defined callbacks for managing memory allocations.
Returns a pointer to an object representing the decoder.
Close the decoder with drflac_close().
-This is slower than drflac_open(), so avoid this one if you don't need metadata. Internally, this will do a DRFLAC_MALLOC()
-and DRFLAC_FREE() for every metadata block except for STREAMINFO and PADDING blocks.
+pAllocationCallbacks can be NULL in which case it will use DRFLAC_MALLOC, DRFLAC_REALLOC and DRFLAC_FREE.
+
+This is slower than drflac_open(), so avoid this one if you don't need metadata. Internally, this will allocate and free
+memory on the heap for every metadata block except for STREAMINFO and PADDING blocks.
The caller is notified of the metadata via the onMeta callback. All metadata blocks will be handled before the function
returns.
@@ -621,14 +748,14 @@ whether or not the stream is being opened with metadata.
See also: drflac_open_file_with_metadata(), drflac_open_memory_with_metadata(), drflac_open(), drflac_close()
*/
-drflac* drflac_open_with_metadata(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, void* pUserData);
+drflac* drflac_open_with_metadata(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks);
/*
The same as drflac_open_with_metadata(), except attempts to open the stream even when a header block is not present.
See also: drflac_open_with_metadata(), drflac_open_relaxed()
*/
-drflac* drflac_open_with_metadata_relaxed(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, drflac_container container, void* pUserData);
+drflac* drflac_open_with_metadata_relaxed(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, drflac_container container, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks);
/*
Closes the given FLAC decoder.
@@ -685,7 +812,8 @@ drflac_bool32 drflac_seek_to_pcm_frame(drflac* pFlac, drflac_uint64 pcmFrameInde
/*
Opens a FLAC decoder from the file at the given path.
-filename [in] The path of the file to open, either absolute or relative to the current directory.
+filename [in] The path of the file to open, either absolute or relative to the current directory.
+pAllocationCallbacks [in, optional] A pointer to application defined callbacks for managing memory allocations.
Returns a pointer to an object representing the decoder.
@@ -697,14 +825,14 @@ same time.
See also: drflac_open(), drflac_open_file_with_metadata(), drflac_close()
*/
-drflac* drflac_open_file(const char* filename);
+drflac* drflac_open_file(const char* filename, const drflac_allocation_callbacks* pAllocationCallbacks);
/*
Opens a FLAC decoder from the file at the given path and notifies the caller of the metadata chunks (album art, etc.)
Look at the documentation for drflac_open_with_metadata() for more information on how metadata is handled.
*/
-drflac* drflac_open_file_with_metadata(const char* filename, drflac_meta_proc onMeta, void* pUserData);
+drflac* drflac_open_file_with_metadata(const char* filename, drflac_meta_proc onMeta, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks);
#endif
/*
@@ -713,14 +841,14 @@ Opens a FLAC decoder from a pre-allocated block of memory
This does not create a copy of the data. It is up to the application to ensure the buffer remains valid for
the lifetime of the decoder.
*/
-drflac* drflac_open_memory(const void* data, size_t dataSize);
+drflac* drflac_open_memory(const void* data, size_t dataSize, const drflac_allocation_callbacks* pAllocationCallbacks);
/*
Opens a FLAC decoder from a pre-allocated block of memory and notifies the caller of the metadata chunks (album art, etc.)
Look at the documentation for drflac_open_with_metadata() for more information on how metadata is handled.
*/
-drflac* drflac_open_memory_with_metadata(const void* data, size_t dataSize, drflac_meta_proc onMeta, void* pUserData);
+drflac* drflac_open_memory_with_metadata(const void* data, size_t dataSize, drflac_meta_proc onMeta, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks);
@@ -728,43 +856,50 @@ drflac* drflac_open_memory_with_metadata(const void* data, size_t dataSize, drfl
/*
Opens a FLAC stream from the given callbacks and fully decodes it in a single operation. The return value is a
-pointer to the sample data as interleaved signed 32-bit PCM. The returned data must be freed with DRFLAC_FREE().
+pointer to the sample data as interleaved signed 32-bit PCM. The returned data must be freed with drflac_free().
+
+You can pass in custom memory allocation callbacks via the pAllocationCallbacks parameter. This can be NULL in which
+case it will use DRFLAC_MALLOC, DRFLAC_REALLOC and DRFLAC_FREE.
Sometimes a FLAC file won't keep track of the total sample count. In this situation the function will continuously
read samples into a dynamically sized buffer on the heap until no samples are left.
Do not call this function on a broadcast type of stream (like internet radio streams and whatnot).
*/
-drflac_int32* drflac_open_and_read_pcm_frames_s32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount);
+drflac_int32* drflac_open_and_read_pcm_frames_s32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks);
/* Same as drflac_open_and_read_pcm_frames_s32(), except returns signed 16-bit integer samples. */
-drflac_int16* drflac_open_and_read_pcm_frames_s16(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount);
+drflac_int16* drflac_open_and_read_pcm_frames_s16(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks);
/* Same as drflac_open_and_read_pcm_frames_s32(), except returns 32-bit floating-point samples. */
-float* drflac_open_and_read_pcm_frames_f32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount);
+float* drflac_open_and_read_pcm_frames_f32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks);
#ifndef DR_FLAC_NO_STDIO
/* Same as drflac_open_and_read_pcm_frames_s32() except opens the decoder from a file. */
-drflac_int32* drflac_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount);
+drflac_int32* drflac_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks);
/* Same as drflac_open_file_and_read_pcm_frames_s32(), except returns signed 16-bit integer samples. */
-drflac_int16* drflac_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount);
+drflac_int16* drflac_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks);
/* Same as drflac_open_file_and_read_pcm_frames_s32(), except returns 32-bit floating-point samples. */
-float* drflac_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount);
+float* drflac_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks);
#endif
/* Same as drflac_open_and_read_pcm_frames_s32() except opens the decoder from a block of memory. */
-drflac_int32* drflac_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount);
+drflac_int32* drflac_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks);
/* Same as drflac_open_memory_and_read_pcm_frames_s32(), except returns signed 16-bit integer samples. */
-drflac_int16* drflac_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount);
+drflac_int16* drflac_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks);
/* Same as drflac_open_memory_and_read_pcm_frames_s32(), except returns 32-bit floating-point samples. */
-float* drflac_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount);
+float* drflac_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks);
+
+/*
+Frees memory that was allocated internally by dr_flac.
-/* Frees memory that was allocated internally by dr_flac. */
-void drflac_free(void* p);
+Set pAllocationCallbacks to the same object that was passed to drflac_open_*_and_read_pcm_frames_*(). If you originally passed in NULL, pass in NULL for this.
+*/
+void drflac_free(void* p, const drflac_allocation_callbacks* pAllocationCallbacks);
/* Structure representing an iterator for vorbis comments in a VORBIS_COMMENT metadata block. */
@@ -825,21 +960,6 @@ void drflac_init_cuesheet_track_iterator(drflac_cuesheet_track_iterator* pIter,
drflac_bool32 drflac_next_cuesheet_track(drflac_cuesheet_track_iterator* pIter, drflac_cuesheet_track* pCuesheetTrack);
-/* Deprecated APIs */
-DRFLAC_DEPRECATED drflac_uint64 drflac_read_s32(drflac* pFlac, drflac_uint64 samplesToRead, drflac_int32* pBufferOut); /* Use drflac_read_pcm_frames_s32() instead. */
-DRFLAC_DEPRECATED drflac_uint64 drflac_read_s16(drflac* pFlac, drflac_uint64 samplesToRead, drflac_int16* pBufferOut); /* Use drflac_read_pcm_frames_s16() instead. */
-DRFLAC_DEPRECATED drflac_uint64 drflac_read_f32(drflac* pFlac, drflac_uint64 samplesToRead, float* pBufferOut); /* Use drflac_read_pcm_frames_f32() instead. */
-DRFLAC_DEPRECATED drflac_bool32 drflac_seek_to_sample(drflac* pFlac, drflac_uint64 sampleIndex); /* Use drflac_seek_to_pcm_frame() instead. */
-DRFLAC_DEPRECATED drflac_int32* drflac_open_and_decode_s32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalSampleCount); /* Use drflac_open_and_read_pcm_frames_s32(). */
-DRFLAC_DEPRECATED drflac_int16* drflac_open_and_decode_s16(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalSampleCount); /* Use drflac_open_and_read_pcm_frames_s16(). */
-DRFLAC_DEPRECATED float* drflac_open_and_decode_f32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalSampleCount); /* Use drflac_open_and_read_pcm_frames_f32(). */
-DRFLAC_DEPRECATED drflac_int32* drflac_open_and_decode_file_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalSampleCount); /* Use drflac_open_file_and_read_pcm_frames_s32(). */
-DRFLAC_DEPRECATED drflac_int16* drflac_open_and_decode_file_s16(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalSampleCount); /* Use drflac_open_file_and_read_pcm_frames_s16(). */
-DRFLAC_DEPRECATED float* drflac_open_and_decode_file_f32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalSampleCount); /* Use drflac_open_file_and_read_pcm_frames_f32(). */
-DRFLAC_DEPRECATED drflac_int32* drflac_open_and_decode_memory_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalSampleCount); /* Use drflac_open_memory_and_read_pcm_frames_s32(). */
-DRFLAC_DEPRECATED drflac_int16* drflac_open_and_decode_memory_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalSampleCount); /* Use drflac_open_memory_and_read_pcm_frames_s16(). */
-DRFLAC_DEPRECATED float* drflac_open_and_decode_memory_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalSampleCount); /* Use drflac_open_memory_and_read_pcm_frames_f32(). */
-
#ifdef __cplusplus
}
#endif
@@ -877,13 +997,22 @@ DRFLAC_DEPRECATED float* drflac_open_and_decode_memory_f32(const void* data, siz
#include
#ifdef _MSC_VER
-#define DRFLAC_INLINE __forceinline
-#else
-#ifdef __GNUC__
-#define DRFLAC_INLINE __inline__ __attribute__((always_inline))
+ #define DRFLAC_INLINE __forceinline
+#elif defined(__GNUC__)
+ /*
+ I've had a bug report where GCC is emitting warnings about functions possibly not being inlineable. This warning happens when
+ the __attribute__((always_inline)) attribute is defined without an "inline" statement. I think therefore there must be some
+ case where "__inline__" is not always defined, thus the compiler emitting these warnings. When using -std=c89 or -ansi on the
+ command line, we cannot use the "inline" keyword and instead need to use "__inline__". In an attempt to work around this issue
+ I am using "__inline__" only when we're compiling in strict ANSI mode.
+ */
+ #if defined(__STRICT_ANSI__)
+ #define DRFLAC_INLINE __inline__ __attribute__((always_inline))
+ #else
+ #define DRFLAC_INLINE inline __attribute__((always_inline))
+ #endif
#else
-#define DRFLAC_INLINE
-#endif
+ #define DRFLAC_INLINE
#endif
/* CPU architecture. */
@@ -971,7 +1100,7 @@ DRFLAC_DEPRECATED float* drflac_open_and_decode_memory_f32(const void* data, siz
It looks like the -fPIC option uses the ebx register which GCC complains about. We can work around this by just using a different register, the
specific register of which I'm letting the compiler decide on. The "k" prefix is used to specify a 32-bit register. The {...} syntax is for
supporting different assembly dialects.
-
+
What's basically happening is that we're saving and restoring the ebx register manually.
*/
#if defined(DRFLAC_X86) && defined(__PIC__)
@@ -1008,7 +1137,7 @@ static DRFLAC_INLINE drflac_bool32 drflac_has_sse2()
return DRFLAC_FALSE;
#else
int info[4];
- drflac_cpuid(info, 1);
+ drflac__cpuid(info, 1);
return (info[3] & (1 << 26)) != 0;
#endif
#endif
@@ -1033,7 +1162,7 @@ static DRFLAC_INLINE drflac_bool32 drflac_has_sse41()
return DRFLAC_FALSE;
#else
int info[4];
- drflac_cpuid(info, 1);
+ drflac__cpuid(info, 1);
return (info[2] & (1 << 19)) != 0;
#endif
#endif
@@ -1051,24 +1180,28 @@ static DRFLAC_INLINE drflac_bool32 drflac_has_sse41()
#elif (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)))
#define DRFLAC_HAS_LZCNT_INTRINSIC
#elif defined(__clang__)
- #if __has_builtin(__builtin_clzll) || __has_builtin(__builtin_clzl)
- #define DRFLAC_HAS_LZCNT_INTRINSIC
+ #if defined(__has_builtin)
+ #if __has_builtin(__builtin_clzll) || __has_builtin(__builtin_clzl)
+ #define DRFLAC_HAS_LZCNT_INTRINSIC
+ #endif
#endif
#endif
-#if defined(_MSC_VER) && _MSC_VER >= 1300
+#if defined(_MSC_VER) && _MSC_VER >= 1400
#define DRFLAC_HAS_BYTESWAP16_INTRINSIC
#define DRFLAC_HAS_BYTESWAP32_INTRINSIC
#define DRFLAC_HAS_BYTESWAP64_INTRINSIC
#elif defined(__clang__)
- #if __has_builtin(__builtin_bswap16)
- #define DRFLAC_HAS_BYTESWAP16_INTRINSIC
- #endif
- #if __has_builtin(__builtin_bswap32)
- #define DRFLAC_HAS_BYTESWAP32_INTRINSIC
- #endif
- #if __has_builtin(__builtin_bswap64)
- #define DRFLAC_HAS_BYTESWAP64_INTRINSIC
+ #if defined(__has_builtin)
+ #if __has_builtin(__builtin_bswap16)
+ #define DRFLAC_HAS_BYTESWAP16_INTRINSIC
+ #endif
+ #if __has_builtin(__builtin_bswap32)
+ #define DRFLAC_HAS_BYTESWAP32_INTRINSIC
+ #endif
+ #if __has_builtin(__builtin_bswap64)
+ #define DRFLAC_HAS_BYTESWAP64_INTRINSIC
+ #endif
#endif
#elif defined(__GNUC__)
#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
@@ -1125,25 +1258,35 @@ typedef drflac_int32 drflac_result;
#define DRFLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE 9
#define DRFLAC_CHANNEL_ASSIGNMENT_MID_SIDE 10
-/*
-Keeps track of the number of leading samples for each sub-frame. This is required because the SSE pipeline will occasionally
-reference excess prior samples.
-*/
-#define DRFLAC_LEADING_SAMPLES 32
-
-
#define drflac_align(x, a) ((((x) + (a) - 1) / (a)) * (a))
-#define drflac_assert DRFLAC_ASSERT
-#define drflac_copy_memory DRFLAC_COPY_MEMORY
-#define drflac_zero_memory DRFLAC_ZERO_MEMORY
/* CPU caps. */
+#if defined(__has_feature)
+ #if __has_feature(thread_sanitizer)
+ #define DRFLAC_NO_THREAD_SANITIZE __attribute__((no_sanitize("thread")))
+ #else
+ #define DRFLAC_NO_THREAD_SANITIZE
+ #endif
+#else
+ #define DRFLAC_NO_THREAD_SANITIZE
+#endif
+
+#if defined(DRFLAC_HAS_LZCNT_INTRINSIC)
static drflac_bool32 drflac__gIsLZCNTSupported = DRFLAC_FALSE;
+#endif
+
#ifndef DRFLAC_NO_CPUID
static drflac_bool32 drflac__gIsSSE2Supported = DRFLAC_FALSE;
static drflac_bool32 drflac__gIsSSE41Supported = DRFLAC_FALSE;
-static void drflac__init_cpu_caps()
+
+/*
+I've had a bug report that Clang's ThreadSanitizer presents a warning in this function. Having reviewed this, this does
+actually make sense. However, since CPU caps should never differ for a running process, I don't think the trade off of
+complicating internal API's by passing around CPU caps versus just disabling the warnings is worthwhile. I'm therefore
+just going to disable these warnings. This is disabled via the DRFLAC_NO_THREAD_SANITIZE attribute.
+*/
+DRFLAC_NO_THREAD_SANITIZE static void drflac__init_cpu_caps()
{
static drflac_bool32 isCPUCapsInitialized = DRFLAC_FALSE;
@@ -1151,8 +1294,10 @@ static void drflac__init_cpu_caps()
int info[4] = {0};
/* LZCNT */
+#if defined(DRFLAC_HAS_LZCNT_INTRINSIC)
drflac__cpuid(info, 0x80000001);
drflac__gIsLZCNTSupported = (info[2] & (1 << 5)) != 0;
+#endif
/* SSE2 */
drflac__gIsSSE2Supported = drflac_has_sse2();
@@ -1164,6 +1309,35 @@ static void drflac__init_cpu_caps()
isCPUCapsInitialized = DRFLAC_TRUE;
}
}
+#else
+static drflac_bool32 drflac__gIsNEONSupported = DRFLAC_FALSE;
+
+static DRFLAC_INLINE drflac_bool32 drflac__has_neon()
+{
+#if defined(DRFLAC_SUPPORT_NEON)
+ #if defined(DRFLAC_ARM) && !defined(DRFLAC_NO_NEON)
+ #if (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64))
+ return DRFLAC_TRUE; /* If the compiler is allowed to freely generate NEON code we can assume support. */
+ #else
+ /* TODO: Runtime check. */
+ return DRFLAC_FALSE;
+ #endif
+ #else
+ return DRFLAC_FALSE; /* NEON is only supported on ARM architectures. */
+ #endif
+#else
+ return DRFLAC_FALSE; /* No compiler support. */
+#endif
+}
+
+DRFLAC_NO_THREAD_SANITIZE static void drflac__init_cpu_caps()
+{
+ drflac__gIsNEONSupported = drflac__has_neon();
+
+#if defined(DRFLAC_HAS_LZCNT_INTRINSIC) && defined(DRFLAC_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5)
+ drflac__gIsLZCNTSupported = DRFLAC_TRUE;
+#endif
+}
#endif
@@ -1202,7 +1376,20 @@ static DRFLAC_INLINE drflac_uint32 drflac__swap_endian_uint32(drflac_uint32 n)
#if defined(_MSC_VER)
return _byteswap_ulong(n);
#elif defined(__GNUC__) || defined(__clang__)
- return __builtin_bswap32(n);
+ #if defined(DRFLAC_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(DRFLAC_64BIT) /* <-- 64-bit inline assembly has not been tested, so disabling for now. */
+ /* Inline assembly optimized implementation for ARM. In my testing, GCC does not generate optimized code with __builtin_bswap32(). */
+ drflac_uint32 r;
+ __asm__ __volatile__ (
+ #if defined(DRFLAC_64BIT)
+ "rev %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(n) /* <-- This is untested. If someone in the community could test this, that would be appreciated! */
+ #else
+ "rev %[out], %[in]" : [out]"=r"(r) : [in]"r"(n)
+ #endif
+ );
+ return r;
+ #else
+ return __builtin_bswap32(n);
+ #endif
#else
#error "This compiler does not support the byte swap intrinsic."
#endif
@@ -1376,8 +1563,8 @@ static DRFLAC_INLINE drflac_uint8 drflac_crc8(drflac_uint8 crc, drflac_uint32 da
static drflac_uint64 leftoverDataMaskTable[8] = {
0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F
};
-
- drflac_assert(count <= 32);
+
+ DRFLAC_ASSERT(count <= 32);
wholeBytes = count >> 3;
leftoverBits = count - (wholeBytes*8);
@@ -1400,6 +1587,22 @@ static DRFLAC_INLINE drflac_uint16 drflac_crc16_byte(drflac_uint16 crc, drflac_u
return (crc << 8) ^ drflac__crc16_table[(drflac_uint8)(crc >> 8) ^ data];
}
+static DRFLAC_INLINE drflac_uint16 drflac_crc16_cache(drflac_uint16 crc, drflac_cache_t data)
+{
+#ifdef DRFLAC_64BIT
+ crc = drflac_crc16_byte(crc, (drflac_uint8)((data >> 56) & 0xFF));
+ crc = drflac_crc16_byte(crc, (drflac_uint8)((data >> 48) & 0xFF));
+ crc = drflac_crc16_byte(crc, (drflac_uint8)((data >> 40) & 0xFF));
+ crc = drflac_crc16_byte(crc, (drflac_uint8)((data >> 32) & 0xFF));
+#endif
+ crc = drflac_crc16_byte(crc, (drflac_uint8)((data >> 24) & 0xFF));
+ crc = drflac_crc16_byte(crc, (drflac_uint8)((data >> 16) & 0xFF));
+ crc = drflac_crc16_byte(crc, (drflac_uint8)((data >> 8) & 0xFF));
+ crc = drflac_crc16_byte(crc, (drflac_uint8)((data >> 0) & 0xFF));
+
+ return crc;
+}
+
static DRFLAC_INLINE drflac_uint16 drflac_crc16_bytes(drflac_uint16 crc, drflac_cache_t data, drflac_uint32 byteCount)
{
switch (byteCount)
@@ -1419,6 +1622,7 @@ static DRFLAC_INLINE drflac_uint16 drflac_crc16_bytes(drflac_uint16 crc, drflac_
return crc;
}
+#if 0
static DRFLAC_INLINE drflac_uint16 drflac_crc16__32bit(drflac_uint16 crc, drflac_uint32 data, drflac_uint32 count)
{
#ifdef DR_FLAC_NO_CRC
@@ -1449,10 +1653,10 @@ static DRFLAC_INLINE drflac_uint16 drflac_crc16__32bit(drflac_uint16 crc, drflac
0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F
};
- drflac_assert(count <= 64);
-
+ DRFLAC_ASSERT(count <= 64);
+
wholeBytes = count >> 3;
- leftoverBits = count - (wholeBytes*8);
+ leftoverBits = count & 7;
leftoverDataMask = leftoverDataMaskTable[leftoverBits];
switch (wholeBytes) {
@@ -1483,11 +1687,11 @@ static DRFLAC_INLINE drflac_uint16 drflac_crc16__64bit(drflac_uint16 crc, drflac
static drflac_uint64 leftoverDataMaskTable[8] = {
0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F
};
-
- drflac_assert(count <= 64);
+
+ DRFLAC_ASSERT(count <= 64);
wholeBytes = count >> 3;
- leftoverBits = count - (wholeBytes*8);
+ leftoverBits = count & 7;
leftoverDataMask = leftoverDataMaskTable[leftoverBits];
switch (wholeBytes) {
@@ -1515,6 +1719,7 @@ static DRFLAC_INLINE drflac_uint16 drflac_crc16(drflac_uint16 crc, drflac_cache_
return drflac_crc16__32bit(crc, data, count);
#endif
}
+#endif
#ifdef DRFLAC_64BIT
@@ -1554,14 +1759,18 @@ static DRFLAC_INLINE void drflac__reset_crc16(drflac_bs* bs)
static DRFLAC_INLINE void drflac__update_crc16(drflac_bs* bs)
{
- bs->crc16 = drflac_crc16_bytes(bs->crc16, bs->crc16Cache, DRFLAC_CACHE_L1_SIZE_BYTES(bs) - bs->crc16CacheIgnoredBytes);
- bs->crc16CacheIgnoredBytes = 0;
+ if (bs->crc16CacheIgnoredBytes == 0) {
+ bs->crc16 = drflac_crc16_cache(bs->crc16, bs->crc16Cache);
+ } else {
+ bs->crc16 = drflac_crc16_bytes(bs->crc16, bs->crc16Cache, DRFLAC_CACHE_L1_SIZE_BYTES(bs) - bs->crc16CacheIgnoredBytes);
+ bs->crc16CacheIgnoredBytes = 0;
+ }
}
static DRFLAC_INLINE drflac_uint16 drflac__flush_crc16(drflac_bs* bs)
{
/* We should never be flushing in a situation where we are not aligned on a byte boundary. */
- drflac_assert((DRFLAC_CACHE_L1_BITS_REMAINING(bs) & 7) == 0);
+ DRFLAC_ASSERT((DRFLAC_CACHE_L1_BITS_REMAINING(bs) & 7) == 0);
/*
The bits that were read from the L1 cache need to be accumulated. The number of bytes needing to be accumulated is determined
@@ -1674,7 +1883,7 @@ static drflac_bool32 drflac__reload_cache(drflac_bs* bs)
return DRFLAC_FALSE;
}
- drflac_assert(bytesRead < DRFLAC_CACHE_L1_SIZE_BYTES(bs));
+ DRFLAC_ASSERT(bytesRead < DRFLAC_CACHE_L1_SIZE_BYTES(bs));
bs->consumedBits = (drflac_uint32)(DRFLAC_CACHE_L1_SIZE_BYTES(bs) - bytesRead) * 8;
bs->cache = drflac__be2host__cache_line(bs->unalignedCache);
@@ -1705,10 +1914,10 @@ static void drflac__reset_cache(drflac_bs* bs)
static DRFLAC_INLINE drflac_bool32 drflac__read_uint32(drflac_bs* bs, unsigned int bitCount, drflac_uint32* pResultOut)
{
- drflac_assert(bs != NULL);
- drflac_assert(pResultOut != NULL);
- drflac_assert(bitCount > 0);
- drflac_assert(bitCount <= 32);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(pResultOut != NULL);
+ DRFLAC_ASSERT(bitCount > 0);
+ DRFLAC_ASSERT(bitCount <= 32);
if (bs->consumedBits == DRFLAC_CACHE_L1_SIZE_BITS(bs)) {
if (!drflac__reload_cache(bs)) {
@@ -1762,10 +1971,10 @@ static drflac_bool32 drflac__read_int32(drflac_bs* bs, unsigned int bitCount, dr
drflac_uint32 result;
drflac_uint32 signbit;
- drflac_assert(bs != NULL);
- drflac_assert(pResult != NULL);
- drflac_assert(bitCount > 0);
- drflac_assert(bitCount <= 32);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(pResult != NULL);
+ DRFLAC_ASSERT(bitCount > 0);
+ DRFLAC_ASSERT(bitCount <= 32);
if (!drflac__read_uint32(bs, bitCount, &result)) {
return DRFLAC_FALSE;
@@ -1784,8 +1993,8 @@ static drflac_bool32 drflac__read_uint64(drflac_bs* bs, unsigned int bitCount, d
drflac_uint32 resultHi;
drflac_uint32 resultLo;
- drflac_assert(bitCount <= 64);
- drflac_assert(bitCount > 32);
+ DRFLAC_ASSERT(bitCount <= 64);
+ DRFLAC_ASSERT(bitCount > 32);
if (!drflac__read_uint32(bs, bitCount - 32, &resultHi)) {
return DRFLAC_FALSE;
@@ -1807,7 +2016,7 @@ static drflac_bool32 drflac__read_int64(drflac_bs* bs, unsigned int bitCount, dr
drflac_uint64 result;
drflac_uint64 signbit;
- drflac_assert(bitCount <= 64);
+ DRFLAC_ASSERT(bitCount <= 64);
if (!drflac__read_uint64(bs, bitCount, &result)) {
return DRFLAC_FALSE;
@@ -1825,10 +2034,10 @@ static drflac_bool32 drflac__read_uint16(drflac_bs* bs, unsigned int bitCount, d
{
drflac_uint32 result;
- drflac_assert(bs != NULL);
- drflac_assert(pResult != NULL);
- drflac_assert(bitCount > 0);
- drflac_assert(bitCount <= 16);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(pResult != NULL);
+ DRFLAC_ASSERT(bitCount > 0);
+ DRFLAC_ASSERT(bitCount <= 16);
if (!drflac__read_uint32(bs, bitCount, &result)) {
return DRFLAC_FALSE;
@@ -1843,10 +2052,10 @@ static drflac_bool32 drflac__read_int16(drflac_bs* bs, unsigned int bitCount, dr
{
drflac_int32 result;
- drflac_assert(bs != NULL);
- drflac_assert(pResult != NULL);
- drflac_assert(bitCount > 0);
- drflac_assert(bitCount <= 16);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(pResult != NULL);
+ DRFLAC_ASSERT(bitCount > 0);
+ DRFLAC_ASSERT(bitCount <= 16);
if (!drflac__read_int32(bs, bitCount, &result)) {
return DRFLAC_FALSE;
@@ -1861,10 +2070,10 @@ static drflac_bool32 drflac__read_uint8(drflac_bs* bs, unsigned int bitCount, dr
{
drflac_uint32 result;
- drflac_assert(bs != NULL);
- drflac_assert(pResult != NULL);
- drflac_assert(bitCount > 0);
- drflac_assert(bitCount <= 8);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(pResult != NULL);
+ DRFLAC_ASSERT(bitCount > 0);
+ DRFLAC_ASSERT(bitCount <= 8);
if (!drflac__read_uint32(bs, bitCount, &result)) {
return DRFLAC_FALSE;
@@ -1878,10 +2087,10 @@ static drflac_bool32 drflac__read_int8(drflac_bs* bs, unsigned int bitCount, drf
{
drflac_int32 result;
- drflac_assert(bs != NULL);
- drflac_assert(pResult != NULL);
- drflac_assert(bitCount > 0);
- drflac_assert(bitCount <= 8);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(pResult != NULL);
+ DRFLAC_ASSERT(bitCount > 0);
+ DRFLAC_ASSERT(bitCount <= 8);
if (!drflac__read_int32(bs, bitCount, &result)) {
return DRFLAC_FALSE;
@@ -1941,7 +2150,7 @@ static drflac_bool32 drflac__seek_bits(drflac_bs* bs, size_t bitsToSeek)
bitsToSeek = 0; /* <-- Necessary for the assert below. */
}
- drflac_assert(bitsToSeek == 0);
+ DRFLAC_ASSERT(bitsToSeek == 0);
return DRFLAC_TRUE;
}
}
@@ -1950,7 +2159,7 @@ static drflac_bool32 drflac__seek_bits(drflac_bs* bs, size_t bitsToSeek)
/* This function moves the bit streamer to the first bit after the sync code (bit 15 of the of the frame header). It will also update the CRC-16. */
static drflac_bool32 drflac__find_and_seek_to_next_sync_code(drflac_bs* bs)
{
- drflac_assert(bs != NULL);
+ DRFLAC_ASSERT(bs != NULL);
/*
The sync code is always aligned to 8 bits. This is convenient for us because it means we can do byte-aligned movements. The first
@@ -1992,7 +2201,7 @@ static drflac_bool32 drflac__find_and_seek_to_next_sync_code(drflac_bs* bs)
}
-#if !defined(DR_FLAC_NO_SIMD) && defined(DRFLAC_HAS_LZCNT_INTRINSIC)
+#if defined(DRFLAC_HAS_LZCNT_INTRINSIC)
#define DRFLAC_IMPLEMENT_CLZ_LZCNT
#endif
#if defined(_MSC_VER) && _MSC_VER >= 1400 && (defined(DRFLAC_X64) || defined(DRFLAC_X86))
@@ -2035,11 +2244,16 @@ static DRFLAC_INLINE drflac_uint32 drflac__clz_software(drflac_cache_t x)
#ifdef DRFLAC_IMPLEMENT_CLZ_LZCNT
static DRFLAC_INLINE drflac_bool32 drflac__is_lzcnt_supported()
{
- /* If the compiler itself does not support the intrinsic then we'll need to return false. */
-#ifdef DRFLAC_HAS_LZCNT_INTRINSIC
- return drflac__gIsLZCNTSupported;
+ /* Fast compile time check for ARM. */
+#if defined(DRFLAC_HAS_LZCNT_INTRINSIC) && defined(DRFLAC_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5)
+ return DRFLAC_TRUE;
#else
- return DRFLAC_FALSE;
+ /* If the compiler itself does not support the intrinsic then we'll need to return false. */
+ #ifdef DRFLAC_HAS_LZCNT_INTRINSIC
+ return drflac__gIsLZCNTSupported;
+ #else
+ return DRFLAC_FALSE;
+ #endif
#endif
}
@@ -2053,13 +2267,46 @@ static DRFLAC_INLINE drflac_uint32 drflac__clz_lzcnt(drflac_cache_t x)
#endif
#else
#if defined(__GNUC__) || defined(__clang__)
- if (x == 0) {
- return sizeof(x)*8;
- }
- #ifdef DRFLAC_64BIT
- return (drflac_uint32)__builtin_clzll((drflac_uint64)x);
+ #if defined(DRFLAC_X64)
+ {
+ drflac_uint64 r;
+ __asm__ __volatile__ (
+ "lzcnt{ %1, %0| %0, %1}" : "=r"(r) : "r"(x)
+ );
+
+ return (drflac_uint32)r;
+ }
+ #elif defined(DRFLAC_X86)
+ {
+ drflac_uint32 r;
+ __asm__ __volatile__ (
+ "lzcnt{l %1, %0| %0, %1}" : "=r"(r) : "r"(x)
+ );
+
+ return r;
+ }
+ #elif defined(DRFLAC_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5) && !defined(DRFLAC_64BIT) /* <-- I haven't tested 64-bit inline assembly, so only enabling this for the 32-bit build for now. */
+ {
+ unsigned int r;
+ __asm__ __volatile__ (
+ #if defined(DRFLAC_64BIT)
+ "clz %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(x) /* <-- This is untested. If someone in the community could test this, that would be appreciated! */
+ #else
+ "clz %[out], %[in]" : [out]"=r"(r) : [in]"r"(x)
+ #endif
+ );
+
+ return r;
+ }
#else
- return (drflac_uint32)__builtin_clzl((drflac_uint32)x);
+ if (x == 0) {
+ return sizeof(x)*8;
+ }
+ #ifdef DRFLAC_64BIT
+ return (drflac_uint32)__builtin_clzll((drflac_uint64)x);
+ #else
+ return (drflac_uint32)__builtin_clzl((drflac_uint32)x);
+ #endif
#endif
#else
/* Unsupported compiler. */
@@ -2132,8 +2379,8 @@ static DRFLAC_INLINE drflac_bool32 drflac__seek_past_next_set_bit(drflac_bs* bs,
static drflac_bool32 drflac__seek_to_byte(drflac_bs* bs, drflac_uint64 offsetFromStart)
{
- drflac_assert(bs != NULL);
- drflac_assert(offsetFromStart > 0);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(offsetFromStart > 0);
/*
Seeking from the start is not quite as trivial as it sounds because the onSeek callback takes a signed 32-bit integer (which
@@ -2179,9 +2426,9 @@ static drflac_result drflac__read_utf8_coded_number(drflac_bs* bs, drflac_uint64
int byteCount;
int i;
- drflac_assert(bs != NULL);
- drflac_assert(pNumberOut != NULL);
- drflac_assert(pCRCOut != NULL);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(pNumberOut != NULL);
+ DRFLAC_ASSERT(pCRCOut != NULL);
crc = *pCRCOut;
@@ -2197,7 +2444,7 @@ static drflac_result drflac__read_utf8_coded_number(drflac_bs* bs, drflac_uint64
return DRFLAC_SUCCESS;
}
- byteCount = 1;
+ /*byteCount = 1;*/
if ((utf8[0] & 0xE0) == 0xC0) {
byteCount = 2;
} else if ((utf8[0] & 0xF0) == 0xE0) {
@@ -2216,7 +2463,7 @@ static drflac_result drflac__read_utf8_coded_number(drflac_bs* bs, drflac_uint64
}
/* Read extra bytes. */
- drflac_assert(byteCount > 1);
+ DRFLAC_ASSERT(byteCount > 1);
result = (drflac_uint64)(utf8[0] & (0xFF >> (byteCount + 1)));
for (i = 1; i < byteCount; ++i) {
@@ -2246,7 +2493,7 @@ static DRFLAC_INLINE drflac_int32 drflac__calculate_prediction_32(drflac_uint32
{
drflac_int32 prediction = 0;
- drflac_assert(order <= 32);
+ DRFLAC_ASSERT(order <= 32);
/* 32-bit version. */
@@ -2294,7 +2541,7 @@ static DRFLAC_INLINE drflac_int32 drflac__calculate_prediction_64(drflac_uint32
{
drflac_int64 prediction;
- drflac_assert(order <= 32);
+ DRFLAC_ASSERT(order <= 32);
/* 64-bit version. */
@@ -2471,436 +2718,6 @@ static DRFLAC_INLINE drflac_int32 drflac__calculate_prediction_64(drflac_uint32
return (drflac_int32)(prediction >> shift);
}
-static DRFLAC_INLINE void drflac__calculate_prediction_64_x4(drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, const drflac_uint32 riceParamParts[4], drflac_int32* pDecodedSamples)
-{
- drflac_int64 prediction0 = 0;
- drflac_int64 prediction1 = 0;
- drflac_int64 prediction2 = 0;
- drflac_int64 prediction3 = 0;
-
- drflac_assert(order <= 32);
-
- switch (order)
- {
- case 32:
- prediction0 += coefficients[31] * (drflac_int64)pDecodedSamples[-32];
- prediction1 += coefficients[31] * (drflac_int64)pDecodedSamples[-31];
- prediction2 += coefficients[31] * (drflac_int64)pDecodedSamples[-30];
- prediction3 += coefficients[31] * (drflac_int64)pDecodedSamples[-29];
- case 31:
- prediction0 += coefficients[30] * (drflac_int64)pDecodedSamples[-31];
- prediction1 += coefficients[30] * (drflac_int64)pDecodedSamples[-30];
- prediction2 += coefficients[30] * (drflac_int64)pDecodedSamples[-29];
- prediction3 += coefficients[30] * (drflac_int64)pDecodedSamples[-28];
- case 30:
- prediction0 += coefficients[29] * (drflac_int64)pDecodedSamples[-30];
- prediction1 += coefficients[29] * (drflac_int64)pDecodedSamples[-29];
- prediction2 += coefficients[29] * (drflac_int64)pDecodedSamples[-28];
- prediction3 += coefficients[29] * (drflac_int64)pDecodedSamples[-27];
- case 29:
- prediction0 += coefficients[28] * (drflac_int64)pDecodedSamples[-29];
- prediction1 += coefficients[28] * (drflac_int64)pDecodedSamples[-28];
- prediction2 += coefficients[28] * (drflac_int64)pDecodedSamples[-27];
- prediction3 += coefficients[28] * (drflac_int64)pDecodedSamples[-26];
- case 28:
- prediction0 += coefficients[27] * (drflac_int64)pDecodedSamples[-28];
- prediction1 += coefficients[27] * (drflac_int64)pDecodedSamples[-27];
- prediction2 += coefficients[27] * (drflac_int64)pDecodedSamples[-26];
- prediction3 += coefficients[27] * (drflac_int64)pDecodedSamples[-25];
- case 27:
- prediction0 += coefficients[26] * (drflac_int64)pDecodedSamples[-27];
- prediction1 += coefficients[26] * (drflac_int64)pDecodedSamples[-26];
- prediction2 += coefficients[26] * (drflac_int64)pDecodedSamples[-25];
- prediction3 += coefficients[26] * (drflac_int64)pDecodedSamples[-24];
- case 26:
- prediction0 += coefficients[25] * (drflac_int64)pDecodedSamples[-26];
- prediction1 += coefficients[25] * (drflac_int64)pDecodedSamples[-25];
- prediction2 += coefficients[25] * (drflac_int64)pDecodedSamples[-24];
- prediction3 += coefficients[25] * (drflac_int64)pDecodedSamples[-23];
- case 25:
- prediction0 += coefficients[24] * (drflac_int64)pDecodedSamples[-25];
- prediction1 += coefficients[24] * (drflac_int64)pDecodedSamples[-24];
- prediction2 += coefficients[24] * (drflac_int64)pDecodedSamples[-23];
- prediction3 += coefficients[24] * (drflac_int64)pDecodedSamples[-22];
- case 24:
- prediction0 += coefficients[23] * (drflac_int64)pDecodedSamples[-24];
- prediction1 += coefficients[23] * (drflac_int64)pDecodedSamples[-23];
- prediction2 += coefficients[23] * (drflac_int64)pDecodedSamples[-22];
- prediction3 += coefficients[23] * (drflac_int64)pDecodedSamples[-21];
- case 23:
- prediction0 += coefficients[22] * (drflac_int64)pDecodedSamples[-23];
- prediction1 += coefficients[22] * (drflac_int64)pDecodedSamples[-22];
- prediction2 += coefficients[22] * (drflac_int64)pDecodedSamples[-21];
- prediction3 += coefficients[22] * (drflac_int64)pDecodedSamples[-20];
- case 22:
- prediction0 += coefficients[21] * (drflac_int64)pDecodedSamples[-22];
- prediction1 += coefficients[21] * (drflac_int64)pDecodedSamples[-21];
- prediction2 += coefficients[21] * (drflac_int64)pDecodedSamples[-20];
- prediction3 += coefficients[21] * (drflac_int64)pDecodedSamples[-19];
- case 21:
- prediction0 += coefficients[20] * (drflac_int64)pDecodedSamples[-21];
- prediction1 += coefficients[20] * (drflac_int64)pDecodedSamples[-20];
- prediction2 += coefficients[20] * (drflac_int64)pDecodedSamples[-19];
- prediction3 += coefficients[20] * (drflac_int64)pDecodedSamples[-18];
- case 20:
- prediction0 += coefficients[19] * (drflac_int64)pDecodedSamples[-20];
- prediction1 += coefficients[19] * (drflac_int64)pDecodedSamples[-19];
- prediction2 += coefficients[19] * (drflac_int64)pDecodedSamples[-18];
- prediction3 += coefficients[19] * (drflac_int64)pDecodedSamples[-17];
- case 19:
- prediction0 += coefficients[18] * (drflac_int64)pDecodedSamples[-19];
- prediction1 += coefficients[18] * (drflac_int64)pDecodedSamples[-18];
- prediction2 += coefficients[18] * (drflac_int64)pDecodedSamples[-17];
- prediction3 += coefficients[18] * (drflac_int64)pDecodedSamples[-16];
- case 18:
- prediction0 += coefficients[17] * (drflac_int64)pDecodedSamples[-18];
- prediction1 += coefficients[17] * (drflac_int64)pDecodedSamples[-17];
- prediction2 += coefficients[17] * (drflac_int64)pDecodedSamples[-16];
- prediction3 += coefficients[17] * (drflac_int64)pDecodedSamples[-15];
- case 17:
- prediction0 += coefficients[16] * (drflac_int64)pDecodedSamples[-17];
- prediction1 += coefficients[16] * (drflac_int64)pDecodedSamples[-16];
- prediction2 += coefficients[16] * (drflac_int64)pDecodedSamples[-15];
- prediction3 += coefficients[16] * (drflac_int64)pDecodedSamples[-14];
-
- case 16:
- prediction0 += coefficients[15] * (drflac_int64)pDecodedSamples[-16];
- prediction1 += coefficients[15] * (drflac_int64)pDecodedSamples[-15];
- prediction2 += coefficients[15] * (drflac_int64)pDecodedSamples[-14];
- prediction3 += coefficients[15] * (drflac_int64)pDecodedSamples[-13];
- case 15:
- prediction0 += coefficients[14] * (drflac_int64)pDecodedSamples[-15];
- prediction1 += coefficients[14] * (drflac_int64)pDecodedSamples[-14];
- prediction2 += coefficients[14] * (drflac_int64)pDecodedSamples[-13];
- prediction3 += coefficients[14] * (drflac_int64)pDecodedSamples[-12];
- case 14:
- prediction0 += coefficients[13] * (drflac_int64)pDecodedSamples[-14];
- prediction1 += coefficients[13] * (drflac_int64)pDecodedSamples[-13];
- prediction2 += coefficients[13] * (drflac_int64)pDecodedSamples[-12];
- prediction3 += coefficients[13] * (drflac_int64)pDecodedSamples[-11];
- case 13:
- prediction0 += coefficients[12] * (drflac_int64)pDecodedSamples[-13];
- prediction1 += coefficients[12] * (drflac_int64)pDecodedSamples[-12];
- prediction2 += coefficients[12] * (drflac_int64)pDecodedSamples[-11];
- prediction3 += coefficients[12] * (drflac_int64)pDecodedSamples[-10];
- case 12:
- prediction0 += coefficients[11] * (drflac_int64)pDecodedSamples[-12];
- prediction1 += coefficients[11] * (drflac_int64)pDecodedSamples[-11];
- prediction2 += coefficients[11] * (drflac_int64)pDecodedSamples[-10];
- prediction3 += coefficients[11] * (drflac_int64)pDecodedSamples[- 9];
- case 11:
- prediction0 += coefficients[10] * (drflac_int64)pDecodedSamples[-11];
- prediction1 += coefficients[10] * (drflac_int64)pDecodedSamples[-10];
- prediction2 += coefficients[10] * (drflac_int64)pDecodedSamples[- 9];
- prediction3 += coefficients[10] * (drflac_int64)pDecodedSamples[- 8];
- case 10:
- prediction0 += coefficients[9] * (drflac_int64)pDecodedSamples[-10];
- prediction1 += coefficients[9] * (drflac_int64)pDecodedSamples[- 9];
- prediction2 += coefficients[9] * (drflac_int64)pDecodedSamples[- 8];
- prediction3 += coefficients[9] * (drflac_int64)pDecodedSamples[- 7];
- case 9:
- prediction0 += coefficients[8] * (drflac_int64)pDecodedSamples[- 9];
- prediction1 += coefficients[8] * (drflac_int64)pDecodedSamples[- 8];
- prediction2 += coefficients[8] * (drflac_int64)pDecodedSamples[- 7];
- prediction3 += coefficients[8] * (drflac_int64)pDecodedSamples[- 6];
- case 8:
- prediction0 += coefficients[7] * (drflac_int64)pDecodedSamples[- 8];
- prediction1 += coefficients[7] * (drflac_int64)pDecodedSamples[- 7];
- prediction2 += coefficients[7] * (drflac_int64)pDecodedSamples[- 6];
- prediction3 += coefficients[7] * (drflac_int64)pDecodedSamples[- 5];
- case 7:
- prediction0 += coefficients[6] * (drflac_int64)pDecodedSamples[- 7];
- prediction1 += coefficients[6] * (drflac_int64)pDecodedSamples[- 6];
- prediction2 += coefficients[6] * (drflac_int64)pDecodedSamples[- 5];
- prediction3 += coefficients[6] * (drflac_int64)pDecodedSamples[- 4];
- case 6:
- prediction0 += coefficients[5] * (drflac_int64)pDecodedSamples[- 6];
- prediction1 += coefficients[5] * (drflac_int64)pDecodedSamples[- 5];
- prediction2 += coefficients[5] * (drflac_int64)pDecodedSamples[- 4];
- prediction3 += coefficients[5] * (drflac_int64)pDecodedSamples[- 3];
- case 5:
- prediction0 += coefficients[4] * (drflac_int64)pDecodedSamples[- 5];
- prediction1 += coefficients[4] * (drflac_int64)pDecodedSamples[- 4];
- prediction2 += coefficients[4] * (drflac_int64)pDecodedSamples[- 3];
- prediction3 += coefficients[4] * (drflac_int64)pDecodedSamples[- 2];
- case 4:
- prediction0 += coefficients[3] * (drflac_int64)pDecodedSamples[- 4];
- prediction1 += coefficients[3] * (drflac_int64)pDecodedSamples[- 3];
- prediction2 += coefficients[3] * (drflac_int64)pDecodedSamples[- 2];
- prediction3 += coefficients[3] * (drflac_int64)pDecodedSamples[- 1];
- order = 3;
- }
-
- switch (order)
- {
- case 3: prediction0 += coefficients[ 2] * (drflac_int64)pDecodedSamples[- 3];
- case 2: prediction0 += coefficients[ 1] * (drflac_int64)pDecodedSamples[- 2];
- case 1: prediction0 += coefficients[ 0] * (drflac_int64)pDecodedSamples[- 1];
- }
- pDecodedSamples[0] = riceParamParts[0] + (drflac_int32)(prediction0 >> shift);
-
- switch (order)
- {
- case 3: prediction1 += coefficients[ 2] * (drflac_int64)pDecodedSamples[- 2];
- case 2: prediction1 += coefficients[ 1] * (drflac_int64)pDecodedSamples[- 1];
- case 1: prediction1 += coefficients[ 0] * (drflac_int64)pDecodedSamples[ 0];
- }
- pDecodedSamples[1] = riceParamParts[1] + (drflac_int32)(prediction1 >> shift);
-
- switch (order)
- {
- case 3: prediction2 += coefficients[ 2] * (drflac_int64)pDecodedSamples[- 1];
- case 2: prediction2 += coefficients[ 1] * (drflac_int64)pDecodedSamples[ 0];
- case 1: prediction2 += coefficients[ 0] * (drflac_int64)pDecodedSamples[ 1];
- }
- pDecodedSamples[2] = riceParamParts[2] + (drflac_int32)(prediction2 >> shift);
-
- switch (order)
- {
- case 3: prediction3 += coefficients[ 2] * (drflac_int64)pDecodedSamples[ 0];
- case 2: prediction3 += coefficients[ 1] * (drflac_int64)pDecodedSamples[ 1];
- case 1: prediction3 += coefficients[ 0] * (drflac_int64)pDecodedSamples[ 2];
- }
- pDecodedSamples[3] = riceParamParts[3] + (drflac_int32)(prediction3 >> shift);
-}
-
-#if defined(DRFLAC_SUPPORT_SSE41)
-static DRFLAC_INLINE drflac_int32 drflac__calculate_prediction_64__sse41(drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pDecodedSamples)
-{
- __m128i prediction = _mm_setzero_si128();
-
- drflac_assert(order <= 32);
-
- switch (order)
- {
- case 32:
- case 31: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[31], 0, coefficients[30]), _mm_set_epi32(0, pDecodedSamples[-32], 0, pDecodedSamples[-31])));
- case 30:
- case 29: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[29], 0, coefficients[28]), _mm_set_epi32(0, pDecodedSamples[-30], 0, pDecodedSamples[-29])));
- case 28:
- case 27: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[27], 0, coefficients[26]), _mm_set_epi32(0, pDecodedSamples[-28], 0, pDecodedSamples[-27])));
- case 26:
- case 25: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[25], 0, coefficients[24]), _mm_set_epi32(0, pDecodedSamples[-26], 0, pDecodedSamples[-25])));
- case 24:
- case 23: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[23], 0, coefficients[22]), _mm_set_epi32(0, pDecodedSamples[-24], 0, pDecodedSamples[-23])));
- case 22:
- case 21: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[21], 0, coefficients[20]), _mm_set_epi32(0, pDecodedSamples[-22], 0, pDecodedSamples[-21])));
- case 20:
- case 19: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[19], 0, coefficients[18]), _mm_set_epi32(0, pDecodedSamples[-20], 0, pDecodedSamples[-19])));
- case 18:
- case 17: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[17], 0, coefficients[16]), _mm_set_epi32(0, pDecodedSamples[-18], 0, pDecodedSamples[-17])));
- case 16:
- case 15: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[15], 0, coefficients[14]), _mm_set_epi32(0, pDecodedSamples[-16], 0, pDecodedSamples[-15])));
- case 14:
- case 13: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[13], 0, coefficients[12]), _mm_set_epi32(0, pDecodedSamples[-14], 0, pDecodedSamples[-13])));
- case 12:
- case 11: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[11], 0, coefficients[10]), _mm_set_epi32(0, pDecodedSamples[-12], 0, pDecodedSamples[-11])));
- case 10:
- case 9: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 9], 0, coefficients[ 8]), _mm_set_epi32(0, pDecodedSamples[-10], 0, pDecodedSamples[- 9])));
- case 8:
- case 7: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 7], 0, coefficients[ 6]), _mm_set_epi32(0, pDecodedSamples[- 8], 0, pDecodedSamples[- 7])));
- case 6:
- case 5: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 5], 0, coefficients[ 4]), _mm_set_epi32(0, pDecodedSamples[- 6], 0, pDecodedSamples[- 5])));
- case 4:
- case 3: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 3], 0, coefficients[ 2]), _mm_set_epi32(0, pDecodedSamples[- 4], 0, pDecodedSamples[- 3])));
- case 2:
- case 1: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 1], 0, coefficients[ 0]), _mm_set_epi32(0, pDecodedSamples[- 2], 0, pDecodedSamples[- 1])));
- }
-
- return (drflac_int32)((
- ((drflac_uint64*)&prediction)[0] +
- ((drflac_uint64*)&prediction)[1]) >> shift);
-}
-
-static DRFLAC_INLINE void drflac__calculate_prediction_64_x2__sse41(drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, const drflac_uint32 riceParamParts[4], drflac_int32* pDecodedSamples)
-{
- __m128i prediction = _mm_setzero_si128();
- drflac_int64 predictions[2] = {0, 0};
-
- drflac_assert(order <= 32);
-
- switch (order)
- {
- case 32: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[31], 0, coefficients[31]), _mm_set_epi32(0, pDecodedSamples[-31], 0, pDecodedSamples[-32])));
- case 31: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[30], 0, coefficients[30]), _mm_set_epi32(0, pDecodedSamples[-30], 0, pDecodedSamples[-31])));
- case 30: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[29], 0, coefficients[29]), _mm_set_epi32(0, pDecodedSamples[-29], 0, pDecodedSamples[-30])));
- case 29: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[28], 0, coefficients[28]), _mm_set_epi32(0, pDecodedSamples[-28], 0, pDecodedSamples[-29])));
- case 28: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[27], 0, coefficients[27]), _mm_set_epi32(0, pDecodedSamples[-27], 0, pDecodedSamples[-28])));
- case 27: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[26], 0, coefficients[26]), _mm_set_epi32(0, pDecodedSamples[-26], 0, pDecodedSamples[-27])));
- case 26: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[25], 0, coefficients[25]), _mm_set_epi32(0, pDecodedSamples[-25], 0, pDecodedSamples[-26])));
- case 25: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[24], 0, coefficients[24]), _mm_set_epi32(0, pDecodedSamples[-24], 0, pDecodedSamples[-25])));
- case 24: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[23], 0, coefficients[23]), _mm_set_epi32(0, pDecodedSamples[-23], 0, pDecodedSamples[-24])));
- case 23: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[22], 0, coefficients[22]), _mm_set_epi32(0, pDecodedSamples[-22], 0, pDecodedSamples[-23])));
- case 22: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[21], 0, coefficients[21]), _mm_set_epi32(0, pDecodedSamples[-21], 0, pDecodedSamples[-22])));
- case 21: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[20], 0, coefficients[20]), _mm_set_epi32(0, pDecodedSamples[-20], 0, pDecodedSamples[-21])));
- case 20: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[19], 0, coefficients[19]), _mm_set_epi32(0, pDecodedSamples[-19], 0, pDecodedSamples[-20])));
- case 19: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[18], 0, coefficients[18]), _mm_set_epi32(0, pDecodedSamples[-18], 0, pDecodedSamples[-19])));
- case 18: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[17], 0, coefficients[17]), _mm_set_epi32(0, pDecodedSamples[-17], 0, pDecodedSamples[-18])));
- case 17: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[16], 0, coefficients[16]), _mm_set_epi32(0, pDecodedSamples[-16], 0, pDecodedSamples[-17])));
- case 16: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[15], 0, coefficients[15]), _mm_set_epi32(0, pDecodedSamples[-15], 0, pDecodedSamples[-16])));
- case 15: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[14], 0, coefficients[14]), _mm_set_epi32(0, pDecodedSamples[-14], 0, pDecodedSamples[-15])));
- case 14: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[13], 0, coefficients[13]), _mm_set_epi32(0, pDecodedSamples[-13], 0, pDecodedSamples[-14])));
- case 13: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[12], 0, coefficients[12]), _mm_set_epi32(0, pDecodedSamples[-12], 0, pDecodedSamples[-13])));
- case 12: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[11], 0, coefficients[11]), _mm_set_epi32(0, pDecodedSamples[-11], 0, pDecodedSamples[-12])));
- case 11: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[10], 0, coefficients[10]), _mm_set_epi32(0, pDecodedSamples[-10], 0, pDecodedSamples[-11])));
- case 10: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 9], 0, coefficients[ 9]), _mm_set_epi32(0, pDecodedSamples[- 9], 0, pDecodedSamples[-10])));
- case 9: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 8], 0, coefficients[ 8]), _mm_set_epi32(0, pDecodedSamples[- 8], 0, pDecodedSamples[- 9])));
- case 8: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 7], 0, coefficients[ 7]), _mm_set_epi32(0, pDecodedSamples[- 7], 0, pDecodedSamples[- 8])));
- case 7: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 6], 0, coefficients[ 6]), _mm_set_epi32(0, pDecodedSamples[- 6], 0, pDecodedSamples[- 7])));
- case 6: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 5], 0, coefficients[ 5]), _mm_set_epi32(0, pDecodedSamples[- 5], 0, pDecodedSamples[- 6])));
- case 5: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 4], 0, coefficients[ 4]), _mm_set_epi32(0, pDecodedSamples[- 4], 0, pDecodedSamples[- 5])));
- case 4: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 3], 0, coefficients[ 3]), _mm_set_epi32(0, pDecodedSamples[- 3], 0, pDecodedSamples[- 4])));
- case 3: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 2], 0, coefficients[ 2]), _mm_set_epi32(0, pDecodedSamples[- 2], 0, pDecodedSamples[- 3])));
- case 2: prediction = _mm_add_epi64(prediction, _mm_mul_epi32(_mm_set_epi32(0, coefficients[ 1], 0, coefficients[ 1]), _mm_set_epi32(0, pDecodedSamples[- 1], 0, pDecodedSamples[- 2])));
- order = 1;
- }
-
- _mm_storeu_si128((__m128i*)predictions, prediction);
-
- switch (order)
- {
- case 1: predictions[0] += coefficients[ 0] * (drflac_int64)pDecodedSamples[- 1];
- }
- pDecodedSamples[0] = riceParamParts[0] + (drflac_int32)(predictions[0] >> shift);
-
- switch (order)
- {
- case 1: predictions[1] += coefficients[ 0] * (drflac_int64)pDecodedSamples[ 0];
- }
- pDecodedSamples[1] = riceParamParts[1] + (drflac_int32)(predictions[1] >> shift);
-}
-
-
-static DRFLAC_INLINE __m128i drflac__mm_not_si128(__m128i a)
-{
- return _mm_xor_si128(a, _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128()));
-}
-
-static DRFLAC_INLINE __m128i drflac__mm_slide1_epi32(__m128i a, __m128i b)
-{
- /* a3a2a1a0/b3b2b1b0 -> a2a1a0b3 */
-
- /* Result = a2a1a0b3 */
- __m128i b3a3b2a2 = _mm_unpackhi_epi32(a, b);
- __m128i a2b3a2b3 = _mm_shuffle_epi32(b3a3b2a2, _MM_SHUFFLE(0, 3, 0, 3));
- __m128i a1a2a0b3 = _mm_unpacklo_epi32(a2b3a2b3, a);
- __m128i a2a1a0b3 = _mm_shuffle_epi32(a1a2a0b3, _MM_SHUFFLE(2, 3, 1, 0));
- return a2a1a0b3;
-}
-
-static DRFLAC_INLINE __m128i drflac__mm_slide2_epi32(__m128i a, __m128i b)
-{
- /* Result = a1a0b3b2 */
- __m128i b1b0b3b2 = _mm_shuffle_epi32(b, _MM_SHUFFLE(1, 0, 3, 2));
- __m128i a1b3a0b2 = _mm_unpacklo_epi32(b1b0b3b2, a);
- __m128i a1a0b3b2 = _mm_shuffle_epi32(a1b3a0b2, _MM_SHUFFLE(3, 1, 2, 0));
- return a1a0b3b2;
-}
-
-static DRFLAC_INLINE __m128i drflac__mm_slide3_epi32(__m128i a, __m128i b)
-{
- /* Result = a0b3b2b1 */
- __m128i b1a1b0a0 = _mm_unpacklo_epi32(a, b);
- __m128i a0b1a0b1 = _mm_shuffle_epi32(b1a1b0a0, _MM_SHUFFLE(0, 3, 0, 3));
- __m128i b3a0b2b1 = _mm_unpackhi_epi32(a0b1a0b1, b);
- __m128i a0b3b2b1 = _mm_shuffle_epi32(b3a0b2b1, _MM_SHUFFLE(2, 3, 1, 0));
- return a0b3b2b1;
-}
-
-static DRFLAC_INLINE void drflac__calculate_prediction_32_x4__sse41(drflac_uint32 order, drflac_int32 shift, const __m128i* coefficients128, const __m128i riceParamParts128, drflac_int32* pDecodedSamples)
-{
- drflac_assert(order <= 32);
-
- /* I don't think this is as efficient as it could be. More work needs to be done on this. */
- if (order > 0) {
- drflac_int32 predictions[4];
- drflac_uint32 riceParamParts[4];
-
- __m128i s_09_10_11_12 = _mm_loadu_si128((const __m128i*)(pDecodedSamples - 12));
- __m128i s_05_06_07_08 = _mm_loadu_si128((const __m128i*)(pDecodedSamples - 8));
- __m128i s_01_02_03_04 = _mm_loadu_si128((const __m128i*)(pDecodedSamples - 4));
-
- __m128i prediction = _mm_setzero_si128();
-
- /*
- The idea with this switch is to do do a single jump based on the value of "order". In my test library, "order" is never larger than 12, so
- I have decided to do a less optimal, but simpler solution in the order > 12 case.
- */
- switch (order)
- {
- case 32: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[31], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 32))));
- case 31: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[30], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 31))));
- case 30: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[29], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 30))));
- case 29: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[28], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 29))));
- case 28: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[27], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 28))));
- case 27: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[26], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 27))));
- case 26: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[25], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 26))));
- case 25: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[24], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 25))));
- case 24: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[23], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 24))));
- case 23: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[22], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 23))));
- case 22: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[21], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 22))));
- case 21: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[20], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 21))));
- case 20: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[19], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 20))));
- case 19: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[18], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 19))));
- case 18: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[17], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 18))));
- case 17: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[16], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 17))));
- case 16: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[15], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 16))));
- case 15: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[14], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 15))));
- case 14: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[13], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 14))));
- case 13: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[12], _mm_loadu_si128((const __m128i*)(pDecodedSamples - 13))));
-
- case 12: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[11], s_09_10_11_12));
- case 11: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[10], drflac__mm_slide3_epi32(s_05_06_07_08, s_09_10_11_12)));
- case 10: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[ 9], drflac__mm_slide2_epi32(s_05_06_07_08, s_09_10_11_12)));
- case 9: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[ 8], drflac__mm_slide1_epi32(s_05_06_07_08, s_09_10_11_12)));
- case 8: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[ 7], s_05_06_07_08));
- case 7: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[ 6], drflac__mm_slide3_epi32(s_01_02_03_04, s_05_06_07_08)));
- case 6: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[ 5], drflac__mm_slide2_epi32(s_01_02_03_04, s_05_06_07_08)));
- case 5: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[ 4], drflac__mm_slide1_epi32(s_01_02_03_04, s_05_06_07_08)));
- case 4: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[ 3], s_01_02_03_04)); order = 3; /* <-- Don't forget to set order to 3 here! */
- case 3: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[ 2], drflac__mm_slide3_epi32(_mm_setzero_si128(), s_01_02_03_04)));
- case 2: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[ 1], drflac__mm_slide2_epi32(_mm_setzero_si128(), s_01_02_03_04)));
- case 1: prediction = _mm_add_epi32(prediction, _mm_mullo_epi32(coefficients128[ 0], drflac__mm_slide1_epi32(_mm_setzero_si128(), s_01_02_03_04)));
- }
-
- _mm_storeu_si128((__m128i*)predictions, prediction);
- _mm_storeu_si128((__m128i*)riceParamParts, riceParamParts128);
-
- predictions[0] = riceParamParts[0] + (predictions[0] >> shift);
-
- switch (order)
- {
- case 3: predictions[3] += ((const drflac_int32*)&coefficients128[ 2])[0] * predictions[ 0];
- case 2: predictions[2] += ((const drflac_int32*)&coefficients128[ 1])[0] * predictions[ 0];
- case 1: predictions[1] += ((const drflac_int32*)&coefficients128[ 0])[0] * predictions[ 0];
- }
- predictions[1] = riceParamParts[1] + (predictions[1] >> shift);
-
- switch (order)
- {
- case 3:
- case 2: predictions[3] += ((const drflac_int32*)&coefficients128[ 1])[0] * predictions[ 1];
- case 1: predictions[2] += ((const drflac_int32*)&coefficients128[ 0])[0] * predictions[ 1];
- }
- predictions[2] = riceParamParts[2] + (predictions[2] >> shift);
-
- switch (order)
- {
- case 3:
- case 2:
- case 1: predictions[3] += ((const drflac_int32*)&coefficients128[ 0])[0] * predictions[ 2];
- }
- predictions[3] = riceParamParts[3] + (predictions[3] >> shift);
-
- pDecodedSamples[0] = predictions[0];
- pDecodedSamples[1] = predictions[1];
- pDecodedSamples[2] = predictions[2];
- pDecodedSamples[3] = predictions[3];
- } else {
- _mm_storeu_si128((__m128i*)pDecodedSamples, riceParamParts128);
- }
-}
-#endif
#if 0
/*
@@ -2911,9 +2728,9 @@ static drflac_bool32 drflac__decode_samples_with_residual__rice__reference(drfla
{
drflac_uint32 i;
- drflac_assert(bs != NULL);
- drflac_assert(count > 0);
- drflac_assert(pSamplesOut != NULL);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(count > 0);
+ DRFLAC_ASSERT(pSamplesOut != NULL);
for (i = 0; i < count; ++i) {
drflac_uint32 zeroCounter = 0;
@@ -2947,7 +2764,7 @@ static drflac_bool32 drflac__decode_samples_with_residual__rice__reference(drfla
}
- if (bitsPerSample > 16) {
+ if (bitsPerSample+shift >= 32) {
pSamplesOut[i] = decodedRice + drflac__calculate_prediction_64(order, shift, coefficients, pSamplesOut + i);
} else {
pSamplesOut[i] = decodedRice + drflac__calculate_prediction_32(order, shift, coefficients, pSamplesOut + i);
@@ -3000,7 +2817,7 @@ static DRFLAC_INLINE drflac_bool32 drflac__read_rice_parts(drflac_bs* bs, drflac
drflac_uint32 riceParamPart;
drflac_uint32 riceLength;
- drflac_assert(riceParam > 0); /* <-- riceParam should never be 0. drflac__read_rice_parts__param_equals_zero() should be used instead for this case. */
+ DRFLAC_ASSERT(riceParam > 0); /* <-- riceParam should never be 0. drflac__read_rice_parts__param_equals_zero() should be used instead for this case. */
riceParamMask = DRFLAC_CACHE_L1_SELECTION_MASK(riceParam);
@@ -3110,8 +2927,8 @@ static DRFLAC_INLINE drflac_bool32 drflac__read_rice_parts_x1(drflac_bs* bs, drf
/* Before reloading the cache we need to grab the size in bits of the low part. */
riceParamPartLoBitCount = bs_consumedBits - riceParamPlus1MaxConsumedBits;
- drflac_assert(riceParamPartLoBitCount > 0 && riceParamPartLoBitCount < 32);
-
+ DRFLAC_ASSERT(riceParamPartLoBitCount > 0 && riceParamPartLoBitCount < 32);
+
/* Now reload the cache. */
if (bs->nextL2Line < DRFLAC_CACHE_L2_LINE_COUNT(bs)) {
#ifndef DR_FLAC_NO_CRC
@@ -3183,11 +3000,9 @@ static DRFLAC_INLINE drflac_bool32 drflac__read_rice_parts_x1(drflac_bs* bs, drf
return DRFLAC_TRUE;
}
-static DRFLAC_INLINE drflac_bool32 drflac__read_rice_parts_x4(drflac_bs* bs, drflac_uint8 riceParam, drflac_uint32* pZeroCounterOut, drflac_uint32* pRiceParamPartOut)
+static DRFLAC_INLINE drflac_bool32 drflac__seek_rice_parts(drflac_bs* bs, drflac_uint8 riceParam)
{
drflac_uint32 riceParamPlus1 = riceParam + 1;
- /*drflac_cache_t riceParamPlus1Mask = DRFLAC_CACHE_L1_SELECTION_MASK(riceParamPlus1);*/
- drflac_uint32 riceParamPlus1Shift = DRFLAC_CACHE_L1_SELECTION_SHIFT(bs, riceParamPlus1);
drflac_uint32 riceParamPlus1MaxConsumedBits = DRFLAC_CACHE_L1_SIZE_BITS(bs) - riceParamPlus1;
/*
@@ -3197,157 +3012,32 @@ static DRFLAC_INLINE drflac_bool32 drflac__read_rice_parts_x4(drflac_bs* bs, drf
drflac_cache_t bs_cache = bs->cache;
drflac_uint32 bs_consumedBits = bs->consumedBits;
- /*
- What this is doing is trying to efficiently extract 4 rice parts at a time, the idea being that we can exploit certain properties
- to our advantage to make things more efficient.
- */
- int i;
- for (i = 0; i < 4; ++i) {
- /* The first thing to do is find the first unset bit. Most likely a bit will be set in the current cache line. */
- drflac_uint32 lzcount = drflac__clz(bs_cache);
- if (lzcount < sizeof(bs_cache)*8) {
- pZeroCounterOut[i] = lzcount;
+ /* The first thing to do is find the first unset bit. Most likely a bit will be set in the current cache line. */
+ drflac_uint32 lzcount = drflac__clz(bs_cache);
+ if (lzcount < sizeof(bs_cache)*8) {
+ /*
+ It is most likely that the riceParam part (which comes after the zero counter) is also on this cache line. When extracting
+ this, we include the set bit from the unary coded part because it simplifies cache management. This bit will be handled
+ outside of this function at a higher level.
+ */
+ extract_rice_param_part:
+ bs_cache <<= lzcount;
+ bs_consumedBits += lzcount;
+ if (bs_consumedBits <= riceParamPlus1MaxConsumedBits) {
+ /* Getting here means the rice parameter part is wholly contained within the current cache line. */
+ bs_cache <<= riceParamPlus1;
+ bs_consumedBits += riceParamPlus1;
+ } else {
/*
- It is most likely that the riceParam part (which comes after the zero counter) is also on this cache line. When extracting
- this, we include the set bit from the unary coded part because it simplifies cache management. This bit will be handled
- outside of this function at a higher level.
+ Getting here means the rice parameter part straddles the cache line. We need to read from the tail of the current cache
+ line, reload the cache, and then combine it with the head of the next cache line.
*/
- extract_rice_param_part:
- bs_cache <<= lzcount;
- bs_consumedBits += lzcount;
-
- if (bs_consumedBits <= riceParamPlus1MaxConsumedBits) {
- /* Getting here means the rice parameter part is wholly contained within the current cache line. */
- pRiceParamPartOut[i] = (drflac_uint32)(bs_cache >> riceParamPlus1Shift);
- bs_cache <<= riceParamPlus1;
- bs_consumedBits += riceParamPlus1;
- } else {
- drflac_uint32 riceParamPartHi;
- drflac_uint32 riceParamPartLo;
- drflac_uint32 riceParamPartLoBitCount;
-
- /*
- Getting here means the rice parameter part straddles the cache line. We need to read from the tail of the current cache
- line, reload the cache, and then combine it with the head of the next cache line.
- */
- /* Grab the high part of the rice parameter part. */
- riceParamPartHi = (drflac_uint32)(bs_cache >> riceParamPlus1Shift);
+ /* Before reloading the cache we need to grab the size in bits of the low part. */
+ drflac_uint32 riceParamPartLoBitCount = bs_consumedBits - riceParamPlus1MaxConsumedBits;
+ DRFLAC_ASSERT(riceParamPartLoBitCount > 0 && riceParamPartLoBitCount < 32);
- /* Before reloading the cache we need to grab the size in bits of the low part. */
- riceParamPartLoBitCount = bs_consumedBits - riceParamPlus1MaxConsumedBits;
-
- /* Now reload the cache. */
- if (bs->nextL2Line < DRFLAC_CACHE_L2_LINE_COUNT(bs)) {
- #ifndef DR_FLAC_NO_CRC
- drflac__update_crc16(bs);
- #endif
- bs_cache = drflac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]);
- bs_consumedBits = riceParamPartLoBitCount;
- #ifndef DR_FLAC_NO_CRC
- bs->crc16Cache = bs_cache;
- #endif
- } else {
- /* Slow path. We need to fetch more data from the client. */
- if (!drflac__reload_cache(bs)) {
- return DRFLAC_FALSE;
- }
-
- bs_cache = bs->cache;
- bs_consumedBits = bs->consumedBits + riceParamPartLoBitCount;
- }
-
- /* We should now have enough information to construct the rice parameter part. */
- riceParamPartLo = (drflac_uint32)(bs_cache >> (DRFLAC_CACHE_L1_SELECTION_SHIFT(bs, riceParamPartLoBitCount)));
- pRiceParamPartOut[i] = riceParamPartHi | riceParamPartLo;
-
- bs_cache <<= riceParamPartLoBitCount;
- }
- } else {
- /*
- Getting here means there are no bits set on the cache line. This is a less optimal case because we just wasted a call
- to drflac__clz() and we need to reload the cache.
- */
- drflac_uint32 zeroCounter = (drflac_uint32)(DRFLAC_CACHE_L1_SIZE_BITS(bs) - bs_consumedBits);
- for (;;) {
- if (bs->nextL2Line < DRFLAC_CACHE_L2_LINE_COUNT(bs)) {
- #ifndef DR_FLAC_NO_CRC
- drflac__update_crc16(bs);
- #endif
- bs_cache = drflac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]);
- bs_consumedBits = 0;
- #ifndef DR_FLAC_NO_CRC
- bs->crc16Cache = bs_cache;
- #endif
- } else {
- /* Slow path. We need to fetch more data from the client. */
- if (!drflac__reload_cache(bs)) {
- return DRFLAC_FALSE;
- }
-
- bs_cache = bs->cache;
- bs_consumedBits = bs->consumedBits;
- }
-
- lzcount = drflac__clz(bs_cache);
- zeroCounter += lzcount;
-
- if (lzcount < sizeof(bs_cache)*8) {
- break;
- }
- }
-
- pZeroCounterOut[i] = zeroCounter;
- goto extract_rice_param_part;
- }
- }
-
- /* Make sure the cache is restored at the end of it all. */
- bs->cache = bs_cache;
- bs->consumedBits = bs_consumedBits;
-
- return DRFLAC_TRUE;
-}
-
-static DRFLAC_INLINE drflac_bool32 drflac__seek_rice_parts(drflac_bs* bs, drflac_uint8 riceParam)
-{
- drflac_uint32 riceParamPlus1 = riceParam + 1;
- drflac_uint32 riceParamPlus1MaxConsumedBits = DRFLAC_CACHE_L1_SIZE_BITS(bs) - riceParamPlus1;
-
- /*
- The idea here is to use local variables for the cache in an attempt to encourage the compiler to store them in registers. I have
- no idea how this will work in practice...
- */
- drflac_cache_t bs_cache = bs->cache;
- drflac_uint32 bs_consumedBits = bs->consumedBits;
-
- /* The first thing to do is find the first unset bit. Most likely a bit will be set in the current cache line. */
- drflac_uint32 lzcount = drflac__clz(bs_cache);
- if (lzcount < sizeof(bs_cache)*8) {
- /*
- It is most likely that the riceParam part (which comes after the zero counter) is also on this cache line. When extracting
- this, we include the set bit from the unary coded part because it simplifies cache management. This bit will be handled
- outside of this function at a higher level.
- */
- extract_rice_param_part:
- bs_cache <<= lzcount;
- bs_consumedBits += lzcount;
-
- if (bs_consumedBits <= riceParamPlus1MaxConsumedBits) {
- /* Getting here means the rice parameter part is wholly contained within the current cache line. */
- bs_cache <<= riceParamPlus1;
- bs_consumedBits += riceParamPlus1;
- } else {
- /*
- Getting here means the rice parameter part straddles the cache line. We need to read from the tail of the current cache
- line, reload the cache, and then combine it with the head of the next cache line.
- */
-
- /* Before reloading the cache we need to grab the size in bits of the low part. */
- drflac_uint32 riceParamPartLoBitCount = bs_consumedBits - riceParamPlus1MaxConsumedBits;
- drflac_assert(riceParamPartLoBitCount > 0 && riceParamPartLoBitCount < 32);
-
/* Now reload the cache. */
if (bs->nextL2Line < DRFLAC_CACHE_L2_LINE_COUNT(bs)) {
#ifndef DR_FLAC_NO_CRC
@@ -3412,29 +3102,72 @@ static DRFLAC_INLINE drflac_bool32 drflac__seek_rice_parts(drflac_bs* bs, drflac
}
-static drflac_bool32 drflac__decode_samples_with_residual__rice__scalar(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
+static drflac_bool32 drflac__decode_samples_with_residual__rice__scalar_zeroorder(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
{
drflac_uint32 t[2] = {0x00000000, 0xFFFFFFFF};
drflac_uint32 zeroCountPart0;
- drflac_uint32 zeroCountPart1;
- drflac_uint32 zeroCountPart2;
- drflac_uint32 zeroCountPart3;
drflac_uint32 riceParamPart0;
- drflac_uint32 riceParamPart1;
- drflac_uint32 riceParamPart2;
- drflac_uint32 riceParamPart3;
+ drflac_uint32 riceParamMask;
+ drflac_uint32 i;
+
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(count > 0);
+ DRFLAC_ASSERT(pSamplesOut != NULL);
+
+ (void)bitsPerSample;
+ (void)order;
+ (void)shift;
+ (void)coefficients;
+
+ riceParamMask = (drflac_uint32)~((~0UL) << riceParam);
+
+ i = 0;
+ while (i < count) {
+ /* Rice extraction. */
+ if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountPart0, &riceParamPart0)) {
+ return DRFLAC_FALSE;
+ }
+
+ /* Rice reconstruction. */
+ riceParamPart0 &= riceParamMask;
+ riceParamPart0 |= (zeroCountPart0 << riceParam);
+ riceParamPart0 = (riceParamPart0 >> 1) ^ t[riceParamPart0 & 0x01];
+
+ pSamplesOut[i] = riceParamPart0;
+
+ i += 1;
+ }
+
+ return DRFLAC_TRUE;
+}
+
+static drflac_bool32 drflac__decode_samples_with_residual__rice__scalar(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
+{
+ drflac_uint32 t[2] = {0x00000000, 0xFFFFFFFF};
+ drflac_uint32 zeroCountPart0 = 0;
+ drflac_uint32 zeroCountPart1 = 0;
+ drflac_uint32 zeroCountPart2 = 0;
+ drflac_uint32 zeroCountPart3 = 0;
+ drflac_uint32 riceParamPart0 = 0;
+ drflac_uint32 riceParamPart1 = 0;
+ drflac_uint32 riceParamPart2 = 0;
+ drflac_uint32 riceParamPart3 = 0;
drflac_uint32 riceParamMask;
const drflac_int32* pSamplesOutEnd;
drflac_uint32 i;
- drflac_assert(bs != NULL);
- drflac_assert(count > 0);
- drflac_assert(pSamplesOut != NULL);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(count > 0);
+ DRFLAC_ASSERT(pSamplesOut != NULL);
+
+ if (order == 0) {
+ return drflac__decode_samples_with_residual__rice__scalar_zeroorder(bs, bitsPerSample, count, riceParam, order, shift, coefficients, pSamplesOut);
+ }
- riceParamMask = ~((~0UL) << riceParam);
- pSamplesOutEnd = pSamplesOut + ((count >> 2) << 2);
+ riceParamMask = (drflac_uint32)~((~0UL) << riceParam);
+ pSamplesOutEnd = pSamplesOut + (count & ~3);
- if (bitsPerSample >= 24) {
+ if (bitsPerSample+shift > 32) {
while (pSamplesOut < pSamplesOutEnd) {
/*
Rice extraction. It's faster to do this one at a time against local variables than it is to use the x4 version
@@ -3502,7 +3235,7 @@ static drflac_bool32 drflac__decode_samples_with_residual__rice__scalar(drflac_b
}
}
- i = ((count >> 2) << 2);
+ i = (count & ~3);
while (i < count) {
/* Rice extraction. */
if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountPart0, &riceParamPart0)) {
@@ -3516,7 +3249,7 @@ static drflac_bool32 drflac__decode_samples_with_residual__rice__scalar(drflac_b
/*riceParamPart0 = (riceParamPart0 >> 1) ^ (~(riceParamPart0 & 0x01) + 1);*/
/* Sample reconstruction. */
- if (bitsPerSample >= 24) {
+ if (bitsPerSample+shift > 32) {
pSamplesOut[0] = riceParamPart0 + drflac__calculate_prediction_64(order, shift, coefficients, pSamplesOut + 0);
} else {
pSamplesOut[0] = riceParamPart0 + drflac__calculate_prediction_32(order, shift, coefficients, pSamplesOut + 0);
@@ -3525,136 +3258,246 @@ static drflac_bool32 drflac__decode_samples_with_residual__rice__scalar(drflac_b
i += 1;
pSamplesOut += 1;
}
-
+
return DRFLAC_TRUE;
}
+#if defined(DRFLAC_SUPPORT_SSE2)
+static DRFLAC_INLINE __m128i drflac__mm_packs_interleaved_epi32(__m128i a, __m128i b)
+{
+ __m128i r;
+
+ /* Pack. */
+ r = _mm_packs_epi32(a, b);
+
+ /* a3a2 a1a0 b3b2 b1b0 -> a3a2 b3b2 a1a0 b1b0 */
+ r = _mm_shuffle_epi32(r, _MM_SHUFFLE(3, 1, 2, 0));
+
+ /* a3a2 b3b2 a1a0 b1b0 -> a3b3 a2b2 a1b1 a0b0 */
+ r = _mm_shufflehi_epi16(r, _MM_SHUFFLE(3, 1, 2, 0));
+ r = _mm_shufflelo_epi16(r, _MM_SHUFFLE(3, 1, 2, 0));
+
+ return r;
+}
+#endif
+
#if defined(DRFLAC_SUPPORT_SSE41)
-static drflac_bool32 drflac__decode_samples_with_residual__rice__sse41(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
+static DRFLAC_INLINE __m128i drflac__mm_not_si128(__m128i a)
{
- static drflac_uint32 t[2] = {0x00000000, 0xFFFFFFFF};
+ return _mm_xor_si128(a, _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128()));
+}
- /*drflac_uint32 zeroCountParts[4];*/
- /*drflac_uint32 riceParamParts[4];*/
+static DRFLAC_INLINE __m128i drflac__mm_hadd_epi32(__m128i x)
+{
+ __m128i x64 = _mm_add_epi32(x, _mm_shuffle_epi32(x, _MM_SHUFFLE(1, 0, 3, 2)));
+ __m128i x32 = _mm_shufflelo_epi16(x64, _MM_SHUFFLE(1, 0, 3, 2));
+ return _mm_add_epi32(x64, x32);
+}
- drflac_uint32 zeroCountParts0;
- drflac_uint32 zeroCountParts1;
- drflac_uint32 zeroCountParts2;
- drflac_uint32 zeroCountParts3;
- drflac_uint32 riceParamParts0;
- drflac_uint32 riceParamParts1;
- drflac_uint32 riceParamParts2;
- drflac_uint32 riceParamParts3;
- drflac_uint32 riceParamMask;
- const drflac_int32* pSamplesOutEnd;
- __m128i riceParamMask128;
- __m128i one;
- drflac_uint32 i;
+static DRFLAC_INLINE __m128i drflac__mm_hadd_epi64(__m128i x)
+{
+ return _mm_add_epi64(x, _mm_shuffle_epi32(x, _MM_SHUFFLE(1, 0, 3, 2)));
+}
- drflac_assert(bs != NULL);
- drflac_assert(count > 0);
- drflac_assert(pSamplesOut != NULL);
+static DRFLAC_INLINE __m128i drflac__mm_srai_epi64(__m128i x, int count)
+{
+ /*
+ To simplify this we are assuming count < 32. This restriction allows us to work on a low side and a high side. The low side
+ is shifted with zero bits, whereas the right side is shifted with sign bits.
+ */
+ __m128i lo = _mm_srli_epi64(x, count);
+ __m128i hi = _mm_srai_epi32(x, count);
- riceParamMask = ~((~0UL) << riceParam);
- riceParamMask128 = _mm_set1_epi32(riceParamMask);
- one = _mm_set1_epi32(0x01);
+ hi = _mm_and_si128(hi, _mm_set_epi32(0xFFFFFFFF, 0, 0xFFFFFFFF, 0)); /* The high part needs to have the low part cleared. */
- pSamplesOutEnd = pSamplesOut + ((count >> 2) << 2);
+ return _mm_or_si128(lo, hi);
+}
- if (bitsPerSample >= 24) {
- while (pSamplesOut < pSamplesOutEnd) {
- __m128i zeroCountPart128;
- __m128i riceParamPart128;
- drflac_uint32 riceParamParts[4];
-
- /* Rice extraction. */
- if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0) ||
- !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts1, &riceParamParts1) ||
- !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts2, &riceParamParts2) ||
- !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts3, &riceParamParts3)) {
- return DRFLAC_FALSE;
- }
+static drflac_bool32 drflac__decode_samples_with_residual__rice__sse41_32(drflac_bs* bs, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
+{
+ int i;
+ drflac_uint32 riceParamMask;
+ drflac_int32* pDecodedSamples = pSamplesOut;
+ drflac_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3);
+ drflac_uint32 zeroCountParts0 = 0;
+ drflac_uint32 zeroCountParts1 = 0;
+ drflac_uint32 zeroCountParts2 = 0;
+ drflac_uint32 zeroCountParts3 = 0;
+ drflac_uint32 riceParamParts0 = 0;
+ drflac_uint32 riceParamParts1 = 0;
+ drflac_uint32 riceParamParts2 = 0;
+ drflac_uint32 riceParamParts3 = 0;
+ __m128i coefficients128_0;
+ __m128i coefficients128_4;
+ __m128i coefficients128_8;
+ __m128i samples128_0;
+ __m128i samples128_4;
+ __m128i samples128_8;
+ __m128i riceParamMask128;
- zeroCountPart128 = _mm_set_epi32(zeroCountParts3, zeroCountParts2, zeroCountParts1, zeroCountParts0);
- riceParamPart128 = _mm_set_epi32(riceParamParts3, riceParamParts2, riceParamParts1, riceParamParts0);
+ const drflac_uint32 t[2] = {0x00000000, 0xFFFFFFFF};
- riceParamPart128 = _mm_and_si128(riceParamPart128, riceParamMask128);
- riceParamPart128 = _mm_or_si128(riceParamPart128, _mm_slli_epi32(zeroCountPart128, riceParam));
- riceParamPart128 = _mm_xor_si128(_mm_srli_epi32(riceParamPart128, 1), _mm_mullo_epi32(_mm_and_si128(riceParamPart128, one), _mm_set1_epi32(0xFFFFFFFF))); /* <-- Only supported from SSE4.1 */
- /*riceParamPart128 = _mm_xor_si128(_mm_srli_epi32(riceParamPart128, 1), _mm_add_epi32(drflac__mm_not_si128(_mm_and_si128(riceParamPart128, one)), one));*/ /* <-- SSE2 compatible */
+ riceParamMask = (drflac_uint32)~((~0UL) << riceParam);
+ riceParamMask128 = _mm_set1_epi32(riceParamMask);
- _mm_storeu_si128((__m128i*)riceParamParts, riceParamPart128);
+ /* Pre-load. */
+ coefficients128_0 = _mm_setzero_si128();
+ coefficients128_4 = _mm_setzero_si128();
+ coefficients128_8 = _mm_setzero_si128();
- #if defined(DRFLAC_64BIT)
- /* The scalar implementation seems to be faster on 64-bit in my testing. */
- drflac__calculate_prediction_64_x4(order, shift, coefficients, riceParamParts, pSamplesOut);
- #else
- pSamplesOut[0] = riceParamParts[0] + drflac__calculate_prediction_64__sse41(order, shift, coefficients, pSamplesOut + 0);
- pSamplesOut[1] = riceParamParts[1] + drflac__calculate_prediction_64__sse41(order, shift, coefficients, pSamplesOut + 1);
- pSamplesOut[2] = riceParamParts[2] + drflac__calculate_prediction_64__sse41(order, shift, coefficients, pSamplesOut + 2);
- pSamplesOut[3] = riceParamParts[3] + drflac__calculate_prediction_64__sse41(order, shift, coefficients, pSamplesOut + 3);
- #endif
+ samples128_0 = _mm_setzero_si128();
+ samples128_4 = _mm_setzero_si128();
+ samples128_8 = _mm_setzero_si128();
- pSamplesOut += 4;
- }
- } else {
- drflac_int32 coefficientsUnaligned[32*4 + 4] = {0};
- drflac_int32* coefficients128 = (drflac_int32*)(((size_t)coefficientsUnaligned + 15) & ~15);
+ /*
+ Pre-loading the coefficients and prior samples is annoying because we need to ensure we don't try reading more than
+ what's available in the input buffers. It would be conenient to use a fall-through switch to do this, but this results
+ in strict aliasing warnings with GCC. To work around this I'm just doing something hacky. This feels a bit convoluted
+ so I think there's opportunity for this to be simplified.
+ */
+#if 1
+ {
+ int runningOrder = order;
- for (i = 0; i < order; ++i) {
- coefficients128[i*4+0] = coefficients[i];
- coefficients128[i*4+1] = coefficients[i];
- coefficients128[i*4+2] = coefficients[i];
- coefficients128[i*4+3] = coefficients[i];
+ /* 0 - 3. */
+ if (runningOrder >= 4) {
+ coefficients128_0 = _mm_loadu_si128((const __m128i*)(coefficients + 0));
+ samples128_0 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 4));
+ runningOrder -= 4;
+ } else {
+ switch (runningOrder) {
+ case 3: coefficients128_0 = _mm_set_epi32(0, coefficients[2], coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], pSamplesOut[-3], 0); break;
+ case 2: coefficients128_0 = _mm_set_epi32(0, 0, coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], 0, 0); break;
+ case 1: coefficients128_0 = _mm_set_epi32(0, 0, 0, coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], 0, 0, 0); break;
+ }
+ runningOrder = 0;
}
- while (pSamplesOut < pSamplesOutEnd) {
- __m128i zeroCountPart128;
- __m128i riceParamPart128;
- /*drflac_int32 riceParamParts[4];*/
+ /* 4 - 7 */
+ if (runningOrder >= 4) {
+ coefficients128_4 = _mm_loadu_si128((const __m128i*)(coefficients + 4));
+ samples128_4 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 8));
+ runningOrder -= 4;
+ } else {
+ switch (runningOrder) {
+ case 3: coefficients128_4 = _mm_set_epi32(0, coefficients[6], coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], pSamplesOut[-7], 0); break;
+ case 2: coefficients128_4 = _mm_set_epi32(0, 0, coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], 0, 0); break;
+ case 1: coefficients128_4 = _mm_set_epi32(0, 0, 0, coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], 0, 0, 0); break;
+ }
+ runningOrder = 0;
+ }
- /* Rice extraction. */
-#if 1
- if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0) ||
- !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts1, &riceParamParts1) ||
- !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts2, &riceParamParts2) ||
- !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts3, &riceParamParts3)) {
- return DRFLAC_FALSE;
+ /* 8 - 11 */
+ if (runningOrder == 4) {
+ coefficients128_8 = _mm_loadu_si128((const __m128i*)(coefficients + 8));
+ samples128_8 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 12));
+ runningOrder -= 4;
+ } else {
+ switch (runningOrder) {
+ case 3: coefficients128_8 = _mm_set_epi32(0, coefficients[10], coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], pSamplesOut[-10], pSamplesOut[-11], 0); break;
+ case 2: coefficients128_8 = _mm_set_epi32(0, 0, coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], pSamplesOut[-10], 0, 0); break;
+ case 1: coefficients128_8 = _mm_set_epi32(0, 0, 0, coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], 0, 0, 0); break;
}
+ runningOrder = 0;
+ }
- zeroCountPart128 = _mm_set_epi32(zeroCountParts3, zeroCountParts2, zeroCountParts1, zeroCountParts0);
- riceParamPart128 = _mm_set_epi32(riceParamParts3, riceParamParts2, riceParamParts1, riceParamParts0);
+ /* Coefficients need to be shuffled for our streaming algorithm below to work. Samples are already in the correct order from the loading routine above. */
+ coefficients128_0 = _mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(0, 1, 2, 3));
+ coefficients128_4 = _mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(0, 1, 2, 3));
+ coefficients128_8 = _mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(0, 1, 2, 3));
+ }
#else
- if (!drflac__read_rice_parts_x4(bs, riceParam, zeroCountParts, riceParamParts)) {
- return DRFLAC_FALSE;
- }
+ /* This causes strict-aliasing warnings with GCC. */
+ switch (order)
+ {
+ case 12: ((drflac_int32*)&coefficients128_8)[0] = coefficients[11]; ((drflac_int32*)&samples128_8)[0] = pDecodedSamples[-12];
+ case 11: ((drflac_int32*)&coefficients128_8)[1] = coefficients[10]; ((drflac_int32*)&samples128_8)[1] = pDecodedSamples[-11];
+ case 10: ((drflac_int32*)&coefficients128_8)[2] = coefficients[ 9]; ((drflac_int32*)&samples128_8)[2] = pDecodedSamples[-10];
+ case 9: ((drflac_int32*)&coefficients128_8)[3] = coefficients[ 8]; ((drflac_int32*)&samples128_8)[3] = pDecodedSamples[- 9];
+ case 8: ((drflac_int32*)&coefficients128_4)[0] = coefficients[ 7]; ((drflac_int32*)&samples128_4)[0] = pDecodedSamples[- 8];
+ case 7: ((drflac_int32*)&coefficients128_4)[1] = coefficients[ 6]; ((drflac_int32*)&samples128_4)[1] = pDecodedSamples[- 7];
+ case 6: ((drflac_int32*)&coefficients128_4)[2] = coefficients[ 5]; ((drflac_int32*)&samples128_4)[2] = pDecodedSamples[- 6];
+ case 5: ((drflac_int32*)&coefficients128_4)[3] = coefficients[ 4]; ((drflac_int32*)&samples128_4)[3] = pDecodedSamples[- 5];
+ case 4: ((drflac_int32*)&coefficients128_0)[0] = coefficients[ 3]; ((drflac_int32*)&samples128_0)[0] = pDecodedSamples[- 4];
+ case 3: ((drflac_int32*)&coefficients128_0)[1] = coefficients[ 2]; ((drflac_int32*)&samples128_0)[1] = pDecodedSamples[- 3];
+ case 2: ((drflac_int32*)&coefficients128_0)[2] = coefficients[ 1]; ((drflac_int32*)&samples128_0)[2] = pDecodedSamples[- 2];
+ case 1: ((drflac_int32*)&coefficients128_0)[3] = coefficients[ 0]; ((drflac_int32*)&samples128_0)[3] = pDecodedSamples[- 1];
+ }
+#endif
+
+ /* For this version we are doing one sample at a time. */
+ while (pDecodedSamples < pDecodedSamplesEnd) {
+ __m128i prediction128;
+ __m128i zeroCountPart128;
+ __m128i riceParamPart128;
+
+ if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts1, &riceParamParts1) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts2, &riceParamParts2) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts3, &riceParamParts3)) {
+ return DRFLAC_FALSE;
+ }
- zeroCountPart128 = _mm_set_epi32(zeroCountParts[3], zeroCountParts[2], zeroCountParts[1], zeroCountParts[0]);
- riceParamPart128 = _mm_set_epi32(riceParamParts[3], riceParamParts[2], riceParamParts[1], riceParamParts[0]);
-#endif
+ zeroCountPart128 = _mm_set_epi32(zeroCountParts3, zeroCountParts2, zeroCountParts1, zeroCountParts0);
+ riceParamPart128 = _mm_set_epi32(riceParamParts3, riceParamParts2, riceParamParts1, riceParamParts0);
- riceParamPart128 = _mm_and_si128(riceParamPart128, riceParamMask128);
- riceParamPart128 = _mm_or_si128(riceParamPart128, _mm_slli_epi32(zeroCountPart128, riceParam));
- riceParamPart128 = _mm_xor_si128(_mm_srli_epi32(riceParamPart128, 1), _mm_mullo_epi32(_mm_and_si128(riceParamPart128, one), _mm_set1_epi32(0xFFFFFFFF)));
+ riceParamPart128 = _mm_and_si128(riceParamPart128, riceParamMask128);
+ riceParamPart128 = _mm_or_si128(riceParamPart128, _mm_slli_epi32(zeroCountPart128, riceParam));
+ riceParamPart128 = _mm_xor_si128(_mm_srli_epi32(riceParamPart128, 1), _mm_add_epi32(drflac__mm_not_si128(_mm_and_si128(riceParamPart128, _mm_set1_epi32(0x01))), _mm_set1_epi32(0x01))); /* <-- SSE2 compatible */
+ /*riceParamPart128 = _mm_xor_si128(_mm_srli_epi32(riceParamPart128, 1), _mm_mullo_epi32(_mm_and_si128(riceParamPart128, _mm_set1_epi32(0x01)), _mm_set1_epi32(0xFFFFFFFF)));*/ /* <-- Only supported from SSE4.1 and is slower in my testing... */
-#if 1
- drflac__calculate_prediction_32_x4__sse41(order, shift, (const __m128i*)coefficients128, riceParamPart128, pSamplesOut);
-#else
- _mm_storeu_si128((__m128i*)riceParamParts, riceParamPart128);
+ if (order <= 4) {
+ for (i = 0; i < 4; i += 1) {
+ prediction128 = _mm_mullo_epi32(coefficients128_0, samples128_0);
- pSamplesOut[0] = riceParamParts[0] + drflac__calculate_prediction_32(order, shift, coefficients, pSamplesOut + 0);
- pSamplesOut[1] = riceParamParts[1] + drflac__calculate_prediction_32(order, shift, coefficients, pSamplesOut + 1);
- pSamplesOut[2] = riceParamParts[2] + drflac__calculate_prediction_32(order, shift, coefficients, pSamplesOut + 2);
- pSamplesOut[3] = riceParamParts[3] + drflac__calculate_prediction_32(order, shift, coefficients, pSamplesOut + 3);
-#endif
+ /* Horizontal add and shift. */
+ prediction128 = drflac__mm_hadd_epi32(prediction128);
+ prediction128 = _mm_srai_epi32(prediction128, shift);
+ prediction128 = _mm_add_epi32(riceParamPart128, prediction128);
- pSamplesOut += 4;
+ samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4);
+ riceParamPart128 = _mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4);
+ }
+ } else if (order <= 8) {
+ for (i = 0; i < 4; i += 1) {
+ prediction128 = _mm_mullo_epi32(coefficients128_4, samples128_4);
+ prediction128 = _mm_add_epi32(prediction128, _mm_mullo_epi32(coefficients128_0, samples128_0));
+
+ /* Horizontal add and shift. */
+ prediction128 = drflac__mm_hadd_epi32(prediction128);
+ prediction128 = _mm_srai_epi32(prediction128, shift);
+ prediction128 = _mm_add_epi32(riceParamPart128, prediction128);
+
+ samples128_4 = _mm_alignr_epi8(samples128_0, samples128_4, 4);
+ samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4);
+ riceParamPart128 = _mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4);
+ }
+ } else {
+ for (i = 0; i < 4; i += 1) {
+ prediction128 = _mm_mullo_epi32(coefficients128_8, samples128_8);
+ prediction128 = _mm_add_epi32(prediction128, _mm_mullo_epi32(coefficients128_4, samples128_4));
+ prediction128 = _mm_add_epi32(prediction128, _mm_mullo_epi32(coefficients128_0, samples128_0));
+
+ /* Horizontal add and shift. */
+ prediction128 = drflac__mm_hadd_epi32(prediction128);
+ prediction128 = _mm_srai_epi32(prediction128, shift);
+ prediction128 = _mm_add_epi32(riceParamPart128, prediction128);
+
+ samples128_8 = _mm_alignr_epi8(samples128_4, samples128_8, 4);
+ samples128_4 = _mm_alignr_epi8(samples128_0, samples128_4, 4);
+ samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4);
+ riceParamPart128 = _mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4);
+ }
}
- }
+ /* We store samples in groups of 4. */
+ _mm_storeu_si128((__m128i*)pDecodedSamples, samples128_0);
+ pDecodedSamples += 4;
+ }
- i = ((count >> 2) << 2);
- while (i < count) {
+ /* Make sure we process the last few samples. */
+ i = (count & ~3);
+ while (i < (int)count) {
/* Rice extraction. */
if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0)) {
return DRFLAC_FALSE;
@@ -3666,3461 +3509,5873 @@ static drflac_bool32 drflac__decode_samples_with_residual__rice__sse41(drflac_bs
riceParamParts0 = (riceParamParts0 >> 1) ^ t[riceParamParts0 & 0x01];
/* Sample reconstruction. */
- if (bitsPerSample >= 24) {
- pSamplesOut[0] = riceParamParts0 + drflac__calculate_prediction_64(order, shift, coefficients, pSamplesOut + 0);
- } else {
- pSamplesOut[0] = riceParamParts0 + drflac__calculate_prediction_32(order, shift, coefficients, pSamplesOut + 0);
- }
+ pDecodedSamples[0] = riceParamParts0 + drflac__calculate_prediction_32(order, shift, coefficients, pDecodedSamples);
i += 1;
- pSamplesOut += 1;
+ pDecodedSamples += 1;
}
return DRFLAC_TRUE;
}
-#endif
-static drflac_bool32 drflac__decode_samples_with_residual__rice(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
+static drflac_bool32 drflac__decode_samples_with_residual__rice__sse41_64(drflac_bs* bs, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
{
-#if defined(DRFLAC_SUPPORT_SSE41)
- if (drflac__gIsSSE41Supported) {
- return drflac__decode_samples_with_residual__rice__sse41(bs, bitsPerSample, count, riceParam, order, shift, coefficients, pSamplesOut);
- } else
-#endif
- {
- /* Scalar fallback. */
- #if 0
- return drflac__decode_samples_with_residual__rice__reference(bs, bitsPerSample, count, riceParam, order, shift, coefficients, pSamplesOut);
- #else
- return drflac__decode_samples_with_residual__rice__scalar(bs, bitsPerSample, count, riceParam, order, shift, coefficients, pSamplesOut);
- #endif
- }
-}
+ int i;
+ drflac_uint32 riceParamMask;
+ drflac_int32* pDecodedSamples = pSamplesOut;
+ drflac_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3);
+ drflac_uint32 zeroCountParts0 = 0;
+ drflac_uint32 zeroCountParts1 = 0;
+ drflac_uint32 zeroCountParts2 = 0;
+ drflac_uint32 zeroCountParts3 = 0;
+ drflac_uint32 riceParamParts0 = 0;
+ drflac_uint32 riceParamParts1 = 0;
+ drflac_uint32 riceParamParts2 = 0;
+ drflac_uint32 riceParamParts3 = 0;
+ __m128i coefficients128_0;
+ __m128i coefficients128_4;
+ __m128i coefficients128_8;
+ __m128i samples128_0;
+ __m128i samples128_4;
+ __m128i samples128_8;
+ __m128i prediction128;
+ __m128i riceParamMask128;
-/* Reads and seeks past a string of residual values as Rice codes. The decoder should be sitting on the first bit of the Rice codes. */
-static drflac_bool32 drflac__read_and_seek_residual__rice(drflac_bs* bs, drflac_uint32 count, drflac_uint8 riceParam)
-{
- drflac_uint32 i;
+ const drflac_uint32 t[2] = {0x00000000, 0xFFFFFFFF};
- drflac_assert(bs != NULL);
- drflac_assert(count > 0);
+ DRFLAC_ASSERT(order <= 12);
- for (i = 0; i < count; ++i) {
- if (!drflac__seek_rice_parts(bs, riceParam)) {
- return DRFLAC_FALSE;
- }
- }
+ riceParamMask = (drflac_uint32)~((~0UL) << riceParam);
+ riceParamMask128 = _mm_set1_epi32(riceParamMask);
- return DRFLAC_TRUE;
-}
+ prediction128 = _mm_setzero_si128();
-static drflac_bool32 drflac__decode_samples_with_residual__unencoded(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 count, drflac_uint8 unencodedBitsPerSample, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
-{
- drflac_uint32 i;
+ /* Pre-load. */
+ coefficients128_0 = _mm_setzero_si128();
+ coefficients128_4 = _mm_setzero_si128();
+ coefficients128_8 = _mm_setzero_si128();
- drflac_assert(bs != NULL);
- drflac_assert(count > 0);
- drflac_assert(unencodedBitsPerSample <= 31); /* <-- unencodedBitsPerSample is a 5 bit number, so cannot exceed 31. */
- drflac_assert(pSamplesOut != NULL);
+ samples128_0 = _mm_setzero_si128();
+ samples128_4 = _mm_setzero_si128();
+ samples128_8 = _mm_setzero_si128();
- for (i = 0; i < count; ++i) {
- if (unencodedBitsPerSample > 0) {
- if (!drflac__read_int32(bs, unencodedBitsPerSample, pSamplesOut + i)) {
- return DRFLAC_FALSE;
+#if 1
+ {
+ int runningOrder = order;
+
+ /* 0 - 3. */
+ if (runningOrder >= 4) {
+ coefficients128_0 = _mm_loadu_si128((const __m128i*)(coefficients + 0));
+ samples128_0 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 4));
+ runningOrder -= 4;
+ } else {
+ switch (runningOrder) {
+ case 3: coefficients128_0 = _mm_set_epi32(0, coefficients[2], coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], pSamplesOut[-3], 0); break;
+ case 2: coefficients128_0 = _mm_set_epi32(0, 0, coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], 0, 0); break;
+ case 1: coefficients128_0 = _mm_set_epi32(0, 0, 0, coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], 0, 0, 0); break;
}
+ runningOrder = 0;
+ }
+
+ /* 4 - 7 */
+ if (runningOrder >= 4) {
+ coefficients128_4 = _mm_loadu_si128((const __m128i*)(coefficients + 4));
+ samples128_4 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 8));
+ runningOrder -= 4;
} else {
- pSamplesOut[i] = 0;
+ switch (runningOrder) {
+ case 3: coefficients128_4 = _mm_set_epi32(0, coefficients[6], coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], pSamplesOut[-7], 0); break;
+ case 2: coefficients128_4 = _mm_set_epi32(0, 0, coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], 0, 0); break;
+ case 1: coefficients128_4 = _mm_set_epi32(0, 0, 0, coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], 0, 0, 0); break;
+ }
+ runningOrder = 0;
}
- if (bitsPerSample > 16) {
- pSamplesOut[i] += drflac__calculate_prediction_64(order, shift, coefficients, pSamplesOut + i);
+ /* 8 - 11 */
+ if (runningOrder == 4) {
+ coefficients128_8 = _mm_loadu_si128((const __m128i*)(coefficients + 8));
+ samples128_8 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 12));
+ runningOrder -= 4;
} else {
- pSamplesOut[i] += drflac__calculate_prediction_32(order, shift, coefficients, pSamplesOut + i);
+ switch (runningOrder) {
+ case 3: coefficients128_8 = _mm_set_epi32(0, coefficients[10], coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], pSamplesOut[-10], pSamplesOut[-11], 0); break;
+ case 2: coefficients128_8 = _mm_set_epi32(0, 0, coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], pSamplesOut[-10], 0, 0); break;
+ case 1: coefficients128_8 = _mm_set_epi32(0, 0, 0, coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], 0, 0, 0); break;
+ }
+ runningOrder = 0;
}
+
+ /* Coefficients need to be shuffled for our streaming algorithm below to work. Samples are already in the correct order from the loading routine above. */
+ coefficients128_0 = _mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(0, 1, 2, 3));
+ coefficients128_4 = _mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(0, 1, 2, 3));
+ coefficients128_8 = _mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(0, 1, 2, 3));
}
+#else
+ switch (order)
+ {
+ case 12: ((drflac_int32*)&coefficients128_8)[0] = coefficients[11]; ((drflac_int32*)&samples128_8)[0] = pDecodedSamples[-12];
+ case 11: ((drflac_int32*)&coefficients128_8)[1] = coefficients[10]; ((drflac_int32*)&samples128_8)[1] = pDecodedSamples[-11];
+ case 10: ((drflac_int32*)&coefficients128_8)[2] = coefficients[ 9]; ((drflac_int32*)&samples128_8)[2] = pDecodedSamples[-10];
+ case 9: ((drflac_int32*)&coefficients128_8)[3] = coefficients[ 8]; ((drflac_int32*)&samples128_8)[3] = pDecodedSamples[- 9];
+ case 8: ((drflac_int32*)&coefficients128_4)[0] = coefficients[ 7]; ((drflac_int32*)&samples128_4)[0] = pDecodedSamples[- 8];
+ case 7: ((drflac_int32*)&coefficients128_4)[1] = coefficients[ 6]; ((drflac_int32*)&samples128_4)[1] = pDecodedSamples[- 7];
+ case 6: ((drflac_int32*)&coefficients128_4)[2] = coefficients[ 5]; ((drflac_int32*)&samples128_4)[2] = pDecodedSamples[- 6];
+ case 5: ((drflac_int32*)&coefficients128_4)[3] = coefficients[ 4]; ((drflac_int32*)&samples128_4)[3] = pDecodedSamples[- 5];
+ case 4: ((drflac_int32*)&coefficients128_0)[0] = coefficients[ 3]; ((drflac_int32*)&samples128_0)[0] = pDecodedSamples[- 4];
+ case 3: ((drflac_int32*)&coefficients128_0)[1] = coefficients[ 2]; ((drflac_int32*)&samples128_0)[1] = pDecodedSamples[- 3];
+ case 2: ((drflac_int32*)&coefficients128_0)[2] = coefficients[ 1]; ((drflac_int32*)&samples128_0)[2] = pDecodedSamples[- 2];
+ case 1: ((drflac_int32*)&coefficients128_0)[3] = coefficients[ 0]; ((drflac_int32*)&samples128_0)[3] = pDecodedSamples[- 1];
+ }
+#endif
+
+ /* For this version we are doing one sample at a time. */
+ while (pDecodedSamples < pDecodedSamplesEnd) {
+ __m128i zeroCountPart128;
+ __m128i riceParamPart128;
+
+ if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts1, &riceParamParts1) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts2, &riceParamParts2) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts3, &riceParamParts3)) {
+ return DRFLAC_FALSE;
+ }
- return DRFLAC_TRUE;
-}
+ zeroCountPart128 = _mm_set_epi32(zeroCountParts3, zeroCountParts2, zeroCountParts1, zeroCountParts0);
+ riceParamPart128 = _mm_set_epi32(riceParamParts3, riceParamParts2, riceParamParts1, riceParamParts0);
+ riceParamPart128 = _mm_and_si128(riceParamPart128, riceParamMask128);
+ riceParamPart128 = _mm_or_si128(riceParamPart128, _mm_slli_epi32(zeroCountPart128, riceParam));
+ riceParamPart128 = _mm_xor_si128(_mm_srli_epi32(riceParamPart128, 1), _mm_add_epi32(drflac__mm_not_si128(_mm_and_si128(riceParamPart128, _mm_set1_epi32(1))), _mm_set1_epi32(1)));
-/*
-Reads and decodes the residual for the sub-frame the decoder is currently sitting on. This function should be called
-when the decoder is sitting at the very start of the RESIDUAL block. The first residuals will be ignored. The
- and parameters are used to determine how many residual values need to be decoded.
-*/
-static drflac_bool32 drflac__decode_samples_with_residual(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 blockSize, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pDecodedSamples)
-{
- drflac_uint8 residualMethod;
- drflac_uint8 partitionOrder;
- drflac_uint32 samplesInPartition;
- drflac_uint32 partitionsRemaining;
+ for (i = 0; i < 4; i += 1) {
+ prediction128 = _mm_xor_si128(prediction128, prediction128); /* Reset to 0. */
- drflac_assert(bs != NULL);
- drflac_assert(blockSize != 0);
- drflac_assert(pDecodedSamples != NULL); /* <-- Should we allow NULL, in which case we just seek past the residual rather than do a full decode? */
+ switch (order)
+ {
+ case 12:
+ case 11: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(1, 1, 0, 0)), _mm_shuffle_epi32(samples128_8, _MM_SHUFFLE(1, 1, 0, 0))));
+ case 10:
+ case 9: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(3, 3, 2, 2)), _mm_shuffle_epi32(samples128_8, _MM_SHUFFLE(3, 3, 2, 2))));
+ case 8:
+ case 7: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(1, 1, 0, 0)), _mm_shuffle_epi32(samples128_4, _MM_SHUFFLE(1, 1, 0, 0))));
+ case 6:
+ case 5: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(3, 3, 2, 2)), _mm_shuffle_epi32(samples128_4, _MM_SHUFFLE(3, 3, 2, 2))));
+ case 4:
+ case 3: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(1, 1, 0, 0)), _mm_shuffle_epi32(samples128_0, _MM_SHUFFLE(1, 1, 0, 0))));
+ case 2:
+ case 1: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(3, 3, 2, 2)), _mm_shuffle_epi32(samples128_0, _MM_SHUFFLE(3, 3, 2, 2))));
+ }
- if (!drflac__read_uint8(bs, 2, &residualMethod)) {
- return DRFLAC_FALSE;
- }
+ /* Horizontal add and shift. */
+ prediction128 = drflac__mm_hadd_epi64(prediction128);
+ prediction128 = drflac__mm_srai_epi64(prediction128, shift);
+ prediction128 = _mm_add_epi32(riceParamPart128, prediction128);
- if (residualMethod != DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE && residualMethod != DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) {
- return DRFLAC_FALSE; /* Unknown or unsupported residual coding method. */
- }
+ /* Our value should be sitting in prediction128[0]. We need to combine this with our SSE samples. */
+ samples128_8 = _mm_alignr_epi8(samples128_4, samples128_8, 4);
+ samples128_4 = _mm_alignr_epi8(samples128_0, samples128_4, 4);
+ samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4);
- /* Ignore the first values. */
- pDecodedSamples += order;
+ /* Slide our rice parameter down so that the value in position 0 contains the next one to process. */
+ riceParamPart128 = _mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4);
+ }
- if (!drflac__read_uint8(bs, 4, &partitionOrder)) {
- return DRFLAC_FALSE;
+ /* We store samples in groups of 4. */
+ _mm_storeu_si128((__m128i*)pDecodedSamples, samples128_0);
+ pDecodedSamples += 4;
}
- /*
- From the FLAC spec:
- The Rice partition order in a Rice-coded residual section must be less than or equal to 8.
- */
- if (partitionOrder > 8) {
- return DRFLAC_FALSE;
- }
-
- /* Validation check. */
- if ((blockSize / (1 << partitionOrder)) <= order) {
- return DRFLAC_FALSE;
- }
-
- samplesInPartition = (blockSize / (1 << partitionOrder)) - order;
- partitionsRemaining = (1 << partitionOrder);
- for (;;) {
- drflac_uint8 riceParam = 0;
- if (residualMethod == DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE) {
- if (!drflac__read_uint8(bs, 4, &riceParam)) {
- return DRFLAC_FALSE;
- }
- if (riceParam == 15) {
- riceParam = 0xFF;
- }
- } else if (residualMethod == DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) {
- if (!drflac__read_uint8(bs, 5, &riceParam)) {
- return DRFLAC_FALSE;
- }
- if (riceParam == 31) {
- riceParam = 0xFF;
- }
+ /* Make sure we process the last few samples. */
+ i = (count & ~3);
+ while (i < (int)count) {
+ /* Rice extraction. */
+ if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0)) {
+ return DRFLAC_FALSE;
}
- if (riceParam != 0xFF) {
- if (!drflac__decode_samples_with_residual__rice(bs, bitsPerSample, samplesInPartition, riceParam, order, shift, coefficients, pDecodedSamples)) {
- return DRFLAC_FALSE;
- }
- } else {
- unsigned char unencodedBitsPerSample = 0;
- if (!drflac__read_uint8(bs, 5, &unencodedBitsPerSample)) {
- return DRFLAC_FALSE;
- }
+ /* Rice reconstruction. */
+ riceParamParts0 &= riceParamMask;
+ riceParamParts0 |= (zeroCountParts0 << riceParam);
+ riceParamParts0 = (riceParamParts0 >> 1) ^ t[riceParamParts0 & 0x01];
- if (!drflac__decode_samples_with_residual__unencoded(bs, bitsPerSample, samplesInPartition, unencodedBitsPerSample, order, shift, coefficients, pDecodedSamples)) {
- return DRFLAC_FALSE;
- }
- }
+ /* Sample reconstruction. */
+ pDecodedSamples[0] = riceParamParts0 + drflac__calculate_prediction_64(order, shift, coefficients, pDecodedSamples);
- pDecodedSamples += samplesInPartition;
+ i += 1;
+ pDecodedSamples += 1;
+ }
- if (partitionsRemaining == 1) {
- break;
- }
+ return DRFLAC_TRUE;
+}
- partitionsRemaining -= 1;
+static drflac_bool32 drflac__decode_samples_with_residual__rice__sse41(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
+{
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(count > 0);
+ DRFLAC_ASSERT(pSamplesOut != NULL);
- if (partitionOrder != 0) {
- samplesInPartition = blockSize / (1 << partitionOrder);
+ /* In my testing the order is rarely > 12, so in this case I'm going to simplify the SSE implementation by only handling order <= 12. */
+ if (order > 0 && order <= 12) {
+ if (bitsPerSample+shift > 32) {
+ return drflac__decode_samples_with_residual__rice__sse41_64(bs, count, riceParam, order, shift, coefficients, pSamplesOut);
+ } else {
+ return drflac__decode_samples_with_residual__rice__sse41_32(bs, count, riceParam, order, shift, coefficients, pSamplesOut);
}
+ } else {
+ return drflac__decode_samples_with_residual__rice__scalar(bs, bitsPerSample, count, riceParam, order, shift, coefficients, pSamplesOut);
}
+}
+#endif
- return DRFLAC_TRUE;
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac__vst2q_s32(drflac_int32* p, int32x4x2_t x)
+{
+ vst1q_s32(p+0, x.val[0]);
+ vst1q_s32(p+4, x.val[1]);
}
-/*
-Reads and seeks past the residual for the sub-frame the decoder is currently sitting on. This function should be called
-when the decoder is sitting at the very start of the RESIDUAL block. The first residuals will be set to 0. The
- and parameters are used to determine how many residual values need to be decoded.
-*/
-static drflac_bool32 drflac__read_and_seek_residual(drflac_bs* bs, drflac_uint32 blockSize, drflac_uint32 order)
+static DRFLAC_INLINE void drflac__vst2q_f32(float* p, float32x4x2_t x)
{
- drflac_uint8 residualMethod;
- drflac_uint8 partitionOrder;
- drflac_uint32 samplesInPartition;
- drflac_uint32 partitionsRemaining;
+ vst1q_f32(p+0, x.val[0]);
+ vst1q_f32(p+4, x.val[1]);
+}
- drflac_assert(bs != NULL);
- drflac_assert(blockSize != 0);
+static DRFLAC_INLINE void drflac__vst2q_s16(drflac_int16* p, int16x4x2_t x)
+{
+ vst1q_s16(p, vcombine_s16(x.val[0], x.val[1]));
+}
- if (!drflac__read_uint8(bs, 2, &residualMethod)) {
- return DRFLAC_FALSE;
- }
+static DRFLAC_INLINE int32x4_t drflac__vdupq_n_s32x4(drflac_int32 x3, drflac_int32 x2, drflac_int32 x1, drflac_int32 x0)
+{
+ drflac_int32 x[4];
+ x[3] = x3;
+ x[2] = x2;
+ x[1] = x1;
+ x[0] = x0;
+ return vld1q_s32(x);
+}
- if (residualMethod != DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE && residualMethod != DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) {
- return DRFLAC_FALSE; /* Unknown or unsupported residual coding method. */
- }
+static DRFLAC_INLINE int32x4_t drflac__valignrq_s32_1(int32x4_t a, int32x4_t b)
+{
+ /* Equivalent to SSE's _mm_alignr_epi8(a, b, 4) */
- if (!drflac__read_uint8(bs, 4, &partitionOrder)) {
- return DRFLAC_FALSE;
- }
+ /* Reference */
+ /*return drflac__vdupq_n_s32x4(
+ vgetq_lane_s32(a, 0),
+ vgetq_lane_s32(b, 3),
+ vgetq_lane_s32(b, 2),
+ vgetq_lane_s32(b, 1)
+ );*/
- /*
- From the FLAC spec:
- The Rice partition order in a Rice-coded residual section must be less than or equal to 8.
- */
- if (partitionOrder > 8) {
- return DRFLAC_FALSE;
- }
+ return vextq_s32(b, a, 1);
+}
- /* Validation check. */
- if ((blockSize / (1 << partitionOrder)) <= order) {
- return DRFLAC_FALSE;
- }
+static DRFLAC_INLINE uint32x4_t drflac__valignrq_u32_1(uint32x4_t a, uint32x4_t b)
+{
+ /* Equivalent to SSE's _mm_alignr_epi8(a, b, 4) */
- samplesInPartition = (blockSize / (1 << partitionOrder)) - order;
- partitionsRemaining = (1 << partitionOrder);
- for (;;)
- {
- drflac_uint8 riceParam = 0;
- if (residualMethod == DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE) {
- if (!drflac__read_uint8(bs, 4, &riceParam)) {
- return DRFLAC_FALSE;
- }
- if (riceParam == 15) {
- riceParam = 0xFF;
- }
- } else if (residualMethod == DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) {
- if (!drflac__read_uint8(bs, 5, &riceParam)) {
- return DRFLAC_FALSE;
- }
- if (riceParam == 31) {
- riceParam = 0xFF;
- }
- }
+ /* Reference */
+ /*return drflac__vdupq_n_s32x4(
+ vgetq_lane_s32(a, 0),
+ vgetq_lane_s32(b, 3),
+ vgetq_lane_s32(b, 2),
+ vgetq_lane_s32(b, 1)
+ );*/
- if (riceParam != 0xFF) {
- if (!drflac__read_and_seek_residual__rice(bs, samplesInPartition, riceParam)) {
- return DRFLAC_FALSE;
- }
- } else {
- unsigned char unencodedBitsPerSample = 0;
- if (!drflac__read_uint8(bs, 5, &unencodedBitsPerSample)) {
- return DRFLAC_FALSE;
- }
+ return vextq_u32(b, a, 1);
+}
- if (!drflac__seek_bits(bs, unencodedBitsPerSample * samplesInPartition)) {
- return DRFLAC_FALSE;
- }
- }
+static DRFLAC_INLINE int32x2_t drflac__vhaddq_s32(int32x4_t x)
+{
+ /* The sum must end up in position 0. */
+ /* Reference */
+ /*return vdupq_n_s32(
+ vgetq_lane_s32(x, 3) +
+ vgetq_lane_s32(x, 2) +
+ vgetq_lane_s32(x, 1) +
+ vgetq_lane_s32(x, 0)
+ );*/
- if (partitionsRemaining == 1) {
- break;
- }
+ int32x2_t r = vadd_s32(vget_high_s32(x), vget_low_s32(x));
+ return vpadd_s32(r, r);
+}
- partitionsRemaining -= 1;
- samplesInPartition = blockSize / (1 << partitionOrder);
- }
+static DRFLAC_INLINE int64x1_t drflac__vhaddq_s64(int64x2_t x)
+{
+ return vadd_s64(vget_high_s64(x), vget_low_s64(x));
+}
- return DRFLAC_TRUE;
+static DRFLAC_INLINE int32x4_t drflac__vrevq_s32(int32x4_t x)
+{
+ /* Reference */
+ /*return drflac__vdupq_n_s32x4(
+ vgetq_lane_s32(x, 0),
+ vgetq_lane_s32(x, 1),
+ vgetq_lane_s32(x, 2),
+ vgetq_lane_s32(x, 3)
+ );*/
+
+ return vrev64q_s32(vcombine_s32(vget_high_s32(x), vget_low_s32(x)));
}
+static DRFLAC_INLINE int32x4_t drflac__vnotq_s32(int32x4_t x)
+{
+ return veorq_s32(x, vdupq_n_s32(0xFFFFFFFF));
+}
-static drflac_bool32 drflac__decode_samples__constant(drflac_bs* bs, drflac_uint32 blockSize, drflac_uint32 bitsPerSample, drflac_int32* pDecodedSamples)
+static DRFLAC_INLINE uint32x4_t drflac__vnotq_u32(uint32x4_t x)
{
- drflac_uint32 i;
+ return veorq_u32(x, vdupq_n_u32(0xFFFFFFFF));
+}
- /* Only a single sample needs to be decoded here. */
- drflac_int32 sample;
- if (!drflac__read_int32(bs, bitsPerSample, &sample)) {
- return DRFLAC_FALSE;
- }
+static drflac_bool32 drflac__decode_samples_with_residual__rice__neon_32(drflac_bs* bs, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
+{
+ int i;
+ drflac_uint32 riceParamMask;
+ drflac_int32* pDecodedSamples = pSamplesOut;
+ drflac_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3);
+ drflac_uint32 zeroCountParts[4];
+ drflac_uint32 riceParamParts[4];
+ int32x4_t coefficients128_0;
+ int32x4_t coefficients128_4;
+ int32x4_t coefficients128_8;
+ int32x4_t samples128_0;
+ int32x4_t samples128_4;
+ int32x4_t samples128_8;
+ uint32x4_t riceParamMask128;
+ int32x4_t riceParam128;
+ int32x2_t shift64;
+ uint32x4_t one128;
+
+ const drflac_uint32 t[2] = {0x00000000, 0xFFFFFFFF};
+
+ riceParamMask = ~((~0UL) << riceParam);
+ riceParamMask128 = vdupq_n_u32(riceParamMask);
+
+ riceParam128 = vdupq_n_s32(riceParam);
+ shift64 = vdup_n_s32(-shift); /* Negate the shift because we'll be doing a variable shift using vshl_s32(). */
+ one128 = vdupq_n_u32(1);
/*
- We don't really need to expand this, but it does simplify the process of reading samples. If this becomes a performance issue (unlikely)
- we'll want to look at a more efficient way.
+ Pre-loading the coefficients and prior samples is annoying because we need to ensure we don't try reading more than
+ what's available in the input buffers. It would be convenient to use a fall-through switch to do this, but this results
+ in strict aliasing warnings with GCC. To work around this I'm just doing something hacky. This feels a bit convoluted
+ so I think there's opportunity for this to be simplified.
*/
- for (i = 0; i < blockSize; ++i) {
- pDecodedSamples[i] = sample;
- }
+ {
+ int runningOrder = order;
+ drflac_int32 tempC[4] = {0, 0, 0, 0};
+ drflac_int32 tempS[4] = {0, 0, 0, 0};
+
+ /* 0 - 3. */
+ if (runningOrder >= 4) {
+ coefficients128_0 = vld1q_s32(coefficients + 0);
+ samples128_0 = vld1q_s32(pSamplesOut - 4);
+ runningOrder -= 4;
+ } else {
+ switch (runningOrder) {
+ case 3: tempC[2] = coefficients[2]; tempS[1] = pSamplesOut[-3]; /* fallthrough */
+ case 2: tempC[1] = coefficients[1]; tempS[2] = pSamplesOut[-2]; /* fallthrough */
+ case 1: tempC[0] = coefficients[0]; tempS[3] = pSamplesOut[-1]; /* fallthrough */
+ }
- return DRFLAC_TRUE;
-}
+ coefficients128_0 = vld1q_s32(tempC);
+ samples128_0 = vld1q_s32(tempS);
+ runningOrder = 0;
+ }
-static drflac_bool32 drflac__decode_samples__verbatim(drflac_bs* bs, drflac_uint32 blockSize, drflac_uint32 bitsPerSample, drflac_int32* pDecodedSamples)
-{
- drflac_uint32 i;
+ /* 4 - 7 */
+ if (runningOrder >= 4) {
+ coefficients128_4 = vld1q_s32(coefficients + 4);
+ samples128_4 = vld1q_s32(pSamplesOut - 8);
+ runningOrder -= 4;
+ } else {
+ switch (runningOrder) {
+ case 3: tempC[2] = coefficients[6]; tempS[1] = pSamplesOut[-7]; /* fallthrough */
+ case 2: tempC[1] = coefficients[5]; tempS[2] = pSamplesOut[-6]; /* fallthrough */
+ case 1: tempC[0] = coefficients[4]; tempS[3] = pSamplesOut[-5]; /* fallthrough */
+ }
- for (i = 0; i < blockSize; ++i) {
- drflac_int32 sample;
- if (!drflac__read_int32(bs, bitsPerSample, &sample)) {
- return DRFLAC_FALSE;
+ coefficients128_4 = vld1q_s32(tempC);
+ samples128_4 = vld1q_s32(tempS);
+ runningOrder = 0;
}
- pDecodedSamples[i] = sample;
- }
+ /* 8 - 11 */
+ if (runningOrder == 4) {
+ coefficients128_8 = vld1q_s32(coefficients + 8);
+ samples128_8 = vld1q_s32(pSamplesOut - 12);
+ runningOrder -= 4;
+ } else {
+ switch (runningOrder) {
+ case 3: tempC[2] = coefficients[10]; tempS[1] = pSamplesOut[-11]; /* fallthrough */
+ case 2: tempC[1] = coefficients[ 9]; tempS[2] = pSamplesOut[-10]; /* fallthrough */
+ case 1: tempC[0] = coefficients[ 8]; tempS[3] = pSamplesOut[- 9]; /* fallthrough */
+ }
- return DRFLAC_TRUE;
-}
+ coefficients128_8 = vld1q_s32(tempC);
+ samples128_8 = vld1q_s32(tempS);
+ runningOrder = 0;
+ }
-static drflac_bool32 drflac__decode_samples__fixed(drflac_bs* bs, drflac_uint32 blockSize, drflac_uint32 bitsPerSample, drflac_uint8 lpcOrder, drflac_int32* pDecodedSamples)
-{
- drflac_uint32 i;
+ /* Coefficients need to be shuffled for our streaming algorithm below to work. Samples are already in the correct order from the loading routine above. */
+ coefficients128_0 = drflac__vrevq_s32(coefficients128_0);
+ coefficients128_4 = drflac__vrevq_s32(coefficients128_4);
+ coefficients128_8 = drflac__vrevq_s32(coefficients128_8);
+ }
- static drflac_int32 lpcCoefficientsTable[5][4] = {
- {0, 0, 0, 0},
- {1, 0, 0, 0},
- {2, -1, 0, 0},
- {3, -3, 1, 0},
- {4, -6, 4, -1}
- };
+ /* For this version we are doing one sample at a time. */
+ while (pDecodedSamples < pDecodedSamplesEnd) {
+ int32x4_t prediction128;
+ int32x2_t prediction64;
+ uint32x4_t zeroCountPart128;
+ uint32x4_t riceParamPart128;
- /* Warm up samples and coefficients. */
- for (i = 0; i < lpcOrder; ++i) {
- drflac_int32 sample;
- if (!drflac__read_int32(bs, bitsPerSample, &sample)) {
+ if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0]) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[1], &riceParamParts[1]) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[2], &riceParamParts[2]) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[3], &riceParamParts[3])) {
return DRFLAC_FALSE;
}
- pDecodedSamples[i] = sample;
- }
+ zeroCountPart128 = vld1q_u32(zeroCountParts);
+ riceParamPart128 = vld1q_u32(riceParamParts);
- if (!drflac__decode_samples_with_residual(bs, bitsPerSample, blockSize, lpcOrder, 0, lpcCoefficientsTable[lpcOrder], pDecodedSamples)) {
- return DRFLAC_FALSE;
- }
+ riceParamPart128 = vandq_u32(riceParamPart128, riceParamMask128);
+ riceParamPart128 = vorrq_u32(riceParamPart128, vshlq_u32(zeroCountPart128, riceParam128));
+ riceParamPart128 = veorq_u32(vshrq_n_u32(riceParamPart128, 1), vaddq_u32(drflac__vnotq_u32(vandq_u32(riceParamPart128, one128)), one128));
- return DRFLAC_TRUE;
-}
+ if (order <= 4) {
+ for (i = 0; i < 4; i += 1) {
+ prediction128 = vmulq_s32(coefficients128_0, samples128_0);
-static drflac_bool32 drflac__decode_samples__lpc(drflac_bs* bs, drflac_uint32 blockSize, drflac_uint32 bitsPerSample, drflac_uint8 lpcOrder, drflac_int32* pDecodedSamples)
-{
- drflac_uint8 i;
- drflac_uint8 lpcPrecision;
- drflac_int8 lpcShift;
- drflac_int32 coefficients[32];
+ /* Horizontal add and shift. */
+ prediction64 = drflac__vhaddq_s32(prediction128);
+ prediction64 = vshl_s32(prediction64, shift64);
+ prediction64 = vadd_s32(prediction64, vget_low_s32(vreinterpretq_s32_u32(riceParamPart128)));
- /* Warm up samples. */
- for (i = 0; i < lpcOrder; ++i) {
- drflac_int32 sample;
- if (!drflac__read_int32(bs, bitsPerSample, &sample)) {
- return DRFLAC_FALSE;
+ samples128_0 = drflac__valignrq_s32_1(vcombine_s32(prediction64, vdup_n_s32(0)), samples128_0);
+ riceParamPart128 = drflac__valignrq_u32_1(vdupq_n_u32(0), riceParamPart128);
+ }
+ } else if (order <= 8) {
+ for (i = 0; i < 4; i += 1) {
+ prediction128 = vmulq_s32(coefficients128_4, samples128_4);
+ prediction128 = vmlaq_s32(prediction128, coefficients128_0, samples128_0);
+
+ /* Horizontal add and shift. */
+ prediction64 = drflac__vhaddq_s32(prediction128);
+ prediction64 = vshl_s32(prediction64, shift64);
+ prediction64 = vadd_s32(prediction64, vget_low_s32(vreinterpretq_s32_u32(riceParamPart128)));
+
+ samples128_4 = drflac__valignrq_s32_1(samples128_0, samples128_4);
+ samples128_0 = drflac__valignrq_s32_1(vcombine_s32(prediction64, vdup_n_s32(0)), samples128_0);
+ riceParamPart128 = drflac__valignrq_u32_1(vdupq_n_u32(0), riceParamPart128);
+ }
+ } else {
+ for (i = 0; i < 4; i += 1) {
+ prediction128 = vmulq_s32(coefficients128_8, samples128_8);
+ prediction128 = vmlaq_s32(prediction128, coefficients128_4, samples128_4);
+ prediction128 = vmlaq_s32(prediction128, coefficients128_0, samples128_0);
+
+ /* Horizontal add and shift. */
+ prediction64 = drflac__vhaddq_s32(prediction128);
+ prediction64 = vshl_s32(prediction64, shift64);
+ prediction64 = vadd_s32(prediction64, vget_low_s32(vreinterpretq_s32_u32(riceParamPart128)));
+
+ samples128_8 = drflac__valignrq_s32_1(samples128_4, samples128_8);
+ samples128_4 = drflac__valignrq_s32_1(samples128_0, samples128_4);
+ samples128_0 = drflac__valignrq_s32_1(vcombine_s32(prediction64, vdup_n_s32(0)), samples128_0);
+ riceParamPart128 = drflac__valignrq_u32_1(vdupq_n_u32(0), riceParamPart128);
+ }
}
- pDecodedSamples[i] = sample;
- }
-
- if (!drflac__read_uint8(bs, 4, &lpcPrecision)) {
- return DRFLAC_FALSE;
- }
- if (lpcPrecision == 15) {
- return DRFLAC_FALSE; /* Invalid. */
- }
- lpcPrecision += 1;
-
- if (!drflac__read_int8(bs, 5, &lpcShift)) {
- return DRFLAC_FALSE;
+ /* We store samples in groups of 4. */
+ vst1q_s32(pDecodedSamples, samples128_0);
+ pDecodedSamples += 4;
}
- drflac_zero_memory(coefficients, sizeof(coefficients));
- for (i = 0; i < lpcOrder; ++i) {
- if (!drflac__read_int32(bs, lpcPrecision, coefficients + i)) {
+ /* Make sure we process the last few samples. */
+ i = (count & ~3);
+ while (i < (int)count) {
+ /* Rice extraction. */
+ if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0])) {
return DRFLAC_FALSE;
}
- }
- if (!drflac__decode_samples_with_residual(bs, bitsPerSample, blockSize, lpcOrder, lpcShift, coefficients, pDecodedSamples)) {
- return DRFLAC_FALSE;
+ /* Rice reconstruction. */
+ riceParamParts[0] &= riceParamMask;
+ riceParamParts[0] |= (zeroCountParts[0] << riceParam);
+ riceParamParts[0] = (riceParamParts[0] >> 1) ^ t[riceParamParts[0] & 0x01];
+
+ /* Sample reconstruction. */
+ pDecodedSamples[0] = riceParamParts[0] + drflac__calculate_prediction_32(order, shift, coefficients, pDecodedSamples);
+
+ i += 1;
+ pDecodedSamples += 1;
}
return DRFLAC_TRUE;
}
-
-static drflac_bool32 drflac__read_next_flac_frame_header(drflac_bs* bs, drflac_uint8 streaminfoBitsPerSample, drflac_frame_header* header)
+static drflac_bool32 drflac__decode_samples_with_residual__rice__neon_64(drflac_bs* bs, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
{
- const drflac_uint32 sampleRateTable[12] = {0, 88200, 176400, 192000, 8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000};
- const drflac_uint8 bitsPerSampleTable[8] = {0, 8, 12, (drflac_uint8)-1, 16, 20, 24, (drflac_uint8)-1}; /* -1 = reserved. */
-
- drflac_assert(bs != NULL);
- drflac_assert(header != NULL);
+ int i;
+ drflac_uint32 riceParamMask;
+ drflac_int32* pDecodedSamples = pSamplesOut;
+ drflac_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3);
+ drflac_uint32 zeroCountParts[4];
+ drflac_uint32 riceParamParts[4];
+ int32x4_t coefficients128_0;
+ int32x4_t coefficients128_4;
+ int32x4_t coefficients128_8;
+ int32x4_t samples128_0;
+ int32x4_t samples128_4;
+ int32x4_t samples128_8;
+ uint32x4_t riceParamMask128;
+ int32x4_t riceParam128;
+ int64x1_t shift64;
+ uint32x4_t one128;
+
+ const drflac_uint32 t[2] = {0x00000000, 0xFFFFFFFF};
+
+ riceParamMask = ~((~0UL) << riceParam);
+ riceParamMask128 = vdupq_n_u32(riceParamMask);
+
+ riceParam128 = vdupq_n_s32(riceParam);
+ shift64 = vdup_n_s64(-shift); /* Negate the shift because we'll be doing a variable shift using vshl_s64(). */
+ one128 = vdupq_n_u32(1);
- /* Keep looping until we find a valid sync code. */
- for (;;) {
- drflac_uint8 crc8 = 0xCE; /* 0xCE = drflac_crc8(0, 0x3FFE, 14); */
- drflac_uint8 reserved = 0;
- drflac_uint8 blockingStrategy = 0;
- drflac_uint8 blockSize = 0;
- drflac_uint8 sampleRate = 0;
- drflac_uint8 channelAssignment = 0;
- drflac_uint8 bitsPerSample = 0;
- drflac_bool32 isVariableBlockSize;
+ /*
+ Pre-loading the coefficients and prior samples is annoying because we need to ensure we don't try reading more than
+ what's available in the input buffers. It would be convenient to use a fall-through switch to do this, but this results
+ in strict aliasing warnings with GCC. To work around this I'm just doing something hacky. This feels a bit convoluted
+ so I think there's opportunity for this to be simplified.
+ */
+ {
+ int runningOrder = order;
+ drflac_int32 tempC[4] = {0, 0, 0, 0};
+ drflac_int32 tempS[4] = {0, 0, 0, 0};
+
+ /* 0 - 3. */
+ if (runningOrder >= 4) {
+ coefficients128_0 = vld1q_s32(coefficients + 0);
+ samples128_0 = vld1q_s32(pSamplesOut - 4);
+ runningOrder -= 4;
+ } else {
+ switch (runningOrder) {
+ case 3: tempC[2] = coefficients[2]; tempS[1] = pSamplesOut[-3]; /* fallthrough */
+ case 2: tempC[1] = coefficients[1]; tempS[2] = pSamplesOut[-2]; /* fallthrough */
+ case 1: tempC[0] = coefficients[0]; tempS[3] = pSamplesOut[-1]; /* fallthrough */
+ }
- if (!drflac__find_and_seek_to_next_sync_code(bs)) {
- return DRFLAC_FALSE;
+ coefficients128_0 = vld1q_s32(tempC);
+ samples128_0 = vld1q_s32(tempS);
+ runningOrder = 0;
}
- if (!drflac__read_uint8(bs, 1, &reserved)) {
- return DRFLAC_FALSE;
- }
- if (reserved == 1) {
- continue;
- }
- crc8 = drflac_crc8(crc8, reserved, 1);
+ /* 4 - 7 */
+ if (runningOrder >= 4) {
+ coefficients128_4 = vld1q_s32(coefficients + 4);
+ samples128_4 = vld1q_s32(pSamplesOut - 8);
+ runningOrder -= 4;
+ } else {
+ switch (runningOrder) {
+ case 3: tempC[2] = coefficients[6]; tempS[1] = pSamplesOut[-7]; /* fallthrough */
+ case 2: tempC[1] = coefficients[5]; tempS[2] = pSamplesOut[-6]; /* fallthrough */
+ case 1: tempC[0] = coefficients[4]; tempS[3] = pSamplesOut[-5]; /* fallthrough */
+ }
- if (!drflac__read_uint8(bs, 1, &blockingStrategy)) {
- return DRFLAC_FALSE;
+ coefficients128_4 = vld1q_s32(tempC);
+ samples128_4 = vld1q_s32(tempS);
+ runningOrder = 0;
}
- crc8 = drflac_crc8(crc8, blockingStrategy, 1);
- if (!drflac__read_uint8(bs, 4, &blockSize)) {
- return DRFLAC_FALSE;
- }
- if (blockSize == 0) {
- continue;
- }
- crc8 = drflac_crc8(crc8, blockSize, 4);
+ /* 8 - 11 */
+ if (runningOrder == 4) {
+ coefficients128_8 = vld1q_s32(coefficients + 8);
+ samples128_8 = vld1q_s32(pSamplesOut - 12);
+ runningOrder -= 4;
+ } else {
+ switch (runningOrder) {
+ case 3: tempC[2] = coefficients[10]; tempS[1] = pSamplesOut[-11]; /* fallthrough */
+ case 2: tempC[1] = coefficients[ 9]; tempS[2] = pSamplesOut[-10]; /* fallthrough */
+ case 1: tempC[0] = coefficients[ 8]; tempS[3] = pSamplesOut[- 9]; /* fallthrough */
+ }
- if (!drflac__read_uint8(bs, 4, &sampleRate)) {
- return DRFLAC_FALSE;
+ coefficients128_8 = vld1q_s32(tempC);
+ samples128_8 = vld1q_s32(tempS);
+ runningOrder = 0;
}
- crc8 = drflac_crc8(crc8, sampleRate, 4);
- if (!drflac__read_uint8(bs, 4, &channelAssignment)) {
- return DRFLAC_FALSE;
- }
- if (channelAssignment > 10) {
- continue;
- }
- crc8 = drflac_crc8(crc8, channelAssignment, 4);
+ /* Coefficients need to be shuffled for our streaming algorithm below to work. Samples are already in the correct order from the loading routine above. */
+ coefficients128_0 = drflac__vrevq_s32(coefficients128_0);
+ coefficients128_4 = drflac__vrevq_s32(coefficients128_4);
+ coefficients128_8 = drflac__vrevq_s32(coefficients128_8);
+ }
- if (!drflac__read_uint8(bs, 3, &bitsPerSample)) {
+ /* For this version we are doing one sample at a time. */
+ while (pDecodedSamples < pDecodedSamplesEnd) {
+ int64x2_t prediction128;
+ uint32x4_t zeroCountPart128;
+ uint32x4_t riceParamPart128;
+
+ if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0]) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[1], &riceParamParts[1]) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[2], &riceParamParts[2]) ||
+ !drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[3], &riceParamParts[3])) {
return DRFLAC_FALSE;
}
- if (bitsPerSample == 3 || bitsPerSample == 7) {
- continue;
- }
- crc8 = drflac_crc8(crc8, bitsPerSample, 3);
+ zeroCountPart128 = vld1q_u32(zeroCountParts);
+ riceParamPart128 = vld1q_u32(riceParamParts);
- if (!drflac__read_uint8(bs, 1, &reserved)) {
- return DRFLAC_FALSE;
- }
- if (reserved == 1) {
- continue;
- }
- crc8 = drflac_crc8(crc8, reserved, 1);
+ riceParamPart128 = vandq_u32(riceParamPart128, riceParamMask128);
+ riceParamPart128 = vorrq_u32(riceParamPart128, vshlq_u32(zeroCountPart128, riceParam128));
+ riceParamPart128 = veorq_u32(vshrq_n_u32(riceParamPart128, 1), vaddq_u32(drflac__vnotq_u32(vandq_u32(riceParamPart128, one128)), one128));
+ for (i = 0; i < 4; i += 1) {
+ int64x1_t prediction64;
- isVariableBlockSize = blockingStrategy == 1;
- if (isVariableBlockSize) {
- drflac_uint64 sampleNumber;
- drflac_result result = drflac__read_utf8_coded_number(bs, &sampleNumber, &crc8);
- if (result != DRFLAC_SUCCESS) {
- if (result == DRFLAC_END_OF_STREAM) {
- return DRFLAC_FALSE;
- } else {
- continue;
- }
- }
- header->frameNumber = 0;
- header->sampleNumber = sampleNumber;
- } else {
- drflac_uint64 frameNumber = 0;
- drflac_result result = drflac__read_utf8_coded_number(bs, &frameNumber, &crc8);
- if (result != DRFLAC_SUCCESS) {
- if (result == DRFLAC_END_OF_STREAM) {
- return DRFLAC_FALSE;
- } else {
- continue;
- }
+ prediction128 = veorq_s64(prediction128, prediction128); /* Reset to 0. */
+ switch (order)
+ {
+ case 12:
+ case 11: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_low_s32(coefficients128_8), vget_low_s32(samples128_8)));
+ case 10:
+ case 9: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_high_s32(coefficients128_8), vget_high_s32(samples128_8)));
+ case 8:
+ case 7: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_low_s32(coefficients128_4), vget_low_s32(samples128_4)));
+ case 6:
+ case 5: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_high_s32(coefficients128_4), vget_high_s32(samples128_4)));
+ case 4:
+ case 3: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_low_s32(coefficients128_0), vget_low_s32(samples128_0)));
+ case 2:
+ case 1: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_high_s32(coefficients128_0), vget_high_s32(samples128_0)));
}
- header->frameNumber = (drflac_uint32)frameNumber; /* <-- Safe cast. */
- header->sampleNumber = 0;
- }
+ /* Horizontal add and shift. */
+ prediction64 = drflac__vhaddq_s64(prediction128);
+ prediction64 = vshl_s64(prediction64, shift64);
+ prediction64 = vadd_s64(prediction64, vdup_n_s64(vgetq_lane_u32(riceParamPart128, 0)));
- if (blockSize == 1) {
- header->blockSize = 192;
- } else if (blockSize >= 2 && blockSize <= 5) {
- header->blockSize = 576 * (1 << (blockSize - 2));
- } else if (blockSize == 6) {
- if (!drflac__read_uint16(bs, 8, &header->blockSize)) {
- return DRFLAC_FALSE;
- }
- crc8 = drflac_crc8(crc8, header->blockSize, 8);
- header->blockSize += 1;
- } else if (blockSize == 7) {
- if (!drflac__read_uint16(bs, 16, &header->blockSize)) {
- return DRFLAC_FALSE;
- }
- crc8 = drflac_crc8(crc8, header->blockSize, 16);
- header->blockSize += 1;
- } else {
- header->blockSize = 256 * (1 << (blockSize - 8));
+ /* Our value should be sitting in prediction64[0]. We need to combine this with our NEON samples. */
+ samples128_8 = drflac__valignrq_s32_1(samples128_4, samples128_8);
+ samples128_4 = drflac__valignrq_s32_1(samples128_0, samples128_4);
+ samples128_0 = drflac__valignrq_s32_1(vcombine_s32(vreinterpret_s32_s64(prediction64), vdup_n_s32(0)), samples128_0);
+
+ /* Slide our rice parameter down so that the value in position 0 contains the next one to process. */
+ riceParamPart128 = drflac__valignrq_u32_1(vdupq_n_u32(0), riceParamPart128);
}
+ /* We store samples in groups of 4. */
+ vst1q_s32(pDecodedSamples, samples128_0);
+ pDecodedSamples += 4;
+ }
- if (sampleRate <= 11) {
- header->sampleRate = sampleRateTable[sampleRate];
- } else if (sampleRate == 12) {
- if (!drflac__read_uint32(bs, 8, &header->sampleRate)) {
- return DRFLAC_FALSE;
- }
- crc8 = drflac_crc8(crc8, header->sampleRate, 8);
- header->sampleRate *= 1000;
- } else if (sampleRate == 13) {
- if (!drflac__read_uint32(bs, 16, &header->sampleRate)) {
- return DRFLAC_FALSE;
- }
- crc8 = drflac_crc8(crc8, header->sampleRate, 16);
- } else if (sampleRate == 14) {
- if (!drflac__read_uint32(bs, 16, &header->sampleRate)) {
- return DRFLAC_FALSE;
- }
- crc8 = drflac_crc8(crc8, header->sampleRate, 16);
- header->sampleRate *= 10;
- } else {
- continue; /* Invalid. Assume an invalid block. */
+ /* Make sure we process the last few samples. */
+ i = (count & ~3);
+ while (i < (int)count) {
+ /* Rice extraction. */
+ if (!drflac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0])) {
+ return DRFLAC_FALSE;
}
+ /* Rice reconstruction. */
+ riceParamParts[0] &= riceParamMask;
+ riceParamParts[0] |= (zeroCountParts[0] << riceParam);
+ riceParamParts[0] = (riceParamParts[0] >> 1) ^ t[riceParamParts[0] & 0x01];
- header->channelAssignment = channelAssignment;
+ /* Sample reconstruction. */
+ pDecodedSamples[0] = riceParamParts[0] + drflac__calculate_prediction_64(order, shift, coefficients, pDecodedSamples);
- header->bitsPerSample = bitsPerSampleTable[bitsPerSample];
- if (header->bitsPerSample == 0) {
- header->bitsPerSample = streaminfoBitsPerSample;
- }
+ i += 1;
+ pDecodedSamples += 1;
+ }
- if (!drflac__read_uint8(bs, 8, &header->crc8)) {
- return DRFLAC_FALSE;
- }
+ return DRFLAC_TRUE;
+}
-#ifndef DR_FLAC_NO_CRC
- if (header->crc8 != crc8) {
- continue; /* CRC mismatch. Loop back to the top and find the next sync code. */
+static drflac_bool32 drflac__decode_samples_with_residual__rice__neon(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
+{
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(count > 0);
+ DRFLAC_ASSERT(pSamplesOut != NULL);
+
+ /* In my testing the order is rarely > 12, so in this case I'm going to simplify the NEON implementation by only handling order <= 12. */
+ if (order > 0 && order <= 12) {
+ if (bitsPerSample+shift > 32) {
+ return drflac__decode_samples_with_residual__rice__neon_64(bs, count, riceParam, order, shift, coefficients, pSamplesOut);
+ } else {
+ return drflac__decode_samples_with_residual__rice__neon_32(bs, count, riceParam, order, shift, coefficients, pSamplesOut);
}
+ } else {
+ return drflac__decode_samples_with_residual__rice__scalar(bs, bitsPerSample, count, riceParam, order, shift, coefficients, pSamplesOut);
+ }
+}
#endif
- return DRFLAC_TRUE;
+
+static drflac_bool32 drflac__decode_samples_with_residual__rice(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 count, drflac_uint8 riceParam, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
+{
+#if defined(DRFLAC_SUPPORT_SSE41)
+ if (drflac__gIsSSE41Supported) {
+ return drflac__decode_samples_with_residual__rice__sse41(bs, bitsPerSample, count, riceParam, order, shift, coefficients, pSamplesOut);
+ } else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported) {
+ return drflac__decode_samples_with_residual__rice__neon(bs, bitsPerSample, count, riceParam, order, shift, coefficients, pSamplesOut);
+ } else
+#endif
+ {
+ /* Scalar fallback. */
+ #if 0
+ return drflac__decode_samples_with_residual__rice__reference(bs, bitsPerSample, count, riceParam, order, shift, coefficients, pSamplesOut);
+ #else
+ return drflac__decode_samples_with_residual__rice__scalar(bs, bitsPerSample, count, riceParam, order, shift, coefficients, pSamplesOut);
+ #endif
}
}
-static drflac_bool32 drflac__read_subframe_header(drflac_bs* bs, drflac_subframe* pSubframe)
+/* Reads and seeks past a string of residual values as Rice codes. The decoder should be sitting on the first bit of the Rice codes. */
+static drflac_bool32 drflac__read_and_seek_residual__rice(drflac_bs* bs, drflac_uint32 count, drflac_uint8 riceParam)
{
- drflac_uint8 header;
- int type;
+ drflac_uint32 i;
- if (!drflac__read_uint8(bs, 8, &header)) {
- return DRFLAC_FALSE;
- }
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(count > 0);
- /* First bit should always be 0. */
- if ((header & 0x80) != 0) {
- return DRFLAC_FALSE;
+ for (i = 0; i < count; ++i) {
+ if (!drflac__seek_rice_parts(bs, riceParam)) {
+ return DRFLAC_FALSE;
+ }
}
- type = (header & 0x7E) >> 1;
- if (type == 0) {
- pSubframe->subframeType = DRFLAC_SUBFRAME_CONSTANT;
- } else if (type == 1) {
- pSubframe->subframeType = DRFLAC_SUBFRAME_VERBATIM;
- } else {
- if ((type & 0x20) != 0) {
- pSubframe->subframeType = DRFLAC_SUBFRAME_LPC;
- pSubframe->lpcOrder = (type & 0x1F) + 1;
- } else if ((type & 0x08) != 0) {
- pSubframe->subframeType = DRFLAC_SUBFRAME_FIXED;
- pSubframe->lpcOrder = (type & 0x07);
- if (pSubframe->lpcOrder > 4) {
- pSubframe->subframeType = DRFLAC_SUBFRAME_RESERVED;
- pSubframe->lpcOrder = 0;
+ return DRFLAC_TRUE;
+}
+
+static drflac_bool32 drflac__decode_samples_with_residual__unencoded(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 count, drflac_uint8 unencodedBitsPerSample, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pSamplesOut)
+{
+ drflac_uint32 i;
+
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(count > 0);
+ DRFLAC_ASSERT(unencodedBitsPerSample <= 31); /* <-- unencodedBitsPerSample is a 5 bit number, so cannot exceed 31. */
+ DRFLAC_ASSERT(pSamplesOut != NULL);
+
+ for (i = 0; i < count; ++i) {
+ if (unencodedBitsPerSample > 0) {
+ if (!drflac__read_int32(bs, unencodedBitsPerSample, pSamplesOut + i)) {
+ return DRFLAC_FALSE;
}
} else {
- pSubframe->subframeType = DRFLAC_SUBFRAME_RESERVED;
+ pSamplesOut[i] = 0;
}
- }
-
- if (pSubframe->subframeType == DRFLAC_SUBFRAME_RESERVED) {
- return DRFLAC_FALSE;
- }
- /* Wasted bits per sample. */
- pSubframe->wastedBitsPerSample = 0;
- if ((header & 0x01) == 1) {
- unsigned int wastedBitsPerSample;
- if (!drflac__seek_past_next_set_bit(bs, &wastedBitsPerSample)) {
- return DRFLAC_FALSE;
+ if (bitsPerSample >= 24) {
+ pSamplesOut[i] += drflac__calculate_prediction_64(order, shift, coefficients, pSamplesOut + i);
+ } else {
+ pSamplesOut[i] += drflac__calculate_prediction_32(order, shift, coefficients, pSamplesOut + i);
}
- pSubframe->wastedBitsPerSample = (unsigned char)wastedBitsPerSample + 1;
}
return DRFLAC_TRUE;
}
-static drflac_bool32 drflac__decode_subframe(drflac_bs* bs, drflac_frame* frame, int subframeIndex, drflac_int32* pDecodedSamplesOut)
+
+/*
+Reads and decodes the residual for the sub-frame the decoder is currently sitting on. This function should be called
+when the decoder is sitting at the very start of the RESIDUAL block. The first <order> residuals will be ignored. The
+<blockSize> and <order> parameters are used to determine how many residual values need to be decoded.
+*/
+static drflac_bool32 drflac__decode_samples_with_residual(drflac_bs* bs, drflac_uint32 bitsPerSample, drflac_uint32 blockSize, drflac_uint32 order, drflac_int32 shift, const drflac_int32* coefficients, drflac_int32* pDecodedSamples)
{
- drflac_subframe* pSubframe;
+ drflac_uint8 residualMethod;
+ drflac_uint8 partitionOrder;
+ drflac_uint32 samplesInPartition;
+ drflac_uint32 partitionsRemaining;
- drflac_assert(bs != NULL);
- drflac_assert(frame != NULL);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(blockSize != 0);
+ DRFLAC_ASSERT(pDecodedSamples != NULL); /* <-- Should we allow NULL, in which case we just seek past the residual rather than do a full decode? */
- pSubframe = frame->subframes + subframeIndex;
- if (!drflac__read_subframe_header(bs, pSubframe)) {
+ if (!drflac__read_uint8(bs, 2, &residualMethod)) {
return DRFLAC_FALSE;
}
- /* Side channels require an extra bit per sample. Took a while to figure that one out... */
- pSubframe->bitsPerSample = frame->header.bitsPerSample;
- if ((frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE || frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_MID_SIDE) && subframeIndex == 1) {
- pSubframe->bitsPerSample += 1;
- } else if (frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE && subframeIndex == 0) {
- pSubframe->bitsPerSample += 1;
+ if (residualMethod != DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE && residualMethod != DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) {
+ return DRFLAC_FALSE; /* Unknown or unsupported residual coding method. */
}
- /* Need to handle wasted bits per sample. */
- if (pSubframe->wastedBitsPerSample >= pSubframe->bitsPerSample) {
+    /* Ignore the first <order> values. */
+ pDecodedSamples += order;
+
+ if (!drflac__read_uint8(bs, 4, &partitionOrder)) {
return DRFLAC_FALSE;
}
- pSubframe->bitsPerSample -= pSubframe->wastedBitsPerSample;
- pSubframe->pDecodedSamples = pDecodedSamplesOut;
-
- switch (pSubframe->subframeType)
- {
- case DRFLAC_SUBFRAME_CONSTANT:
- {
- drflac__decode_samples__constant(bs, frame->header.blockSize, pSubframe->bitsPerSample, pSubframe->pDecodedSamples);
- } break;
-
- case DRFLAC_SUBFRAME_VERBATIM:
- {
- drflac__decode_samples__verbatim(bs, frame->header.blockSize, pSubframe->bitsPerSample, pSubframe->pDecodedSamples);
- } break;
- case DRFLAC_SUBFRAME_FIXED:
- {
- drflac__decode_samples__fixed(bs, frame->header.blockSize, pSubframe->bitsPerSample, pSubframe->lpcOrder, pSubframe->pDecodedSamples);
- } break;
+ /*
+ From the FLAC spec:
+ The Rice partition order in a Rice-coded residual section must be less than or equal to 8.
+ */
+ if (partitionOrder > 8) {
+ return DRFLAC_FALSE;
+ }
- case DRFLAC_SUBFRAME_LPC:
- {
- drflac__decode_samples__lpc(bs, frame->header.blockSize, pSubframe->bitsPerSample, pSubframe->lpcOrder, pSubframe->pDecodedSamples);
- } break;
+ /* Validation check. */
+ if ((blockSize / (1 << partitionOrder)) <= order) {
+ return DRFLAC_FALSE;
+ }
- default: return DRFLAC_FALSE;
+ samplesInPartition = (blockSize / (1 << partitionOrder)) - order;
+ partitionsRemaining = (1 << partitionOrder);
+ for (;;) {
+ drflac_uint8 riceParam = 0;
+ if (residualMethod == DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE) {
+ if (!drflac__read_uint8(bs, 4, &riceParam)) {
+ return DRFLAC_FALSE;
+ }
+ if (riceParam == 15) {
+ riceParam = 0xFF;
+ }
+ } else if (residualMethod == DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) {
+ if (!drflac__read_uint8(bs, 5, &riceParam)) {
+ return DRFLAC_FALSE;
+ }
+ if (riceParam == 31) {
+ riceParam = 0xFF;
+ }
+ }
+
+ if (riceParam != 0xFF) {
+ if (!drflac__decode_samples_with_residual__rice(bs, bitsPerSample, samplesInPartition, riceParam, order, shift, coefficients, pDecodedSamples)) {
+ return DRFLAC_FALSE;
+ }
+ } else {
+ unsigned char unencodedBitsPerSample = 0;
+ if (!drflac__read_uint8(bs, 5, &unencodedBitsPerSample)) {
+ return DRFLAC_FALSE;
+ }
+
+ if (!drflac__decode_samples_with_residual__unencoded(bs, bitsPerSample, samplesInPartition, unencodedBitsPerSample, order, shift, coefficients, pDecodedSamples)) {
+ return DRFLAC_FALSE;
+ }
+ }
+
+ pDecodedSamples += samplesInPartition;
+
+ if (partitionsRemaining == 1) {
+ break;
+ }
+
+ partitionsRemaining -= 1;
+
+ if (partitionOrder != 0) {
+ samplesInPartition = blockSize / (1 << partitionOrder);
+ }
}
return DRFLAC_TRUE;
}
-static drflac_bool32 drflac__seek_subframe(drflac_bs* bs, drflac_frame* frame, int subframeIndex)
+/*
+Reads and seeks past the residual for the sub-frame the decoder is currently sitting on. This function should be called
+when the decoder is sitting at the very start of the RESIDUAL block. The first <order> residuals will be set to 0. The
+<blockSize> and <order> parameters are used to determine how many residual values need to be decoded.
+*/
+static drflac_bool32 drflac__read_and_seek_residual(drflac_bs* bs, drflac_uint32 blockSize, drflac_uint32 order)
{
- drflac_subframe* pSubframe;
+ drflac_uint8 residualMethod;
+ drflac_uint8 partitionOrder;
+ drflac_uint32 samplesInPartition;
+ drflac_uint32 partitionsRemaining;
- drflac_assert(bs != NULL);
- drflac_assert(frame != NULL);
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(blockSize != 0);
- pSubframe = frame->subframes + subframeIndex;
- if (!drflac__read_subframe_header(bs, pSubframe)) {
+ if (!drflac__read_uint8(bs, 2, &residualMethod)) {
return DRFLAC_FALSE;
}
- /* Side channels require an extra bit per sample. Took a while to figure that one out... */
- pSubframe->bitsPerSample = frame->header.bitsPerSample;
- if ((frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE || frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_MID_SIDE) && subframeIndex == 1) {
- pSubframe->bitsPerSample += 1;
- } else if (frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE && subframeIndex == 0) {
- pSubframe->bitsPerSample += 1;
+ if (residualMethod != DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE && residualMethod != DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) {
+ return DRFLAC_FALSE; /* Unknown or unsupported residual coding method. */
}
- /* Need to handle wasted bits per sample. */
- if (pSubframe->wastedBitsPerSample >= pSubframe->bitsPerSample) {
+ if (!drflac__read_uint8(bs, 4, &partitionOrder)) {
return DRFLAC_FALSE;
}
- pSubframe->bitsPerSample -= pSubframe->wastedBitsPerSample;
- pSubframe->pDecodedSamples = NULL;
- switch (pSubframe->subframeType)
+ /*
+ From the FLAC spec:
+ The Rice partition order in a Rice-coded residual section must be less than or equal to 8.
+ */
+ if (partitionOrder > 8) {
+ return DRFLAC_FALSE;
+ }
+
+ /* Validation check. */
+ if ((blockSize / (1 << partitionOrder)) <= order) {
+ return DRFLAC_FALSE;
+ }
+
+ samplesInPartition = (blockSize / (1 << partitionOrder)) - order;
+ partitionsRemaining = (1 << partitionOrder);
+ for (;;)
{
- case DRFLAC_SUBFRAME_CONSTANT:
- {
- if (!drflac__seek_bits(bs, pSubframe->bitsPerSample)) {
+ drflac_uint8 riceParam = 0;
+ if (residualMethod == DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE) {
+ if (!drflac__read_uint8(bs, 4, &riceParam)) {
return DRFLAC_FALSE;
}
- } break;
-
- case DRFLAC_SUBFRAME_VERBATIM:
- {
- unsigned int bitsToSeek = frame->header.blockSize * pSubframe->bitsPerSample;
- if (!drflac__seek_bits(bs, bitsToSeek)) {
- return DRFLAC_FALSE;
+ if (riceParam == 15) {
+ riceParam = 0xFF;
}
- } break;
-
- case DRFLAC_SUBFRAME_FIXED:
- {
- unsigned int bitsToSeek = pSubframe->lpcOrder * pSubframe->bitsPerSample;
- if (!drflac__seek_bits(bs, bitsToSeek)) {
+ } else if (residualMethod == DRFLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) {
+ if (!drflac__read_uint8(bs, 5, &riceParam)) {
return DRFLAC_FALSE;
}
-
- if (!drflac__read_and_seek_residual(bs, frame->header.blockSize, pSubframe->lpcOrder)) {
- return DRFLAC_FALSE;
+ if (riceParam == 31) {
+ riceParam = 0xFF;
}
- } break;
-
- case DRFLAC_SUBFRAME_LPC:
- {
- unsigned char lpcPrecision;
+ }
- unsigned int bitsToSeek = pSubframe->lpcOrder * pSubframe->bitsPerSample;
- if (!drflac__seek_bits(bs, bitsToSeek)) {
+ if (riceParam != 0xFF) {
+ if (!drflac__read_and_seek_residual__rice(bs, samplesInPartition, riceParam)) {
return DRFLAC_FALSE;
}
-
- if (!drflac__read_uint8(bs, 4, &lpcPrecision)) {
+ } else {
+ unsigned char unencodedBitsPerSample = 0;
+ if (!drflac__read_uint8(bs, 5, &unencodedBitsPerSample)) {
return DRFLAC_FALSE;
}
- if (lpcPrecision == 15) {
- return DRFLAC_FALSE; /* Invalid. */
- }
- lpcPrecision += 1;
-
- bitsToSeek = (pSubframe->lpcOrder * lpcPrecision) + 5; /* +5 for shift. */
- if (!drflac__seek_bits(bs, bitsToSeek)) {
+ if (!drflac__seek_bits(bs, unencodedBitsPerSample * samplesInPartition)) {
return DRFLAC_FALSE;
}
+ }
- if (!drflac__read_and_seek_residual(bs, frame->header.blockSize, pSubframe->lpcOrder)) {
- return DRFLAC_FALSE;
- }
- } break;
- default: return DRFLAC_FALSE;
+ if (partitionsRemaining == 1) {
+ break;
+ }
+
+ partitionsRemaining -= 1;
+ samplesInPartition = blockSize / (1 << partitionOrder);
}
return DRFLAC_TRUE;
}
-static DRFLAC_INLINE drflac_uint8 drflac__get_channel_count_from_channel_assignment(drflac_int8 channelAssignment)
+static drflac_bool32 drflac__decode_samples__constant(drflac_bs* bs, drflac_uint32 blockSize, drflac_uint32 subframeBitsPerSample, drflac_int32* pDecodedSamples)
{
- drflac_uint8 lookup[] = {1, 2, 3, 4, 5, 6, 7, 8, 2, 2, 2};
+ drflac_uint32 i;
- drflac_assert(channelAssignment <= 10);
- return lookup[channelAssignment];
+ /* Only a single sample needs to be decoded here. */
+ drflac_int32 sample;
+ if (!drflac__read_int32(bs, subframeBitsPerSample, &sample)) {
+ return DRFLAC_FALSE;
+ }
+
+ /*
+ We don't really need to expand this, but it does simplify the process of reading samples. If this becomes a performance issue (unlikely)
+ we'll want to look at a more efficient way.
+ */
+ for (i = 0; i < blockSize; ++i) {
+ pDecodedSamples[i] = sample;
+ }
+
+ return DRFLAC_TRUE;
}
-static drflac_result drflac__decode_flac_frame(drflac* pFlac)
+static drflac_bool32 drflac__decode_samples__verbatim(drflac_bs* bs, drflac_uint32 blockSize, drflac_uint32 subframeBitsPerSample, drflac_int32* pDecodedSamples)
{
- int channelCount;
- int i;
- drflac_uint8 paddingSizeInBits;
- drflac_uint16 desiredCRC16;
-#ifndef DR_FLAC_NO_CRC
- drflac_uint16 actualCRC16;
-#endif
+ drflac_uint32 i;
- /* This function should be called while the stream is sitting on the first byte after the frame header. */
- drflac_zero_memory(pFlac->currentFrame.subframes, sizeof(pFlac->currentFrame.subframes));
+ for (i = 0; i < blockSize; ++i) {
+ drflac_int32 sample;
+ if (!drflac__read_int32(bs, subframeBitsPerSample, &sample)) {
+ return DRFLAC_FALSE;
+ }
- /* The frame block size must never be larger than the maximum block size defined by the FLAC stream. */
- if (pFlac->currentFrame.header.blockSize > pFlac->maxBlockSize) {
- return DRFLAC_ERROR;
+ pDecodedSamples[i] = sample;
}
- /* The number of channels in the frame must match the channel count from the STREAMINFO block. */
- channelCount = drflac__get_channel_count_from_channel_assignment(pFlac->currentFrame.header.channelAssignment);
- if (channelCount != (int)pFlac->channels) {
- return DRFLAC_ERROR;
- }
+ return DRFLAC_TRUE;
+}
- for (i = 0; i < channelCount; ++i) {
- if (!drflac__decode_subframe(&pFlac->bs, &pFlac->currentFrame, i, pFlac->pDecodedSamples + ((pFlac->currentFrame.header.blockSize+DRFLAC_LEADING_SAMPLES) * i) + DRFLAC_LEADING_SAMPLES)) {
- return DRFLAC_ERROR;
- }
- }
+static drflac_bool32 drflac__decode_samples__fixed(drflac_bs* bs, drflac_uint32 blockSize, drflac_uint32 subframeBitsPerSample, drflac_uint8 lpcOrder, drflac_int32* pDecodedSamples)
+{
+ drflac_uint32 i;
- paddingSizeInBits = DRFLAC_CACHE_L1_BITS_REMAINING(&pFlac->bs) & 7;
- if (paddingSizeInBits > 0) {
- drflac_uint8 padding = 0;
- if (!drflac__read_uint8(&pFlac->bs, paddingSizeInBits, &padding)) {
- return DRFLAC_END_OF_STREAM;
+ static drflac_int32 lpcCoefficientsTable[5][4] = {
+ {0, 0, 0, 0},
+ {1, 0, 0, 0},
+ {2, -1, 0, 0},
+ {3, -3, 1, 0},
+ {4, -6, 4, -1}
+ };
+
+ /* Warm up samples and coefficients. */
+ for (i = 0; i < lpcOrder; ++i) {
+ drflac_int32 sample;
+ if (!drflac__read_int32(bs, subframeBitsPerSample, &sample)) {
+ return DRFLAC_FALSE;
}
- }
-#ifndef DR_FLAC_NO_CRC
- actualCRC16 = drflac__flush_crc16(&pFlac->bs);
-#endif
- if (!drflac__read_uint16(&pFlac->bs, 16, &desiredCRC16)) {
- return DRFLAC_END_OF_STREAM;
+ pDecodedSamples[i] = sample;
}
-#ifndef DR_FLAC_NO_CRC
- if (actualCRC16 != desiredCRC16) {
- return DRFLAC_CRC_MISMATCH; /* CRC mismatch. */
+ if (!drflac__decode_samples_with_residual(bs, subframeBitsPerSample, blockSize, lpcOrder, 0, lpcCoefficientsTable[lpcOrder], pDecodedSamples)) {
+ return DRFLAC_FALSE;
}
-#endif
-
- pFlac->currentFrame.samplesRemaining = pFlac->currentFrame.header.blockSize * channelCount;
- return DRFLAC_SUCCESS;
+ return DRFLAC_TRUE;
}
-static drflac_result drflac__seek_flac_frame(drflac* pFlac)
+static drflac_bool32 drflac__decode_samples__lpc(drflac_bs* bs, drflac_uint32 blockSize, drflac_uint32 bitsPerSample, drflac_uint8 lpcOrder, drflac_int32* pDecodedSamples)
{
- int channelCount;
- int i;
- drflac_uint16 desiredCRC16;
-#ifndef DR_FLAC_NO_CRC
- drflac_uint16 actualCRC16;
-#endif
-
- channelCount = drflac__get_channel_count_from_channel_assignment(pFlac->currentFrame.header.channelAssignment);
- for (i = 0; i < channelCount; ++i) {
- if (!drflac__seek_subframe(&pFlac->bs, &pFlac->currentFrame, i)) {
- return DRFLAC_ERROR;
- }
- }
+ drflac_uint8 i;
+ drflac_uint8 lpcPrecision;
+ drflac_int8 lpcShift;
+ drflac_int32 coefficients[32];
- /* Padding. */
- if (!drflac__seek_bits(&pFlac->bs, DRFLAC_CACHE_L1_BITS_REMAINING(&pFlac->bs) & 7)) {
- return DRFLAC_ERROR;
- }
-
- /* CRC. */
-#ifndef DR_FLAC_NO_CRC
- actualCRC16 = drflac__flush_crc16(&pFlac->bs);
-#endif
- if (!drflac__read_uint16(&pFlac->bs, 16, &desiredCRC16)) {
- return DRFLAC_END_OF_STREAM;
- }
-
-#ifndef DR_FLAC_NO_CRC
- if (actualCRC16 != desiredCRC16) {
- return DRFLAC_CRC_MISMATCH; /* CRC mismatch. */
- }
-#endif
-
- return DRFLAC_SUCCESS;
-}
-
-static drflac_bool32 drflac__read_and_decode_next_flac_frame(drflac* pFlac)
-{
- drflac_assert(pFlac != NULL);
-
- for (;;) {
- drflac_result result;
-
- if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFrame.header)) {
+ /* Warm up samples. */
+ for (i = 0; i < lpcOrder; ++i) {
+ drflac_int32 sample;
+ if (!drflac__read_int32(bs, bitsPerSample, &sample)) {
return DRFLAC_FALSE;
}
- result = drflac__decode_flac_frame(pFlac);
- if (result != DRFLAC_SUCCESS) {
- if (result == DRFLAC_CRC_MISMATCH) {
- continue; /* CRC mismatch. Skip to the next frame. */
- } else {
- return DRFLAC_FALSE;
- }
- }
-
- return DRFLAC_TRUE;
+ pDecodedSamples[i] = sample;
}
-}
-
-
-static void drflac__get_current_frame_sample_range(drflac* pFlac, drflac_uint64* pFirstSampleInFrameOut, drflac_uint64* pLastSampleInFrameOut)
-{
- unsigned int channelCount;
- drflac_uint64 firstSampleInFrame;
- drflac_uint64 lastSampleInFrame;
-
- drflac_assert(pFlac != NULL);
-
- channelCount = drflac__get_channel_count_from_channel_assignment(pFlac->currentFrame.header.channelAssignment);
- firstSampleInFrame = pFlac->currentFrame.header.sampleNumber*channelCount;
- if (firstSampleInFrame == 0) {
- firstSampleInFrame = pFlac->currentFrame.header.frameNumber * pFlac->maxBlockSize*channelCount;
+ if (!drflac__read_uint8(bs, 4, &lpcPrecision)) {
+ return DRFLAC_FALSE;
}
-
- lastSampleInFrame = firstSampleInFrame + (pFlac->currentFrame.header.blockSize*channelCount);
- if (lastSampleInFrame > 0) {
- lastSampleInFrame -= 1; /* Needs to be zero based. */
+ if (lpcPrecision == 15) {
+ return DRFLAC_FALSE; /* Invalid. */
}
+ lpcPrecision += 1;
- if (pFirstSampleInFrameOut) {
- *pFirstSampleInFrameOut = firstSampleInFrame;
- }
- if (pLastSampleInFrameOut) {
- *pLastSampleInFrameOut = lastSampleInFrame;
+ if (!drflac__read_int8(bs, 5, &lpcShift)) {
+ return DRFLAC_FALSE;
}
-}
-
-/* This function will be replacing drflac__get_current_frame_sample_range(), but it's not currently used so I have commented it out to silence a compiler warning. */
-#if 0
-static void drflac__get_pcm_frame_range_of_current_flac_frame(drflac* pFlac, drflac_uint64* pFirstPCMFrame, drflac_uint64* pLastPCMFrame)
-{
- drflac_uint64 firstPCMFrame;
- drflac_uint64 lastPCMFrame;
- drflac_assert(pFlac != NULL);
-
- firstPCMFrame = pFlac->currentFrame.header.sampleNumber;
- if (firstPCMFrame == 0) {
- firstPCMFrame = pFlac->currentFrame.header.frameNumber * pFlac->maxBlockSize;
+ DRFLAC_ZERO_MEMORY(coefficients, sizeof(coefficients));
+ for (i = 0; i < lpcOrder; ++i) {
+ if (!drflac__read_int32(bs, lpcPrecision, coefficients + i)) {
+ return DRFLAC_FALSE;
+ }
}
- lastPCMFrame = firstPCMFrame + (pFlac->currentFrame.header.blockSize);
- if (lastPCMFrame > 0) {
- lastPCMFrame -= 1; /* Needs to be zero based. */
+ if (!drflac__decode_samples_with_residual(bs, bitsPerSample, blockSize, lpcOrder, lpcShift, coefficients, pDecodedSamples)) {
+ return DRFLAC_FALSE;
}
- if (pFirstPCMFrame) {
- *pFirstPCMFrame = firstPCMFrame;
- }
- if (pLastPCMFrame) {
- *pLastPCMFrame = lastPCMFrame;
- }
+ return DRFLAC_TRUE;
}
-#endif
-
-static drflac_bool32 drflac__seek_to_first_frame(drflac* pFlac)
-{
- drflac_bool32 result;
-
- drflac_assert(pFlac != NULL);
-
- result = drflac__seek_to_byte(&pFlac->bs, pFlac->firstFramePos);
- drflac_zero_memory(&pFlac->currentFrame, sizeof(pFlac->currentFrame));
- pFlac->currentSample = 0;
- return result;
-}
-
-static DRFLAC_INLINE drflac_result drflac__seek_to_next_flac_frame(drflac* pFlac)
+static drflac_bool32 drflac__read_next_flac_frame_header(drflac_bs* bs, drflac_uint8 streaminfoBitsPerSample, drflac_frame_header* header)
{
- /* This function should only ever be called while the decoder is sitting on the first byte past the FRAME_HEADER section. */
- drflac_assert(pFlac != NULL);
- return drflac__seek_flac_frame(pFlac);
-}
+ const drflac_uint32 sampleRateTable[12] = {0, 88200, 176400, 192000, 8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000};
+ const drflac_uint8 bitsPerSampleTable[8] = {0, 8, 12, (drflac_uint8)-1, 16, 20, 24, (drflac_uint8)-1}; /* -1 = reserved. */
-drflac_uint64 drflac__seek_forward_by_samples(drflac* pFlac, drflac_uint64 samplesToRead)
-{
- drflac_uint64 samplesRead = 0;
- while (samplesToRead > 0) {
- if (pFlac->currentFrame.samplesRemaining == 0) {
- if (!drflac__read_and_decode_next_flac_frame(pFlac)) {
- break; /* Couldn't read the next frame, so just break from the loop and return. */
- }
- } else {
- if (pFlac->currentFrame.samplesRemaining > samplesToRead) {
- samplesRead += samplesToRead;
- pFlac->currentFrame.samplesRemaining -= (drflac_uint32)samplesToRead; /* <-- Safe cast. Will always be < currentFrame.samplesRemaining < 65536. */
- samplesToRead = 0;
- } else {
- samplesRead += pFlac->currentFrame.samplesRemaining;
- samplesToRead -= pFlac->currentFrame.samplesRemaining;
- pFlac->currentFrame.samplesRemaining = 0;
- }
- }
- }
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(header != NULL);
- pFlac->currentSample += samplesRead;
- return samplesRead;
-}
+ /* Keep looping until we find a valid sync code. */
+ for (;;) {
+ drflac_uint8 crc8 = 0xCE; /* 0xCE = drflac_crc8(0, 0x3FFE, 14); */
+ drflac_uint8 reserved = 0;
+ drflac_uint8 blockingStrategy = 0;
+ drflac_uint8 blockSize = 0;
+ drflac_uint8 sampleRate = 0;
+ drflac_uint8 channelAssignment = 0;
+ drflac_uint8 bitsPerSample = 0;
+ drflac_bool32 isVariableBlockSize;
-drflac_uint64 drflac__seek_forward_by_pcm_frames(drflac* pFlac, drflac_uint64 pcmFramesToSeek)
-{
- return drflac__seek_forward_by_samples(pFlac, pcmFramesToSeek*pFlac->channels);
-}
+ if (!drflac__find_and_seek_to_next_sync_code(bs)) {
+ return DRFLAC_FALSE;
+ }
-static drflac_bool32 drflac__seek_to_sample__brute_force(drflac* pFlac, drflac_uint64 sampleIndex)
-{
- drflac_bool32 isMidFrame = DRFLAC_FALSE;
- drflac_uint64 runningSampleCount;
+ if (!drflac__read_uint8(bs, 1, &reserved)) {
+ return DRFLAC_FALSE;
+ }
+ if (reserved == 1) {
+ continue;
+ }
+ crc8 = drflac_crc8(crc8, reserved, 1);
- drflac_assert(pFlac != NULL);
+ if (!drflac__read_uint8(bs, 1, &blockingStrategy)) {
+ return DRFLAC_FALSE;
+ }
+ crc8 = drflac_crc8(crc8, blockingStrategy, 1);
- /* If we are seeking forward we start from the current position. Otherwise we need to start all the way from the start of the file. */
- if (sampleIndex >= pFlac->currentSample) {
- /* Seeking forward. Need to seek from the current position. */
- runningSampleCount = pFlac->currentSample;
+ if (!drflac__read_uint8(bs, 4, &blockSize)) {
+ return DRFLAC_FALSE;
+ }
+ if (blockSize == 0) {
+ continue;
+ }
+ crc8 = drflac_crc8(crc8, blockSize, 4);
- /* The frame header for the first frame may not yet have been read. We need to do that if necessary. */
- if (pFlac->currentSample == 0 && pFlac->currentFrame.samplesRemaining == 0) {
- if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFrame.header)) {
- return DRFLAC_FALSE;
- }
- } else {
- isMidFrame = DRFLAC_TRUE;
+ if (!drflac__read_uint8(bs, 4, &sampleRate)) {
+ return DRFLAC_FALSE;
}
- } else {
- /* Seeking backwards. Need to seek from the start of the file. */
- runningSampleCount = 0;
+ crc8 = drflac_crc8(crc8, sampleRate, 4);
- /* Move back to the start. */
- if (!drflac__seek_to_first_frame(pFlac)) {
+ if (!drflac__read_uint8(bs, 4, &channelAssignment)) {
return DRFLAC_FALSE;
}
+ if (channelAssignment > 10) {
+ continue;
+ }
+ crc8 = drflac_crc8(crc8, channelAssignment, 4);
- /* Decode the first frame in preparation for sample-exact seeking below. */
- if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFrame.header)) {
+ if (!drflac__read_uint8(bs, 3, &bitsPerSample)) {
return DRFLAC_FALSE;
}
- }
+ if (bitsPerSample == 3 || bitsPerSample == 7) {
+ continue;
+ }
+ crc8 = drflac_crc8(crc8, bitsPerSample, 3);
- /*
- We need to as quickly as possible find the frame that contains the target sample. To do this, we iterate over each frame and inspect its
- header. If based on the header we can determine that the frame contains the sample, we do a full decode of that frame.
- */
- for (;;) {
- drflac_uint64 sampleCountInThisFrame;
- drflac_uint64 firstSampleInFrame = 0;
- drflac_uint64 lastSampleInFrame = 0;
- drflac__get_current_frame_sample_range(pFlac, &firstSampleInFrame, &lastSampleInFrame);
+ if (!drflac__read_uint8(bs, 1, &reserved)) {
+ return DRFLAC_FALSE;
+ }
+ if (reserved == 1) {
+ continue;
+ }
+ crc8 = drflac_crc8(crc8, reserved, 1);
- sampleCountInThisFrame = (lastSampleInFrame - firstSampleInFrame) + 1;
- if (sampleIndex < (runningSampleCount + sampleCountInThisFrame)) {
- /*
- The sample should be in this frame. We need to fully decode it, however if it's an invalid frame (a CRC mismatch), we need to pretend
- it never existed and keep iterating.
- */
- drflac_uint64 samplesToDecode = sampleIndex - runningSampleCount;
- if (!isMidFrame) {
- drflac_result result = drflac__decode_flac_frame(pFlac);
- if (result == DRFLAC_SUCCESS) {
- /* The frame is valid. We just need to skip over some samples to ensure it's sample-exact. */
- return drflac__seek_forward_by_samples(pFlac, samplesToDecode) == samplesToDecode; /* <-- If this fails, something bad has happened (it should never fail). */
+ isVariableBlockSize = blockingStrategy == 1;
+ if (isVariableBlockSize) {
+ drflac_uint64 pcmFrameNumber;
+ drflac_result result = drflac__read_utf8_coded_number(bs, &pcmFrameNumber, &crc8);
+ if (result != DRFLAC_SUCCESS) {
+ if (result == DRFLAC_END_OF_STREAM) {
+ return DRFLAC_FALSE;
} else {
- if (result == DRFLAC_CRC_MISMATCH) {
- goto next_iteration; /* CRC mismatch. Pretend this frame never existed. */
- } else {
- return DRFLAC_FALSE;
- }
+ continue;
}
- } else {
- /* We started seeking mid-frame which means we need to skip the frame decoding part. */
- return drflac__seek_forward_by_samples(pFlac, samplesToDecode) == samplesToDecode;
}
+ header->flacFrameNumber = 0;
+ header->pcmFrameNumber = pcmFrameNumber;
} else {
- /*
- It's not in this frame. We need to seek past the frame, but check if there was a CRC mismatch. If so, we pretend this
- frame never existed and leave the running sample count untouched.
- */
- if (!isMidFrame) {
- drflac_result result = drflac__seek_to_next_flac_frame(pFlac);
- if (result == DRFLAC_SUCCESS) {
- runningSampleCount += sampleCountInThisFrame;
+ drflac_uint64 flacFrameNumber = 0;
+ drflac_result result = drflac__read_utf8_coded_number(bs, &flacFrameNumber, &crc8);
+ if (result != DRFLAC_SUCCESS) {
+ if (result == DRFLAC_END_OF_STREAM) {
+ return DRFLAC_FALSE;
} else {
- if (result == DRFLAC_CRC_MISMATCH) {
- goto next_iteration; /* CRC mismatch. Pretend this frame never existed. */
- } else {
- return DRFLAC_FALSE;
- }
+ continue;
}
- } else {
- /*
- We started seeking mid-frame which means we need to seek by reading to the end of the frame instead of with
- drflac__seek_to_next_flac_frame() which only works if the decoder is sitting on the byte just after the frame header.
- */
- runningSampleCount += pFlac->currentFrame.samplesRemaining;
- pFlac->currentFrame.samplesRemaining = 0;
- isMidFrame = DRFLAC_FALSE;
}
+ header->flacFrameNumber = (drflac_uint32)flacFrameNumber; /* <-- Safe cast. */
+ header->pcmFrameNumber = 0;
}
- next_iteration:
- /* Grab the next frame in preparation for the next iteration. */
- if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFrame.header)) {
- return DRFLAC_FALSE;
- }
- }
-}
-
-
-static drflac_bool32 drflac__seek_to_sample__seek_table(drflac* pFlac, drflac_uint64 sampleIndex)
-{
- drflac_uint32 iClosestSeekpoint = 0;
- drflac_bool32 isMidFrame = DRFLAC_FALSE;
- drflac_uint64 runningSampleCount;
- drflac_uint32 iSeekpoint;
-
- drflac_assert(pFlac != NULL);
-
- if (pFlac->pSeekpoints == NULL || pFlac->seekpointCount == 0) {
- return DRFLAC_FALSE;
- }
- for (iSeekpoint = 0; iSeekpoint < pFlac->seekpointCount; ++iSeekpoint) {
- if (pFlac->pSeekpoints[iSeekpoint].firstSample*pFlac->channels >= sampleIndex) {
- break;
+ DRFLAC_ASSERT(blockSize > 0);
+ if (blockSize == 1) {
+ header->blockSizeInPCMFrames = 192;
+ } else if (blockSize >= 2 && blockSize <= 5) {
+ header->blockSizeInPCMFrames = 576 * (1 << (blockSize - 2));
+ } else if (blockSize == 6) {
+ if (!drflac__read_uint16(bs, 8, &header->blockSizeInPCMFrames)) {
+ return DRFLAC_FALSE;
+ }
+ crc8 = drflac_crc8(crc8, header->blockSizeInPCMFrames, 8);
+ header->blockSizeInPCMFrames += 1;
+ } else if (blockSize == 7) {
+ if (!drflac__read_uint16(bs, 16, &header->blockSizeInPCMFrames)) {
+ return DRFLAC_FALSE;
+ }
+ crc8 = drflac_crc8(crc8, header->blockSizeInPCMFrames, 16);
+ header->blockSizeInPCMFrames += 1;
+ } else {
+ DRFLAC_ASSERT(blockSize >= 8);
+ header->blockSizeInPCMFrames = 256 * (1 << (blockSize - 8));
}
- iClosestSeekpoint = iSeekpoint;
- }
- /*
- At this point we should have found the seekpoint closest to our sample. If we are seeking forward and the closest seekpoint is _before_ the current sample, we
- just seek forward from where we are. Otherwise we start seeking from the seekpoint's first sample.
- */
- if ((sampleIndex >= pFlac->currentSample) && (pFlac->pSeekpoints[iClosestSeekpoint].firstSample*pFlac->channels <= pFlac->currentSample)) {
- /* Optimized case. Just seek forward from where we are. */
- runningSampleCount = pFlac->currentSample;
-
- /* The frame header for the first frame may not yet have been read. We need to do that if necessary. */
- if (pFlac->currentSample == 0 && pFlac->currentFrame.samplesRemaining == 0) {
- if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFrame.header)) {
+ if (sampleRate <= 11) {
+ header->sampleRate = sampleRateTable[sampleRate];
+ } else if (sampleRate == 12) {
+ if (!drflac__read_uint32(bs, 8, &header->sampleRate)) {
+ return DRFLAC_FALSE;
+ }
+ crc8 = drflac_crc8(crc8, header->sampleRate, 8);
+ header->sampleRate *= 1000;
+ } else if (sampleRate == 13) {
+ if (!drflac__read_uint32(bs, 16, &header->sampleRate)) {
+ return DRFLAC_FALSE;
+ }
+ crc8 = drflac_crc8(crc8, header->sampleRate, 16);
+ } else if (sampleRate == 14) {
+ if (!drflac__read_uint32(bs, 16, &header->sampleRate)) {
return DRFLAC_FALSE;
}
+ crc8 = drflac_crc8(crc8, header->sampleRate, 16);
+ header->sampleRate *= 10;
} else {
- isMidFrame = DRFLAC_TRUE;
+ continue; /* Invalid. Assume an invalid block. */
}
- } else {
- /* Slower case. Seek to the start of the seekpoint and then seek forward from there. */
- runningSampleCount = pFlac->pSeekpoints[iClosestSeekpoint].firstSample*pFlac->channels;
- if (!drflac__seek_to_byte(&pFlac->bs, pFlac->firstFramePos + pFlac->pSeekpoints[iClosestSeekpoint].frameOffset)) {
- return DRFLAC_FALSE;
+
+ header->channelAssignment = channelAssignment;
+
+ header->bitsPerSample = bitsPerSampleTable[bitsPerSample];
+ if (header->bitsPerSample == 0) {
+ header->bitsPerSample = streaminfoBitsPerSample;
}
- /* Grab the frame the seekpoint is sitting on in preparation for the sample-exact seeking below. */
- if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFrame.header)) {
+ if (!drflac__read_uint8(bs, 8, &header->crc8)) {
return DRFLAC_FALSE;
}
+
+#ifndef DR_FLAC_NO_CRC
+ if (header->crc8 != crc8) {
+ continue; /* CRC mismatch. Loop back to the top and find the next sync code. */
+ }
+#endif
+ return DRFLAC_TRUE;
}
+}
- for (;;) {
- drflac_uint64 sampleCountInThisFrame;
- drflac_uint64 firstSampleInFrame = 0;
- drflac_uint64 lastSampleInFrame = 0;
- drflac__get_current_frame_sample_range(pFlac, &firstSampleInFrame, &lastSampleInFrame);
+static drflac_bool32 drflac__read_subframe_header(drflac_bs* bs, drflac_subframe* pSubframe)
+{
+ drflac_uint8 header;
+ int type;
- sampleCountInThisFrame = (lastSampleInFrame - firstSampleInFrame) + 1;
- if (sampleIndex < (runningSampleCount + sampleCountInThisFrame)) {
- /*
- The sample should be in this frame. We need to fully decode it, but if it's an invalid frame (a CRC mismatch) we need to pretend
- it never existed and keep iterating.
- */
- drflac_uint64 samplesToDecode = sampleIndex - runningSampleCount;
+ if (!drflac__read_uint8(bs, 8, &header)) {
+ return DRFLAC_FALSE;
+ }
- if (!isMidFrame) {
- drflac_result result = drflac__decode_flac_frame(pFlac);
- if (result == DRFLAC_SUCCESS) {
- /* The frame is valid. We just need to skip over some samples to ensure it's sample-exact. */
- return drflac__seek_forward_by_samples(pFlac, samplesToDecode) == samplesToDecode; /* <-- If this fails, something bad has happened (it should never fail). */
- } else {
- if (result == DRFLAC_CRC_MISMATCH) {
- goto next_iteration; /* CRC mismatch. Pretend this frame never existed. */
- } else {
- return DRFLAC_FALSE;
- }
- }
- } else {
- /* We started seeking mid-frame which means we need to skip the frame decoding part. */
- return drflac__seek_forward_by_samples(pFlac, samplesToDecode) == samplesToDecode;
+ /* First bit should always be 0. */
+ if ((header & 0x80) != 0) {
+ return DRFLAC_FALSE;
+ }
+
+ type = (header & 0x7E) >> 1;
+ if (type == 0) {
+ pSubframe->subframeType = DRFLAC_SUBFRAME_CONSTANT;
+ } else if (type == 1) {
+ pSubframe->subframeType = DRFLAC_SUBFRAME_VERBATIM;
+ } else {
+ if ((type & 0x20) != 0) {
+ pSubframe->subframeType = DRFLAC_SUBFRAME_LPC;
+ pSubframe->lpcOrder = (type & 0x1F) + 1;
+ } else if ((type & 0x08) != 0) {
+ pSubframe->subframeType = DRFLAC_SUBFRAME_FIXED;
+ pSubframe->lpcOrder = (type & 0x07);
+ if (pSubframe->lpcOrder > 4) {
+ pSubframe->subframeType = DRFLAC_SUBFRAME_RESERVED;
+ pSubframe->lpcOrder = 0;
}
} else {
- /*
- It's not in this frame. We need to seek past the frame, but check if there was a CRC mismatch. If so, we pretend this
- frame never existed and leave the running sample count untouched.
- */
- if (!isMidFrame) {
- drflac_result result = drflac__seek_to_next_flac_frame(pFlac);
- if (result == DRFLAC_SUCCESS) {
- runningSampleCount += sampleCountInThisFrame;
- } else {
- if (result == DRFLAC_CRC_MISMATCH) {
- goto next_iteration; /* CRC mismatch. Pretend this frame never existed. */
- } else {
- return DRFLAC_FALSE;
- }
- }
- } else {
- /*
- We started seeking mid-frame which means we need to seek by reading to the end of the frame instead of with
- drflac__seek_to_next_flac_frame() which only works if the decoder is sitting on the byte just after the frame header.
- */
- runningSampleCount += pFlac->currentFrame.samplesRemaining;
- pFlac->currentFrame.samplesRemaining = 0;
- isMidFrame = DRFLAC_FALSE;
- }
+ pSubframe->subframeType = DRFLAC_SUBFRAME_RESERVED;
}
+ }
- next_iteration:
- /* Grab the next frame in preparation for the next iteration. */
- if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFrame.header)) {
+ if (pSubframe->subframeType == DRFLAC_SUBFRAME_RESERVED) {
+ return DRFLAC_FALSE;
+ }
+
+ /* Wasted bits per sample. */
+ pSubframe->wastedBitsPerSample = 0;
+ if ((header & 0x01) == 1) {
+ unsigned int wastedBitsPerSample;
+ if (!drflac__seek_past_next_set_bit(bs, &wastedBitsPerSample)) {
return DRFLAC_FALSE;
}
+ pSubframe->wastedBitsPerSample = (unsigned char)wastedBitsPerSample + 1;
}
-}
+ return DRFLAC_TRUE;
+}
-#ifndef DR_FLAC_NO_OGG
-typedef struct
+static drflac_bool32 drflac__decode_subframe(drflac_bs* bs, drflac_frame* frame, int subframeIndex, drflac_int32* pDecodedSamplesOut)
{
- drflac_uint8 capturePattern[4]; /* Should be "OggS" */
- drflac_uint8 structureVersion; /* Always 0. */
- drflac_uint8 headerType;
- drflac_uint64 granulePosition;
- drflac_uint32 serialNumber;
- drflac_uint32 sequenceNumber;
- drflac_uint32 checksum;
- drflac_uint8 segmentCount;
- drflac_uint8 segmentTable[255];
-} drflac_ogg_page_header;
-#endif
+ drflac_subframe* pSubframe;
+ drflac_uint32 subframeBitsPerSample;
-typedef struct
-{
- drflac_read_proc onRead;
- drflac_seek_proc onSeek;
- drflac_meta_proc onMeta;
- drflac_container container;
- void* pUserData;
- void* pUserDataMD;
- drflac_uint32 sampleRate;
- drflac_uint8 channels;
- drflac_uint8 bitsPerSample;
- drflac_uint64 totalSampleCount;
- drflac_uint16 maxBlockSize;
- drflac_uint64 runningFilePos;
- drflac_bool32 hasStreamInfoBlock;
- drflac_bool32 hasMetadataBlocks;
- drflac_bs bs; /* <-- A bit streamer is required for loading data during initialization. */
- drflac_frame_header firstFrameHeader; /* <-- The header of the first frame that was read during relaxed initalization. Only set if there is no STREAMINFO block. */
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(frame != NULL);
-#ifndef DR_FLAC_NO_OGG
- drflac_uint32 oggSerial;
- drflac_uint64 oggFirstBytePos;
- drflac_ogg_page_header oggBosHeader;
-#endif
-} drflac_init_info;
+ pSubframe = frame->subframes + subframeIndex;
+ if (!drflac__read_subframe_header(bs, pSubframe)) {
+ return DRFLAC_FALSE;
+ }
-static DRFLAC_INLINE void drflac__decode_block_header(drflac_uint32 blockHeader, drflac_uint8* isLastBlock, drflac_uint8* blockType, drflac_uint32* blockSize)
-{
- blockHeader = drflac__be2host_32(blockHeader);
- *isLastBlock = (blockHeader & 0x80000000UL) >> 31;
- *blockType = (blockHeader & 0x7F000000UL) >> 24;
- *blockSize = (blockHeader & 0x00FFFFFFUL);
-}
+ /* Side channels require an extra bit per sample. Took a while to figure that one out... */
+ subframeBitsPerSample = frame->header.bitsPerSample;
+ if ((frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE || frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_MID_SIDE) && subframeIndex == 1) {
+ subframeBitsPerSample += 1;
+ } else if (frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE && subframeIndex == 0) {
+ subframeBitsPerSample += 1;
+ }
-static DRFLAC_INLINE drflac_bool32 drflac__read_and_decode_block_header(drflac_read_proc onRead, void* pUserData, drflac_uint8* isLastBlock, drflac_uint8* blockType, drflac_uint32* blockSize)
-{
- drflac_uint32 blockHeader;
- if (onRead(pUserData, &blockHeader, 4) != 4) {
+ /* Need to handle wasted bits per sample. */
+ if (pSubframe->wastedBitsPerSample >= subframeBitsPerSample) {
return DRFLAC_FALSE;
}
+ subframeBitsPerSample -= pSubframe->wastedBitsPerSample;
+
+ pSubframe->pSamplesS32 = pDecodedSamplesOut;
+
+ switch (pSubframe->subframeType)
+ {
+ case DRFLAC_SUBFRAME_CONSTANT:
+ {
+ drflac__decode_samples__constant(bs, frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->pSamplesS32);
+ } break;
+
+ case DRFLAC_SUBFRAME_VERBATIM:
+ {
+ drflac__decode_samples__verbatim(bs, frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->pSamplesS32);
+ } break;
+
+ case DRFLAC_SUBFRAME_FIXED:
+ {
+ drflac__decode_samples__fixed(bs, frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->lpcOrder, pSubframe->pSamplesS32);
+ } break;
+
+ case DRFLAC_SUBFRAME_LPC:
+ {
+ drflac__decode_samples__lpc(bs, frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->lpcOrder, pSubframe->pSamplesS32);
+ } break;
+
+ default: return DRFLAC_FALSE;
+ }
- drflac__decode_block_header(blockHeader, isLastBlock, blockType, blockSize);
return DRFLAC_TRUE;
}
-drflac_bool32 drflac__read_streaminfo(drflac_read_proc onRead, void* pUserData, drflac_streaminfo* pStreamInfo)
+static drflac_bool32 drflac__seek_subframe(drflac_bs* bs, drflac_frame* frame, int subframeIndex)
{
- drflac_uint32 blockSizes;
- drflac_uint64 frameSizes = 0;
- drflac_uint64 importantProps;
- drflac_uint8 md5[16];
+ drflac_subframe* pSubframe;
+ drflac_uint32 subframeBitsPerSample;
- /* min/max block size. */
- if (onRead(pUserData, &blockSizes, 4) != 4) {
- return DRFLAC_FALSE;
- }
+ DRFLAC_ASSERT(bs != NULL);
+ DRFLAC_ASSERT(frame != NULL);
- /* min/max frame size. */
- if (onRead(pUserData, &frameSizes, 6) != 6) {
+ pSubframe = frame->subframes + subframeIndex;
+ if (!drflac__read_subframe_header(bs, pSubframe)) {
return DRFLAC_FALSE;
}
- /* Sample rate, channels, bits per sample and total sample count. */
- if (onRead(pUserData, &importantProps, 8) != 8) {
- return DRFLAC_FALSE;
+ /* Side channels require an extra bit per sample. Took a while to figure that one out... */
+ subframeBitsPerSample = frame->header.bitsPerSample;
+ if ((frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE || frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_MID_SIDE) && subframeIndex == 1) {
+ subframeBitsPerSample += 1;
+ } else if (frame->header.channelAssignment == DRFLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE && subframeIndex == 0) {
+ subframeBitsPerSample += 1;
}
- /* MD5 */
- if (onRead(pUserData, md5, sizeof(md5)) != sizeof(md5)) {
+ /* Need to handle wasted bits per sample. */
+ if (pSubframe->wastedBitsPerSample >= subframeBitsPerSample) {
return DRFLAC_FALSE;
}
+ subframeBitsPerSample -= pSubframe->wastedBitsPerSample;
- blockSizes = drflac__be2host_32(blockSizes);
- frameSizes = drflac__be2host_64(frameSizes);
- importantProps = drflac__be2host_64(importantProps);
-
- pStreamInfo->minBlockSize = (blockSizes & 0xFFFF0000) >> 16;
- pStreamInfo->maxBlockSize = (blockSizes & 0x0000FFFF);
- pStreamInfo->minFrameSize = (drflac_uint32)((frameSizes & (((drflac_uint64)0x00FFFFFF << 16) << 24)) >> 40);
- pStreamInfo->maxFrameSize = (drflac_uint32)((frameSizes & (((drflac_uint64)0x00FFFFFF << 16) << 0)) >> 16);
- pStreamInfo->sampleRate = (drflac_uint32)((importantProps & (((drflac_uint64)0x000FFFFF << 16) << 28)) >> 44);
- pStreamInfo->channels = (drflac_uint8 )((importantProps & (((drflac_uint64)0x0000000E << 16) << 24)) >> 41) + 1;
- pStreamInfo->bitsPerSample = (drflac_uint8 )((importantProps & (((drflac_uint64)0x0000001F << 16) << 20)) >> 36) + 1;
- pStreamInfo->totalSampleCount = ((importantProps & ((((drflac_uint64)0x0000000F << 16) << 16) | 0xFFFFFFFF))) * pStreamInfo->channels;
- drflac_copy_memory(pStreamInfo->md5, md5, sizeof(md5));
+ pSubframe->pSamplesS32 = NULL;
- return DRFLAC_TRUE;
-}
+ switch (pSubframe->subframeType)
+ {
+ case DRFLAC_SUBFRAME_CONSTANT:
+ {
+ if (!drflac__seek_bits(bs, subframeBitsPerSample)) {
+ return DRFLAC_FALSE;
+ }
+ } break;
-drflac_bool32 drflac__read_and_decode_metadata(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, void* pUserData, void* pUserDataMD, drflac_uint64* pFirstFramePos, drflac_uint64* pSeektablePos, drflac_uint32* pSeektableSize)
-{
- /*
- We want to keep track of the byte position in the stream of the seektable. At the time of calling this function we know that
- we'll be sitting on byte 42.
- */
- drflac_uint64 runningFilePos = 42;
- drflac_uint64 seektablePos = 0;
- drflac_uint32 seektableSize = 0;
+ case DRFLAC_SUBFRAME_VERBATIM:
+ {
+ unsigned int bitsToSeek = frame->header.blockSizeInPCMFrames * subframeBitsPerSample;
+ if (!drflac__seek_bits(bs, bitsToSeek)) {
+ return DRFLAC_FALSE;
+ }
+ } break;
- for (;;) {
- drflac_metadata metadata;
- drflac_uint8 isLastBlock = 0;
- drflac_uint8 blockType;
- drflac_uint32 blockSize;
- if (!drflac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize)) {
- return DRFLAC_FALSE;
- }
- runningFilePos += 4;
+ case DRFLAC_SUBFRAME_FIXED:
+ {
+ unsigned int bitsToSeek = pSubframe->lpcOrder * subframeBitsPerSample;
+ if (!drflac__seek_bits(bs, bitsToSeek)) {
+ return DRFLAC_FALSE;
+ }
- metadata.type = blockType;
- metadata.pRawData = NULL;
- metadata.rawDataSize = 0;
+ if (!drflac__read_and_seek_residual(bs, frame->header.blockSizeInPCMFrames, pSubframe->lpcOrder)) {
+ return DRFLAC_FALSE;
+ }
+ } break;
- switch (blockType)
+ case DRFLAC_SUBFRAME_LPC:
{
- case DRFLAC_METADATA_BLOCK_TYPE_APPLICATION:
- {
- if (blockSize < 4) {
- return DRFLAC_FALSE;
- }
+ unsigned char lpcPrecision;
- if (onMeta) {
- void* pRawData = DRFLAC_MALLOC(blockSize);
- if (pRawData == NULL) {
- return DRFLAC_FALSE;
- }
+ unsigned int bitsToSeek = pSubframe->lpcOrder * subframeBitsPerSample;
+ if (!drflac__seek_bits(bs, bitsToSeek)) {
+ return DRFLAC_FALSE;
+ }
- if (onRead(pUserData, pRawData, blockSize) != blockSize) {
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
+ if (!drflac__read_uint8(bs, 4, &lpcPrecision)) {
+ return DRFLAC_FALSE;
+ }
+ if (lpcPrecision == 15) {
+ return DRFLAC_FALSE; /* Invalid. */
+ }
+ lpcPrecision += 1;
- metadata.pRawData = pRawData;
- metadata.rawDataSize = blockSize;
- metadata.data.application.id = drflac__be2host_32(*(drflac_uint32*)pRawData);
- metadata.data.application.pData = (const void*)((drflac_uint8*)pRawData + sizeof(drflac_uint32));
- metadata.data.application.dataSize = blockSize - sizeof(drflac_uint32);
- onMeta(pUserDataMD, &metadata);
- DRFLAC_FREE(pRawData);
- }
- } break;
+ bitsToSeek = (pSubframe->lpcOrder * lpcPrecision) + 5; /* +5 for shift. */
+ if (!drflac__seek_bits(bs, bitsToSeek)) {
+ return DRFLAC_FALSE;
+ }
- case DRFLAC_METADATA_BLOCK_TYPE_SEEKTABLE:
- {
- seektablePos = runningFilePos;
- seektableSize = blockSize;
+ if (!drflac__read_and_seek_residual(bs, frame->header.blockSizeInPCMFrames, pSubframe->lpcOrder)) {
+ return DRFLAC_FALSE;
+ }
+ } break;
- if (onMeta) {
- drflac_uint32 iSeekpoint;
- void* pRawData;
+ default: return DRFLAC_FALSE;
+ }
- pRawData = DRFLAC_MALLOC(blockSize);
- if (pRawData == NULL) {
- return DRFLAC_FALSE;
- }
+ return DRFLAC_TRUE;
+}
- if (onRead(pUserData, pRawData, blockSize) != blockSize) {
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
- metadata.pRawData = pRawData;
- metadata.rawDataSize = blockSize;
- metadata.data.seektable.seekpointCount = blockSize/sizeof(drflac_seekpoint);
- metadata.data.seektable.pSeekpoints = (const drflac_seekpoint*)pRawData;
+static DRFLAC_INLINE drflac_uint8 drflac__get_channel_count_from_channel_assignment(drflac_int8 channelAssignment)
+{
+ drflac_uint8 lookup[] = {1, 2, 3, 4, 5, 6, 7, 8, 2, 2, 2};
- /* Endian swap. */
- for (iSeekpoint = 0; iSeekpoint < metadata.data.seektable.seekpointCount; ++iSeekpoint) {
- drflac_seekpoint* pSeekpoint = (drflac_seekpoint*)pRawData + iSeekpoint;
- pSeekpoint->firstSample = drflac__be2host_64(pSeekpoint->firstSample);
- pSeekpoint->frameOffset = drflac__be2host_64(pSeekpoint->frameOffset);
- pSeekpoint->sampleCount = drflac__be2host_16(pSeekpoint->sampleCount);
- }
+ DRFLAC_ASSERT(channelAssignment <= 10);
+ return lookup[channelAssignment];
+}
- onMeta(pUserDataMD, &metadata);
+static drflac_result drflac__decode_flac_frame(drflac* pFlac)
+{
+ int channelCount;
+ int i;
+ drflac_uint8 paddingSizeInBits;
+ drflac_uint16 desiredCRC16;
+#ifndef DR_FLAC_NO_CRC
+ drflac_uint16 actualCRC16;
+#endif
- DRFLAC_FREE(pRawData);
- }
- } break;
+ /* This function should be called while the stream is sitting on the first byte after the frame header. */
+ DRFLAC_ZERO_MEMORY(pFlac->currentFLACFrame.subframes, sizeof(pFlac->currentFLACFrame.subframes));
- case DRFLAC_METADATA_BLOCK_TYPE_VORBIS_COMMENT:
- {
- if (blockSize < 8) {
- return DRFLAC_FALSE;
- }
+ /* The frame block size must never be larger than the maximum block size defined by the FLAC stream. */
+ if (pFlac->currentFLACFrame.header.blockSizeInPCMFrames > pFlac->maxBlockSizeInPCMFrames) {
+ return DRFLAC_ERROR;
+ }
- if (onMeta) {
- void* pRawData;
- const char* pRunningData;
- const char* pRunningDataEnd;
- drflac_uint32 i;
+ /* The number of channels in the frame must match the channel count from the STREAMINFO block. */
+ channelCount = drflac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment);
+ if (channelCount != (int)pFlac->channels) {
+ return DRFLAC_ERROR;
+ }
- pRawData = DRFLAC_MALLOC(blockSize);
- if (pRawData == NULL) {
- return DRFLAC_FALSE;
- }
+ for (i = 0; i < channelCount; ++i) {
+ if (!drflac__decode_subframe(&pFlac->bs, &pFlac->currentFLACFrame, i, pFlac->pDecodedSamples + (pFlac->currentFLACFrame.header.blockSizeInPCMFrames * i))) {
+ return DRFLAC_ERROR;
+ }
+ }
- if (onRead(pUserData, pRawData, blockSize) != blockSize) {
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
+ paddingSizeInBits = DRFLAC_CACHE_L1_BITS_REMAINING(&pFlac->bs) & 7;
+ if (paddingSizeInBits > 0) {
+ drflac_uint8 padding = 0;
+ if (!drflac__read_uint8(&pFlac->bs, paddingSizeInBits, &padding)) {
+ return DRFLAC_END_OF_STREAM;
+ }
+ }
- metadata.pRawData = pRawData;
- metadata.rawDataSize = blockSize;
+#ifndef DR_FLAC_NO_CRC
+ actualCRC16 = drflac__flush_crc16(&pFlac->bs);
+#endif
+ if (!drflac__read_uint16(&pFlac->bs, 16, &desiredCRC16)) {
+ return DRFLAC_END_OF_STREAM;
+ }
- pRunningData = (const char*)pRawData;
- pRunningDataEnd = (const char*)pRawData + blockSize;
+#ifndef DR_FLAC_NO_CRC
+ if (actualCRC16 != desiredCRC16) {
+ return DRFLAC_CRC_MISMATCH; /* CRC mismatch. */
+ }
+#endif
- metadata.data.vorbis_comment.vendorLength = drflac__le2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+ pFlac->currentFLACFrame.pcmFramesRemaining = pFlac->currentFLACFrame.header.blockSizeInPCMFrames;
- /* Need space for the rest of the block */
- if ((pRunningDataEnd - pRunningData) - 4 < (drflac_int64)metadata.data.vorbis_comment.vendorLength) { /* <-- Note the order of operations to avoid overflow to a valid value */
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
- metadata.data.vorbis_comment.vendor = pRunningData; pRunningData += metadata.data.vorbis_comment.vendorLength;
- metadata.data.vorbis_comment.commentCount = drflac__le2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+ return DRFLAC_SUCCESS;
+}
- /* Need space for 'commentCount' comments after the block, which at minimum is a drflac_uint32 per comment */
- if ((pRunningDataEnd - pRunningData) / sizeof(drflac_uint32) < metadata.data.vorbis_comment.commentCount) { /* <-- Note the order of operations to avoid overflow to a valid value */
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
- metadata.data.vorbis_comment.pComments = pRunningData;
+static drflac_result drflac__seek_flac_frame(drflac* pFlac)
+{
+ int channelCount;
+ int i;
+ drflac_uint16 desiredCRC16;
+#ifndef DR_FLAC_NO_CRC
+ drflac_uint16 actualCRC16;
+#endif
- /* Check that the comments section is valid before passing it to the callback */
- for (i = 0; i < metadata.data.vorbis_comment.commentCount; ++i) {
- drflac_uint32 commentLength;
+ channelCount = drflac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment);
+ for (i = 0; i < channelCount; ++i) {
+ if (!drflac__seek_subframe(&pFlac->bs, &pFlac->currentFLACFrame, i)) {
+ return DRFLAC_ERROR;
+ }
+ }
- if (pRunningDataEnd - pRunningData < 4) {
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
+ /* Padding. */
+ if (!drflac__seek_bits(&pFlac->bs, DRFLAC_CACHE_L1_BITS_REMAINING(&pFlac->bs) & 7)) {
+ return DRFLAC_ERROR;
+ }
- commentLength = drflac__le2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
- if (pRunningDataEnd - pRunningData < (drflac_int64)commentLength) { /* <-- Note the order of operations to avoid overflow to a valid value */
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
- pRunningData += commentLength;
- }
+ /* CRC. */
+#ifndef DR_FLAC_NO_CRC
+ actualCRC16 = drflac__flush_crc16(&pFlac->bs);
+#endif
+ if (!drflac__read_uint16(&pFlac->bs, 16, &desiredCRC16)) {
+ return DRFLAC_END_OF_STREAM;
+ }
- onMeta(pUserDataMD, &metadata);
+#ifndef DR_FLAC_NO_CRC
+ if (actualCRC16 != desiredCRC16) {
+ return DRFLAC_CRC_MISMATCH; /* CRC mismatch. */
+ }
+#endif
- DRFLAC_FREE(pRawData);
- }
- } break;
+ return DRFLAC_SUCCESS;
+}
- case DRFLAC_METADATA_BLOCK_TYPE_CUESHEET:
- {
- if (blockSize < 396) {
- return DRFLAC_FALSE;
- }
+static drflac_bool32 drflac__read_and_decode_next_flac_frame(drflac* pFlac)
+{
+ DRFLAC_ASSERT(pFlac != NULL);
- if (onMeta) {
- void* pRawData;
- const char* pRunningData;
- const char* pRunningDataEnd;
- drflac_uint8 iTrack;
- drflac_uint8 iIndex;
+ for (;;) {
+ drflac_result result;
- pRawData = DRFLAC_MALLOC(blockSize);
- if (pRawData == NULL) {
- return DRFLAC_FALSE;
- }
+ if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+ return DRFLAC_FALSE;
+ }
- if (onRead(pUserData, pRawData, blockSize) != blockSize) {
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
+ result = drflac__decode_flac_frame(pFlac);
+ if (result != DRFLAC_SUCCESS) {
+ if (result == DRFLAC_CRC_MISMATCH) {
+ continue; /* CRC mismatch. Skip to the next frame. */
+ } else {
+ return DRFLAC_FALSE;
+ }
+ }
- metadata.pRawData = pRawData;
- metadata.rawDataSize = blockSize;
+ return DRFLAC_TRUE;
+ }
+}
- pRunningData = (const char*)pRawData;
- pRunningDataEnd = (const char*)pRawData + blockSize;
+static void drflac__get_pcm_frame_range_of_current_flac_frame(drflac* pFlac, drflac_uint64* pFirstPCMFrame, drflac_uint64* pLastPCMFrame)
+{
+ drflac_uint64 firstPCMFrame;
+ drflac_uint64 lastPCMFrame;
- drflac_copy_memory(metadata.data.cuesheet.catalog, pRunningData, 128); pRunningData += 128;
- metadata.data.cuesheet.leadInSampleCount = drflac__be2host_64(*(const drflac_uint64*)pRunningData); pRunningData += 8;
- metadata.data.cuesheet.isCD = (pRunningData[0] & 0x80) != 0; pRunningData += 259;
- metadata.data.cuesheet.trackCount = pRunningData[0]; pRunningData += 1;
- metadata.data.cuesheet.pTrackData = pRunningData;
+ DRFLAC_ASSERT(pFlac != NULL);
- /* Check that the cuesheet tracks are valid before passing it to the callback */
- for (iTrack = 0; iTrack < metadata.data.cuesheet.trackCount; ++iTrack) {
- drflac_uint8 indexCount;
- drflac_uint32 indexPointSize;
+ firstPCMFrame = pFlac->currentFLACFrame.header.pcmFrameNumber;
+ if (firstPCMFrame == 0) {
+ firstPCMFrame = ((drflac_uint64)pFlac->currentFLACFrame.header.flacFrameNumber) * pFlac->maxBlockSizeInPCMFrames;
+ }
- if (pRunningDataEnd - pRunningData < 36) {
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
+ lastPCMFrame = firstPCMFrame + pFlac->currentFLACFrame.header.blockSizeInPCMFrames;
+ if (lastPCMFrame > 0) {
+ lastPCMFrame -= 1; /* Needs to be zero based. */
+ }
- /* Skip to the index point count */
- pRunningData += 35;
- indexCount = pRunningData[0]; pRunningData += 1;
- indexPointSize = indexCount * sizeof(drflac_cuesheet_track_index);
- if (pRunningDataEnd - pRunningData < (drflac_int64)indexPointSize) {
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
+ if (pFirstPCMFrame) {
+ *pFirstPCMFrame = firstPCMFrame;
+ }
+ if (pLastPCMFrame) {
+ *pLastPCMFrame = lastPCMFrame;
+ }
+}
- /* Endian swap. */
- for (iIndex = 0; iIndex < indexCount; ++iIndex) {
- drflac_cuesheet_track_index* pTrack = (drflac_cuesheet_track_index*)pRunningData;
- pRunningData += sizeof(drflac_cuesheet_track_index);
- pTrack->offset = drflac__be2host_64(pTrack->offset);
- }
- }
+static drflac_bool32 drflac__seek_to_first_frame(drflac* pFlac)
+{
+ drflac_bool32 result;
- onMeta(pUserDataMD, &metadata);
+ DRFLAC_ASSERT(pFlac != NULL);
- DRFLAC_FREE(pRawData);
- }
- } break;
+ result = drflac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes);
- case DRFLAC_METADATA_BLOCK_TYPE_PICTURE:
- {
- if (blockSize < 32) {
- return DRFLAC_FALSE;
- }
+ DRFLAC_ZERO_MEMORY(&pFlac->currentFLACFrame, sizeof(pFlac->currentFLACFrame));
+ pFlac->currentPCMFrame = 0;
- if (onMeta) {
- void* pRawData;
- const char* pRunningData;
- const char* pRunningDataEnd;
+ return result;
+}
- pRawData = DRFLAC_MALLOC(blockSize);
- if (pRawData == NULL) {
- return DRFLAC_FALSE;
- }
+static DRFLAC_INLINE drflac_result drflac__seek_to_next_flac_frame(drflac* pFlac)
+{
+ /* This function should only ever be called while the decoder is sitting on the first byte past the FRAME_HEADER section. */
+ DRFLAC_ASSERT(pFlac != NULL);
+ return drflac__seek_flac_frame(pFlac);
+}
- if (onRead(pUserData, pRawData, blockSize) != blockSize) {
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
- metadata.pRawData = pRawData;
- metadata.rawDataSize = blockSize;
+drflac_uint64 drflac__seek_forward_by_pcm_frames(drflac* pFlac, drflac_uint64 pcmFramesToSeek)
+{
+ drflac_uint64 pcmFramesRead = 0;
+ while (pcmFramesToSeek > 0) {
+ if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) {
+ if (!drflac__read_and_decode_next_flac_frame(pFlac)) {
+ break; /* Couldn't read the next frame, so just break from the loop and return. */
+ }
+ } else {
+ if (pFlac->currentFLACFrame.pcmFramesRemaining > pcmFramesToSeek) {
+ pcmFramesRead += pcmFramesToSeek;
+ pFlac->currentFLACFrame.pcmFramesRemaining -= (drflac_uint32)pcmFramesToSeek; /* <-- Safe cast. Will always be < currentFrame.pcmFramesRemaining < 65536. */
+ pcmFramesToSeek = 0;
+ } else {
+ pcmFramesRead += pFlac->currentFLACFrame.pcmFramesRemaining;
+ pcmFramesToSeek -= pFlac->currentFLACFrame.pcmFramesRemaining;
+ pFlac->currentFLACFrame.pcmFramesRemaining = 0;
+ }
+ }
+ }
- pRunningData = (const char*)pRawData;
- pRunningDataEnd = (const char*)pRawData + blockSize;
+ pFlac->currentPCMFrame += pcmFramesRead;
+ return pcmFramesRead;
+}
- metadata.data.picture.type = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
- metadata.data.picture.mimeLength = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
- /* Need space for the rest of the block */
- if ((pRunningDataEnd - pRunningData) - 24 < (drflac_int64)metadata.data.picture.mimeLength) { /* <-- Note the order of operations to avoid overflow to a valid value */
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
- metadata.data.picture.mime = pRunningData; pRunningData += metadata.data.picture.mimeLength;
- metadata.data.picture.descriptionLength = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+static drflac_bool32 drflac__seek_to_pcm_frame__brute_force(drflac* pFlac, drflac_uint64 pcmFrameIndex)
+{
+ drflac_bool32 isMidFrame = DRFLAC_FALSE;
+ drflac_uint64 runningPCMFrameCount;
- /* Need space for the rest of the block */
- if ((pRunningDataEnd - pRunningData) - 20 < (drflac_int64)metadata.data.picture.descriptionLength) { /* <-- Note the order of operations to avoid overflow to a valid value */
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
- metadata.data.picture.description = pRunningData; pRunningData += metadata.data.picture.descriptionLength;
- metadata.data.picture.width = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
- metadata.data.picture.height = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
- metadata.data.picture.colorDepth = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
- metadata.data.picture.indexColorCount = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
- metadata.data.picture.pictureDataSize = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
- metadata.data.picture.pPictureData = (const drflac_uint8*)pRunningData;
+ DRFLAC_ASSERT(pFlac != NULL);
- /* Need space for the picture after the block */
- if (pRunningDataEnd - pRunningData < (drflac_int64)metadata.data.picture.pictureDataSize) { /* <-- Note the order of operations to avoid overflow to a valid value */
- DRFLAC_FREE(pRawData);
- return DRFLAC_FALSE;
- }
+ /* If we are seeking forward we start from the current position. Otherwise we need to start all the way from the start of the file. */
+ if (pcmFrameIndex >= pFlac->currentPCMFrame) {
+ /* Seeking forward. Need to seek from the current position. */
+ runningPCMFrameCount = pFlac->currentPCMFrame;
- onMeta(pUserDataMD, &metadata);
+ /* The frame header for the first frame may not yet have been read. We need to do that if necessary. */
+ if (pFlac->currentPCMFrame == 0 && pFlac->currentFLACFrame.pcmFramesRemaining == 0) {
+ if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+ return DRFLAC_FALSE;
+ }
+ } else {
+ isMidFrame = DRFLAC_TRUE;
+ }
+ } else {
+ /* Seeking backwards. Need to seek from the start of the file. */
+ runningPCMFrameCount = 0;
- DRFLAC_FREE(pRawData);
- }
- } break;
+ /* Move back to the start. */
+ if (!drflac__seek_to_first_frame(pFlac)) {
+ return DRFLAC_FALSE;
+ }
- case DRFLAC_METADATA_BLOCK_TYPE_PADDING:
- {
- if (onMeta) {
- metadata.data.padding.unused = 0;
+ /* Decode the first frame in preparation for sample-exact seeking below. */
+ if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+ return DRFLAC_FALSE;
+ }
+ }
- /* Padding doesn't have anything meaningful in it, so just skip over it, but make sure the caller is aware of it by firing the callback. */
- if (!onSeek(pUserData, blockSize, drflac_seek_origin_current)) {
- isLastBlock = DRFLAC_TRUE; /* An error occurred while seeking. Attempt to recover by treating this as the last block which will in turn terminate the loop. */
- } else {
- onMeta(pUserDataMD, &metadata);
- }
- }
- } break;
+ /*
+ We need to as quickly as possible find the frame that contains the target sample. To do this, we iterate over each frame and inspect its
+ header. If based on the header we can determine that the frame contains the sample, we do a full decode of that frame.
+ */
+ for (;;) {
+ drflac_uint64 pcmFrameCountInThisFLACFrame;
+ drflac_uint64 firstPCMFrameInFLACFrame = 0;
+ drflac_uint64 lastPCMFrameInFLACFrame = 0;
- case DRFLAC_METADATA_BLOCK_TYPE_INVALID:
- {
- /* Invalid chunk. Just skip over this one. */
- if (onMeta) {
- if (!onSeek(pUserData, blockSize, drflac_seek_origin_current)) {
- isLastBlock = DRFLAC_TRUE; /* An error occurred while seeking. Attempt to recover by treating this as the last block which will in turn terminate the loop. */
- }
- }
- } break;
+ drflac__get_pcm_frame_range_of_current_flac_frame(pFlac, &firstPCMFrameInFLACFrame, &lastPCMFrameInFLACFrame);
- default:
- {
- /*
- It's an unknown chunk, but not necessarily invalid. There's a chance more metadata blocks might be defined later on, so we
- can at the very least report the chunk to the application and let it look at the raw data.
- */
- if (onMeta) {
- void* pRawData = DRFLAC_MALLOC(blockSize);
- if (pRawData == NULL) {
+ pcmFrameCountInThisFLACFrame = (lastPCMFrameInFLACFrame - firstPCMFrameInFLACFrame) + 1;
+ if (pcmFrameIndex < (runningPCMFrameCount + pcmFrameCountInThisFLACFrame)) {
+ /*
+ The sample should be in this frame. We need to fully decode it, however if it's an invalid frame (a CRC mismatch), we need to pretend
+ it never existed and keep iterating.
+ */
+ drflac_uint64 pcmFramesToDecode = pcmFrameIndex - runningPCMFrameCount;
+
+ if (!isMidFrame) {
+ drflac_result result = drflac__decode_flac_frame(pFlac);
+ if (result == DRFLAC_SUCCESS) {
+ /* The frame is valid. We just need to skip over some samples to ensure it's sample-exact. */
+ return drflac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; /* <-- If this fails, something bad has happened (it should never fail). */
+ } else {
+ if (result == DRFLAC_CRC_MISMATCH) {
+ goto next_iteration; /* CRC mismatch. Pretend this frame never existed. */
+ } else {
return DRFLAC_FALSE;
}
-
- if (onRead(pUserData, pRawData, blockSize) != blockSize) {
- DRFLAC_FREE(pRawData);
+ }
+ } else {
+ /* We started seeking mid-frame which means we need to skip the frame decoding part. */
+ return drflac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode;
+ }
+ } else {
+ /*
+ It's not in this frame. We need to seek past the frame, but check if there was a CRC mismatch. If so, we pretend this
+ frame never existed and leave the running sample count untouched.
+ */
+ if (!isMidFrame) {
+ drflac_result result = drflac__seek_to_next_flac_frame(pFlac);
+ if (result == DRFLAC_SUCCESS) {
+ runningPCMFrameCount += pcmFrameCountInThisFLACFrame;
+ } else {
+ if (result == DRFLAC_CRC_MISMATCH) {
+ goto next_iteration; /* CRC mismatch. Pretend this frame never existed. */
+ } else {
return DRFLAC_FALSE;
}
-
- metadata.pRawData = pRawData;
- metadata.rawDataSize = blockSize;
- onMeta(pUserDataMD, &metadata);
-
- DRFLAC_FREE(pRawData);
}
- } break;
- }
+ } else {
+ /*
+ We started seeking mid-frame which means we need to seek by reading to the end of the frame instead of with
+ drflac__seek_to_next_flac_frame() which only works if the decoder is sitting on the byte just after the frame header.
+ */
+ runningPCMFrameCount += pFlac->currentFLACFrame.pcmFramesRemaining;
+ pFlac->currentFLACFrame.pcmFramesRemaining = 0;
+ isMidFrame = DRFLAC_FALSE;
+ }
- /* If we're not handling metadata, just skip over the block. If we are, it will have been handled earlier in the switch statement above. */
- if (onMeta == NULL && blockSize > 0) {
- if (!onSeek(pUserData, blockSize, drflac_seek_origin_current)) {
- isLastBlock = DRFLAC_TRUE;
+ /* If we are seeking to the end of the file and we've just hit it, we're done. */
+ if (pcmFrameIndex == pFlac->totalPCMFrameCount && runningPCMFrameCount == pFlac->totalPCMFrameCount) {
+ return DRFLAC_TRUE;
}
}
- runningFilePos += blockSize;
- if (isLastBlock) {
- break;
+ next_iteration:
+ /* Grab the next frame in preparation for the next iteration. */
+ if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+ return DRFLAC_FALSE;
}
}
+}
- *pSeektablePos = seektablePos;
- *pSeektableSize = seektableSize;
- *pFirstFramePos = runningFilePos;
- return DRFLAC_TRUE;
-}
+#if !defined(DR_FLAC_NO_CRC)
+/*
+We use an average compression ratio to determine our approximate start location. FLAC files are generally about 50%-70% the size of their
+uncompressed counterparts so we'll use this as a basis. I'm going to split the middle and use a factor of 0.6 to determine the starting
+location.
+*/
+#define DRFLAC_BINARY_SEARCH_APPROX_COMPRESSION_RATIO 0.6f
-drflac_bool32 drflac__init_private__native(drflac_init_info* pInit, drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, void* pUserData, void* pUserDataMD, drflac_bool32 relaxed)
+static drflac_bool32 drflac__seek_to_approximate_flac_frame_to_byte(drflac* pFlac, drflac_uint64 targetByte, drflac_uint64 rangeLo, drflac_uint64 rangeHi, drflac_uint64* pLastSuccessfulSeekOffset)
{
- /* Pre Condition: The bit stream should be sitting just past the 4-byte id header. */
+ DRFLAC_ASSERT(pFlac != NULL);
+ DRFLAC_ASSERT(pLastSuccessfulSeekOffset != NULL);
+ DRFLAC_ASSERT(targetByte >= rangeLo);
+ DRFLAC_ASSERT(targetByte <= rangeHi);
- drflac_uint8 isLastBlock;
- drflac_uint8 blockType;
- drflac_uint32 blockSize;
+ *pLastSuccessfulSeekOffset = pFlac->firstFLACFramePosInBytes;
- (void)onSeek;
+ for (;;) {
+ /* When seeking to a byte, failure probably means we've attempted to seek beyond the end of the stream. To counter this we just halve it each attempt. */
+ if (!drflac__seek_to_byte(&pFlac->bs, targetByte)) {
+ /* If we couldn't even seek to the first byte in the stream we have a problem. Just abandon the whole thing. */
+ if (targetByte == 0) {
+ drflac__seek_to_first_frame(pFlac); /* Try to recover. */
+ return DRFLAC_FALSE;
+ }
- pInit->container = drflac_container_native;
+ /* Halve the byte location and continue. */
+ targetByte = rangeLo + ((rangeHi - rangeLo)/2);
+ rangeHi = targetByte;
+ } else {
+ /* Getting here should mean that we have seeked to an appropriate byte. */
- /* The first metadata block should be the STREAMINFO block. */
- if (!drflac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize)) {
- return DRFLAC_FALSE;
- }
+ /* Clear the details of the FLAC frame so we don't misreport data. */
+ DRFLAC_ZERO_MEMORY(&pFlac->currentFLACFrame, sizeof(pFlac->currentFLACFrame));
- if (blockType != DRFLAC_METADATA_BLOCK_TYPE_STREAMINFO || blockSize != 34) {
- if (!relaxed) {
- /* We're opening in strict mode and the first block is not the STREAMINFO block. Error. */
- return DRFLAC_FALSE;
- } else {
/*
- Relaxed mode. To open from here we need to just find the first frame and set the sample rate, etc. to whatever is defined
- for that frame.
+ Now seek to the next FLAC frame. We need to decode the entire frame (not just the header) because it's possible for the header to incorrectly pass the
+ CRC check and return bad data. We need to decode the entire frame to be more certain. Although this seems unlikely, this has happened to me in testing
+            to it needs to stay this way for now. (Note: "to" reads as a typo for "so" — so it needs to stay this way for now.)
*/
- pInit->hasStreamInfoBlock = DRFLAC_FALSE;
- pInit->hasMetadataBlocks = DRFLAC_FALSE;
-
- if (!drflac__read_next_flac_frame_header(&pInit->bs, 0, &pInit->firstFrameHeader)) {
- return DRFLAC_FALSE; /* Couldn't find a frame. */
- }
-
- if (pInit->firstFrameHeader.bitsPerSample == 0) {
- return DRFLAC_FALSE; /* Failed to initialize because the first frame depends on the STREAMINFO block, which does not exist. */
+#if 1
+ if (!drflac__read_and_decode_next_flac_frame(pFlac)) {
+ /* Halve the byte location and continue. */
+ targetByte = rangeLo + ((rangeHi - rangeLo)/2);
+ rangeHi = targetByte;
+ } else {
+ break;
}
-
- pInit->sampleRate = pInit->firstFrameHeader.sampleRate;
- pInit->channels = drflac__get_channel_count_from_channel_assignment(pInit->firstFrameHeader.channelAssignment);
- pInit->bitsPerSample = pInit->firstFrameHeader.bitsPerSample;
- pInit->maxBlockSize = 65535; /* <-- See notes here: https://xiph.org/flac/format.html#metadata_block_streaminfo */
+#else
+ if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+ /* Halve the byte location and continue. */
+ targetByte = rangeLo + ((rangeHi - rangeLo)/2);
+ rangeHi = targetByte;
+ } else {
+ break;
+ }
+#endif
+ }
+ }
+
+ /* The current PCM frame needs to be updated based on the frame we just seeked to. */
+ drflac__get_pcm_frame_range_of_current_flac_frame(pFlac, &pFlac->currentPCMFrame, NULL);
+
+ DRFLAC_ASSERT(targetByte <= rangeHi);
+
+ *pLastSuccessfulSeekOffset = targetByte;
+ return DRFLAC_TRUE;
+}
+
+static drflac_bool32 drflac__decode_flac_frame_and_seek_forward_by_pcm_frames(drflac* pFlac, drflac_uint64 offset)
+{
+ /* This section of code would be used if we were only decoding the FLAC frame header when calling drflac__seek_to_approximate_flac_frame_to_byte(). */
+#if 0
+ if (drflac__decode_flac_frame(pFlac) != DRFLAC_SUCCESS) {
+ /* We failed to decode this frame which may be due to it being corrupt. We'll just use the next valid FLAC frame. */
+ if (drflac__read_and_decode_next_flac_frame(pFlac) == DRFLAC_FALSE) {
+ return DRFLAC_FALSE;
+ }
+ }
+#endif
+
+ return drflac__seek_forward_by_pcm_frames(pFlac, offset) == offset;
+}
+
+
+static drflac_bool32 drflac__seek_to_pcm_frame__binary_search_internal(drflac* pFlac, drflac_uint64 pcmFrameIndex, drflac_uint64 byteRangeLo, drflac_uint64 byteRangeHi)
+{
+ /* This assumes pFlac->currentPCMFrame is sitting on byteRangeLo upon entry. */
+
+ drflac_uint64 targetByte;
+ drflac_uint64 pcmRangeLo = pFlac->totalPCMFrameCount;
+ drflac_uint64 pcmRangeHi = 0;
+ drflac_uint64 lastSuccessfulSeekOffset = (drflac_uint64)-1;
+ drflac_uint64 closestSeekOffsetBeforeTargetPCMFrame = byteRangeLo;
+ drflac_uint32 seekForwardThreshold = (pFlac->maxBlockSizeInPCMFrames != 0) ? pFlac->maxBlockSizeInPCMFrames*2 : 4096;
+
+ targetByte = byteRangeLo + (drflac_uint64)(((pcmFrameIndex - pFlac->currentPCMFrame) * pFlac->channels * pFlac->bitsPerSample/8.0f) * DRFLAC_BINARY_SEARCH_APPROX_COMPRESSION_RATIO);
+ if (targetByte > byteRangeHi) {
+ targetByte = byteRangeHi;
+ }
+
+ for (;;) {
+ if (drflac__seek_to_approximate_flac_frame_to_byte(pFlac, targetByte, byteRangeLo, byteRangeHi, &lastSuccessfulSeekOffset)) {
+ /* We found a FLAC frame. We need to check if it contains the sample we're looking for. */
+ drflac_uint64 newPCMRangeLo;
+ drflac_uint64 newPCMRangeHi;
+ drflac__get_pcm_frame_range_of_current_flac_frame(pFlac, &newPCMRangeLo, &newPCMRangeHi);
+
+ /* If we selected the same frame, it means we should be pretty close. Just decode the rest. */
+ if (pcmRangeLo == newPCMRangeLo) {
+ if (!drflac__seek_to_approximate_flac_frame_to_byte(pFlac, closestSeekOffsetBeforeTargetPCMFrame, closestSeekOffsetBeforeTargetPCMFrame, byteRangeHi, &lastSuccessfulSeekOffset)) {
+ break; /* Failed to seek to closest frame. */
+ }
+
+ if (drflac__decode_flac_frame_and_seek_forward_by_pcm_frames(pFlac, pcmFrameIndex - pFlac->currentPCMFrame)) {
+ return DRFLAC_TRUE;
+ } else {
+ break; /* Failed to seek forward. */
+ }
+ }
+
+ pcmRangeLo = newPCMRangeLo;
+ pcmRangeHi = newPCMRangeHi;
+
+ if (pcmRangeLo <= pcmFrameIndex && pcmRangeHi >= pcmFrameIndex) {
+ /* The target PCM frame is in this FLAC frame. */
+ if (drflac__decode_flac_frame_and_seek_forward_by_pcm_frames(pFlac, pcmFrameIndex - pFlac->currentPCMFrame) ) {
+ return DRFLAC_TRUE;
+ } else {
+ break; /* Failed to seek to FLAC frame. */
+ }
+ } else {
+ const float approxCompressionRatio = (lastSuccessfulSeekOffset - pFlac->firstFLACFramePosInBytes) / (pcmRangeLo * pFlac->channels * pFlac->bitsPerSample/8.0f);
+
+ if (pcmRangeLo > pcmFrameIndex) {
+ /* We seeked too far forward. We need to move our target byte backward and try again. */
+ byteRangeHi = lastSuccessfulSeekOffset;
+ if (byteRangeLo > byteRangeHi) {
+ byteRangeLo = byteRangeHi;
+ }
+
+ targetByte = byteRangeLo + ((byteRangeHi - byteRangeLo) / 2);
+ if (targetByte < byteRangeLo) {
+ targetByte = byteRangeLo;
+ }
+ } else /*if (pcmRangeHi < pcmFrameIndex)*/ {
+ /* We didn't seek far enough. We need to move our target byte forward and try again. */
+
+ /* If we're close enough we can just seek forward. */
+ if ((pcmFrameIndex - pcmRangeLo) < seekForwardThreshold) {
+ if (drflac__decode_flac_frame_and_seek_forward_by_pcm_frames(pFlac, pcmFrameIndex - pFlac->currentPCMFrame)) {
+ return DRFLAC_TRUE;
+ } else {
+ break; /* Failed to seek to FLAC frame. */
+ }
+ } else {
+ byteRangeLo = lastSuccessfulSeekOffset;
+ if (byteRangeHi < byteRangeLo) {
+ byteRangeHi = byteRangeLo;
+ }
+
+ targetByte = lastSuccessfulSeekOffset + (drflac_uint64)(((pcmFrameIndex-pcmRangeLo) * pFlac->channels * pFlac->bitsPerSample/8.0f) * approxCompressionRatio);
+ if (targetByte > byteRangeHi) {
+ targetByte = byteRangeHi;
+ }
+
+ if (closestSeekOffsetBeforeTargetPCMFrame < lastSuccessfulSeekOffset) {
+ closestSeekOffsetBeforeTargetPCMFrame = lastSuccessfulSeekOffset;
+ }
+ }
+ }
+ }
+ } else {
+            /* Getting here is really bad. We just recover as best we can by moving to the first frame in the stream, and then aborting. */
+ break;
+ }
+ }
+
+ drflac__seek_to_first_frame(pFlac); /* <-- Try to recover. */
+ return DRFLAC_FALSE;
+}
+
+static drflac_bool32 drflac__seek_to_pcm_frame__binary_search(drflac* pFlac, drflac_uint64 pcmFrameIndex)
+{
+ drflac_uint64 byteRangeLo;
+ drflac_uint64 byteRangeHi;
+ drflac_uint32 seekForwardThreshold = (pFlac->maxBlockSizeInPCMFrames != 0) ? pFlac->maxBlockSizeInPCMFrames*2 : 4096;
+
+    /* Our algorithm currently assumes the stream is positioned at the first FLAC frame, so seek there before searching. */
+ if (drflac__seek_to_first_frame(pFlac) == DRFLAC_FALSE) {
+ return DRFLAC_FALSE;
+ }
+
+ /* If we're close enough to the start, just move to the start and seek forward. */
+ if (pcmFrameIndex < seekForwardThreshold) {
+ return drflac__seek_forward_by_pcm_frames(pFlac, pcmFrameIndex) == pcmFrameIndex;
+ }
+
+ /*
+ Our starting byte range is the byte position of the first FLAC frame and the approximate end of the file as if it were completely uncompressed. This ensures
+ the entire file is included, even though most of the time it'll exceed the end of the actual stream. This is OK as the frame searching logic will handle it.
+ */
+ byteRangeLo = pFlac->firstFLACFramePosInBytes;
+ byteRangeHi = pFlac->firstFLACFramePosInBytes + (drflac_uint64)(pFlac->totalPCMFrameCount * pFlac->channels * pFlac->bitsPerSample/8.0f);
+
+ return drflac__seek_to_pcm_frame__binary_search_internal(pFlac, pcmFrameIndex, byteRangeLo, byteRangeHi);
+}
+#endif /* !DR_FLAC_NO_CRC */
+
+static drflac_bool32 drflac__seek_to_pcm_frame__seek_table(drflac* pFlac, drflac_uint64 pcmFrameIndex)
+{
+ drflac_uint32 iClosestSeekpoint = 0;
+ drflac_bool32 isMidFrame = DRFLAC_FALSE;
+ drflac_uint64 runningPCMFrameCount;
+ drflac_uint32 iSeekpoint;
+
+
+ DRFLAC_ASSERT(pFlac != NULL);
+
+ if (pFlac->pSeekpoints == NULL || pFlac->seekpointCount == 0) {
+ return DRFLAC_FALSE;
+ }
+
+ for (iSeekpoint = 0; iSeekpoint < pFlac->seekpointCount; ++iSeekpoint) {
+ if (pFlac->pSeekpoints[iSeekpoint].firstPCMFrame >= pcmFrameIndex) {
+ break;
+ }
+
+ iClosestSeekpoint = iSeekpoint;
+ }
+
+#if !defined(DR_FLAC_NO_CRC)
+ /* At this point we should know the closest seek point. We can use a binary search for this. We need to know the total sample count for this. */
+ if (pFlac->totalPCMFrameCount > 0) {
+ drflac_uint64 byteRangeLo;
+ drflac_uint64 byteRangeHi;
+
+ byteRangeHi = pFlac->firstFLACFramePosInBytes + (drflac_uint64)(pFlac->totalPCMFrameCount * pFlac->channels * pFlac->bitsPerSample/8.0f);
+ byteRangeLo = pFlac->firstFLACFramePosInBytes + pFlac->pSeekpoints[iClosestSeekpoint].flacFrameOffset;
+
+ if (iClosestSeekpoint < pFlac->seekpointCount-1) {
+            if (pFlac->pSeekpoints[iClosestSeekpoint+1].firstPCMFrame != (((drflac_uint64)0xFFFFFFFF << 32) | 0xFFFFFFFF)) { /* Is it a placeholder seekpoint? */
+ byteRangeHi = pFlac->firstFLACFramePosInBytes + pFlac->pSeekpoints[iClosestSeekpoint+1].flacFrameOffset-1; /* Must be zero based. */
+ }
+ }
+
+ if (drflac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes + pFlac->pSeekpoints[iClosestSeekpoint].flacFrameOffset)) {
+ if (drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+ drflac__get_pcm_frame_range_of_current_flac_frame(pFlac, &pFlac->currentPCMFrame, NULL);
+
+ if (drflac__seek_to_pcm_frame__binary_search_internal(pFlac, pcmFrameIndex, byteRangeLo, byteRangeHi)) {
+ return DRFLAC_TRUE;
+ }
+ }
+ }
+ }
+#endif /* !DR_FLAC_NO_CRC */
+
+ /* Getting here means we need to use a slower algorithm because the binary search method failed or cannot be used. */
+
+ /*
+ If we are seeking forward and the closest seekpoint is _before_ the current sample, we just seek forward from where we are. Otherwise we start seeking
+ from the seekpoint's first sample.
+ */
+ if (pcmFrameIndex >= pFlac->currentPCMFrame && pFlac->pSeekpoints[iClosestSeekpoint].firstPCMFrame <= pFlac->currentPCMFrame) {
+ /* Optimized case. Just seek forward from where we are. */
+ runningPCMFrameCount = pFlac->currentPCMFrame;
+
+ /* The frame header for the first frame may not yet have been read. We need to do that if necessary. */
+ if (pFlac->currentPCMFrame == 0 && pFlac->currentFLACFrame.pcmFramesRemaining == 0) {
+ if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+ return DRFLAC_FALSE;
+ }
+ } else {
+ isMidFrame = DRFLAC_TRUE;
+ }
+ } else {
+ /* Slower case. Seek to the start of the seekpoint and then seek forward from there. */
+ runningPCMFrameCount = pFlac->pSeekpoints[iClosestSeekpoint].firstPCMFrame;
+
+ if (!drflac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes + pFlac->pSeekpoints[iClosestSeekpoint].flacFrameOffset)) {
+ return DRFLAC_FALSE;
+ }
+
+ /* Grab the frame the seekpoint is sitting on in preparation for the sample-exact seeking below. */
+ if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+ return DRFLAC_FALSE;
+ }
+ }
+
+ for (;;) {
+ drflac_uint64 pcmFrameCountInThisFLACFrame;
+ drflac_uint64 firstPCMFrameInFLACFrame = 0;
+ drflac_uint64 lastPCMFrameInFLACFrame = 0;
+
+ drflac__get_pcm_frame_range_of_current_flac_frame(pFlac, &firstPCMFrameInFLACFrame, &lastPCMFrameInFLACFrame);
+
+ pcmFrameCountInThisFLACFrame = (lastPCMFrameInFLACFrame - firstPCMFrameInFLACFrame) + 1;
+ if (pcmFrameIndex < (runningPCMFrameCount + pcmFrameCountInThisFLACFrame)) {
+ /*
+ The sample should be in this frame. We need to fully decode it, but if it's an invalid frame (a CRC mismatch) we need to pretend
+ it never existed and keep iterating.
+ */
+ drflac_uint64 pcmFramesToDecode = pcmFrameIndex - runningPCMFrameCount;
+
+ if (!isMidFrame) {
+ drflac_result result = drflac__decode_flac_frame(pFlac);
+ if (result == DRFLAC_SUCCESS) {
+ /* The frame is valid. We just need to skip over some samples to ensure it's sample-exact. */
+ return drflac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; /* <-- If this fails, something bad has happened (it should never fail). */
+ } else {
+ if (result == DRFLAC_CRC_MISMATCH) {
+ goto next_iteration; /* CRC mismatch. Pretend this frame never existed. */
+ } else {
+ return DRFLAC_FALSE;
+ }
+ }
+ } else {
+ /* We started seeking mid-frame which means we need to skip the frame decoding part. */
+ return drflac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode;
+ }
+ } else {
+ /*
+ It's not in this frame. We need to seek past the frame, but check if there was a CRC mismatch. If so, we pretend this
+ frame never existed and leave the running sample count untouched.
+ */
+ if (!isMidFrame) {
+ drflac_result result = drflac__seek_to_next_flac_frame(pFlac);
+ if (result == DRFLAC_SUCCESS) {
+ runningPCMFrameCount += pcmFrameCountInThisFLACFrame;
+ } else {
+ if (result == DRFLAC_CRC_MISMATCH) {
+ goto next_iteration; /* CRC mismatch. Pretend this frame never existed. */
+ } else {
+ return DRFLAC_FALSE;
+ }
+ }
+ } else {
+ /*
+ We started seeking mid-frame which means we need to seek by reading to the end of the frame instead of with
+ drflac__seek_to_next_flac_frame() which only works if the decoder is sitting on the byte just after the frame header.
+ */
+ runningPCMFrameCount += pFlac->currentFLACFrame.pcmFramesRemaining;
+ pFlac->currentFLACFrame.pcmFramesRemaining = 0;
+ isMidFrame = DRFLAC_FALSE;
+ }
+
+ /* If we are seeking to the end of the file and we've just hit it, we're done. */
+ if (pcmFrameIndex == pFlac->totalPCMFrameCount && runningPCMFrameCount == pFlac->totalPCMFrameCount) {
+ return DRFLAC_TRUE;
+ }
+ }
+
+ next_iteration:
+ /* Grab the next frame in preparation for the next iteration. */
+ if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+ return DRFLAC_FALSE;
+ }
+ }
+}
+
+
+#ifndef DR_FLAC_NO_OGG
+typedef struct
+{
+ drflac_uint8 capturePattern[4]; /* Should be "OggS" */
+ drflac_uint8 structureVersion; /* Always 0. */
+ drflac_uint8 headerType;
+ drflac_uint64 granulePosition;
+ drflac_uint32 serialNumber;
+ drflac_uint32 sequenceNumber;
+ drflac_uint32 checksum;
+ drflac_uint8 segmentCount;
+ drflac_uint8 segmentTable[255];
+} drflac_ogg_page_header;
+#endif
+
+typedef struct
+{
+ drflac_read_proc onRead;
+ drflac_seek_proc onSeek;
+ drflac_meta_proc onMeta;
+ drflac_container container;
+ void* pUserData;
+ void* pUserDataMD;
+ drflac_uint32 sampleRate;
+ drflac_uint8 channels;
+ drflac_uint8 bitsPerSample;
+ drflac_uint64 totalPCMFrameCount;
+ drflac_uint16 maxBlockSizeInPCMFrames;
+ drflac_uint64 runningFilePos;
+ drflac_bool32 hasStreamInfoBlock;
+ drflac_bool32 hasMetadataBlocks;
+ drflac_bs bs; /* <-- A bit streamer is required for loading data during initialization. */
+    drflac_frame_header firstFrameHeader; /* <-- The header of the first frame that was read during relaxed initialization. Only set if there is no STREAMINFO block. */
+
+#ifndef DR_FLAC_NO_OGG
+ drflac_uint32 oggSerial;
+ drflac_uint64 oggFirstBytePos;
+ drflac_ogg_page_header oggBosHeader;
+#endif
+} drflac_init_info;
+
+static DRFLAC_INLINE void drflac__decode_block_header(drflac_uint32 blockHeader, drflac_uint8* isLastBlock, drflac_uint8* blockType, drflac_uint32* blockSize)
+{
+ blockHeader = drflac__be2host_32(blockHeader);
+ *isLastBlock = (blockHeader & 0x80000000UL) >> 31;
+ *blockType = (blockHeader & 0x7F000000UL) >> 24;
+ *blockSize = (blockHeader & 0x00FFFFFFUL);
+}
+
+static DRFLAC_INLINE drflac_bool32 drflac__read_and_decode_block_header(drflac_read_proc onRead, void* pUserData, drflac_uint8* isLastBlock, drflac_uint8* blockType, drflac_uint32* blockSize)
+{
+ drflac_uint32 blockHeader;
+
+ *blockSize = 0;
+ if (onRead(pUserData, &blockHeader, 4) != 4) {
+ return DRFLAC_FALSE;
+ }
+
+ drflac__decode_block_header(blockHeader, isLastBlock, blockType, blockSize);
+ return DRFLAC_TRUE;
+}
+
+drflac_bool32 drflac__read_streaminfo(drflac_read_proc onRead, void* pUserData, drflac_streaminfo* pStreamInfo)
+{
+ drflac_uint32 blockSizes;
+ drflac_uint64 frameSizes = 0;
+ drflac_uint64 importantProps;
+ drflac_uint8 md5[16];
+
+ /* min/max block size. */
+ if (onRead(pUserData, &blockSizes, 4) != 4) {
+ return DRFLAC_FALSE;
+ }
+
+ /* min/max frame size. */
+ if (onRead(pUserData, &frameSizes, 6) != 6) {
+ return DRFLAC_FALSE;
+ }
+
+ /* Sample rate, channels, bits per sample and total sample count. */
+ if (onRead(pUserData, &importantProps, 8) != 8) {
+ return DRFLAC_FALSE;
+ }
+
+ /* MD5 */
+ if (onRead(pUserData, md5, sizeof(md5)) != sizeof(md5)) {
+ return DRFLAC_FALSE;
+ }
+
+ blockSizes = drflac__be2host_32(blockSizes);
+ frameSizes = drflac__be2host_64(frameSizes);
+ importantProps = drflac__be2host_64(importantProps);
+
+ pStreamInfo->minBlockSizeInPCMFrames = (blockSizes & 0xFFFF0000) >> 16;
+ pStreamInfo->maxBlockSizeInPCMFrames = (blockSizes & 0x0000FFFF);
+ pStreamInfo->minFrameSizeInPCMFrames = (drflac_uint32)((frameSizes & (((drflac_uint64)0x00FFFFFF << 16) << 24)) >> 40);
+ pStreamInfo->maxFrameSizeInPCMFrames = (drflac_uint32)((frameSizes & (((drflac_uint64)0x00FFFFFF << 16) << 0)) >> 16);
+ pStreamInfo->sampleRate = (drflac_uint32)((importantProps & (((drflac_uint64)0x000FFFFF << 16) << 28)) >> 44);
+ pStreamInfo->channels = (drflac_uint8 )((importantProps & (((drflac_uint64)0x0000000E << 16) << 24)) >> 41) + 1;
+ pStreamInfo->bitsPerSample = (drflac_uint8 )((importantProps & (((drflac_uint64)0x0000001F << 16) << 20)) >> 36) + 1;
+ pStreamInfo->totalPCMFrameCount = ((importantProps & ((((drflac_uint64)0x0000000F << 16) << 16) | 0xFFFFFFFF)));
+ DRFLAC_COPY_MEMORY(pStreamInfo->md5, md5, sizeof(md5));
+
+ return DRFLAC_TRUE;
+}
+
+
+static void* drflac__malloc_default(size_t sz, void* pUserData)
+{
+ (void)pUserData;
+ return DRFLAC_MALLOC(sz);
+}
+
+static void* drflac__realloc_default(void* p, size_t sz, void* pUserData)
+{
+ (void)pUserData;
+ return DRFLAC_REALLOC(p, sz);
+}
+
+static void drflac__free_default(void* p, void* pUserData)
+{
+ (void)pUserData;
+ DRFLAC_FREE(p);
+}
+
+
+static void* drflac__malloc_from_callbacks(size_t sz, const drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pAllocationCallbacks == NULL) {
+ return NULL;
+ }
+
+ if (pAllocationCallbacks->onMalloc != NULL) {
+ return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData);
+ }
+
+ /* Try using realloc(). */
+ if (pAllocationCallbacks->onRealloc != NULL) {
+ return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData);
+ }
+
+ return NULL;
+}
+
+static void* drflac__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pAllocationCallbacks == NULL) {
+ return NULL;
+ }
+
+ if (pAllocationCallbacks->onRealloc != NULL) {
+ return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData);
+ }
+
+ /* Try emulating realloc() in terms of malloc()/free(). */
+ if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) {
+ void* p2;
+
+ p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData);
+ if (p2 == NULL) {
+ return NULL;
+ }
+
+ if (p != NULL) {
+ DRFLAC_COPY_MEMORY(p2, p, szOld);
+ pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
+ }
+
+ return p2;
+ }
+
+ return NULL;
+}
+
+static void drflac__free_from_callbacks(void* p, const drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ if (p == NULL || pAllocationCallbacks == NULL) {
+ return;
+ }
+
+ if (pAllocationCallbacks->onFree != NULL) {
+ pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
+ }
+}
+
+
+drflac_bool32 drflac__read_and_decode_metadata(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, void* pUserData, void* pUserDataMD, drflac_uint64* pFirstFramePos, drflac_uint64* pSeektablePos, drflac_uint32* pSeektableSize, drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ /*
+ We want to keep track of the byte position in the stream of the seektable. At the time of calling this function we know that
+ we'll be sitting on byte 42.
+ */
+ drflac_uint64 runningFilePos = 42;
+ drflac_uint64 seektablePos = 0;
+ drflac_uint32 seektableSize = 0;
+
+ for (;;) {
+ drflac_metadata metadata;
+ drflac_uint8 isLastBlock = 0;
+ drflac_uint8 blockType;
+ drflac_uint32 blockSize;
+ if (drflac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize) == DRFLAC_FALSE) {
+ return DRFLAC_FALSE;
+ }
+ runningFilePos += 4;
+
+ metadata.type = blockType;
+ metadata.pRawData = NULL;
+ metadata.rawDataSize = 0;
+
+ switch (blockType)
+ {
+ case DRFLAC_METADATA_BLOCK_TYPE_APPLICATION:
+ {
+ if (blockSize < 4) {
+ return DRFLAC_FALSE;
+ }
+
+ if (onMeta) {
+ void* pRawData = drflac__malloc_from_callbacks(blockSize, pAllocationCallbacks);
+ if (pRawData == NULL) {
+ return DRFLAC_FALSE;
+ }
+
+ if (onRead(pUserData, pRawData, blockSize) != blockSize) {
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+
+ metadata.pRawData = pRawData;
+ metadata.rawDataSize = blockSize;
+ metadata.data.application.id = drflac__be2host_32(*(drflac_uint32*)pRawData);
+ metadata.data.application.pData = (const void*)((drflac_uint8*)pRawData + sizeof(drflac_uint32));
+ metadata.data.application.dataSize = blockSize - sizeof(drflac_uint32);
+ onMeta(pUserDataMD, &metadata);
+
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ }
+ } break;
+
+ case DRFLAC_METADATA_BLOCK_TYPE_SEEKTABLE:
+ {
+ seektablePos = runningFilePos;
+ seektableSize = blockSize;
+
+ if (onMeta) {
+ drflac_uint32 iSeekpoint;
+ void* pRawData;
+
+ pRawData = drflac__malloc_from_callbacks(blockSize, pAllocationCallbacks);
+ if (pRawData == NULL) {
+ return DRFLAC_FALSE;
+ }
+
+ if (onRead(pUserData, pRawData, blockSize) != blockSize) {
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+
+ metadata.pRawData = pRawData;
+ metadata.rawDataSize = blockSize;
+ metadata.data.seektable.seekpointCount = blockSize/sizeof(drflac_seekpoint);
+ metadata.data.seektable.pSeekpoints = (const drflac_seekpoint*)pRawData;
+
+ /* Endian swap. */
+ for (iSeekpoint = 0; iSeekpoint < metadata.data.seektable.seekpointCount; ++iSeekpoint) {
+ drflac_seekpoint* pSeekpoint = (drflac_seekpoint*)pRawData + iSeekpoint;
+ pSeekpoint->firstPCMFrame = drflac__be2host_64(pSeekpoint->firstPCMFrame);
+ pSeekpoint->flacFrameOffset = drflac__be2host_64(pSeekpoint->flacFrameOffset);
+ pSeekpoint->pcmFrameCount = drflac__be2host_16(pSeekpoint->pcmFrameCount);
+ }
+
+ onMeta(pUserDataMD, &metadata);
+
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ }
+ } break;
+
+ case DRFLAC_METADATA_BLOCK_TYPE_VORBIS_COMMENT:
+ {
+ if (blockSize < 8) {
+ return DRFLAC_FALSE;
+ }
+
+ if (onMeta) {
+ void* pRawData;
+ const char* pRunningData;
+ const char* pRunningDataEnd;
+ drflac_uint32 i;
+
+ pRawData = drflac__malloc_from_callbacks(blockSize, pAllocationCallbacks);
+ if (pRawData == NULL) {
+ return DRFLAC_FALSE;
+ }
+
+ if (onRead(pUserData, pRawData, blockSize) != blockSize) {
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+
+ metadata.pRawData = pRawData;
+ metadata.rawDataSize = blockSize;
+
+ pRunningData = (const char*)pRawData;
+ pRunningDataEnd = (const char*)pRawData + blockSize;
+
+ metadata.data.vorbis_comment.vendorLength = drflac__le2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+
+ /* Need space for the rest of the block */
+ if ((pRunningDataEnd - pRunningData) - 4 < (drflac_int64)metadata.data.vorbis_comment.vendorLength) { /* <-- Note the order of operations to avoid overflow to a valid value */
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+ metadata.data.vorbis_comment.vendor = pRunningData; pRunningData += metadata.data.vorbis_comment.vendorLength;
+ metadata.data.vorbis_comment.commentCount = drflac__le2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+
+ /* Need space for 'commentCount' comments after the block, which at minimum is a drflac_uint32 per comment */
+ if ((pRunningDataEnd - pRunningData) / sizeof(drflac_uint32) < metadata.data.vorbis_comment.commentCount) { /* <-- Note the order of operations to avoid overflow to a valid value */
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+ metadata.data.vorbis_comment.pComments = pRunningData;
+
+ /* Check that the comments section is valid before passing it to the callback */
+ for (i = 0; i < metadata.data.vorbis_comment.commentCount; ++i) {
+ drflac_uint32 commentLength;
+
+ if (pRunningDataEnd - pRunningData < 4) {
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+
+ commentLength = drflac__le2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+ if (pRunningDataEnd - pRunningData < (drflac_int64)commentLength) { /* <-- Note the order of operations to avoid overflow to a valid value */
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+ pRunningData += commentLength;
+ }
+
+ onMeta(pUserDataMD, &metadata);
+
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ }
+ } break;
+
+ case DRFLAC_METADATA_BLOCK_TYPE_CUESHEET:
+ {
+ if (blockSize < 396) {
+ return DRFLAC_FALSE;
+ }
+
+ if (onMeta) {
+ void* pRawData;
+ const char* pRunningData;
+ const char* pRunningDataEnd;
+ drflac_uint8 iTrack;
+ drflac_uint8 iIndex;
+
+ pRawData = drflac__malloc_from_callbacks(blockSize, pAllocationCallbacks);
+ if (pRawData == NULL) {
+ return DRFLAC_FALSE;
+ }
+
+ if (onRead(pUserData, pRawData, blockSize) != blockSize) {
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+
+ metadata.pRawData = pRawData;
+ metadata.rawDataSize = blockSize;
+
+ pRunningData = (const char*)pRawData;
+ pRunningDataEnd = (const char*)pRawData + blockSize;
+
+ DRFLAC_COPY_MEMORY(metadata.data.cuesheet.catalog, pRunningData, 128); pRunningData += 128;
+ metadata.data.cuesheet.leadInSampleCount = drflac__be2host_64(*(const drflac_uint64*)pRunningData); pRunningData += 8;
+ metadata.data.cuesheet.isCD = (pRunningData[0] & 0x80) != 0; pRunningData += 259;
+ metadata.data.cuesheet.trackCount = pRunningData[0]; pRunningData += 1;
+ metadata.data.cuesheet.pTrackData = pRunningData;
+
+ /* Check that the cuesheet tracks are valid before passing it to the callback */
+ for (iTrack = 0; iTrack < metadata.data.cuesheet.trackCount; ++iTrack) {
+ drflac_uint8 indexCount;
+ drflac_uint32 indexPointSize;
+
+ if (pRunningDataEnd - pRunningData < 36) {
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+
+ /* Skip to the index point count */
+ pRunningData += 35;
+ indexCount = pRunningData[0]; pRunningData += 1;
+ indexPointSize = indexCount * sizeof(drflac_cuesheet_track_index);
+ if (pRunningDataEnd - pRunningData < (drflac_int64)indexPointSize) {
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+
+ /* Endian swap. */
+ for (iIndex = 0; iIndex < indexCount; ++iIndex) {
+ drflac_cuesheet_track_index* pTrack = (drflac_cuesheet_track_index*)pRunningData;
+ pRunningData += sizeof(drflac_cuesheet_track_index);
+ pTrack->offset = drflac__be2host_64(pTrack->offset);
+ }
+ }
+
+ onMeta(pUserDataMD, &metadata);
+
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ }
+ } break;
+
+ case DRFLAC_METADATA_BLOCK_TYPE_PICTURE:
+ {
+ if (blockSize < 32) {
+ return DRFLAC_FALSE;
+ }
+
+ if (onMeta) {
+ void* pRawData;
+ const char* pRunningData;
+ const char* pRunningDataEnd;
+
+ pRawData = drflac__malloc_from_callbacks(blockSize, pAllocationCallbacks);
+ if (pRawData == NULL) {
+ return DRFLAC_FALSE;
+ }
+
+ if (onRead(pUserData, pRawData, blockSize) != blockSize) {
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+
+ metadata.pRawData = pRawData;
+ metadata.rawDataSize = blockSize;
+
+ pRunningData = (const char*)pRawData;
+ pRunningDataEnd = (const char*)pRawData + blockSize;
+
+ metadata.data.picture.type = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+ metadata.data.picture.mimeLength = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+
+ /* Need space for the rest of the block */
+ if ((pRunningDataEnd - pRunningData) - 24 < (drflac_int64)metadata.data.picture.mimeLength) { /* <-- Note the order of operations to avoid overflow to a valid value */
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+ metadata.data.picture.mime = pRunningData; pRunningData += metadata.data.picture.mimeLength;
+ metadata.data.picture.descriptionLength = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+
+ /* Need space for the rest of the block */
+ if ((pRunningDataEnd - pRunningData) - 20 < (drflac_int64)metadata.data.picture.descriptionLength) { /* <-- Note the order of operations to avoid overflow to a valid value */
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+ metadata.data.picture.description = pRunningData; pRunningData += metadata.data.picture.descriptionLength;
+ metadata.data.picture.width = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+ metadata.data.picture.height = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+ metadata.data.picture.colorDepth = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+ metadata.data.picture.indexColorCount = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+ metadata.data.picture.pictureDataSize = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
+ metadata.data.picture.pPictureData = (const drflac_uint8*)pRunningData;
+
+ /* Need space for the picture after the block */
+ if (pRunningDataEnd - pRunningData < (drflac_int64)metadata.data.picture.pictureDataSize) { /* <-- Note the order of operations to avoid overflow to a valid value */
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+
+ onMeta(pUserDataMD, &metadata);
+
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ }
+ } break;
+
+ case DRFLAC_METADATA_BLOCK_TYPE_PADDING:
+ {
+ if (onMeta) {
+ metadata.data.padding.unused = 0;
+
+ /* Padding doesn't have anything meaningful in it, so just skip over it, but make sure the caller is aware of it by firing the callback. */
+ if (!onSeek(pUserData, blockSize, drflac_seek_origin_current)) {
+ isLastBlock = DRFLAC_TRUE; /* An error occurred while seeking. Attempt to recover by treating this as the last block which will in turn terminate the loop. */
+ } else {
+ onMeta(pUserDataMD, &metadata);
+ }
+ }
+ } break;
+
+ case DRFLAC_METADATA_BLOCK_TYPE_INVALID:
+ {
+ /* Invalid chunk. Just skip over this one. */
+ if (onMeta) {
+ if (!onSeek(pUserData, blockSize, drflac_seek_origin_current)) {
+ isLastBlock = DRFLAC_TRUE; /* An error occurred while seeking. Attempt to recover by treating this as the last block which will in turn terminate the loop. */
+ }
+ }
+ } break;
+
+ default:
+ {
+ /*
+ It's an unknown chunk, but not necessarily invalid. There's a chance more metadata blocks might be defined later on, so we
+ can at the very least report the chunk to the application and let it look at the raw data.
+ */
+ if (onMeta) {
+ void* pRawData = drflac__malloc_from_callbacks(blockSize, pAllocationCallbacks);
+ if (pRawData == NULL) {
+ return DRFLAC_FALSE;
+ }
+
+ if (onRead(pUserData, pRawData, blockSize) != blockSize) {
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ return DRFLAC_FALSE;
+ }
+
+ metadata.pRawData = pRawData;
+ metadata.rawDataSize = blockSize;
+ onMeta(pUserDataMD, &metadata);
+
+ drflac__free_from_callbacks(pRawData, pAllocationCallbacks);
+ }
+ } break;
+ }
+
+ /* If we're not handling metadata, just skip over the block. If we are, it will have been handled earlier in the switch statement above. */
+ if (onMeta == NULL && blockSize > 0) {
+ if (!onSeek(pUserData, blockSize, drflac_seek_origin_current)) {
+ isLastBlock = DRFLAC_TRUE;
+ }
+ }
+
+ runningFilePos += blockSize;
+ if (isLastBlock) {
+ break;
+ }
+ }
+
+ *pSeektablePos = seektablePos;
+ *pSeektableSize = seektableSize;
+ *pFirstFramePos = runningFilePos;
+
+ return DRFLAC_TRUE;
+}
+
+drflac_bool32 drflac__init_private__native(drflac_init_info* pInit, drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, void* pUserData, void* pUserDataMD, drflac_bool32 relaxed)
+{
+ /* Pre Condition: The bit stream should be sitting just past the 4-byte id header. */
+
+ drflac_uint8 isLastBlock;
+ drflac_uint8 blockType;
+ drflac_uint32 blockSize;
+
+ (void)onSeek;
+
+ pInit->container = drflac_container_native;
+
+ /* The first metadata block should be the STREAMINFO block. */
+ if (!drflac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize)) {
+ return DRFLAC_FALSE;
+ }
+
+ if (blockType != DRFLAC_METADATA_BLOCK_TYPE_STREAMINFO || blockSize != 34) {
+ if (!relaxed) {
+ /* We're opening in strict mode and the first block is not the STREAMINFO block. Error. */
+ return DRFLAC_FALSE;
+ } else {
+ /*
+ Relaxed mode. To open from here we need to just find the first frame and set the sample rate, etc. to whatever is defined
+ for that frame.
+ */
+ pInit->hasStreamInfoBlock = DRFLAC_FALSE;
+ pInit->hasMetadataBlocks = DRFLAC_FALSE;
+
+ if (!drflac__read_next_flac_frame_header(&pInit->bs, 0, &pInit->firstFrameHeader)) {
+ return DRFLAC_FALSE; /* Couldn't find a frame. */
+ }
+
+ if (pInit->firstFrameHeader.bitsPerSample == 0) {
+ return DRFLAC_FALSE; /* Failed to initialize because the first frame depends on the STREAMINFO block, which does not exist. */
+ }
+
+ pInit->sampleRate = pInit->firstFrameHeader.sampleRate;
+ pInit->channels = drflac__get_channel_count_from_channel_assignment(pInit->firstFrameHeader.channelAssignment);
+ pInit->bitsPerSample = pInit->firstFrameHeader.bitsPerSample;
+ pInit->maxBlockSizeInPCMFrames = 65535; /* <-- See notes here: https://xiph.org/flac/format.html#metadata_block_streaminfo */
+ return DRFLAC_TRUE;
+ }
+ } else {
+ drflac_streaminfo streaminfo;
+ if (!drflac__read_streaminfo(onRead, pUserData, &streaminfo)) {
+ return DRFLAC_FALSE;
+ }
+
+ pInit->hasStreamInfoBlock = DRFLAC_TRUE;
+ pInit->sampleRate = streaminfo.sampleRate;
+ pInit->channels = streaminfo.channels;
+ pInit->bitsPerSample = streaminfo.bitsPerSample;
+ pInit->totalPCMFrameCount = streaminfo.totalPCMFrameCount;
+ pInit->maxBlockSizeInPCMFrames = streaminfo.maxBlockSizeInPCMFrames; /* Don't care about the min block size - only the max (used for determining the size of the memory allocation). */
+ pInit->hasMetadataBlocks = !isLastBlock;
+
+ if (onMeta) {
+ drflac_metadata metadata;
+ metadata.type = DRFLAC_METADATA_BLOCK_TYPE_STREAMINFO;
+ metadata.pRawData = NULL;
+ metadata.rawDataSize = 0;
+ metadata.data.streaminfo = streaminfo;
+ onMeta(pUserDataMD, &metadata);
+ }
+
+ return DRFLAC_TRUE;
+ }
+}
+
+#ifndef DR_FLAC_NO_OGG
+#define DRFLAC_OGG_MAX_PAGE_SIZE 65307
+#define DRFLAC_OGG_CAPTURE_PATTERN_CRC32 1605413199 /* CRC-32 of "OggS". */
+
+typedef enum
+{
+ drflac_ogg_recover_on_crc_mismatch,
+ drflac_ogg_fail_on_crc_mismatch
+} drflac_ogg_crc_mismatch_recovery;
+
+#ifndef DR_FLAC_NO_CRC
+static drflac_uint32 drflac__crc32_table[] = {
+ 0x00000000L, 0x04C11DB7L, 0x09823B6EL, 0x0D4326D9L,
+ 0x130476DCL, 0x17C56B6BL, 0x1A864DB2L, 0x1E475005L,
+ 0x2608EDB8L, 0x22C9F00FL, 0x2F8AD6D6L, 0x2B4BCB61L,
+ 0x350C9B64L, 0x31CD86D3L, 0x3C8EA00AL, 0x384FBDBDL,
+ 0x4C11DB70L, 0x48D0C6C7L, 0x4593E01EL, 0x4152FDA9L,
+ 0x5F15ADACL, 0x5BD4B01BL, 0x569796C2L, 0x52568B75L,
+ 0x6A1936C8L, 0x6ED82B7FL, 0x639B0DA6L, 0x675A1011L,
+ 0x791D4014L, 0x7DDC5DA3L, 0x709F7B7AL, 0x745E66CDL,
+ 0x9823B6E0L, 0x9CE2AB57L, 0x91A18D8EL, 0x95609039L,
+ 0x8B27C03CL, 0x8FE6DD8BL, 0x82A5FB52L, 0x8664E6E5L,
+ 0xBE2B5B58L, 0xBAEA46EFL, 0xB7A96036L, 0xB3687D81L,
+ 0xAD2F2D84L, 0xA9EE3033L, 0xA4AD16EAL, 0xA06C0B5DL,
+ 0xD4326D90L, 0xD0F37027L, 0xDDB056FEL, 0xD9714B49L,
+ 0xC7361B4CL, 0xC3F706FBL, 0xCEB42022L, 0xCA753D95L,
+ 0xF23A8028L, 0xF6FB9D9FL, 0xFBB8BB46L, 0xFF79A6F1L,
+ 0xE13EF6F4L, 0xE5FFEB43L, 0xE8BCCD9AL, 0xEC7DD02DL,
+ 0x34867077L, 0x30476DC0L, 0x3D044B19L, 0x39C556AEL,
+ 0x278206ABL, 0x23431B1CL, 0x2E003DC5L, 0x2AC12072L,
+ 0x128E9DCFL, 0x164F8078L, 0x1B0CA6A1L, 0x1FCDBB16L,
+ 0x018AEB13L, 0x054BF6A4L, 0x0808D07DL, 0x0CC9CDCAL,
+ 0x7897AB07L, 0x7C56B6B0L, 0x71159069L, 0x75D48DDEL,
+ 0x6B93DDDBL, 0x6F52C06CL, 0x6211E6B5L, 0x66D0FB02L,
+ 0x5E9F46BFL, 0x5A5E5B08L, 0x571D7DD1L, 0x53DC6066L,
+ 0x4D9B3063L, 0x495A2DD4L, 0x44190B0DL, 0x40D816BAL,
+ 0xACA5C697L, 0xA864DB20L, 0xA527FDF9L, 0xA1E6E04EL,
+ 0xBFA1B04BL, 0xBB60ADFCL, 0xB6238B25L, 0xB2E29692L,
+ 0x8AAD2B2FL, 0x8E6C3698L, 0x832F1041L, 0x87EE0DF6L,
+ 0x99A95DF3L, 0x9D684044L, 0x902B669DL, 0x94EA7B2AL,
+ 0xE0B41DE7L, 0xE4750050L, 0xE9362689L, 0xEDF73B3EL,
+ 0xF3B06B3BL, 0xF771768CL, 0xFA325055L, 0xFEF34DE2L,
+ 0xC6BCF05FL, 0xC27DEDE8L, 0xCF3ECB31L, 0xCBFFD686L,
+ 0xD5B88683L, 0xD1799B34L, 0xDC3ABDEDL, 0xD8FBA05AL,
+ 0x690CE0EEL, 0x6DCDFD59L, 0x608EDB80L, 0x644FC637L,
+ 0x7A089632L, 0x7EC98B85L, 0x738AAD5CL, 0x774BB0EBL,
+ 0x4F040D56L, 0x4BC510E1L, 0x46863638L, 0x42472B8FL,
+ 0x5C007B8AL, 0x58C1663DL, 0x558240E4L, 0x51435D53L,
+ 0x251D3B9EL, 0x21DC2629L, 0x2C9F00F0L, 0x285E1D47L,
+ 0x36194D42L, 0x32D850F5L, 0x3F9B762CL, 0x3B5A6B9BL,
+ 0x0315D626L, 0x07D4CB91L, 0x0A97ED48L, 0x0E56F0FFL,
+ 0x1011A0FAL, 0x14D0BD4DL, 0x19939B94L, 0x1D528623L,
+ 0xF12F560EL, 0xF5EE4BB9L, 0xF8AD6D60L, 0xFC6C70D7L,
+ 0xE22B20D2L, 0xE6EA3D65L, 0xEBA91BBCL, 0xEF68060BL,
+ 0xD727BBB6L, 0xD3E6A601L, 0xDEA580D8L, 0xDA649D6FL,
+ 0xC423CD6AL, 0xC0E2D0DDL, 0xCDA1F604L, 0xC960EBB3L,
+ 0xBD3E8D7EL, 0xB9FF90C9L, 0xB4BCB610L, 0xB07DABA7L,
+ 0xAE3AFBA2L, 0xAAFBE615L, 0xA7B8C0CCL, 0xA379DD7BL,
+ 0x9B3660C6L, 0x9FF77D71L, 0x92B45BA8L, 0x9675461FL,
+ 0x8832161AL, 0x8CF30BADL, 0x81B02D74L, 0x857130C3L,
+ 0x5D8A9099L, 0x594B8D2EL, 0x5408ABF7L, 0x50C9B640L,
+ 0x4E8EE645L, 0x4A4FFBF2L, 0x470CDD2BL, 0x43CDC09CL,
+ 0x7B827D21L, 0x7F436096L, 0x7200464FL, 0x76C15BF8L,
+ 0x68860BFDL, 0x6C47164AL, 0x61043093L, 0x65C52D24L,
+ 0x119B4BE9L, 0x155A565EL, 0x18197087L, 0x1CD86D30L,
+ 0x029F3D35L, 0x065E2082L, 0x0B1D065BL, 0x0FDC1BECL,
+ 0x3793A651L, 0x3352BBE6L, 0x3E119D3FL, 0x3AD08088L,
+ 0x2497D08DL, 0x2056CD3AL, 0x2D15EBE3L, 0x29D4F654L,
+ 0xC5A92679L, 0xC1683BCEL, 0xCC2B1D17L, 0xC8EA00A0L,
+ 0xD6AD50A5L, 0xD26C4D12L, 0xDF2F6BCBL, 0xDBEE767CL,
+ 0xE3A1CBC1L, 0xE760D676L, 0xEA23F0AFL, 0xEEE2ED18L,
+ 0xF0A5BD1DL, 0xF464A0AAL, 0xF9278673L, 0xFDE69BC4L,
+ 0x89B8FD09L, 0x8D79E0BEL, 0x803AC667L, 0x84FBDBD0L,
+ 0x9ABC8BD5L, 0x9E7D9662L, 0x933EB0BBL, 0x97FFAD0CL,
+ 0xAFB010B1L, 0xAB710D06L, 0xA6322BDFL, 0xA2F33668L,
+ 0xBCB4666DL, 0xB8757BDAL, 0xB5365D03L, 0xB1F740B4L
+};
+#endif
+
+static DRFLAC_INLINE drflac_uint32 drflac_crc32_byte(drflac_uint32 crc32, drflac_uint8 data)
+{
+#ifndef DR_FLAC_NO_CRC
+ return (crc32 << 8) ^ drflac__crc32_table[(drflac_uint8)((crc32 >> 24) & 0xFF) ^ data];
+#else
+ (void)data;
+ return crc32;
+#endif
+}
+
+#if 0
+static DRFLAC_INLINE drflac_uint32 drflac_crc32_uint32(drflac_uint32 crc32, drflac_uint32 data)
+{
+ crc32 = drflac_crc32_byte(crc32, (drflac_uint8)((data >> 24) & 0xFF));
+ crc32 = drflac_crc32_byte(crc32, (drflac_uint8)((data >> 16) & 0xFF));
+ crc32 = drflac_crc32_byte(crc32, (drflac_uint8)((data >> 8) & 0xFF));
+ crc32 = drflac_crc32_byte(crc32, (drflac_uint8)((data >> 0) & 0xFF));
+ return crc32;
+}
+
+static DRFLAC_INLINE drflac_uint32 drflac_crc32_uint64(drflac_uint32 crc32, drflac_uint64 data)
+{
+ crc32 = drflac_crc32_uint32(crc32, (drflac_uint32)((data >> 32) & 0xFFFFFFFF));
+ crc32 = drflac_crc32_uint32(crc32, (drflac_uint32)((data >> 0) & 0xFFFFFFFF));
+ return crc32;
+}
+#endif
+
+static DRFLAC_INLINE drflac_uint32 drflac_crc32_buffer(drflac_uint32 crc32, drflac_uint8* pData, drflac_uint32 dataSize)
+{
+ /* This can be optimized. */
+ drflac_uint32 i;
+ for (i = 0; i < dataSize; ++i) {
+ crc32 = drflac_crc32_byte(crc32, pData[i]);
+ }
+ return crc32;
+}
+
+
+static DRFLAC_INLINE drflac_bool32 drflac_ogg__is_capture_pattern(drflac_uint8 pattern[4])
+{
+ return pattern[0] == 'O' && pattern[1] == 'g' && pattern[2] == 'g' && pattern[3] == 'S';
+}
+
+static DRFLAC_INLINE drflac_uint32 drflac_ogg__get_page_header_size(drflac_ogg_page_header* pHeader)
+{
+ return 27 + pHeader->segmentCount;
+}
+
+static DRFLAC_INLINE drflac_uint32 drflac_ogg__get_page_body_size(drflac_ogg_page_header* pHeader)
+{
+ drflac_uint32 pageBodySize = 0;
+ int i;
+
+ for (i = 0; i < pHeader->segmentCount; ++i) {
+ pageBodySize += pHeader->segmentTable[i];
+ }
+
+ return pageBodySize;
+}
+
+drflac_result drflac_ogg__read_page_header_after_capture_pattern(drflac_read_proc onRead, void* pUserData, drflac_ogg_page_header* pHeader, drflac_uint32* pBytesRead, drflac_uint32* pCRC32)
+{
+ drflac_uint8 data[23];
+ drflac_uint32 i;
+
+ DRFLAC_ASSERT(*pCRC32 == DRFLAC_OGG_CAPTURE_PATTERN_CRC32);
+
+ if (onRead(pUserData, data, 23) != 23) {
+ return DRFLAC_END_OF_STREAM;
+ }
+ *pBytesRead += 23;
+
+ /*
+ It's not actually used, but set the capture pattern to 'OggS' for completeness. Not doing this will cause static analysers to complain about
+ us trying to access uninitialized data. We could alternatively just comment out this member of the drflac_ogg_page_header structure, but I
+ like to have it map to the structure of the underlying data.
+ */
+ pHeader->capturePattern[0] = 'O';
+ pHeader->capturePattern[1] = 'g';
+ pHeader->capturePattern[2] = 'g';
+ pHeader->capturePattern[3] = 'S';
+
+ pHeader->structureVersion = data[0];
+ pHeader->headerType = data[1];
+ DRFLAC_COPY_MEMORY(&pHeader->granulePosition, &data[ 2], 8);
+ DRFLAC_COPY_MEMORY(&pHeader->serialNumber, &data[10], 4);
+ DRFLAC_COPY_MEMORY(&pHeader->sequenceNumber, &data[14], 4);
+ DRFLAC_COPY_MEMORY(&pHeader->checksum, &data[18], 4);
+ pHeader->segmentCount = data[22];
+
+ /* Calculate the CRC. Note that for the calculation the checksum part of the page needs to be set to 0. */
+ data[18] = 0;
+ data[19] = 0;
+ data[20] = 0;
+ data[21] = 0;
+
+ for (i = 0; i < 23; ++i) {
+ *pCRC32 = drflac_crc32_byte(*pCRC32, data[i]);
+ }
+
+
+ if (onRead(pUserData, pHeader->segmentTable, pHeader->segmentCount) != pHeader->segmentCount) {
+ return DRFLAC_END_OF_STREAM;
+ }
+ *pBytesRead += pHeader->segmentCount;
+
+ for (i = 0; i < pHeader->segmentCount; ++i) {
+ *pCRC32 = drflac_crc32_byte(*pCRC32, pHeader->segmentTable[i]);
+ }
+
+ return DRFLAC_SUCCESS;
+}
+
+drflac_result drflac_ogg__read_page_header(drflac_read_proc onRead, void* pUserData, drflac_ogg_page_header* pHeader, drflac_uint32* pBytesRead, drflac_uint32* pCRC32)
+{
+ drflac_uint8 id[4];
+
+ *pBytesRead = 0;
+
+ if (onRead(pUserData, id, 4) != 4) {
+ return DRFLAC_END_OF_STREAM;
+ }
+ *pBytesRead += 4;
+
+ /* We need to read byte-by-byte until we find the OggS capture pattern. */
+ for (;;) {
+ if (drflac_ogg__is_capture_pattern(id)) {
+ drflac_result result;
+
+ *pCRC32 = DRFLAC_OGG_CAPTURE_PATTERN_CRC32;
+
+ result = drflac_ogg__read_page_header_after_capture_pattern(onRead, pUserData, pHeader, pBytesRead, pCRC32);
+ if (result == DRFLAC_SUCCESS) {
+ return DRFLAC_SUCCESS;
+ } else {
+ if (result == DRFLAC_CRC_MISMATCH) {
+ continue;
+ } else {
+ return result;
+ }
+ }
+ } else {
+ /* The first 4 bytes did not equal the capture pattern. Read the next byte and try again. */
+ id[0] = id[1];
+ id[1] = id[2];
+ id[2] = id[3];
+ if (onRead(pUserData, &id[3], 1) != 1) {
+ return DRFLAC_END_OF_STREAM;
+ }
+ *pBytesRead += 1;
+ }
+ }
+}
+
+
+/*
+The main part of the Ogg encapsulation is the conversion from the physical Ogg bitstream to the native FLAC bitstream. It works
+in three general stages: Ogg Physical Bitstream -> Ogg/FLAC Logical Bitstream -> FLAC Native Bitstream. dr_flac is designed
+in such a way that the core sections assume everything is delivered in native format. Therefore, for each encapsulation type
+dr_flac is supporting there needs to be a layer sitting on top of the onRead and onSeek callbacks that ensures the bits read from
+the physical Ogg bitstream are converted and delivered in native FLAC format.
+*/
+typedef struct
+{
+ drflac_read_proc onRead; /* The original onRead callback from drflac_open() and family. */
+ drflac_seek_proc onSeek; /* The original onSeek callback from drflac_open() and family. */
+ void* pUserData; /* The user data passed on onRead and onSeek. This is the user data that was passed on drflac_open() and family. */
+ drflac_uint64 currentBytePos; /* The position of the byte we are sitting on in the physical byte stream. Used for efficient seeking. */
+ drflac_uint64 firstBytePos; /* The position of the first byte in the physical bitstream. Points to the start of the "OggS" identifier of the FLAC bos page. */
+ drflac_uint32 serialNumber; /* The serial number of the FLAC audio pages. This is determined by the initial header page that was read during initialization. */
+ drflac_ogg_page_header bosPageHeader; /* Used for seeking. */
+ drflac_ogg_page_header currentPageHeader;
+ drflac_uint32 bytesRemainingInPage;
+ drflac_uint32 pageDataSize;
+ drflac_uint8 pageData[DRFLAC_OGG_MAX_PAGE_SIZE];
+} drflac_oggbs; /* oggbs = Ogg Bitstream */
+
+static size_t drflac_oggbs__read_physical(drflac_oggbs* oggbs, void* bufferOut, size_t bytesToRead)
+{
+ size_t bytesActuallyRead = oggbs->onRead(oggbs->pUserData, bufferOut, bytesToRead);
+ oggbs->currentBytePos += bytesActuallyRead;
+
+ return bytesActuallyRead;
+}
+
+static drflac_bool32 drflac_oggbs__seek_physical(drflac_oggbs* oggbs, drflac_uint64 offset, drflac_seek_origin origin)
+{
+ if (origin == drflac_seek_origin_start) {
+ if (offset <= 0x7FFFFFFF) {
+ if (!oggbs->onSeek(oggbs->pUserData, (int)offset, drflac_seek_origin_start)) {
+ return DRFLAC_FALSE;
+ }
+ oggbs->currentBytePos = offset;
+
+ return DRFLAC_TRUE;
+ } else {
+ if (!oggbs->onSeek(oggbs->pUserData, 0x7FFFFFFF, drflac_seek_origin_start)) {
+ return DRFLAC_FALSE;
+ }
+ oggbs->currentBytePos = offset;
+
+ return drflac_oggbs__seek_physical(oggbs, offset - 0x7FFFFFFF, drflac_seek_origin_current);
+ }
+ } else {
+ while (offset > 0x7FFFFFFF) {
+ if (!oggbs->onSeek(oggbs->pUserData, 0x7FFFFFFF, drflac_seek_origin_current)) {
+ return DRFLAC_FALSE;
+ }
+ oggbs->currentBytePos += 0x7FFFFFFF;
+ offset -= 0x7FFFFFFF;
+ }
+
+ if (!oggbs->onSeek(oggbs->pUserData, (int)offset, drflac_seek_origin_current)) { /* <-- Safe cast thanks to the loop above. */
+ return DRFLAC_FALSE;
+ }
+ oggbs->currentBytePos += offset;
+
+ return DRFLAC_TRUE;
+ }
+}
+
+static drflac_bool32 drflac_oggbs__goto_next_page(drflac_oggbs* oggbs, drflac_ogg_crc_mismatch_recovery recoveryMethod)
+{
+ drflac_ogg_page_header header;
+ for (;;) {
+ drflac_uint32 crc32 = 0;
+ drflac_uint32 bytesRead;
+ drflac_uint32 pageBodySize;
+#ifndef DR_FLAC_NO_CRC
+ drflac_uint32 actualCRC32;
+#endif
+
+ if (drflac_ogg__read_page_header(oggbs->onRead, oggbs->pUserData, &header, &bytesRead, &crc32) != DRFLAC_SUCCESS) {
+ return DRFLAC_FALSE;
+ }
+ oggbs->currentBytePos += bytesRead;
+
+ pageBodySize = drflac_ogg__get_page_body_size(&header);
+ if (pageBodySize > DRFLAC_OGG_MAX_PAGE_SIZE) {
+ continue; /* Invalid page size. Assume it's corrupted and just move to the next page. */
+ }
+
+ if (header.serialNumber != oggbs->serialNumber) {
+ /* It's not a FLAC page. Skip it. */
+ if (pageBodySize > 0 && !drflac_oggbs__seek_physical(oggbs, pageBodySize, drflac_seek_origin_current)) {
+ return DRFLAC_FALSE;
+ }
+ continue;
+ }
+
+
+ /* We need to read the entire page and then do a CRC check on it. If there's a CRC mismatch we need to skip this page. */
+ if (drflac_oggbs__read_physical(oggbs, oggbs->pageData, pageBodySize) != pageBodySize) {
+ return DRFLAC_FALSE;
+ }
+ oggbs->pageDataSize = pageBodySize;
+
+#ifndef DR_FLAC_NO_CRC
+ actualCRC32 = drflac_crc32_buffer(crc32, oggbs->pageData, oggbs->pageDataSize);
+ if (actualCRC32 != header.checksum) {
+ if (recoveryMethod == drflac_ogg_recover_on_crc_mismatch) {
+ continue; /* CRC mismatch. Skip this page. */
+ } else {
+ /*
+ Even though we are failing on a CRC mismatch, we still want our stream to be in a good state. Therefore we
+ go to the next valid page to ensure we're in a good state, but return false to let the caller know that the
+ seek did not fully complete.
+ */
+ drflac_oggbs__goto_next_page(oggbs, drflac_ogg_recover_on_crc_mismatch);
+ return DRFLAC_FALSE;
+ }
+ }
+#else
+ (void)recoveryMethod; /* <-- Silence a warning. */
+#endif
+
+ oggbs->currentPageHeader = header;
+ oggbs->bytesRemainingInPage = pageBodySize;
+ return DRFLAC_TRUE;
+ }
+}
+
+/* Function below is unused at the moment, but I might be re-adding it later. */
+#if 0
+static drflac_uint8 drflac_oggbs__get_current_segment_index(drflac_oggbs* oggbs, drflac_uint8* pBytesRemainingInSeg)
+{
+ drflac_uint32 bytesConsumedInPage = drflac_ogg__get_page_body_size(&oggbs->currentPageHeader) - oggbs->bytesRemainingInPage;
+ drflac_uint8 iSeg = 0;
+ drflac_uint32 iByte = 0;
+ while (iByte < bytesConsumedInPage) {
+ drflac_uint8 segmentSize = oggbs->currentPageHeader.segmentTable[iSeg];
+ if (iByte + segmentSize > bytesConsumedInPage) {
+ break;
+ } else {
+ iSeg += 1;
+ iByte += segmentSize;
+ }
+ }
+
+ *pBytesRemainingInSeg = oggbs->currentPageHeader.segmentTable[iSeg] - (drflac_uint8)(bytesConsumedInPage - iByte);
+ return iSeg;
+}
+
+static drflac_bool32 drflac_oggbs__seek_to_next_packet(drflac_oggbs* oggbs)
+{
+ /* The current packet ends when we get to the segment with a lacing value of < 255 which is not at the end of a page. */
+ for (;;) {
+ drflac_bool32 atEndOfPage = DRFLAC_FALSE;
+
+ drflac_uint8 bytesRemainingInSeg;
+ drflac_uint8 iFirstSeg = drflac_oggbs__get_current_segment_index(oggbs, &bytesRemainingInSeg);
+
+ drflac_uint32 bytesToEndOfPacketOrPage = bytesRemainingInSeg;
+ for (drflac_uint8 iSeg = iFirstSeg; iSeg < oggbs->currentPageHeader.segmentCount; ++iSeg) {
+ drflac_uint8 segmentSize = oggbs->currentPageHeader.segmentTable[iSeg];
+ if (segmentSize < 255) {
+ if (iSeg == oggbs->currentPageHeader.segmentCount-1) {
+ atEndOfPage = DRFLAC_TRUE;
+ }
+
+ break;
+ }
+
+ bytesToEndOfPacketOrPage += segmentSize;
+ }
+
+ /*
+ At this point we will have found either the packet or the end of the page. If we're at the end of the page we'll
+ want to load the next page and keep searching for the end of the packet.
+ */
+ drflac_oggbs__seek_physical(oggbs, bytesToEndOfPacketOrPage, drflac_seek_origin_current);
+ oggbs->bytesRemainingInPage -= bytesToEndOfPacketOrPage;
+
+ if (atEndOfPage) {
+ /*
+ We're potentially at the next packet, but we need to check the next page first to be sure because the packet may
+ straddle pages.
+ */
+ if (!drflac_oggbs__goto_next_page(oggbs)) {
+ return DRFLAC_FALSE;
+ }
+
+ /* If it's a fresh packet it most likely means we're at the next packet. */
+ if ((oggbs->currentPageHeader.headerType & 0x01) == 0) {
+ return DRFLAC_TRUE;
+ }
+ } else {
+ /* We're at the next packet. */
return DRFLAC_TRUE;
}
+ }
+}
+
+static drflac_bool32 drflac_oggbs__seek_to_next_frame(drflac_oggbs* oggbs)
+{
+ /* The bitstream should be sitting on the first byte just after the header of the frame. */
+
+ /* What we're actually doing here is seeking to the start of the next packet. */
+ return drflac_oggbs__seek_to_next_packet(oggbs);
+}
+#endif
+
+static size_t drflac__on_read_ogg(void* pUserData, void* bufferOut, size_t bytesToRead)
+{
+ drflac_oggbs* oggbs = (drflac_oggbs*)pUserData;
+ drflac_uint8* pRunningBufferOut = (drflac_uint8*)bufferOut;
+ size_t bytesRead = 0;
+
+ DRFLAC_ASSERT(oggbs != NULL);
+ DRFLAC_ASSERT(pRunningBufferOut != NULL);
+
+ /* Reading is done page-by-page. If we've run out of bytes in the page we need to move to the next one. */
+ while (bytesRead < bytesToRead) {
+ size_t bytesRemainingToRead = bytesToRead - bytesRead;
+
+ if (oggbs->bytesRemainingInPage >= bytesRemainingToRead) {
+ DRFLAC_COPY_MEMORY(pRunningBufferOut, oggbs->pageData + (oggbs->pageDataSize - oggbs->bytesRemainingInPage), bytesRemainingToRead);
+ bytesRead += bytesRemainingToRead;
+ oggbs->bytesRemainingInPage -= (drflac_uint32)bytesRemainingToRead;
+ break;
+ }
+
+ /* If we get here it means some of the requested data is contained in the next pages. */
+ if (oggbs->bytesRemainingInPage > 0) {
+ DRFLAC_COPY_MEMORY(pRunningBufferOut, oggbs->pageData + (oggbs->pageDataSize - oggbs->bytesRemainingInPage), oggbs->bytesRemainingInPage);
+ bytesRead += oggbs->bytesRemainingInPage;
+ pRunningBufferOut += oggbs->bytesRemainingInPage;
+ oggbs->bytesRemainingInPage = 0;
+ }
+
+ DRFLAC_ASSERT(bytesRemainingToRead > 0);
+ if (!drflac_oggbs__goto_next_page(oggbs, drflac_ogg_recover_on_crc_mismatch)) {
+ break; /* Failed to go to the next page. Might have simply hit the end of the stream. */
+ }
+ }
+
+ return bytesRead;
+}
+
+static drflac_bool32 drflac__on_seek_ogg(void* pUserData, int offset, drflac_seek_origin origin)
+{
+ drflac_oggbs* oggbs = (drflac_oggbs*)pUserData;
+ int bytesSeeked = 0;
+
+ DRFLAC_ASSERT(oggbs != NULL);
+ DRFLAC_ASSERT(offset >= 0); /* <-- Never seek backwards. */
+
+ /* Seeking is always forward which makes things a lot simpler. */
+ if (origin == drflac_seek_origin_start) {
+ if (!drflac_oggbs__seek_physical(oggbs, (int)oggbs->firstBytePos, drflac_seek_origin_start)) {
+ return DRFLAC_FALSE;
+ }
+
+ if (!drflac_oggbs__goto_next_page(oggbs, drflac_ogg_fail_on_crc_mismatch)) {
+ return DRFLAC_FALSE;
+ }
+
+ return drflac__on_seek_ogg(pUserData, offset, drflac_seek_origin_current);
+ }
+
+ DRFLAC_ASSERT(origin == drflac_seek_origin_current);
+
+ while (bytesSeeked < offset) {
+ int bytesRemainingToSeek = offset - bytesSeeked;
+ DRFLAC_ASSERT(bytesRemainingToSeek >= 0);
+
+ if (oggbs->bytesRemainingInPage >= (size_t)bytesRemainingToSeek) {
+ bytesSeeked += bytesRemainingToSeek;
+ (void)bytesSeeked; /* <-- Silence a dead store warning emitted by Clang Static Analyzer. */
+ oggbs->bytesRemainingInPage -= bytesRemainingToSeek;
+ break;
+ }
+
+ /* If we get here it means some of the requested data is contained in the next pages. */
+ if (oggbs->bytesRemainingInPage > 0) {
+ bytesSeeked += (int)oggbs->bytesRemainingInPage;
+ oggbs->bytesRemainingInPage = 0;
+ }
+
+ DRFLAC_ASSERT(bytesRemainingToSeek > 0);
+ if (!drflac_oggbs__goto_next_page(oggbs, drflac_ogg_fail_on_crc_mismatch)) {
+ /* Failed to go to the next page. We either hit the end of the stream or had a CRC mismatch. */
+ return DRFLAC_FALSE;
+ }
+ }
+
+ return DRFLAC_TRUE;
+}
+
+
+drflac_bool32 drflac_ogg__seek_to_pcm_frame(drflac* pFlac, drflac_uint64 pcmFrameIndex)
+{
+ drflac_oggbs* oggbs = (drflac_oggbs*)pFlac->_oggbs;
+ drflac_uint64 originalBytePos;
+ drflac_uint64 runningGranulePosition;
+ drflac_uint64 runningFrameBytePos;
+ drflac_uint64 runningPCMFrameCount;
+
+ DRFLAC_ASSERT(oggbs != NULL);
+
+ originalBytePos = oggbs->currentBytePos; /* For recovery. Points to the OggS identifier. */
+
+ /* First seek to the first frame. */
+ if (!drflac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes)) {
+ return DRFLAC_FALSE;
+ }
+ oggbs->bytesRemainingInPage = 0;
+
+ runningGranulePosition = 0;
+ for (;;) {
+ if (!drflac_oggbs__goto_next_page(oggbs, drflac_ogg_recover_on_crc_mismatch)) {
+ drflac_oggbs__seek_physical(oggbs, originalBytePos, drflac_seek_origin_start);
+ return DRFLAC_FALSE; /* Never did find that sample... */
+ }
+
+ runningFrameBytePos = oggbs->currentBytePos - drflac_ogg__get_page_header_size(&oggbs->currentPageHeader) - oggbs->pageDataSize;
+ if (oggbs->currentPageHeader.granulePosition >= pcmFrameIndex) {
+ break; /* The sample is somewhere in the previous page. */
+ }
+
+ /*
+ At this point we know the sample is not in the previous page. It could possibly be in this page. For simplicity we
+ disregard any pages that do not begin a fresh packet.
+ */
+ if ((oggbs->currentPageHeader.headerType & 0x01) == 0) { /* <-- Is it a fresh page? */
+ if (oggbs->currentPageHeader.segmentTable[0] >= 2) {
+ drflac_uint8 firstBytesInPage[2];
+ firstBytesInPage[0] = oggbs->pageData[0];
+ firstBytesInPage[1] = oggbs->pageData[1];
+
+ if ((firstBytesInPage[0] == 0xFF) && (firstBytesInPage[1] & 0xFC) == 0xF8) { /* <-- Does the page begin with a frame's sync code? */
+ runningGranulePosition = oggbs->currentPageHeader.granulePosition;
+ }
+
+ continue;
+ }
+ }
+ }
+
+ /*
+ We found the page that is closest to the sample, so now we need to find it. The first thing to do is seek to the
+ start of that page. In the loop above we checked that it was a fresh page which means this page is also the start of
+ a new frame. This property means that after we've seeked to the page we can immediately start looping over frames until
+ we find the one containing the target sample.
+ */
+ if (!drflac_oggbs__seek_physical(oggbs, runningFrameBytePos, drflac_seek_origin_start)) {
+ return DRFLAC_FALSE;
+ }
+ if (!drflac_oggbs__goto_next_page(oggbs, drflac_ogg_recover_on_crc_mismatch)) {
+ return DRFLAC_FALSE;
+ }
+
+ /*
+ At this point we'll be sitting on the first byte of the frame header of the first frame in the page. We just keep
+ looping over these frames until we find the one containing the sample we're after.
+ */
+ runningPCMFrameCount = runningGranulePosition;
+ for (;;) {
+ /*
+ There are two ways to find the sample and seek past irrelevant frames:
+ 1) Use the native FLAC decoder.
+ 2) Use Ogg's framing system.
+
+ Both of these options have their own pros and cons. Using the native FLAC decoder is slower because it needs to
+ do a full decode of the frame. Using Ogg's framing system is faster, but more complicated and involves some code
+ duplication for the decoding of frame headers.
+
+ Another thing to consider is that using the Ogg framing system will perform direct seeking of the physical Ogg
+ bitstream. This is important to consider because it means we cannot read data from the drflac_bs object using the
+ standard drflac__*() APIs because that will read in extra data for its own internal caching which in turn breaks
+ the positioning of the read pointer of the physical Ogg bitstream. Therefore, anything that would normally be read
+ using the native FLAC decoding APIs, such as drflac__read_next_flac_frame_header(), need to be re-implemented so as to
+ avoid the use of the drflac_bs object.
+
+ Considering these issues, I have decided to use the slower native FLAC decoding method for the following reasons:
+ 1) Seeking is already partially accelerated using Ogg's paging system in the code block above.
+ 2) Seeking in an Ogg encapsulated FLAC stream is probably quite uncommon.
+ 3) Simplicity.
+ */
+ drflac_uint64 firstPCMFrameInFLACFrame = 0;
+ drflac_uint64 lastPCMFrameInFLACFrame = 0;
+ drflac_uint64 pcmFrameCountInThisFrame;
+
+ if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+ return DRFLAC_FALSE;
+ }
+
+ drflac__get_pcm_frame_range_of_current_flac_frame(pFlac, &firstPCMFrameInFLACFrame, &lastPCMFrameInFLACFrame);
+
+ pcmFrameCountInThisFrame = (lastPCMFrameInFLACFrame - firstPCMFrameInFLACFrame) + 1;
+
+ /* If we are seeking to the end of the file and we've just hit it, we're done. */
+ if (pcmFrameIndex == pFlac->totalPCMFrameCount && (runningPCMFrameCount + pcmFrameCountInThisFrame) == pFlac->totalPCMFrameCount) {
+ drflac_result result = drflac__decode_flac_frame(pFlac);
+ if (result == DRFLAC_SUCCESS) {
+ pFlac->currentPCMFrame = pcmFrameIndex;
+ pFlac->currentFLACFrame.pcmFramesRemaining = 0;
+ return DRFLAC_TRUE;
+ } else {
+ return DRFLAC_FALSE;
+ }
+ }
+
+ if (pcmFrameIndex < (runningPCMFrameCount + pcmFrameCountInThisFrame)) {
+ /*
+ The sample should be in this FLAC frame. We need to fully decode it, however if it's an invalid frame (a CRC mismatch), we need to pretend
+ it never existed and keep iterating.
+ */
+ drflac_result result = drflac__decode_flac_frame(pFlac);
+ if (result == DRFLAC_SUCCESS) {
+ /* The frame is valid. We just need to skip over some samples to ensure it's sample-exact. */
+ drflac_uint64 pcmFramesToDecode = (size_t)(pcmFrameIndex - runningPCMFrameCount); /* <-- Safe cast because the maximum number of samples in a frame is 65535. */
+ if (pcmFramesToDecode == 0) {
+ return DRFLAC_TRUE;
+ }
+
+ pFlac->currentPCMFrame = runningPCMFrameCount;
+
+ return drflac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; /* <-- If this fails, something bad has happened (it should never fail). */
+ } else {
+ if (result == DRFLAC_CRC_MISMATCH) {
+ continue; /* CRC mismatch. Pretend this frame never existed. */
+ } else {
+ return DRFLAC_FALSE;
+ }
+ }
+ } else {
+ /*
+ It's not in this frame. We need to seek past the frame, but check if there was a CRC mismatch. If so, we pretend this
+ frame never existed and leave the running sample count untouched.
+ */
+ drflac_result result = drflac__seek_to_next_flac_frame(pFlac);
+ if (result == DRFLAC_SUCCESS) {
+ runningPCMFrameCount += pcmFrameCountInThisFrame;
+ } else {
+ if (result == DRFLAC_CRC_MISMATCH) {
+ continue; /* CRC mismatch. Pretend this frame never existed. */
+ } else {
+ return DRFLAC_FALSE;
+ }
+ }
+ }
+ }
+}
+
+
+
+drflac_bool32 drflac__init_private__ogg(drflac_init_info* pInit, drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, void* pUserData, void* pUserDataMD, drflac_bool32 relaxed)
+{
+ drflac_ogg_page_header header;
+ drflac_uint32 crc32 = DRFLAC_OGG_CAPTURE_PATTERN_CRC32;
+ drflac_uint32 bytesRead = 0;
+
+ /* Pre Condition: The bit stream should be sitting just past the 4-byte OggS capture pattern. */
+ (void)relaxed;
+
+ pInit->container = drflac_container_ogg;
+ pInit->oggFirstBytePos = 0;
+
+ /*
+ We'll get here if the first 4 bytes of the stream were the OggS capture pattern, however it doesn't necessarily mean the
+ stream includes FLAC encoded audio. To check for this we need to scan the beginning-of-stream page markers and check if
+ any match the FLAC specification. Important to keep in mind that the stream may be multiplexed.
+ */
+ if (drflac_ogg__read_page_header_after_capture_pattern(onRead, pUserData, &header, &bytesRead, &crc32) != DRFLAC_SUCCESS) {
+ return DRFLAC_FALSE;
+ }
+ pInit->runningFilePos += bytesRead;
+
+ for (;;) {
+ int pageBodySize;
+
+ /* Break if we're past the beginning of stream page. */
+ if ((header.headerType & 0x02) == 0) {
+ return DRFLAC_FALSE;
+ }
+
+ /* Check if it's a FLAC header. */
+ pageBodySize = drflac_ogg__get_page_body_size(&header);
+ if (pageBodySize == 51) { /* 51 = the lacing value of the FLAC header packet. */
+ /* It could be a FLAC page... */
+ drflac_uint32 bytesRemainingInPage = pageBodySize;
+ drflac_uint8 packetType;
+
+ if (onRead(pUserData, &packetType, 1) != 1) {
+ return DRFLAC_FALSE;
+ }
+
+ bytesRemainingInPage -= 1;
+ if (packetType == 0x7F) {
+ /* Increasingly more likely to be a FLAC page... */
+ drflac_uint8 sig[4];
+ if (onRead(pUserData, sig, 4) != 4) {
+ return DRFLAC_FALSE;
+ }
+
+ bytesRemainingInPage -= 4;
+ if (sig[0] == 'F' && sig[1] == 'L' && sig[2] == 'A' && sig[3] == 'C') {
+ /* Almost certainly a FLAC page... */
+ drflac_uint8 mappingVersion[2];
+ if (onRead(pUserData, mappingVersion, 2) != 2) {
+ return DRFLAC_FALSE;
+ }
+
+ if (mappingVersion[0] != 1) {
+ return DRFLAC_FALSE; /* Only supporting version 1.x of the Ogg mapping. */
+ }
+
+ /*
+ The next 2 bytes are the non-audio packets, not including this one. We don't care about this because we're going to
+ be handling it in a generic way based on the serial number and packet types.
+ */
+ if (!onSeek(pUserData, 2, drflac_seek_origin_current)) {
+ return DRFLAC_FALSE;
+ }
+
+ /* Expecting the native FLAC signature "fLaC". */
+ if (onRead(pUserData, sig, 4) != 4) {
+ return DRFLAC_FALSE;
+ }
+
+ if (sig[0] == 'f' && sig[1] == 'L' && sig[2] == 'a' && sig[3] == 'C') {
+ /* The remaining data in the page should be the STREAMINFO block. */
+ drflac_streaminfo streaminfo;
+ drflac_uint8 isLastBlock;
+ drflac_uint8 blockType;
+ drflac_uint32 blockSize;
+ if (!drflac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize)) {
+ return DRFLAC_FALSE;
+ }
+
+ if (blockType != DRFLAC_METADATA_BLOCK_TYPE_STREAMINFO || blockSize != 34) {
+ return DRFLAC_FALSE; /* Invalid block type. First block must be the STREAMINFO block. */
+ }
+
+ if (drflac__read_streaminfo(onRead, pUserData, &streaminfo)) {
+ /* Success! */
+ pInit->hasStreamInfoBlock = DRFLAC_TRUE;
+ pInit->sampleRate = streaminfo.sampleRate;
+ pInit->channels = streaminfo.channels;
+ pInit->bitsPerSample = streaminfo.bitsPerSample;
+ pInit->totalPCMFrameCount = streaminfo.totalPCMFrameCount;
+ pInit->maxBlockSizeInPCMFrames = streaminfo.maxBlockSizeInPCMFrames;
+ pInit->hasMetadataBlocks = !isLastBlock;
+
+ if (onMeta) {
+ drflac_metadata metadata;
+ metadata.type = DRFLAC_METADATA_BLOCK_TYPE_STREAMINFO;
+ metadata.pRawData = NULL;
+ metadata.rawDataSize = 0;
+ metadata.data.streaminfo = streaminfo;
+ onMeta(pUserDataMD, &metadata);
+ }
+
+ pInit->runningFilePos += pageBodySize;
+ pInit->oggFirstBytePos = pInit->runningFilePos - 79; /* Subtracting 79 will place us right on top of the "OggS" identifier of the FLAC bos page. */
+ pInit->oggSerial = header.serialNumber;
+ pInit->oggBosHeader = header;
+ break;
+ } else {
+ /* Failed to read STREAMINFO block. Aww, so close... */
+ return DRFLAC_FALSE;
+ }
+ } else {
+ /* Invalid file. */
+ return DRFLAC_FALSE;
+ }
+ } else {
+ /* Not a FLAC header. Skip it. */
+ if (!onSeek(pUserData, bytesRemainingInPage, drflac_seek_origin_current)) {
+ return DRFLAC_FALSE;
+ }
+ }
+ } else {
+ /* Not a FLAC header. Seek past the entire page and move on to the next. */
+ if (!onSeek(pUserData, bytesRemainingInPage, drflac_seek_origin_current)) {
+ return DRFLAC_FALSE;
+ }
+ }
+ } else {
+ if (!onSeek(pUserData, pageBodySize, drflac_seek_origin_current)) {
+ return DRFLAC_FALSE;
+ }
+ }
+
+ pInit->runningFilePos += pageBodySize;
+
+
+ /* Read the header of the next page. */
+ if (drflac_ogg__read_page_header(onRead, pUserData, &header, &bytesRead, &crc32) != DRFLAC_SUCCESS) {
+ return DRFLAC_FALSE;
+ }
+ pInit->runningFilePos += bytesRead;
+ }
+
+ /*
+ If we get here it means we found a FLAC audio stream. We should be sitting on the first byte of the header of the next page. The next
+ packets in the FLAC logical stream contain the metadata. The only thing left to do in the initialization phase for Ogg is to create the
+    Ogg bitstream object.
+ */
+ pInit->hasMetadataBlocks = DRFLAC_TRUE; /* <-- Always have at least VORBIS_COMMENT metadata block. */
+ return DRFLAC_TRUE;
+}
+#endif
+
+drflac_bool32 drflac__init_private(drflac_init_info* pInit, drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, drflac_container container, void* pUserData, void* pUserDataMD)
+{
+ drflac_bool32 relaxed;
+ drflac_uint8 id[4];
+
+ if (pInit == NULL || onRead == NULL || onSeek == NULL) {
+ return DRFLAC_FALSE;
+ }
+
+ DRFLAC_ZERO_MEMORY(pInit, sizeof(*pInit));
+ pInit->onRead = onRead;
+ pInit->onSeek = onSeek;
+ pInit->onMeta = onMeta;
+ pInit->container = container;
+ pInit->pUserData = pUserData;
+ pInit->pUserDataMD = pUserDataMD;
+
+ pInit->bs.onRead = onRead;
+ pInit->bs.onSeek = onSeek;
+ pInit->bs.pUserData = pUserData;
+ drflac__reset_cache(&pInit->bs);
+
+
+ /* If the container is explicitly defined then we can try opening in relaxed mode. */
+ relaxed = container != drflac_container_unknown;
+
+ /* Skip over any ID3 tags. */
+ for (;;) {
+ if (onRead(pUserData, id, 4) != 4) {
+ return DRFLAC_FALSE; /* Ran out of data. */
+ }
+ pInit->runningFilePos += 4;
+
+ if (id[0] == 'I' && id[1] == 'D' && id[2] == '3') {
+ drflac_uint8 header[6];
+ drflac_uint8 flags;
+ drflac_uint32 headerSize;
+
+ if (onRead(pUserData, header, 6) != 6) {
+ return DRFLAC_FALSE; /* Ran out of data. */
+ }
+ pInit->runningFilePos += 6;
+
+ flags = header[1];
+
+ DRFLAC_COPY_MEMORY(&headerSize, header+2, 4);
+ headerSize = drflac__unsynchsafe_32(drflac__be2host_32(headerSize));
+ if (flags & 0x10) {
+ headerSize += 10;
+ }
+
+ if (!onSeek(pUserData, headerSize, drflac_seek_origin_current)) {
+ return DRFLAC_FALSE; /* Failed to seek past the tag. */
+ }
+ pInit->runningFilePos += headerSize;
+ } else {
+ break;
+ }
+ }
+
+ if (id[0] == 'f' && id[1] == 'L' && id[2] == 'a' && id[3] == 'C') {
+ return drflac__init_private__native(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed);
+ }
+#ifndef DR_FLAC_NO_OGG
+ if (id[0] == 'O' && id[1] == 'g' && id[2] == 'g' && id[3] == 'S') {
+ return drflac__init_private__ogg(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed);
+ }
+#endif
+
+ /* If we get here it means we likely don't have a header. Try opening in relaxed mode, if applicable. */
+ if (relaxed) {
+ if (container == drflac_container_native) {
+ return drflac__init_private__native(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed);
+ }
+#ifndef DR_FLAC_NO_OGG
+ if (container == drflac_container_ogg) {
+ return drflac__init_private__ogg(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed);
+ }
+#endif
+ }
+
+ /* Unsupported container. */
+ return DRFLAC_FALSE;
+}
+
+void drflac__init_from_info(drflac* pFlac, const drflac_init_info* pInit)
+{
+ DRFLAC_ASSERT(pFlac != NULL);
+ DRFLAC_ASSERT(pInit != NULL);
+
+ DRFLAC_ZERO_MEMORY(pFlac, sizeof(*pFlac));
+ pFlac->bs = pInit->bs;
+ pFlac->onMeta = pInit->onMeta;
+ pFlac->pUserDataMD = pInit->pUserDataMD;
+ pFlac->maxBlockSizeInPCMFrames = pInit->maxBlockSizeInPCMFrames;
+ pFlac->sampleRate = pInit->sampleRate;
+ pFlac->channels = (drflac_uint8)pInit->channels;
+ pFlac->bitsPerSample = (drflac_uint8)pInit->bitsPerSample;
+ pFlac->totalPCMFrameCount = pInit->totalPCMFrameCount;
+ pFlac->container = pInit->container;
+}
+
+
+drflac* drflac_open_with_metadata_private(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, drflac_container container, void* pUserData, void* pUserDataMD, const drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ drflac_init_info init;
+ drflac_uint32 allocationSize;
+ drflac_uint32 wholeSIMDVectorCountPerChannel;
+ drflac_uint32 decodedSamplesAllocationSize;
+#ifndef DR_FLAC_NO_OGG
+ drflac_oggbs oggbs;
+#endif
+ drflac_uint64 firstFramePos;
+ drflac_uint64 seektablePos;
+ drflac_uint32 seektableSize;
+ drflac_allocation_callbacks allocationCallbacks;
+ drflac* pFlac;
+
+ /* CPU support first. */
+ drflac__init_cpu_caps();
+
+ if (!drflac__init_private(&init, onRead, onSeek, onMeta, container, pUserData, pUserDataMD)) {
+ return NULL;
+ }
+
+ if (pAllocationCallbacks != NULL) {
+ allocationCallbacks = *pAllocationCallbacks;
+ if (allocationCallbacks.onFree == NULL || (allocationCallbacks.onMalloc == NULL && allocationCallbacks.onRealloc == NULL)) {
+ return NULL; /* Invalid allocation callbacks. */
+ }
+ } else {
+ allocationCallbacks.pUserData = NULL;
+ allocationCallbacks.onMalloc = drflac__malloc_default;
+ allocationCallbacks.onRealloc = drflac__realloc_default;
+ allocationCallbacks.onFree = drflac__free_default;
+ }
+
+
+ /*
+ The size of the allocation for the drflac object needs to be large enough to fit the following:
+ 1) The main members of the drflac structure
+ 2) A block of memory large enough to store the decoded samples of the largest frame in the stream
+ 3) If the container is Ogg, a drflac_oggbs object
+
+    The complicated part of the allocation is making sure there's enough room for the decoded samples, taking into consideration
+ the different SIMD instruction sets.
+ */
+ allocationSize = sizeof(drflac);
+
+ /*
+ The allocation size for decoded frames depends on the number of 32-bit integers that fit inside the largest SIMD vector
+ we are supporting.
+ */
+ if ((init.maxBlockSizeInPCMFrames % (DRFLAC_MAX_SIMD_VECTOR_SIZE / sizeof(drflac_int32))) == 0) {
+ wholeSIMDVectorCountPerChannel = (init.maxBlockSizeInPCMFrames / (DRFLAC_MAX_SIMD_VECTOR_SIZE / sizeof(drflac_int32)));
+ } else {
+ wholeSIMDVectorCountPerChannel = (init.maxBlockSizeInPCMFrames / (DRFLAC_MAX_SIMD_VECTOR_SIZE / sizeof(drflac_int32))) + 1;
+ }
+
+ decodedSamplesAllocationSize = wholeSIMDVectorCountPerChannel * DRFLAC_MAX_SIMD_VECTOR_SIZE * init.channels;
+
+ allocationSize += decodedSamplesAllocationSize;
+ allocationSize += DRFLAC_MAX_SIMD_VECTOR_SIZE; /* Allocate extra bytes to ensure we have enough for alignment. */
+
+#ifndef DR_FLAC_NO_OGG
+ /* There's additional data required for Ogg streams. */
+ if (init.container == drflac_container_ogg) {
+ allocationSize += sizeof(drflac_oggbs);
+ }
+
+ DRFLAC_ZERO_MEMORY(&oggbs, sizeof(oggbs));
+ if (init.container == drflac_container_ogg) {
+ oggbs.onRead = onRead;
+ oggbs.onSeek = onSeek;
+ oggbs.pUserData = pUserData;
+ oggbs.currentBytePos = init.oggFirstBytePos;
+ oggbs.firstBytePos = init.oggFirstBytePos;
+ oggbs.serialNumber = init.oggSerial;
+ oggbs.bosPageHeader = init.oggBosHeader;
+ oggbs.bytesRemainingInPage = 0;
+ }
+#endif
+
+ /*
+ This part is a bit awkward. We need to load the seektable so that it can be referenced in-memory, but I want the drflac object to
+    consist of only a single heap allocation. To do this, the size of the seek table needs to be known, which we determine when reading
+ and decoding the metadata.
+ */
+ firstFramePos = 42; /* <-- We know we are at byte 42 at this point. */
+ seektablePos = 0;
+ seektableSize = 0;
+ if (init.hasMetadataBlocks) {
+ drflac_read_proc onReadOverride = onRead;
+ drflac_seek_proc onSeekOverride = onSeek;
+ void* pUserDataOverride = pUserData;
+
+#ifndef DR_FLAC_NO_OGG
+ if (init.container == drflac_container_ogg) {
+ onReadOverride = drflac__on_read_ogg;
+ onSeekOverride = drflac__on_seek_ogg;
+ pUserDataOverride = (void*)&oggbs;
+ }
+#endif
+
+ if (!drflac__read_and_decode_metadata(onReadOverride, onSeekOverride, onMeta, pUserDataOverride, pUserDataMD, &firstFramePos, &seektablePos, &seektableSize, &allocationCallbacks)) {
+ return NULL;
+ }
+
+ allocationSize += seektableSize;
+ }
+
+
+ pFlac = (drflac*)drflac__malloc_from_callbacks(allocationSize, &allocationCallbacks);
+ if (pFlac == NULL) {
+ return NULL;
+ }
+
+ drflac__init_from_info(pFlac, &init);
+ pFlac->allocationCallbacks = allocationCallbacks;
+ pFlac->pDecodedSamples = (drflac_int32*)drflac_align((size_t)pFlac->pExtraData, DRFLAC_MAX_SIMD_VECTOR_SIZE);
+
+#ifndef DR_FLAC_NO_OGG
+ if (init.container == drflac_container_ogg) {
+ drflac_oggbs* pInternalOggbs = (drflac_oggbs*)((drflac_uint8*)pFlac->pDecodedSamples + decodedSamplesAllocationSize + seektableSize);
+ *pInternalOggbs = oggbs;
+
+        /* The Ogg bitstream needs to be layered on top of the original bitstream. */
+ pFlac->bs.onRead = drflac__on_read_ogg;
+ pFlac->bs.onSeek = drflac__on_seek_ogg;
+ pFlac->bs.pUserData = (void*)pInternalOggbs;
+ pFlac->_oggbs = (void*)pInternalOggbs;
+ }
+#endif
+
+ pFlac->firstFLACFramePosInBytes = firstFramePos;
+
+ /* NOTE: Seektables are not currently compatible with Ogg encapsulation (Ogg has its own accelerated seeking system). I may change this later, so I'm leaving this here for now. */
+#ifndef DR_FLAC_NO_OGG
+ if (init.container == drflac_container_ogg)
+ {
+ pFlac->pSeekpoints = NULL;
+ pFlac->seekpointCount = 0;
+ }
+ else
+#endif
+ {
+ /* If we have a seektable we need to load it now, making sure we move back to where we were previously. */
+ if (seektablePos != 0) {
+ pFlac->seekpointCount = seektableSize / sizeof(*pFlac->pSeekpoints);
+ pFlac->pSeekpoints = (drflac_seekpoint*)((drflac_uint8*)pFlac->pDecodedSamples + decodedSamplesAllocationSize);
+
+ DRFLAC_ASSERT(pFlac->bs.onSeek != NULL);
+ DRFLAC_ASSERT(pFlac->bs.onRead != NULL);
+
+ /* Seek to the seektable, then just read directly into our seektable buffer. */
+ if (pFlac->bs.onSeek(pFlac->bs.pUserData, (int)seektablePos, drflac_seek_origin_start)) {
+ if (pFlac->bs.onRead(pFlac->bs.pUserData, pFlac->pSeekpoints, seektableSize) == seektableSize) {
+ /* Endian swap. */
+ drflac_uint32 iSeekpoint;
+ for (iSeekpoint = 0; iSeekpoint < pFlac->seekpointCount; ++iSeekpoint) {
+ pFlac->pSeekpoints[iSeekpoint].firstPCMFrame = drflac__be2host_64(pFlac->pSeekpoints[iSeekpoint].firstPCMFrame);
+ pFlac->pSeekpoints[iSeekpoint].flacFrameOffset = drflac__be2host_64(pFlac->pSeekpoints[iSeekpoint].flacFrameOffset);
+ pFlac->pSeekpoints[iSeekpoint].pcmFrameCount = drflac__be2host_16(pFlac->pSeekpoints[iSeekpoint].pcmFrameCount);
+ }
+ } else {
+ /* Failed to read the seektable. Pretend we don't have one. */
+ pFlac->pSeekpoints = NULL;
+ pFlac->seekpointCount = 0;
+ }
+
+ /* We need to seek back to where we were. If this fails it's a critical error. */
+ if (!pFlac->bs.onSeek(pFlac->bs.pUserData, (int)pFlac->firstFLACFramePosInBytes, drflac_seek_origin_start)) {
+ drflac__free_from_callbacks(pFlac, &allocationCallbacks);
+ return NULL;
+ }
+ } else {
+ /* Failed to seek to the seektable. Ominous sign, but for now we can just pretend we don't have one. */
+ pFlac->pSeekpoints = NULL;
+ pFlac->seekpointCount = 0;
+ }
+ }
+ }
+
+
+ /*
+ If we get here, but don't have a STREAMINFO block, it means we've opened the stream in relaxed mode and need to decode
+ the first frame.
+ */
+ if (!init.hasStreamInfoBlock) {
+ pFlac->currentFLACFrame.header = init.firstFrameHeader;
+ do
+ {
+ drflac_result result = drflac__decode_flac_frame(pFlac);
+ if (result == DRFLAC_SUCCESS) {
+ break;
+ } else {
+ if (result == DRFLAC_CRC_MISMATCH) {
+ if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) {
+ drflac__free_from_callbacks(pFlac, &allocationCallbacks);
+ return NULL;
+ }
+ continue;
+ } else {
+ drflac__free_from_callbacks(pFlac, &allocationCallbacks);
+ return NULL;
+ }
+ }
+ } while (1);
+ }
+
+ return pFlac;
+}
+
+
+
+#ifndef DR_FLAC_NO_STDIO
+#include &lt;stdio.h&gt;
+
+static size_t drflac__on_read_stdio(void* pUserData, void* bufferOut, size_t bytesToRead)
+{
+ return fread(bufferOut, 1, bytesToRead, (FILE*)pUserData);
+}
+
+static drflac_bool32 drflac__on_seek_stdio(void* pUserData, int offset, drflac_seek_origin origin)
+{
+ DRFLAC_ASSERT(offset >= 0); /* <-- Never seek backwards. */
+
+ return fseek((FILE*)pUserData, offset, (origin == drflac_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0;
+}
+
+static FILE* drflac__fopen(const char* filename)
+{
+ FILE* pFile;
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+ if (fopen_s(&pFile, filename, "rb") != 0) {
+ return NULL;
+ }
+#else
+ pFile = fopen(filename, "rb");
+ if (pFile == NULL) {
+ return NULL;
+ }
+#endif
+
+ return pFile;
+}
+
+
+drflac* drflac_open_file(const char* filename, const drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ drflac* pFlac;
+ FILE* pFile;
+
+ pFile = drflac__fopen(filename);
+ if (pFile == NULL) {
+ return NULL;
+ }
+
+ pFlac = drflac_open(drflac__on_read_stdio, drflac__on_seek_stdio, (void*)pFile, pAllocationCallbacks);
+ if (pFlac == NULL) {
+ fclose(pFile);
+ return NULL;
+ }
+
+ return pFlac;
+}
+
+drflac* drflac_open_file_with_metadata(const char* filename, drflac_meta_proc onMeta, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ drflac* pFlac;
+ FILE* pFile;
+
+ pFile = drflac__fopen(filename);
+ if (pFile == NULL) {
+ return NULL;
+ }
+
+ pFlac = drflac_open_with_metadata_private(drflac__on_read_stdio, drflac__on_seek_stdio, onMeta, drflac_container_unknown, (void*)pFile, pUserData, pAllocationCallbacks);
+ if (pFlac == NULL) {
+ fclose(pFile);
+ return pFlac;
+ }
+
+ return pFlac;
+}
+#endif /* DR_FLAC_NO_STDIO */
+
+static size_t drflac__on_read_memory(void* pUserData, void* bufferOut, size_t bytesToRead)
+{
+ drflac__memory_stream* memoryStream = (drflac__memory_stream*)pUserData;
+ size_t bytesRemaining;
+
+ DRFLAC_ASSERT(memoryStream != NULL);
+ DRFLAC_ASSERT(memoryStream->dataSize >= memoryStream->currentReadPos);
+
+ bytesRemaining = memoryStream->dataSize - memoryStream->currentReadPos;
+ if (bytesToRead > bytesRemaining) {
+ bytesToRead = bytesRemaining;
+ }
+
+ if (bytesToRead > 0) {
+ DRFLAC_COPY_MEMORY(bufferOut, memoryStream->data + memoryStream->currentReadPos, bytesToRead);
+ memoryStream->currentReadPos += bytesToRead;
+ }
+
+ return bytesToRead;
+}
+
+static drflac_bool32 drflac__on_seek_memory(void* pUserData, int offset, drflac_seek_origin origin)
+{
+ drflac__memory_stream* memoryStream = (drflac__memory_stream*)pUserData;
+
+ DRFLAC_ASSERT(memoryStream != NULL);
+ DRFLAC_ASSERT(offset >= 0); /* <-- Never seek backwards. */
+
+ if (offset > (drflac_int64)memoryStream->dataSize) {
+ return DRFLAC_FALSE;
+ }
+
+ if (origin == drflac_seek_origin_current) {
+ if (memoryStream->currentReadPos + offset <= memoryStream->dataSize) {
+ memoryStream->currentReadPos += offset;
+ } else {
+ return DRFLAC_FALSE; /* Trying to seek too far forward. */
+ }
} else {
- drflac_streaminfo streaminfo;
- if (!drflac__read_streaminfo(onRead, pUserData, &streaminfo)) {
- return DRFLAC_FALSE;
+ if ((drflac_uint32)offset <= memoryStream->dataSize) {
+ memoryStream->currentReadPos = offset;
+ } else {
+ return DRFLAC_FALSE; /* Trying to seek too far forward. */
}
+ }
- pInit->hasStreamInfoBlock = DRFLAC_TRUE;
- pInit->sampleRate = streaminfo.sampleRate;
- pInit->channels = streaminfo.channels;
- pInit->bitsPerSample = streaminfo.bitsPerSample;
- pInit->totalSampleCount = streaminfo.totalSampleCount;
- pInit->maxBlockSize = streaminfo.maxBlockSize; /* Don't care about the min block size - only the max (used for determining the size of the memory allocation). */
- pInit->hasMetadataBlocks = !isLastBlock;
+ return DRFLAC_TRUE;
+}
- if (onMeta) {
- drflac_metadata metadata;
- metadata.type = DRFLAC_METADATA_BLOCK_TYPE_STREAMINFO;
- metadata.pRawData = NULL;
- metadata.rawDataSize = 0;
- metadata.data.streaminfo = streaminfo;
- onMeta(pUserDataMD, &metadata);
- }
+drflac* drflac_open_memory(const void* data, size_t dataSize, const drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ drflac__memory_stream memoryStream;
+ drflac* pFlac;
- return DRFLAC_TRUE;
+ memoryStream.data = (const unsigned char*)data;
+ memoryStream.dataSize = dataSize;
+ memoryStream.currentReadPos = 0;
+ pFlac = drflac_open(drflac__on_read_memory, drflac__on_seek_memory, &memoryStream, pAllocationCallbacks);
+ if (pFlac == NULL) {
+ return NULL;
+ }
+
+ pFlac->memoryStream = memoryStream;
+
+ /* This is an awful hack... */
+#ifndef DR_FLAC_NO_OGG
+ if (pFlac->container == drflac_container_ogg)
+ {
+ drflac_oggbs* oggbs = (drflac_oggbs*)pFlac->_oggbs;
+ oggbs->pUserData = &pFlac->memoryStream;
+ }
+ else
+#endif
+ {
+ pFlac->bs.pUserData = &pFlac->memoryStream;
}
+
+ return pFlac;
}
+drflac* drflac_open_memory_with_metadata(const void* data, size_t dataSize, drflac_meta_proc onMeta, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ drflac__memory_stream memoryStream;
+ drflac* pFlac;
+
+ memoryStream.data = (const unsigned char*)data;
+ memoryStream.dataSize = dataSize;
+ memoryStream.currentReadPos = 0;
+ pFlac = drflac_open_with_metadata_private(drflac__on_read_memory, drflac__on_seek_memory, onMeta, drflac_container_unknown, &memoryStream, pUserData, pAllocationCallbacks);
+ if (pFlac == NULL) {
+ return NULL;
+ }
+
+ pFlac->memoryStream = memoryStream;
+
+ /* This is an awful hack... */
#ifndef DR_FLAC_NO_OGG
-#define DRFLAC_OGG_MAX_PAGE_SIZE 65307
-#define DRFLAC_OGG_CAPTURE_PATTERN_CRC32 1605413199 /* CRC-32 of "OggS". */
+ if (pFlac->container == drflac_container_ogg)
+ {
+ drflac_oggbs* oggbs = (drflac_oggbs*)pFlac->_oggbs;
+ oggbs->pUserData = &pFlac->memoryStream;
+ }
+ else
+#endif
+ {
+ pFlac->bs.pUserData = &pFlac->memoryStream;
+ }
-typedef enum
+ return pFlac;
+}
+
+
+
+drflac* drflac_open(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks)
{
- drflac_ogg_recover_on_crc_mismatch,
- drflac_ogg_fail_on_crc_mismatch
-} drflac_ogg_crc_mismatch_recovery;
+ return drflac_open_with_metadata_private(onRead, onSeek, NULL, drflac_container_unknown, pUserData, pUserData, pAllocationCallbacks);
+}
+drflac* drflac_open_relaxed(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_container container, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ return drflac_open_with_metadata_private(onRead, onSeek, NULL, container, pUserData, pUserData, pAllocationCallbacks);
+}
-#ifndef DR_FLAC_NO_CRC
-static drflac_uint32 drflac__crc32_table[] = {
- 0x00000000L, 0x04C11DB7L, 0x09823B6EL, 0x0D4326D9L,
- 0x130476DCL, 0x17C56B6BL, 0x1A864DB2L, 0x1E475005L,
- 0x2608EDB8L, 0x22C9F00FL, 0x2F8AD6D6L, 0x2B4BCB61L,
- 0x350C9B64L, 0x31CD86D3L, 0x3C8EA00AL, 0x384FBDBDL,
- 0x4C11DB70L, 0x48D0C6C7L, 0x4593E01EL, 0x4152FDA9L,
- 0x5F15ADACL, 0x5BD4B01BL, 0x569796C2L, 0x52568B75L,
- 0x6A1936C8L, 0x6ED82B7FL, 0x639B0DA6L, 0x675A1011L,
- 0x791D4014L, 0x7DDC5DA3L, 0x709F7B7AL, 0x745E66CDL,
- 0x9823B6E0L, 0x9CE2AB57L, 0x91A18D8EL, 0x95609039L,
- 0x8B27C03CL, 0x8FE6DD8BL, 0x82A5FB52L, 0x8664E6E5L,
- 0xBE2B5B58L, 0xBAEA46EFL, 0xB7A96036L, 0xB3687D81L,
- 0xAD2F2D84L, 0xA9EE3033L, 0xA4AD16EAL, 0xA06C0B5DL,
- 0xD4326D90L, 0xD0F37027L, 0xDDB056FEL, 0xD9714B49L,
- 0xC7361B4CL, 0xC3F706FBL, 0xCEB42022L, 0xCA753D95L,
- 0xF23A8028L, 0xF6FB9D9FL, 0xFBB8BB46L, 0xFF79A6F1L,
- 0xE13EF6F4L, 0xE5FFEB43L, 0xE8BCCD9AL, 0xEC7DD02DL,
- 0x34867077L, 0x30476DC0L, 0x3D044B19L, 0x39C556AEL,
- 0x278206ABL, 0x23431B1CL, 0x2E003DC5L, 0x2AC12072L,
- 0x128E9DCFL, 0x164F8078L, 0x1B0CA6A1L, 0x1FCDBB16L,
- 0x018AEB13L, 0x054BF6A4L, 0x0808D07DL, 0x0CC9CDCAL,
- 0x7897AB07L, 0x7C56B6B0L, 0x71159069L, 0x75D48DDEL,
- 0x6B93DDDBL, 0x6F52C06CL, 0x6211E6B5L, 0x66D0FB02L,
- 0x5E9F46BFL, 0x5A5E5B08L, 0x571D7DD1L, 0x53DC6066L,
- 0x4D9B3063L, 0x495A2DD4L, 0x44190B0DL, 0x40D816BAL,
- 0xACA5C697L, 0xA864DB20L, 0xA527FDF9L, 0xA1E6E04EL,
- 0xBFA1B04BL, 0xBB60ADFCL, 0xB6238B25L, 0xB2E29692L,
- 0x8AAD2B2FL, 0x8E6C3698L, 0x832F1041L, 0x87EE0DF6L,
- 0x99A95DF3L, 0x9D684044L, 0x902B669DL, 0x94EA7B2AL,
- 0xE0B41DE7L, 0xE4750050L, 0xE9362689L, 0xEDF73B3EL,
- 0xF3B06B3BL, 0xF771768CL, 0xFA325055L, 0xFEF34DE2L,
- 0xC6BCF05FL, 0xC27DEDE8L, 0xCF3ECB31L, 0xCBFFD686L,
- 0xD5B88683L, 0xD1799B34L, 0xDC3ABDEDL, 0xD8FBA05AL,
- 0x690CE0EEL, 0x6DCDFD59L, 0x608EDB80L, 0x644FC637L,
- 0x7A089632L, 0x7EC98B85L, 0x738AAD5CL, 0x774BB0EBL,
- 0x4F040D56L, 0x4BC510E1L, 0x46863638L, 0x42472B8FL,
- 0x5C007B8AL, 0x58C1663DL, 0x558240E4L, 0x51435D53L,
- 0x251D3B9EL, 0x21DC2629L, 0x2C9F00F0L, 0x285E1D47L,
- 0x36194D42L, 0x32D850F5L, 0x3F9B762CL, 0x3B5A6B9BL,
- 0x0315D626L, 0x07D4CB91L, 0x0A97ED48L, 0x0E56F0FFL,
- 0x1011A0FAL, 0x14D0BD4DL, 0x19939B94L, 0x1D528623L,
- 0xF12F560EL, 0xF5EE4BB9L, 0xF8AD6D60L, 0xFC6C70D7L,
- 0xE22B20D2L, 0xE6EA3D65L, 0xEBA91BBCL, 0xEF68060BL,
- 0xD727BBB6L, 0xD3E6A601L, 0xDEA580D8L, 0xDA649D6FL,
- 0xC423CD6AL, 0xC0E2D0DDL, 0xCDA1F604L, 0xC960EBB3L,
- 0xBD3E8D7EL, 0xB9FF90C9L, 0xB4BCB610L, 0xB07DABA7L,
- 0xAE3AFBA2L, 0xAAFBE615L, 0xA7B8C0CCL, 0xA379DD7BL,
- 0x9B3660C6L, 0x9FF77D71L, 0x92B45BA8L, 0x9675461FL,
- 0x8832161AL, 0x8CF30BADL, 0x81B02D74L, 0x857130C3L,
- 0x5D8A9099L, 0x594B8D2EL, 0x5408ABF7L, 0x50C9B640L,
- 0x4E8EE645L, 0x4A4FFBF2L, 0x470CDD2BL, 0x43CDC09CL,
- 0x7B827D21L, 0x7F436096L, 0x7200464FL, 0x76C15BF8L,
- 0x68860BFDL, 0x6C47164AL, 0x61043093L, 0x65C52D24L,
- 0x119B4BE9L, 0x155A565EL, 0x18197087L, 0x1CD86D30L,
- 0x029F3D35L, 0x065E2082L, 0x0B1D065BL, 0x0FDC1BECL,
- 0x3793A651L, 0x3352BBE6L, 0x3E119D3FL, 0x3AD08088L,
- 0x2497D08DL, 0x2056CD3AL, 0x2D15EBE3L, 0x29D4F654L,
- 0xC5A92679L, 0xC1683BCEL, 0xCC2B1D17L, 0xC8EA00A0L,
- 0xD6AD50A5L, 0xD26C4D12L, 0xDF2F6BCBL, 0xDBEE767CL,
- 0xE3A1CBC1L, 0xE760D676L, 0xEA23F0AFL, 0xEEE2ED18L,
- 0xF0A5BD1DL, 0xF464A0AAL, 0xF9278673L, 0xFDE69BC4L,
- 0x89B8FD09L, 0x8D79E0BEL, 0x803AC667L, 0x84FBDBD0L,
- 0x9ABC8BD5L, 0x9E7D9662L, 0x933EB0BBL, 0x97FFAD0CL,
- 0xAFB010B1L, 0xAB710D06L, 0xA6322BDFL, 0xA2F33668L,
- 0xBCB4666DL, 0xB8757BDAL, 0xB5365D03L, 0xB1F740B4L
-};
+drflac* drflac_open_with_metadata(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ return drflac_open_with_metadata_private(onRead, onSeek, onMeta, drflac_container_unknown, pUserData, pUserData, pAllocationCallbacks);
+}
+drflac* drflac_open_with_metadata_relaxed(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, drflac_container container, void* pUserData, const drflac_allocation_callbacks* pAllocationCallbacks)
+{
+ return drflac_open_with_metadata_private(onRead, onSeek, onMeta, container, pUserData, pUserData, pAllocationCallbacks);
+}
+
+void drflac_close(drflac* pFlac)
+{
+ if (pFlac == NULL) {
+ return;
+ }
+
+#ifndef DR_FLAC_NO_STDIO
+ /*
+ If we opened the file with drflac_open_file() we will want to close the file handle. We can know whether or not drflac_open_file()
+ was used by looking at the callbacks.
+ */
+ if (pFlac->bs.onRead == drflac__on_read_stdio) {
+ fclose((FILE*)pFlac->bs.pUserData);
+ }
+
+#ifndef DR_FLAC_NO_OGG
+ /* Need to clean up Ogg streams a bit differently due to the way the bit streaming is chained. */
+ if (pFlac->container == drflac_container_ogg) {
+ drflac_oggbs* oggbs = (drflac_oggbs*)pFlac->_oggbs;
+ DRFLAC_ASSERT(pFlac->bs.onRead == drflac__on_read_ogg);
+
+ if (oggbs->onRead == drflac__on_read_stdio) {
+ fclose((FILE*)oggbs->pUserData);
+ }
+ }
+#endif
#endif
-static DRFLAC_INLINE drflac_uint32 drflac_crc32_byte(drflac_uint32 crc32, drflac_uint8 data)
+ drflac__free_from_callbacks(pFlac, &pFlac->allocationCallbacks);
+}
+
+
+#if 0
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_left_side__reference(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
-#ifndef DR_FLAC_NO_CRC
- return (crc32 << 8) ^ drflac__crc32_table[(drflac_uint8)((crc32 >> 24) & 0xFF) ^ data];
-#else
- (void)data;
- return crc32;
+ drflac_uint64 i;
+ for (i = 0; i < frameCount; ++i) {
+ drflac_int32 left = pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ drflac_int32 side = pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+ drflac_int32 right = left - side;
+
+ pOutputSamples[i*2+0] = left;
+ pOutputSamples[i*2+1] = right;
+ }
+}
#endif
+
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_left_side__scalar(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
+{
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
+
+ drflac_int32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ for (i = 0; i < frameCount4; ++i) {
+ drflac_int32 left0 = pInputSamples0[i*4+0] << shift0;
+ drflac_int32 left1 = pInputSamples0[i*4+1] << shift0;
+ drflac_int32 left2 = pInputSamples0[i*4+2] << shift0;
+ drflac_int32 left3 = pInputSamples0[i*4+3] << shift0;
+
+ drflac_int32 side0 = pInputSamples1[i*4+0] << shift1;
+ drflac_int32 side1 = pInputSamples1[i*4+1] << shift1;
+ drflac_int32 side2 = pInputSamples1[i*4+2] << shift1;
+ drflac_int32 side3 = pInputSamples1[i*4+3] << shift1;
+
+ drflac_int32 right0 = left0 - side0;
+ drflac_int32 right1 = left1 - side1;
+ drflac_int32 right2 = left2 - side2;
+ drflac_int32 right3 = left3 - side3;
+
+ pOutputSamples[i*8+0] = left0;
+ pOutputSamples[i*8+1] = right0;
+ pOutputSamples[i*8+2] = left1;
+ pOutputSamples[i*8+3] = right1;
+ pOutputSamples[i*8+4] = left2;
+ pOutputSamples[i*8+5] = right2;
+ pOutputSamples[i*8+6] = left3;
+ pOutputSamples[i*8+7] = right3;
+ }
+
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ int left = pInputSamples0[i] << shift0;
+ int side = pInputSamples1[i] << shift1;
+ int right = left - side;
+
+ pOutputSamples[i*2+0] = left;
+ pOutputSamples[i*2+1] = right;
+ }
}
-#if 0
-static DRFLAC_INLINE drflac_uint32 drflac_crc32_uint32(drflac_uint32 crc32, drflac_uint32 data)
+#if defined(DRFLAC_SUPPORT_SSE2)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_left_side__sse2(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- crc32 = drflac_crc32_byte(crc32, (drflac_uint8)((data >> 24) & 0xFF));
- crc32 = drflac_crc32_byte(crc32, (drflac_uint8)((data >> 16) & 0xFF));
- crc32 = drflac_crc32_byte(crc32, (drflac_uint8)((data >> 8) & 0xFF));
- crc32 = drflac_crc32_byte(crc32, (drflac_uint8)((data >> 0) & 0xFF));
- return crc32;
+ drflac_uint64 frameCount4;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
+ drflac_uint64 i;
+
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
+
+ frameCount4 = frameCount >> 2;
+
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+
+ for (i = 0; i < frameCount4; ++i) {
+ __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0);
+ __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1);
+ __m128i right = _mm_sub_epi32(left, side);
+
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right));
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right));
+ }
+
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 left = pInputSamples0[i] << shift0;
+ drflac_int32 side = pInputSamples1[i] << shift1;
+ drflac_int32 right = left - side;
+
+ pOutputSamples[i*2+0] = left;
+ pOutputSamples[i*2+1] = right;
+ }
}
+#endif
-static DRFLAC_INLINE drflac_uint32 drflac_crc32_uint64(drflac_uint32 crc32, drflac_uint64 data)
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_left_side__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- crc32 = drflac_crc32_uint32(crc32, (drflac_uint32)((data >> 32) & 0xFFFFFFFF));
- crc32 = drflac_crc32_uint32(crc32, (drflac_uint32)((data >> 0) & 0xFFFFFFFF));
- return crc32;
+ drflac_uint64 frameCount4;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
+ drflac_uint64 i;
+ int32x4_t shift0_4;
+ int32x4_t shift1_4;
+
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
+
+ frameCount4 = frameCount >> 2;
+
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+
+ shift0_4 = vdupq_n_s32(shift0);
+ shift1_4 = vdupq_n_s32(shift1);
+
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t left;
+ int32x4_t side;
+ int32x4_t right;
+
+ left = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), shift0_4);
+ side = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), shift1_4);
+ right = vsubq_s32(left, side);
+
+ drflac__vst2q_s32(pOutputSamples + i*8, vzipq_s32(left, right));
+ }
+
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 left = pInputSamples0[i] << shift0;
+ drflac_int32 side = pInputSamples1[i] << shift1;
+ drflac_int32 right = left - side;
+
+ pOutputSamples[i*2+0] = left;
+ pOutputSamples[i*2+1] = right;
+ }
}
#endif
-static DRFLAC_INLINE drflac_uint32 drflac_crc32_buffer(drflac_uint32 crc32, drflac_uint8* pData, drflac_uint32 dataSize)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_left_side(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- /* This can be optimized. */
- drflac_uint32 i;
- for (i = 0; i < dataSize; ++i) {
- crc32 = drflac_crc32_byte(crc32, pData[i]);
+#if defined(DRFLAC_SUPPORT_SSE2)
+ if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s32__decode_left_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s32__decode_left_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#endif
+ {
+ /* Scalar fallback. */
+#if 0
+ drflac_read_pcm_frames_s32__decode_left_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#else
+ drflac_read_pcm_frames_s32__decode_left_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#endif
}
- return crc32;
}
-static DRFLAC_INLINE drflac_bool32 drflac_ogg__is_capture_pattern(drflac_uint8 pattern[4])
+#if 0
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_right_side__reference(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- return pattern[0] == 'O' && pattern[1] == 'g' && pattern[2] == 'g' && pattern[3] == 'S';
+ drflac_uint64 i;
+ for (i = 0; i < frameCount; ++i) {
+ drflac_int32 side = pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ drflac_int32 right = pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+ drflac_int32 left = right + side;
+
+ pOutputSamples[i*2+0] = left;
+ pOutputSamples[i*2+1] = right;
+ }
}
+#endif
-static DRFLAC_INLINE drflac_uint32 drflac_ogg__get_page_header_size(drflac_ogg_page_header* pHeader)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_right_side__scalar(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- return 27 + pHeader->segmentCount;
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
+
+ drflac_int32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ for (i = 0; i < frameCount4; ++i) {
+ drflac_int32 side0 = pInputSamples0[i*4+0] << shift0;
+ drflac_int32 side1 = pInputSamples0[i*4+1] << shift0;
+ drflac_int32 side2 = pInputSamples0[i*4+2] << shift0;
+ drflac_int32 side3 = pInputSamples0[i*4+3] << shift0;
+
+ drflac_int32 right0 = pInputSamples1[i*4+0] << shift1;
+ drflac_int32 right1 = pInputSamples1[i*4+1] << shift1;
+ drflac_int32 right2 = pInputSamples1[i*4+2] << shift1;
+ drflac_int32 right3 = pInputSamples1[i*4+3] << shift1;
+
+ drflac_int32 left0 = right0 + side0;
+ drflac_int32 left1 = right1 + side1;
+ drflac_int32 left2 = right2 + side2;
+ drflac_int32 left3 = right3 + side3;
+
+ pOutputSamples[i*8+0] = left0;
+ pOutputSamples[i*8+1] = right0;
+ pOutputSamples[i*8+2] = left1;
+ pOutputSamples[i*8+3] = right1;
+ pOutputSamples[i*8+4] = left2;
+ pOutputSamples[i*8+5] = right2;
+ pOutputSamples[i*8+6] = left3;
+ pOutputSamples[i*8+7] = right3;
+ }
+
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 side = pInputSamples0[i] << shift0;
+ drflac_int32 right = pInputSamples1[i] << shift1;
+ drflac_int32 left = right + side;
+
+ pOutputSamples[i*2+0] = left;
+ pOutputSamples[i*2+1] = right;
+ }
}
-static DRFLAC_INLINE drflac_uint32 drflac_ogg__get_page_body_size(drflac_ogg_page_header* pHeader)
+#if defined(DRFLAC_SUPPORT_SSE2)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_right_side__sse2(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- drflac_uint32 pageBodySize = 0;
- int i;
+ drflac_uint64 frameCount4;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
+ drflac_uint64 i;
- for (i = 0; i < pHeader->segmentCount; ++i) {
- pageBodySize += pHeader->segmentTable[i];
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
+
+ frameCount4 = frameCount >> 2;
+
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+
+ for (i = 0; i < frameCount4; ++i) {
+ __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0);
+ __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1);
+ __m128i left = _mm_add_epi32(right, side);
+
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right));
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right));
}
- return pageBodySize;
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 side = pInputSamples0[i] << shift0;
+ drflac_int32 right = pInputSamples1[i] << shift1;
+ drflac_int32 left = right + side;
+
+ pOutputSamples[i*2+0] = left;
+ pOutputSamples[i*2+1] = right;
+ }
}
+#endif
-drflac_result drflac_ogg__read_page_header_after_capture_pattern(drflac_read_proc onRead, void* pUserData, drflac_ogg_page_header* pHeader, drflac_uint32* pBytesRead, drflac_uint32* pCRC32)
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_right_side__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- drflac_uint8 data[23];
- drflac_uint32 i;
+ drflac_uint64 frameCount4;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
+ drflac_uint64 i;
+ int32x4_t shift0_4;
+ int32x4_t shift1_4;
- drflac_assert(*pCRC32 == DRFLAC_OGG_CAPTURE_PATTERN_CRC32);
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
- if (onRead(pUserData, data, 23) != 23) {
- return DRFLAC_END_OF_STREAM;
- }
- *pBytesRead += 23;
+ frameCount4 = frameCount >> 2;
- pHeader->structureVersion = data[0];
- pHeader->headerType = data[1];
- drflac_copy_memory(&pHeader->granulePosition, &data[ 2], 8);
- drflac_copy_memory(&pHeader->serialNumber, &data[10], 4);
- drflac_copy_memory(&pHeader->sequenceNumber, &data[14], 4);
- drflac_copy_memory(&pHeader->checksum, &data[18], 4);
- pHeader->segmentCount = data[22];
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
- /* Calculate the CRC. Note that for the calculation the checksum part of the page needs to be set to 0. */
- data[18] = 0;
- data[19] = 0;
- data[20] = 0;
- data[21] = 0;
+ shift0_4 = vdupq_n_s32(shift0);
+ shift1_4 = vdupq_n_s32(shift1);
- for (i = 0; i < 23; ++i) {
- *pCRC32 = drflac_crc32_byte(*pCRC32, data[i]);
- }
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t side;
+ int32x4_t right;
+ int32x4_t left;
+ side = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), shift0_4);
+ right = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), shift1_4);
+ left = vaddq_s32(right, side);
- if (onRead(pUserData, pHeader->segmentTable, pHeader->segmentCount) != pHeader->segmentCount) {
- return DRFLAC_END_OF_STREAM;
+ drflac__vst2q_s32(pOutputSamples + i*8, vzipq_s32(left, right));
}
- *pBytesRead += pHeader->segmentCount;
- for (i = 0; i < pHeader->segmentCount; ++i) {
- *pCRC32 = drflac_crc32_byte(*pCRC32, pHeader->segmentTable[i]);
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 side = pInputSamples0[i] << shift0;
+ drflac_int32 right = pInputSamples1[i] << shift1;
+ drflac_int32 left = right + side;
+
+ pOutputSamples[i*2+0] = left;
+ pOutputSamples[i*2+1] = right;
}
+}
+#endif
- return DRFLAC_SUCCESS;
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_right_side(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
+{
+#if defined(DRFLAC_SUPPORT_SSE2)
+ if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s32__decode_right_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s32__decode_right_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#endif
+ {
+ /* Scalar fallback. */
+#if 0
+ drflac_read_pcm_frames_s32__decode_right_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#else
+ drflac_read_pcm_frames_s32__decode_right_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#endif
+ }
}
-drflac_result drflac_ogg__read_page_header(drflac_read_proc onRead, void* pUserData, drflac_ogg_page_header* pHeader, drflac_uint32* pBytesRead, drflac_uint32* pCRC32)
+
+#if 0
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_mid_side__reference(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- drflac_uint8 id[4];
+ for (drflac_uint64 i = 0; i < frameCount; ++i) {
+ int mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ int side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
- *pBytesRead = 0;
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
- if (onRead(pUserData, id, 4) != 4) {
- return DRFLAC_END_OF_STREAM;
+ pOutputSamples[i*2+0] = ((mid + side) >> 1) << unusedBitsPerSample;
+ pOutputSamples[i*2+1] = ((mid - side) >> 1) << unusedBitsPerSample;
}
- *pBytesRead += 4;
+}
+#endif
- /* We need to read byte-by-byte until we find the OggS capture pattern. */
- for (;;) {
- if (drflac_ogg__is_capture_pattern(id)) {
- drflac_result result;
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_mid_side__scalar(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
+{
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
- *pCRC32 = DRFLAC_OGG_CAPTURE_PATTERN_CRC32;
+ drflac_int32 shift = unusedBitsPerSample;
+ if (shift > 0) {
+ shift -= 1;
+ for (i = 0; i < frameCount4; ++i) {
+ drflac_int32 temp0L;
+ drflac_int32 temp1L;
+ drflac_int32 temp2L;
+ drflac_int32 temp3L;
+ drflac_int32 temp0R;
+ drflac_int32 temp1R;
+ drflac_int32 temp2R;
+ drflac_int32 temp3R;
+
+ drflac_int32 mid0 = pInputSamples0[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid1 = pInputSamples0[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid2 = pInputSamples0[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid3 = pInputSamples0[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+
+ drflac_int32 side0 = pInputSamples1[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side1 = pInputSamples1[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side2 = pInputSamples1[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side3 = pInputSamples1[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
- result = drflac_ogg__read_page_header_after_capture_pattern(onRead, pUserData, pHeader, pBytesRead, pCRC32);
- if (result == DRFLAC_SUCCESS) {
- return DRFLAC_SUCCESS;
- } else {
- if (result == DRFLAC_CRC_MISMATCH) {
- continue;
- } else {
- return result;
- }
- }
- } else {
- /* The first 4 bytes did not equal the capture pattern. Read the next byte and try again. */
- id[0] = id[1];
- id[1] = id[2];
- id[2] = id[3];
- if (onRead(pUserData, &id[3], 1) != 1) {
- return DRFLAC_END_OF_STREAM;
- }
- *pBytesRead += 1;
+ mid0 = (((drflac_uint32)mid0) << 1) | (side0 & 0x01);
+ mid1 = (((drflac_uint32)mid1) << 1) | (side1 & 0x01);
+ mid2 = (((drflac_uint32)mid2) << 1) | (side2 & 0x01);
+ mid3 = (((drflac_uint32)mid3) << 1) | (side3 & 0x01);
+
+ temp0L = ((mid0 + side0) << shift);
+ temp1L = ((mid1 + side1) << shift);
+ temp2L = ((mid2 + side2) << shift);
+ temp3L = ((mid3 + side3) << shift);
+
+ temp0R = ((mid0 - side0) << shift);
+ temp1R = ((mid1 - side1) << shift);
+ temp2R = ((mid2 - side2) << shift);
+ temp3R = ((mid3 - side3) << shift);
+
+ pOutputSamples[i*8+0] = temp0L;
+ pOutputSamples[i*8+1] = temp0R;
+ pOutputSamples[i*8+2] = temp1L;
+ pOutputSamples[i*8+3] = temp1R;
+ pOutputSamples[i*8+4] = temp2L;
+ pOutputSamples[i*8+5] = temp2R;
+ pOutputSamples[i*8+6] = temp3L;
+ pOutputSamples[i*8+7] = temp3R;
+ }
+ } else {
+ for (i = 0; i < frameCount4; ++i) {
+ drflac_int32 temp0L;
+ drflac_int32 temp1L;
+ drflac_int32 temp2L;
+ drflac_int32 temp3L;
+ drflac_int32 temp0R;
+ drflac_int32 temp1R;
+ drflac_int32 temp2R;
+ drflac_int32 temp3R;
+
+ drflac_int32 mid0 = pInputSamples0[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid1 = pInputSamples0[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid2 = pInputSamples0[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid3 = pInputSamples0[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+
+ drflac_int32 side0 = pInputSamples1[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side1 = pInputSamples1[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side2 = pInputSamples1[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side3 = pInputSamples1[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
+ mid0 = (((drflac_uint32)mid0) << 1) | (side0 & 0x01);
+ mid1 = (((drflac_uint32)mid1) << 1) | (side1 & 0x01);
+ mid2 = (((drflac_uint32)mid2) << 1) | (side2 & 0x01);
+ mid3 = (((drflac_uint32)mid3) << 1) | (side3 & 0x01);
+
+ temp0L = ((mid0 + side0) >> 1);
+ temp1L = ((mid1 + side1) >> 1);
+ temp2L = ((mid2 + side2) >> 1);
+ temp3L = ((mid3 + side3) >> 1);
+
+ temp0R = ((mid0 - side0) >> 1);
+ temp1R = ((mid1 - side1) >> 1);
+ temp2R = ((mid2 - side2) >> 1);
+ temp3R = ((mid3 - side3) >> 1);
+
+ pOutputSamples[i*8+0] = temp0L;
+ pOutputSamples[i*8+1] = temp0R;
+ pOutputSamples[i*8+2] = temp1L;
+ pOutputSamples[i*8+3] = temp1R;
+ pOutputSamples[i*8+4] = temp2L;
+ pOutputSamples[i*8+5] = temp2R;
+ pOutputSamples[i*8+6] = temp3L;
+ pOutputSamples[i*8+7] = temp3R;
}
}
-}
-
-/*
-The main part of the Ogg encapsulation is the conversion from the physical Ogg bitstream to the native FLAC bitstream. It works
-in three general stages: Ogg Physical Bitstream -> Ogg/FLAC Logical Bitstream -> FLAC Native Bitstream. dr_flac is designed
-in such a way that the core sections assume everything is delivered in native format. Therefore, for each encapsulation type
-dr_flac is supporting there needs to be a layer sitting on top of the onRead and onSeek callbacks that ensures the bits read from
-the physical Ogg bitstream are converted and delivered in native FLAC format.
-*/
-typedef struct
-{
- drflac_read_proc onRead; /* The original onRead callback from drflac_open() and family. */
- drflac_seek_proc onSeek; /* The original onSeek callback from drflac_open() and family. */
- void* pUserData; /* The user data passed on onRead and onSeek. This is the user data that was passed on drflac_open() and family. */
- drflac_uint64 currentBytePos; /* The position of the byte we are sitting on in the physical byte stream. Used for efficient seeking. */
- drflac_uint64 firstBytePos; /* The position of the first byte in the physical bitstream. Points to the start of the "OggS" identifier of the FLAC bos page. */
- drflac_uint32 serialNumber; /* The serial number of the FLAC audio pages. This is determined by the initial header page that was read during initialization. */
- drflac_ogg_page_header bosPageHeader; /* Used for seeking. */
- drflac_ogg_page_header currentPageHeader;
- drflac_uint32 bytesRemainingInPage;
- drflac_uint32 pageDataSize;
- drflac_uint8 pageData[DRFLAC_OGG_MAX_PAGE_SIZE];
-} drflac_oggbs; /* oggbs = Ogg Bitstream */
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
-static size_t drflac_oggbs__read_physical(drflac_oggbs* oggbs, void* bufferOut, size_t bytesToRead)
-{
- size_t bytesActuallyRead = oggbs->onRead(oggbs->pUserData, bufferOut, bytesToRead);
- oggbs->currentBytePos += bytesActuallyRead;
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
- return bytesActuallyRead;
+ pOutputSamples[i*2+0] = ((mid + side) >> 1) << unusedBitsPerSample;
+ pOutputSamples[i*2+1] = ((mid - side) >> 1) << unusedBitsPerSample;
+ }
}
-static drflac_bool32 drflac_oggbs__seek_physical(drflac_oggbs* oggbs, drflac_uint64 offset, drflac_seek_origin origin)
+#if defined(DRFLAC_SUPPORT_SSE2)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_mid_side__sse2(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- if (origin == drflac_seek_origin_start) {
- if (offset <= 0x7FFFFFFF) {
- if (!oggbs->onSeek(oggbs->pUserData, (int)offset, drflac_seek_origin_start)) {
- return DRFLAC_FALSE;
- }
- oggbs->currentBytePos = offset;
+ drflac_uint64 i;
+ drflac_uint64 frameCount4;
+ int shift;
- return DRFLAC_TRUE;
- } else {
- if (!oggbs->onSeek(oggbs->pUserData, 0x7FFFFFFF, drflac_seek_origin_start)) {
- return DRFLAC_FALSE;
- }
- oggbs->currentBytePos = offset;
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
- return drflac_oggbs__seek_physical(oggbs, offset - 0x7FFFFFFF, drflac_seek_origin_current);
+ frameCount4 = frameCount >> 2;
+
+ shift = unusedBitsPerSample;
+ if (shift == 0) {
+ for (i = 0; i < frameCount4; ++i) {
+ __m128i mid;
+ __m128i side;
+ __m128i left;
+ __m128i right;
+
+ mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+
+ mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01)));
+
+ left = _mm_srai_epi32(_mm_add_epi32(mid, side), 1);
+ right = _mm_srai_epi32(_mm_sub_epi32(mid, side), 1);
+
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right));
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right));
}
- } else {
- while (offset > 0x7FFFFFFF) {
- if (!oggbs->onSeek(oggbs->pUserData, 0x7FFFFFFF, drflac_seek_origin_current)) {
- return DRFLAC_FALSE;
- }
- oggbs->currentBytePos += 0x7FFFFFFF;
- offset -= 0x7FFFFFFF;
+
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
+
+ pOutputSamples[i*2+0] = ((mid + side) >> 1);
+ pOutputSamples[i*2+1] = ((mid - side) >> 1);
}
+ } else {
+ shift -= 1;
+ for (i = 0; i < frameCount4; ++i) {
+ __m128i mid;
+ __m128i side;
+ __m128i left;
+ __m128i right;
- if (!oggbs->onSeek(oggbs->pUserData, (int)offset, drflac_seek_origin_current)) { /* <-- Safe cast thanks to the loop above. */
- return DRFLAC_FALSE;
+ mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+
+ mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01)));
+
+ left = _mm_slli_epi32(_mm_add_epi32(mid, side), shift);
+ right = _mm_slli_epi32(_mm_sub_epi32(mid, side), shift);
+
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right));
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right));
}
- oggbs->currentBytePos += offset;
- return DRFLAC_TRUE;
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
+
+ pOutputSamples[i*2+0] = ((mid + side) << shift);
+ pOutputSamples[i*2+1] = ((mid - side) << shift);
+ }
}
}
+#endif
-static drflac_bool32 drflac_oggbs__goto_next_page(drflac_oggbs* oggbs, drflac_ogg_crc_mismatch_recovery recoveryMethod)
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_mid_side__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- drflac_ogg_page_header header;
- for (;;) {
- drflac_uint32 crc32 = 0;
- drflac_uint32 bytesRead;
- drflac_uint32 pageBodySize;
-#ifndef DR_FLAC_NO_CRC
- drflac_uint32 actualCRC32;
-#endif
+ drflac_uint64 i;
+ drflac_uint64 frameCount4;
+ int shift;
+ int32x4_t wbpsShift0_4; /* wbps = Wasted Bits Per Sample */
+ int32x4_t wbpsShift1_4; /* wbps = Wasted Bits Per Sample */
+ int32x4_t one4;
- if (drflac_ogg__read_page_header(oggbs->onRead, oggbs->pUserData, &header, &bytesRead, &crc32) != DRFLAC_SUCCESS) {
- return DRFLAC_FALSE;
- }
- oggbs->currentBytePos += bytesRead;
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
- pageBodySize = drflac_ogg__get_page_body_size(&header);
- if (pageBodySize > DRFLAC_OGG_MAX_PAGE_SIZE) {
- continue; /* Invalid page size. Assume it's corrupted and just move to the next page. */
- }
+ frameCount4 = frameCount >> 2;
- if (header.serialNumber != oggbs->serialNumber) {
- /* It's not a FLAC page. Skip it. */
- if (pageBodySize > 0 && !drflac_oggbs__seek_physical(oggbs, pageBodySize, drflac_seek_origin_current)) {
- return DRFLAC_FALSE;
- }
- continue;
- }
+ wbpsShift0_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ wbpsShift1_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+ one4 = vdupq_n_s32(1);
+ shift = unusedBitsPerSample;
+ if (shift == 0) {
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t mid;
+ int32x4_t side;
+ int32x4_t left;
+ int32x4_t right;
- /* We need to read the entire page and then do a CRC check on it. If there's a CRC mismatch we need to skip this page. */
- if (drflac_oggbs__read_physical(oggbs, oggbs->pageData, pageBodySize) != pageBodySize) {
- return DRFLAC_FALSE;
- }
- oggbs->pageDataSize = pageBodySize;
+ mid = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), wbpsShift0_4);
+ side = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), wbpsShift1_4);
-#ifndef DR_FLAC_NO_CRC
- actualCRC32 = drflac_crc32_buffer(crc32, oggbs->pageData, oggbs->pageDataSize);
- if (actualCRC32 != header.checksum) {
- if (recoveryMethod == drflac_ogg_recover_on_crc_mismatch) {
- continue; /* CRC mismatch. Skip this page. */
- } else {
- /*
- Even though we are failing on a CRC mismatch, we still want our stream to be in a good state. Therefore we
- go to the next valid page to ensure we're in a good state, but return false to let the caller know that the
- seek did not fully complete.
- */
- drflac_oggbs__goto_next_page(oggbs, drflac_ogg_recover_on_crc_mismatch);
- return DRFLAC_FALSE;
- }
+ mid = vorrq_s32(vshlq_n_s32(mid, 1), vandq_s32(side, one4));
+
+ left = vshrq_n_s32(vaddq_s32(mid, side), 1);
+ right = vshrq_n_s32(vsubq_s32(mid, side), 1);
+
+ drflac__vst2q_s32(pOutputSamples + i*8, vzipq_s32(left, right));
}
-#else
- (void)recoveryMethod; /* <-- Silence a warning. */
-#endif
- oggbs->currentPageHeader = header;
- oggbs->bytesRemainingInPage = pageBodySize;
- return DRFLAC_TRUE;
- }
-}
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
-/* Function below is unused at the moment, but I might be re-adding it later. */
-#if 0
-static drflac_uint8 drflac_oggbs__get_current_segment_index(drflac_oggbs* oggbs, drflac_uint8* pBytesRemainingInSeg)
-{
- drflac_uint32 bytesConsumedInPage = drflac_ogg__get_page_body_size(&oggbs->currentPageHeader) - oggbs->bytesRemainingInPage;
- drflac_uint8 iSeg = 0;
- drflac_uint32 iByte = 0;
- while (iByte < bytesConsumedInPage) {
- drflac_uint8 segmentSize = oggbs->currentPageHeader.segmentTable[iSeg];
- if (iByte + segmentSize > bytesConsumedInPage) {
- break;
- } else {
- iSeg += 1;
- iByte += segmentSize;
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
+
+ pOutputSamples[i*2+0] = ((mid + side) >> 1);
+ pOutputSamples[i*2+1] = ((mid - side) >> 1);
}
- }
+ } else {
+ int32x4_t shift4;
- *pBytesRemainingInSeg = oggbs->currentPageHeader.segmentTable[iSeg] - (drflac_uint8)(bytesConsumedInPage - iByte);
- return iSeg;
-}
+ shift -= 1;
+ shift4 = vdupq_n_s32(shift);
-static drflac_bool32 drflac_oggbs__seek_to_next_packet(drflac_oggbs* oggbs)
-{
- /* The current packet ends when we get to the segment with a lacing value of < 255 which is not at the end of a page. */
- for (;;) {
- drflac_bool32 atEndOfPage = DRFLAC_FALSE;
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t mid;
+ int32x4_t side;
+ int32x4_t left;
+ int32x4_t right;
- drflac_uint8 bytesRemainingInSeg;
- drflac_uint8 iFirstSeg = drflac_oggbs__get_current_segment_index(oggbs, &bytesRemainingInSeg);
+ mid = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), wbpsShift0_4);
+ side = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), wbpsShift1_4);
- drflac_uint32 bytesToEndOfPacketOrPage = bytesRemainingInSeg;
- for (drflac_uint8 iSeg = iFirstSeg; iSeg < oggbs->currentPageHeader.segmentCount; ++iSeg) {
- drflac_uint8 segmentSize = oggbs->currentPageHeader.segmentTable[iSeg];
- if (segmentSize < 255) {
- if (iSeg == oggbs->currentPageHeader.segmentCount-1) {
- atEndOfPage = DRFLAC_TRUE;
- }
+ mid = vorrq_s32(vshlq_n_s32(mid, 1), vandq_s32(side, one4));
- break;
- }
+ left = vshlq_s32(vaddq_s32(mid, side), shift4);
+ right = vshlq_s32(vsubq_s32(mid, side), shift4);
- bytesToEndOfPacketOrPage += segmentSize;
+ drflac__vst2q_s32(pOutputSamples + i*8, vzipq_s32(left, right));
}
- /*
- At this point we will have found either the packet or the end of the page. If were at the end of the page we'll
- want to load the next page and keep searching for the end of the packet.
- */
- drflac_oggbs__seek_physical(oggbs, bytesToEndOfPacketOrPage, drflac_seek_origin_current);
- oggbs->bytesRemainingInPage -= bytesToEndOfPacketOrPage;
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
- if (atEndOfPage) {
- /*
- We're potentially at the next packet, but we need to check the next page first to be sure because the packet may
- straddle pages.
- */
- if (!drflac_oggbs__goto_next_page(oggbs)) {
- return DRFLAC_FALSE;
- }
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
- /* If it's a fresh packet it most likely means we're at the next packet. */
- if ((oggbs->currentPageHeader.headerType & 0x01) == 0) {
- return DRFLAC_TRUE;
- }
- } else {
- /* We're at the next packet. */
- return DRFLAC_TRUE;
+ pOutputSamples[i*2+0] = ((mid + side) << shift);
+ pOutputSamples[i*2+1] = ((mid - side) << shift);
}
}
}
-
-static drflac_bool32 drflac_oggbs__seek_to_next_frame(drflac_oggbs* oggbs)
-{
- /* The bitstream should be sitting on the first byte just after the header of the frame. */
-
- /* What we're actually doing here is seeking to the start of the next packet. */
- return drflac_oggbs__seek_to_next_packet(oggbs);
-}
#endif
-static size_t drflac__on_read_ogg(void* pUserData, void* bufferOut, size_t bytesToRead)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_mid_side(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- drflac_oggbs* oggbs = (drflac_oggbs*)pUserData;
- drflac_uint8* pRunningBufferOut = (drflac_uint8*)bufferOut;
- size_t bytesRead = 0;
-
- drflac_assert(oggbs != NULL);
- drflac_assert(pRunningBufferOut != NULL);
+#if defined(DRFLAC_SUPPORT_SSE2)
+ if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s32__decode_mid_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s32__decode_mid_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#endif
+ {
+ /* Scalar fallback. */
+#if 0
+ drflac_read_pcm_frames_s32__decode_mid_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#else
+ drflac_read_pcm_frames_s32__decode_mid_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#endif
+ }
+}
- /* Reading is done page-by-page. If we've run out of bytes in the page we need to move to the next one. */
- while (bytesRead < bytesToRead) {
- size_t bytesRemainingToRead = bytesToRead - bytesRead;
- if (oggbs->bytesRemainingInPage >= bytesRemainingToRead) {
- drflac_copy_memory(pRunningBufferOut, oggbs->pageData + (oggbs->pageDataSize - oggbs->bytesRemainingInPage), bytesRemainingToRead);
- bytesRead += bytesRemainingToRead;
- oggbs->bytesRemainingInPage -= (drflac_uint32)bytesRemainingToRead;
- break;
- }
+#if 0
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_independent_stereo__reference(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
+{
+ for (drflac_uint64 i = 0; i < frameCount; ++i) {
+ pOutputSamples[i*2+0] = (pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample));
+ pOutputSamples[i*2+1] = (pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample));
+ }
+}
+#endif
- /* If we get here it means some of the requested data is contained in the next pages. */
- if (oggbs->bytesRemainingInPage > 0) {
- drflac_copy_memory(pRunningBufferOut, oggbs->pageData + (oggbs->pageDataSize - oggbs->bytesRemainingInPage), oggbs->bytesRemainingInPage);
- bytesRead += oggbs->bytesRemainingInPage;
- pRunningBufferOut += oggbs->bytesRemainingInPage;
- oggbs->bytesRemainingInPage = 0;
- }
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_independent_stereo__scalar(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
+{
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
- drflac_assert(bytesRemainingToRead > 0);
- if (!drflac_oggbs__goto_next_page(oggbs, drflac_ogg_recover_on_crc_mismatch)) {
- break; /* Failed to go to the next page. Might have simply hit the end of the stream. */
- }
+ drflac_int32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ drflac_int32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+
+ for (i = 0; i < frameCount4; ++i) {
+ drflac_int32 tempL0 = pInputSamples0[i*4+0] << shift0;
+ drflac_int32 tempL1 = pInputSamples0[i*4+1] << shift0;
+ drflac_int32 tempL2 = pInputSamples0[i*4+2] << shift0;
+ drflac_int32 tempL3 = pInputSamples0[i*4+3] << shift0;
+
+ drflac_int32 tempR0 = pInputSamples1[i*4+0] << shift1;
+ drflac_int32 tempR1 = pInputSamples1[i*4+1] << shift1;
+ drflac_int32 tempR2 = pInputSamples1[i*4+2] << shift1;
+ drflac_int32 tempR3 = pInputSamples1[i*4+3] << shift1;
+
+ pOutputSamples[i*8+0] = tempL0;
+ pOutputSamples[i*8+1] = tempR0;
+ pOutputSamples[i*8+2] = tempL1;
+ pOutputSamples[i*8+3] = tempR1;
+ pOutputSamples[i*8+4] = tempL2;
+ pOutputSamples[i*8+5] = tempR2;
+ pOutputSamples[i*8+6] = tempL3;
+ pOutputSamples[i*8+7] = tempR3;
}
- return bytesRead;
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ pOutputSamples[i*2+0] = (pInputSamples0[i] << shift0);
+ pOutputSamples[i*2+1] = (pInputSamples1[i] << shift1);
+ }
}
-static drflac_bool32 drflac__on_seek_ogg(void* pUserData, int offset, drflac_seek_origin origin)
+#if defined(DRFLAC_SUPPORT_SSE2)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_independent_stereo__sse2(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- drflac_oggbs* oggbs = (drflac_oggbs*)pUserData;
- int bytesSeeked = 0;
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
- drflac_assert(oggbs != NULL);
- drflac_assert(offset >= 0); /* <-- Never seek backwards. */
+ int shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ int shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
- /* Seeking is always forward which makes things a lot simpler. */
- if (origin == drflac_seek_origin_start) {
- if (!drflac_oggbs__seek_physical(oggbs, (int)oggbs->firstBytePos, drflac_seek_origin_start)) {
- return DRFLAC_FALSE;
- }
+ for (i = 0; i < frameCount4; ++i) {
+ __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0);
+ __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1);
- if (!drflac_oggbs__goto_next_page(oggbs, drflac_ogg_fail_on_crc_mismatch)) {
- return DRFLAC_FALSE;
- }
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right));
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right));
+ }
- return drflac__on_seek_ogg(pUserData, offset, drflac_seek_origin_current);
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ pOutputSamples[i*2+0] = (pInputSamples0[i] << shift0);
+ pOutputSamples[i*2+1] = (pInputSamples1[i] << shift1);
}
+}
+#endif
- drflac_assert(origin == drflac_seek_origin_current);
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_independent_stereo__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
+{
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
- while (bytesSeeked < offset) {
- int bytesRemainingToSeek = offset - bytesSeeked;
- drflac_assert(bytesRemainingToSeek >= 0);
+ int shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ int shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
- if (oggbs->bytesRemainingInPage >= (size_t)bytesRemainingToSeek) {
- bytesSeeked += bytesRemainingToSeek;
- oggbs->bytesRemainingInPage -= bytesRemainingToSeek;
- break;
- }
+ int32x4_t shift4_0 = vdupq_n_s32(shift0);
+ int32x4_t shift4_1 = vdupq_n_s32(shift1);
- /* If we get here it means some of the requested data is contained in the next pages. */
- if (oggbs->bytesRemainingInPage > 0) {
- bytesSeeked += (int)oggbs->bytesRemainingInPage;
- oggbs->bytesRemainingInPage = 0;
- }
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t left;
+ int32x4_t right;
- drflac_assert(bytesRemainingToSeek > 0);
- if (!drflac_oggbs__goto_next_page(oggbs, drflac_ogg_fail_on_crc_mismatch)) {
- /* Failed to go to the next page. We either hit the end of the stream or had a CRC mismatch. */
- return DRFLAC_FALSE;
- }
+ left = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), shift4_0);
+ right = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), shift4_1);
+
+ drflac__vst2q_s32(pOutputSamples + i*8, vzipq_s32(left, right));
}
- return DRFLAC_TRUE;
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ pOutputSamples[i*2+0] = (pInputSamples0[i] << shift0);
+ pOutputSamples[i*2+1] = (pInputSamples1[i] << shift1);
+ }
}
+#endif
-drflac_bool32 drflac_ogg__seek_to_sample(drflac* pFlac, drflac_uint64 sampleIndex)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s32__decode_independent_stereo(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int32* pOutputSamples)
{
- drflac_oggbs* oggbs = (drflac_oggbs*)pFlac->_oggbs;
- drflac_uint64 originalBytePos;
- drflac_uint64 runningGranulePosition;
- drflac_uint64 runningFrameBytePos;
- drflac_uint64 runningSampleCount;
+#if defined(DRFLAC_SUPPORT_SSE2)
+ if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s32__decode_independent_stereo__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s32__decode_independent_stereo__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#endif
+ {
+ /* Scalar fallback. */
+#if 0
+ drflac_read_pcm_frames_s32__decode_independent_stereo__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#else
+ drflac_read_pcm_frames_s32__decode_independent_stereo__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#endif
+ }
+}
- drflac_assert(oggbs != NULL);
- originalBytePos = oggbs->currentBytePos; /* For recovery. */
+drflac_uint64 drflac_read_pcm_frames_s32(drflac* pFlac, drflac_uint64 framesToRead, drflac_int32* pBufferOut)
+{
+ drflac_uint64 framesRead;
+ drflac_int32 unusedBitsPerSample;
- /* First seek to the first frame. */
- if (!drflac__seek_to_byte(&pFlac->bs, pFlac->firstFramePos)) {
- return DRFLAC_FALSE;
+ if (pFlac == NULL || framesToRead == 0) {
+ return 0;
}
- oggbs->bytesRemainingInPage = 0;
-
- runningGranulePosition = 0;
- runningFrameBytePos = oggbs->currentBytePos; /* <-- Points to the OggS identifier. */
- for (;;) {
- if (!drflac_oggbs__goto_next_page(oggbs, drflac_ogg_recover_on_crc_mismatch)) {
- drflac_oggbs__seek_physical(oggbs, originalBytePos, drflac_seek_origin_start);
- return DRFLAC_FALSE; /* Never did find that sample... */
- }
- runningFrameBytePos = oggbs->currentBytePos - drflac_ogg__get_page_header_size(&oggbs->currentPageHeader) - oggbs->pageDataSize;
- if (oggbs->currentPageHeader.granulePosition*pFlac->channels >= sampleIndex) {
- break; /* The sample is somewhere in the previous page. */
- }
+ if (pBufferOut == NULL) {
+ return drflac__seek_forward_by_pcm_frames(pFlac, framesToRead);
+ }
- /*
- At this point we know the sample is not in the previous page. It could possibly be in this page. For simplicity we
- disregard any pages that do not begin a fresh packet.
- */
- if ((oggbs->currentPageHeader.headerType & 0x01) == 0) { /* <-- Is it a fresh page? */
- if (oggbs->currentPageHeader.segmentTable[0] >= 2) {
- drflac_uint8 firstBytesInPage[2];
- firstBytesInPage[0] = oggbs->pageData[0];
- firstBytesInPage[1] = oggbs->pageData[1];
+ unusedBitsPerSample = 32 - pFlac->bitsPerSample;
- if ((firstBytesInPage[0] == 0xFF) && (firstBytesInPage[1] & 0xFC) == 0xF8) { /* <-- Does the page begin with a frame's sync code? */
- runningGranulePosition = oggbs->currentPageHeader.granulePosition*pFlac->channels;
- }
+ framesRead = 0;
+ while (framesToRead > 0) {
+ /* If we've run out of samples in this frame, go to the next. */
+ if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) {
+ if (!drflac__read_and_decode_next_flac_frame(pFlac)) {
+ break; /* Couldn't read the next frame, so just break from the loop and return. */
+ }
+ } else {
+ unsigned int channelCount = drflac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment);
+ drflac_uint64 iFirstPCMFrame = pFlac->currentFLACFrame.header.blockSizeInPCMFrames - pFlac->currentFLACFrame.pcmFramesRemaining;
+ drflac_uint64 frameCountThisIteration = framesToRead;
- continue;
+ if (frameCountThisIteration > pFlac->currentFLACFrame.pcmFramesRemaining) {
+ frameCountThisIteration = pFlac->currentFLACFrame.pcmFramesRemaining;
}
- }
- }
- /*
- We found the page that that is closest to the sample, so now we need to find it. The first thing to do is seek to the
- start of that page. In the loop above we checked that it was a fresh page which means this page is also the start of
- a new frame. This property means that after we've seeked to the page we can immediately start looping over frames until
- we find the one containing the target sample.
- */
- if (!drflac_oggbs__seek_physical(oggbs, runningFrameBytePos, drflac_seek_origin_start)) {
- return DRFLAC_FALSE;
- }
- if (!drflac_oggbs__goto_next_page(oggbs, drflac_ogg_recover_on_crc_mismatch)) {
- return DRFLAC_FALSE;
- }
+ if (channelCount == 2) {
+ const drflac_int32* pDecodedSamples0 = pFlac->currentFLACFrame.subframes[0].pSamplesS32 + iFirstPCMFrame;
+ const drflac_int32* pDecodedSamples1 = pFlac->currentFLACFrame.subframes[1].pSamplesS32 + iFirstPCMFrame;
- /*
- At this point we'll be sitting on the first byte of the frame header of the first frame in the page. We just keep
- looping over these frames until we find the one containing the sample we're after.
- */
- runningSampleCount = runningGranulePosition;
- for (;;) {
- /*
- There are two ways to find the sample and seek past irrelevant frames:
- 1) Use the native FLAC decoder.
- 2) Use Ogg's framing system.
-
- Both of these options have their own pros and cons. Using the native FLAC decoder is slower because it needs to
- do a full decode of the frame. Using Ogg's framing system is faster, but more complicated and involves some code
- duplication for the decoding of frame headers.
-
- Another thing to consider is that using the Ogg framing system will perform direct seeking of the physical Ogg
- bitstream. This is important to consider because it means we cannot read data from the drflac_bs object using the
- standard drflac__*() APIs because that will read in extra data for its own internal caching which in turn breaks
- the positioning of the read pointer of the physical Ogg bitstream. Therefore, anything that would normally be read
- using the native FLAC decoding APIs, such as drflac__read_next_flac_frame_header(), need to be re-implemented so as to
- avoid the use of the drflac_bs object.
-
- Considering these issues, I have decided to use the slower native FLAC decoding method for the following reasons:
- 1) Seeking is already partially accelerated using Ogg's paging system in the code block above.
- 2) Seeking in an Ogg encapsulated FLAC stream is probably quite uncommon.
- 3) Simplicity.
- */
- drflac_uint64 firstSampleInFrame = 0;
- drflac_uint64 lastSampleInFrame = 0;
- drflac_uint64 sampleCountInThisFrame;
+ switch (pFlac->currentFLACFrame.header.channelAssignment)
+ {
+ case DRFLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE:
+ {
+ drflac_read_pcm_frames_s32__decode_left_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
+ } break;
- if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFrame.header)) {
- return DRFLAC_FALSE;
- }
+ case DRFLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE:
+ {
+ drflac_read_pcm_frames_s32__decode_right_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
+ } break;
- drflac__get_current_frame_sample_range(pFlac, &firstSampleInFrame, &lastSampleInFrame);
+ case DRFLAC_CHANNEL_ASSIGNMENT_MID_SIDE:
+ {
+ drflac_read_pcm_frames_s32__decode_mid_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
+ } break;
- sampleCountInThisFrame = (lastSampleInFrame - firstSampleInFrame) + 1;
- if (sampleIndex < (runningSampleCount + sampleCountInThisFrame)) {
- /*
- The sample should be in this frame. We need to fully decode it, however if it's an invalid frame (a CRC mismatch), we need to pretend
- it never existed and keep iterating.
- */
- drflac_result result = drflac__decode_flac_frame(pFlac);
- if (result == DRFLAC_SUCCESS) {
- /* The frame is valid. We just need to skip over some samples to ensure it's sample-exact. */
- drflac_uint64 samplesToDecode = (size_t)(sampleIndex - runningSampleCount); /* <-- Safe cast because the maximum number of samples in a frame is 65535. */
- if (samplesToDecode == 0) {
- return DRFLAC_TRUE;
- }
- return drflac__seek_forward_by_samples(pFlac, samplesToDecode) == samplesToDecode; /* <-- If this fails, something bad has happened (it should never fail). */
- } else {
- if (result == DRFLAC_CRC_MISMATCH) {
- continue; /* CRC mismatch. Pretend this frame never existed. */
- } else {
- return DRFLAC_FALSE;
+ case DRFLAC_CHANNEL_ASSIGNMENT_INDEPENDENT:
+ default:
+ {
+ drflac_read_pcm_frames_s32__decode_independent_stereo(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
+ } break;
}
- }
- } else {
- /*
- It's not in this frame. We need to seek past the frame, but check if there was a CRC mismatch. If so, we pretend this
- frame never existed and leave the running sample count untouched.
- */
- drflac_result result = drflac__seek_to_next_flac_frame(pFlac);
- if (result == DRFLAC_SUCCESS) {
- runningSampleCount += sampleCountInThisFrame;
} else {
- if (result == DRFLAC_CRC_MISMATCH) {
- continue; /* CRC mismatch. Pretend this frame never existed. */
- } else {
- return DRFLAC_FALSE;
+ /* Generic interleaving. */
+ drflac_uint64 i;
+ for (i = 0; i < frameCountThisIteration; ++i) {
+ unsigned int j;
+ for (j = 0; j < channelCount; ++j) {
+ pBufferOut[(i*channelCount)+j] = (drflac_int32)((drflac_uint32)(pFlac->currentFLACFrame.subframes[j].pSamplesS32[iFirstPCMFrame + i]) << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[j].wastedBitsPerSample));
+ }
}
}
+
+ framesRead += frameCountThisIteration;
+ pBufferOut += frameCountThisIteration * channelCount;
+ framesToRead -= frameCountThisIteration;
+ pFlac->currentPCMFrame += frameCountThisIteration;
+ pFlac->currentFLACFrame.pcmFramesRemaining -= (drflac_uint32)frameCountThisIteration;
}
}
+
+ return framesRead;
}
-drflac_bool32 drflac__init_private__ogg(drflac_init_info* pInit, drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, void* pUserData, void* pUserDataMD, drflac_bool32 relaxed)
+#if 0
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_left_side__reference(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
{
- drflac_ogg_page_header header;
- drflac_uint32 crc32 = DRFLAC_OGG_CAPTURE_PATTERN_CRC32;
- drflac_uint32 bytesRead = 0;
-
- /* Pre Condition: The bit stream should be sitting just past the 4-byte OggS capture pattern. */
- (void)relaxed;
+ drflac_uint64 i;
+ for (i = 0; i < frameCount; ++i) {
+ drflac_int32 left = pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ drflac_int32 side = pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+ drflac_int32 right = left - side;
- pInit->container = drflac_container_ogg;
- pInit->oggFirstBytePos = 0;
+ left >>= 16;
+ right >>= 16;
- /*
- We'll get here if the first 4 bytes of the stream were the OggS capture pattern, however it doesn't necessarily mean the
- stream includes FLAC encoded audio. To check for this we need to scan the beginning-of-stream page markers and check if
- any match the FLAC specification. Important to keep in mind that the stream may be multiplexed.
- */
- if (drflac_ogg__read_page_header_after_capture_pattern(onRead, pUserData, &header, &bytesRead, &crc32) != DRFLAC_SUCCESS) {
- return DRFLAC_FALSE;
+ pOutputSamples[i*2+0] = (drflac_int16)left;
+ pOutputSamples[i*2+1] = (drflac_int16)right;
}
- pInit->runningFilePos += bytesRead;
+}
+#endif
- for (;;) {
- int pageBodySize;
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_left_side__scalar(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
+{
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
+
+ drflac_int32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ for (i = 0; i < frameCount4; ++i) {
+ drflac_int32 left0 = pInputSamples0[i*4+0] << shift0;
+ drflac_int32 left1 = pInputSamples0[i*4+1] << shift0;
+ drflac_int32 left2 = pInputSamples0[i*4+2] << shift0;
+ drflac_int32 left3 = pInputSamples0[i*4+3] << shift0;
+
+ drflac_int32 side0 = pInputSamples1[i*4+0] << shift1;
+ drflac_int32 side1 = pInputSamples1[i*4+1] << shift1;
+ drflac_int32 side2 = pInputSamples1[i*4+2] << shift1;
+ drflac_int32 side3 = pInputSamples1[i*4+3] << shift1;
+
+ drflac_int32 right0 = left0 - side0;
+ drflac_int32 right1 = left1 - side1;
+ drflac_int32 right2 = left2 - side2;
+ drflac_int32 right3 = left3 - side3;
- /* Break if we're past the beginning of stream page. */
- if ((header.headerType & 0x02) == 0) {
- return DRFLAC_FALSE;
- }
+ left0 >>= 16;
+ left1 >>= 16;
+ left2 >>= 16;
+ left3 >>= 16;
- /* Check if it's a FLAC header. */
- pageBodySize = drflac_ogg__get_page_body_size(&header);
- if (pageBodySize == 51) { /* 51 = the lacing value of the FLAC header packet. */
- /* It could be a FLAC page... */
- drflac_uint32 bytesRemainingInPage = pageBodySize;
- drflac_uint8 packetType;
+ right0 >>= 16;
+ right1 >>= 16;
+ right2 >>= 16;
+ right3 >>= 16;
- if (onRead(pUserData, &packetType, 1) != 1) {
- return DRFLAC_FALSE;
- }
+ pOutputSamples[i*8+0] = (drflac_int16)left0;
+ pOutputSamples[i*8+1] = (drflac_int16)right0;
+ pOutputSamples[i*8+2] = (drflac_int16)left1;
+ pOutputSamples[i*8+3] = (drflac_int16)right1;
+ pOutputSamples[i*8+4] = (drflac_int16)left2;
+ pOutputSamples[i*8+5] = (drflac_int16)right2;
+ pOutputSamples[i*8+6] = (drflac_int16)left3;
+ pOutputSamples[i*8+7] = (drflac_int16)right3;
+ }
- bytesRemainingInPage -= 1;
- if (packetType == 0x7F) {
- /* Increasingly more likely to be a FLAC page... */
- drflac_uint8 sig[4];
- if (onRead(pUserData, sig, 4) != 4) {
- return DRFLAC_FALSE;
- }
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 left = pInputSamples0[i] << shift0;
+ drflac_int32 side = pInputSamples1[i] << shift1;
+ drflac_int32 right = left - side;
- bytesRemainingInPage -= 4;
- if (sig[0] == 'F' && sig[1] == 'L' && sig[2] == 'A' && sig[3] == 'C') {
- /* Almost certainly a FLAC page... */
- drflac_uint8 mappingVersion[2];
- if (onRead(pUserData, mappingVersion, 2) != 2) {
- return DRFLAC_FALSE;
- }
+ left >>= 16;
+ right >>= 16;
- if (mappingVersion[0] != 1) {
- return DRFLAC_FALSE; /* Only supporting version 1.x of the Ogg mapping. */
- }
+ pOutputSamples[i*2+0] = (drflac_int16)left;
+ pOutputSamples[i*2+1] = (drflac_int16)right;
+ }
+}
- /*
- The next 2 bytes are the non-audio packets, not including this one. We don't care about this because we're going to
- be handling it in a generic way based on the serial number and packet types.
- */
- if (!onSeek(pUserData, 2, drflac_seek_origin_current)) {
- return DRFLAC_FALSE;
- }
+#if defined(DRFLAC_SUPPORT_SSE2)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_left_side__sse2(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
+{
+ drflac_uint64 frameCount4;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
+ drflac_uint64 i;
- /* Expecting the native FLAC signature "fLaC". */
- if (onRead(pUserData, sig, 4) != 4) {
- return DRFLAC_FALSE;
- }
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
- if (sig[0] == 'f' && sig[1] == 'L' && sig[2] == 'a' && sig[3] == 'C') {
- /* The remaining data in the page should be the STREAMINFO block. */
- drflac_streaminfo streaminfo;
- drflac_uint8 isLastBlock;
- drflac_uint8 blockType;
- drflac_uint32 blockSize;
- if (!drflac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize)) {
- return DRFLAC_FALSE;
- }
+ frameCount4 = frameCount >> 2;
- if (blockType != DRFLAC_METADATA_BLOCK_TYPE_STREAMINFO || blockSize != 34) {
- return DRFLAC_FALSE; /* Invalid block type. First block must be the STREAMINFO block. */
- }
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
- if (drflac__read_streaminfo(onRead, pUserData, &streaminfo)) {
- /* Success! */
- pInit->hasStreamInfoBlock = DRFLAC_TRUE;
- pInit->sampleRate = streaminfo.sampleRate;
- pInit->channels = streaminfo.channels;
- pInit->bitsPerSample = streaminfo.bitsPerSample;
- pInit->totalSampleCount = streaminfo.totalSampleCount;
- pInit->maxBlockSize = streaminfo.maxBlockSize;
- pInit->hasMetadataBlocks = !isLastBlock;
+ for (i = 0; i < frameCount4; ++i) {
+ __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0);
+ __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1);
+ __m128i right = _mm_sub_epi32(left, side);
- if (onMeta) {
- drflac_metadata metadata;
- metadata.type = DRFLAC_METADATA_BLOCK_TYPE_STREAMINFO;
- metadata.pRawData = NULL;
- metadata.rawDataSize = 0;
- metadata.data.streaminfo = streaminfo;
- onMeta(pUserDataMD, &metadata);
- }
+ left = _mm_srai_epi32(left, 16);
+ right = _mm_srai_epi32(right, 16);
- pInit->runningFilePos += pageBodySize;
- pInit->oggFirstBytePos = pInit->runningFilePos - 79; /* Subtracting 79 will place us right on top of the "OggS" identifier of the FLAC bos page. */
- pInit->oggSerial = header.serialNumber;
- pInit->oggBosHeader = header;
- break;
- } else {
- /* Failed to read STREAMINFO block. Aww, so close... */
- return DRFLAC_FALSE;
- }
- } else {
- /* Invalid file. */
- return DRFLAC_FALSE;
- }
- } else {
- /* Not a FLAC header. Skip it. */
- if (!onSeek(pUserData, bytesRemainingInPage, drflac_seek_origin_current)) {
- return DRFLAC_FALSE;
- }
- }
- } else {
- /* Not a FLAC header. Seek past the entire page and move on to the next. */
- if (!onSeek(pUserData, bytesRemainingInPage, drflac_seek_origin_current)) {
- return DRFLAC_FALSE;
- }
- }
- } else {
- if (!onSeek(pUserData, pageBodySize, drflac_seek_origin_current)) {
- return DRFLAC_FALSE;
- }
- }
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), drflac__mm_packs_interleaved_epi32(left, right));
+ }
- pInit->runningFilePos += pageBodySize;
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 left = pInputSamples0[i] << shift0;
+ drflac_int32 side = pInputSamples1[i] << shift1;
+ drflac_int32 right = left - side;
+ left >>= 16;
+ right >>= 16;
- /* Read the header of the next page. */
- if (drflac_ogg__read_page_header(onRead, pUserData, &header, &bytesRead, &crc32) != DRFLAC_SUCCESS) {
- return DRFLAC_FALSE;
- }
- pInit->runningFilePos += bytesRead;
+ pOutputSamples[i*2+0] = (drflac_int16)left;
+ pOutputSamples[i*2+1] = (drflac_int16)right;
}
-
- /*
- If we get here it means we found a FLAC audio stream. We should be sitting on the first byte of the header of the next page. The next
- packets in the FLAC logical stream contain the metadata. The only thing left to do in the initialization phase for Ogg is to create the
- Ogg bistream object.
- */
- pInit->hasMetadataBlocks = DRFLAC_TRUE; /* <-- Always have at least VORBIS_COMMENT metadata block. */
- return DRFLAC_TRUE;
}
#endif
-drflac_bool32 drflac__init_private(drflac_init_info* pInit, drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, drflac_container container, void* pUserData, void* pUserDataMD)
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_left_side__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
{
- drflac_bool32 relaxed;
- drflac_uint8 id[4];
-
- if (pInit == NULL || onRead == NULL || onSeek == NULL) {
- return DRFLAC_FALSE;
- }
+ drflac_uint64 frameCount4;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
+ drflac_uint64 i;
+ int32x4_t shift0_4;
+ int32x4_t shift1_4;
- drflac_zero_memory(pInit, sizeof(*pInit));
- pInit->onRead = onRead;
- pInit->onSeek = onSeek;
- pInit->onMeta = onMeta;
- pInit->container = container;
- pInit->pUserData = pUserData;
- pInit->pUserDataMD = pUserDataMD;
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
- pInit->bs.onRead = onRead;
- pInit->bs.onSeek = onSeek;
- pInit->bs.pUserData = pUserData;
- drflac__reset_cache(&pInit->bs);
+ frameCount4 = frameCount >> 2;
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
- /* If the container is explicitly defined then we can try opening in relaxed mode. */
- relaxed = container != drflac_container_unknown;
+ shift0_4 = vdupq_n_s32(shift0);
+ shift1_4 = vdupq_n_s32(shift1);
- /* Skip over any ID3 tags. */
- for (;;) {
- if (onRead(pUserData, id, 4) != 4) {
- return DRFLAC_FALSE; /* Ran out of data. */
- }
- pInit->runningFilePos += 4;
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t left;
+ int32x4_t side;
+ int32x4_t right;
- if (id[0] == 'I' && id[1] == 'D' && id[2] == '3') {
- drflac_uint8 header[6];
- drflac_uint8 flags;
- drflac_uint32 headerSize;
+ left = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), shift0_4);
+ side = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), shift1_4);
+ right = vsubq_s32(left, side);
- if (onRead(pUserData, header, 6) != 6) {
- return DRFLAC_FALSE; /* Ran out of data. */
- }
- pInit->runningFilePos += 6;
+ left = vshrq_n_s32(left, 16);
+ right = vshrq_n_s32(right, 16);
- flags = header[1];
+ drflac__vst2q_s16(pOutputSamples + i*8, vzip_s16(vmovn_s32(left), vmovn_s32(right)));
+ }
- drflac_copy_memory(&headerSize, header+2, 4);
- headerSize = drflac__unsynchsafe_32(drflac__be2host_32(headerSize));
- if (flags & 0x10) {
- headerSize += 10;
- }
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 left = pInputSamples0[i] << shift0;
+ drflac_int32 side = pInputSamples1[i] << shift1;
+ drflac_int32 right = left - side;
- if (!onSeek(pUserData, headerSize, drflac_seek_origin_current)) {
- return DRFLAC_FALSE; /* Failed to seek past the tag. */
- }
- pInit->runningFilePos += headerSize;
- } else {
- break;
- }
- }
+ left >>= 16;
+ right >>= 16;
- if (id[0] == 'f' && id[1] == 'L' && id[2] == 'a' && id[3] == 'C') {
- return drflac__init_private__native(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed);
- }
-#ifndef DR_FLAC_NO_OGG
- if (id[0] == 'O' && id[1] == 'g' && id[2] == 'g' && id[3] == 'S') {
- return drflac__init_private__ogg(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed);
+ pOutputSamples[i*2+0] = (drflac_int16)left;
+ pOutputSamples[i*2+1] = (drflac_int16)right;
}
+}
#endif
- /* If we get here it means we likely don't have a header. Try opening in relaxed mode, if applicable. */
- if (relaxed) {
- if (container == drflac_container_native) {
- return drflac__init_private__native(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed);
- }
-#ifndef DR_FLAC_NO_OGG
- if (container == drflac_container_ogg) {
- return drflac__init_private__ogg(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed);
- }
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_left_side(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
+{
+#if defined(DRFLAC_SUPPORT_SSE2)
+ if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s16__decode_left_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s16__decode_left_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#endif
+ {
+ /* Scalar fallback. */
+#if 0
+ drflac_read_pcm_frames_s16__decode_left_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#else
+ drflac_read_pcm_frames_s16__decode_left_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
#endif
}
-
- /* Unsupported container. */
- return DRFLAC_FALSE;
}
-void drflac__init_from_info(drflac* pFlac, drflac_init_info* pInit)
+
+#if 0
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_right_side__reference(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
{
- drflac_assert(pFlac != NULL);
- drflac_assert(pInit != NULL);
+ drflac_uint64 i;
+ for (i = 0; i < frameCount; ++i) {
+ drflac_int32 side = pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ drflac_int32 right = pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+ drflac_int32 left = right + side;
+
+ left >>= 16;
+ right >>= 16;
- drflac_zero_memory(pFlac, sizeof(*pFlac));
- pFlac->bs = pInit->bs;
- pFlac->onMeta = pInit->onMeta;
- pFlac->pUserDataMD = pInit->pUserDataMD;
- pFlac->maxBlockSize = pInit->maxBlockSize;
- pFlac->sampleRate = pInit->sampleRate;
- pFlac->channels = (drflac_uint8)pInit->channels;
- pFlac->bitsPerSample = (drflac_uint8)pInit->bitsPerSample;
- pFlac->totalSampleCount = pInit->totalSampleCount;
- pFlac->totalPCMFrameCount = pInit->totalSampleCount / pFlac->channels;
- pFlac->container = pInit->container;
+ pOutputSamples[i*2+0] = (drflac_int16)left;
+ pOutputSamples[i*2+1] = (drflac_int16)right;
+ }
}
+#endif
-drflac* drflac_open_with_metadata_private(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, drflac_container container, void* pUserData, void* pUserDataMD)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_right_side__scalar(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
{
- drflac_init_info init;
- drflac_uint32 allocationSize;
- drflac_uint32 wholeSIMDVectorCountPerChannel;
- drflac_uint32 decodedSamplesAllocationSize;
-#ifndef DR_FLAC_NO_OGG
- drflac_uint32 oggbsAllocationSize;
- drflac_oggbs oggbs;
-#endif
- drflac_uint64 firstFramePos;
- drflac_uint64 seektablePos;
- drflac_uint32 seektableSize;
- drflac* pFlac;
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
-#ifndef DRFLAC_NO_CPUID
- /* CPU support first. */
- drflac__init_cpu_caps();
-#endif
+ drflac_int32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ for (i = 0; i < frameCount4; ++i) {
+ drflac_int32 side0 = pInputSamples0[i*4+0] << shift0;
+ drflac_int32 side1 = pInputSamples0[i*4+1] << shift0;
+ drflac_int32 side2 = pInputSamples0[i*4+2] << shift0;
+ drflac_int32 side3 = pInputSamples0[i*4+3] << shift0;
- if (!drflac__init_private(&init, onRead, onSeek, onMeta, container, pUserData, pUserDataMD)) {
- return NULL;
- }
+ drflac_int32 right0 = pInputSamples1[i*4+0] << shift1;
+ drflac_int32 right1 = pInputSamples1[i*4+1] << shift1;
+ drflac_int32 right2 = pInputSamples1[i*4+2] << shift1;
+ drflac_int32 right3 = pInputSamples1[i*4+3] << shift1;
- /*
- The size of the allocation for the drflac object needs to be large enough to fit the following:
- 1) The main members of the drflac structure
- 2) A block of memory large enough to store the decoded samples of the largest frame in the stream
- 3) If the container is Ogg, a drflac_oggbs object
-
- The complicated part of the allocation is making sure there's enough room the decoded samples, taking into consideration
- the different SIMD instruction sets.
- */
- allocationSize = sizeof(drflac);
+ drflac_int32 left0 = right0 + side0;
+ drflac_int32 left1 = right1 + side1;
+ drflac_int32 left2 = right2 + side2;
+ drflac_int32 left3 = right3 + side3;
- /*
- The allocation size for decoded frames depends on the number of 32-bit integers that fit inside the largest SIMD vector
- we are supporting.
- */
- if (((init.maxBlockSize+DRFLAC_LEADING_SAMPLES) % (DRFLAC_MAX_SIMD_VECTOR_SIZE / sizeof(drflac_int32))) == 0) {
- wholeSIMDVectorCountPerChannel = ((init.maxBlockSize+DRFLAC_LEADING_SAMPLES) / (DRFLAC_MAX_SIMD_VECTOR_SIZE / sizeof(drflac_int32)));
- } else {
- wholeSIMDVectorCountPerChannel = ((init.maxBlockSize+DRFLAC_LEADING_SAMPLES) / (DRFLAC_MAX_SIMD_VECTOR_SIZE / sizeof(drflac_int32))) + 1;
+ left0 >>= 16;
+ left1 >>= 16;
+ left2 >>= 16;
+ left3 >>= 16;
+
+ right0 >>= 16;
+ right1 >>= 16;
+ right2 >>= 16;
+ right3 >>= 16;
+
+ pOutputSamples[i*8+0] = (drflac_int16)left0;
+ pOutputSamples[i*8+1] = (drflac_int16)right0;
+ pOutputSamples[i*8+2] = (drflac_int16)left1;
+ pOutputSamples[i*8+3] = (drflac_int16)right1;
+ pOutputSamples[i*8+4] = (drflac_int16)left2;
+ pOutputSamples[i*8+5] = (drflac_int16)right2;
+ pOutputSamples[i*8+6] = (drflac_int16)left3;
+ pOutputSamples[i*8+7] = (drflac_int16)right3;
}
- decodedSamplesAllocationSize = wholeSIMDVectorCountPerChannel * DRFLAC_MAX_SIMD_VECTOR_SIZE * init.channels;
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 side = pInputSamples0[i] << shift0;
+ drflac_int32 right = pInputSamples1[i] << shift1;
+ drflac_int32 left = right + side;
- allocationSize += decodedSamplesAllocationSize;
- allocationSize += DRFLAC_MAX_SIMD_VECTOR_SIZE; /* Allocate extra bytes to ensure we have enough for alignment. */
+ left >>= 16;
+ right >>= 16;
-#ifndef DR_FLAC_NO_OGG
- /* There's additional data required for Ogg streams. */
- oggbsAllocationSize = 0;
- if (init.container == drflac_container_ogg) {
- oggbsAllocationSize = sizeof(drflac_oggbs);
- allocationSize += oggbsAllocationSize;
+ pOutputSamples[i*2+0] = (drflac_int16)left;
+ pOutputSamples[i*2+1] = (drflac_int16)right;
}
+}
- drflac_zero_memory(&oggbs, sizeof(oggbs));
- if (init.container == drflac_container_ogg) {
- oggbs.onRead = onRead;
- oggbs.onSeek = onSeek;
- oggbs.pUserData = pUserData;
- oggbs.currentBytePos = init.oggFirstBytePos;
- oggbs.firstBytePos = init.oggFirstBytePos;
- oggbs.serialNumber = init.oggSerial;
- oggbs.bosPageHeader = init.oggBosHeader;
- oggbs.bytesRemainingInPage = 0;
- }
-#endif
+#if defined(DRFLAC_SUPPORT_SSE2)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_right_side__sse2(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
+{
+ drflac_uint64 frameCount4;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
+ drflac_uint64 i;
- /*
- This part is a bit awkward. We need to load the seektable so that it can be referenced in-memory, but I want the drflac object to
- consist of only a single heap allocation. To this, the size of the seek table needs to be known, which we determine when reading
- and decoding the metadata.
- */
- firstFramePos = 42; /* <-- We know we are at byte 42 at this point. */
- seektablePos = 0;
- seektableSize = 0;
- if (init.hasMetadataBlocks) {
- drflac_read_proc onReadOverride = onRead;
- drflac_seek_proc onSeekOverride = onSeek;
- void* pUserDataOverride = pUserData;
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
-#ifndef DR_FLAC_NO_OGG
- if (init.container == drflac_container_ogg) {
- onReadOverride = drflac__on_read_ogg;
- onSeekOverride = drflac__on_seek_ogg;
- pUserDataOverride = (void*)&oggbs;
- }
-#endif
+ frameCount4 = frameCount >> 2;
- if (!drflac__read_and_decode_metadata(onReadOverride, onSeekOverride, onMeta, pUserDataOverride, pUserDataMD, &firstFramePos, &seektablePos, &seektableSize)) {
- return NULL;
- }
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
- allocationSize += seektableSize;
- }
+ for (i = 0; i < frameCount4; ++i) {
+ __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0);
+ __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1);
+ __m128i left = _mm_add_epi32(right, side);
+ left = _mm_srai_epi32(left, 16);
+ right = _mm_srai_epi32(right, 16);
- pFlac = (drflac*)DRFLAC_MALLOC(allocationSize);
- drflac__init_from_info(pFlac, &init);
- pFlac->pDecodedSamples = (drflac_int32*)drflac_align((size_t)pFlac->pExtraData, DRFLAC_MAX_SIMD_VECTOR_SIZE);
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), drflac__mm_packs_interleaved_epi32(left, right));
+ }
-#ifndef DR_FLAC_NO_OGG
- if (init.container == drflac_container_ogg) {
- drflac_oggbs* pInternalOggbs = (drflac_oggbs*)((drflac_uint8*)pFlac->pDecodedSamples + decodedSamplesAllocationSize + seektableSize);
- *pInternalOggbs = oggbs;
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 side = pInputSamples0[i] << shift0;
+ drflac_int32 right = pInputSamples1[i] << shift1;
+ drflac_int32 left = right + side;
- /* The Ogg bistream needs to be layered on top of the original bitstream. */
- pFlac->bs.onRead = drflac__on_read_ogg;
- pFlac->bs.onSeek = drflac__on_seek_ogg;
- pFlac->bs.pUserData = (void*)pInternalOggbs;
- pFlac->_oggbs = (void*)pInternalOggbs;
+ left >>= 16;
+ right >>= 16;
+
+ pOutputSamples[i*2+0] = (drflac_int16)left;
+ pOutputSamples[i*2+1] = (drflac_int16)right;
}
+}
#endif
- pFlac->firstFramePos = firstFramePos;
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_right_side__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
+{
+ drflac_uint64 frameCount4;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
+ drflac_uint64 i;
+ int32x4_t shift0_4;
+ int32x4_t shift1_4;
- /* NOTE: Seektables are not currently compatible with Ogg encapsulation (Ogg has its own accelerated seeking system). I may change this later, so I'm leaving this here for now. */
-#ifndef DR_FLAC_NO_OGG
- if (init.container == drflac_container_ogg)
- {
- pFlac->pSeekpoints = NULL;
- pFlac->seekpointCount = 0;
- }
- else
-#endif
- {
- /* If we have a seektable we need to load it now, making sure we move back to where we were previously. */
- if (seektablePos != 0) {
- pFlac->seekpointCount = seektableSize / sizeof(*pFlac->pSeekpoints);
- pFlac->pSeekpoints = (drflac_seekpoint*)((drflac_uint8*)pFlac->pDecodedSamples + decodedSamplesAllocationSize);
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
- /* Seek to the seektable, then just read directly into our seektable buffer. */
- if (pFlac->bs.onSeek(pFlac->bs.pUserData, (int)seektablePos, drflac_seek_origin_start)) {
- if (pFlac->bs.onRead(pFlac->bs.pUserData, pFlac->pSeekpoints, seektableSize) == seektableSize) {
- /* Endian swap. */
- drflac_uint32 iSeekpoint;
- for (iSeekpoint = 0; iSeekpoint < pFlac->seekpointCount; ++iSeekpoint) {
- pFlac->pSeekpoints[iSeekpoint].firstSample = drflac__be2host_64(pFlac->pSeekpoints[iSeekpoint].firstSample);
- pFlac->pSeekpoints[iSeekpoint].frameOffset = drflac__be2host_64(pFlac->pSeekpoints[iSeekpoint].frameOffset);
- pFlac->pSeekpoints[iSeekpoint].sampleCount = drflac__be2host_16(pFlac->pSeekpoints[iSeekpoint].sampleCount);
- }
- } else {
- /* Failed to read the seektable. Pretend we don't have one. */
- pFlac->pSeekpoints = NULL;
- pFlac->seekpointCount = 0;
- }
+ frameCount4 = frameCount >> 2;
- /* We need to seek back to where we were. If this fails it's a critical error. */
- if (!pFlac->bs.onSeek(pFlac->bs.pUserData, (int)pFlac->firstFramePos, drflac_seek_origin_start)) {
- DRFLAC_FREE(pFlac);
- return NULL;
- }
- } else {
- /* Failed to seek to the seektable. Ominous sign, but for now we can just pretend we don't have one. */
- pFlac->pSeekpoints = NULL;
- pFlac->seekpointCount = 0;
- }
- }
- }
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
-
- /*
- If we get here, but don't have a STREAMINFO block, it means we've opened the stream in relaxed mode and need to decode
- the first frame.
- */
- if (!init.hasStreamInfoBlock) {
- pFlac->currentFrame.header = init.firstFrameHeader;
- do
- {
- drflac_result result = drflac__decode_flac_frame(pFlac);
- if (result == DRFLAC_SUCCESS) {
- break;
- } else {
- if (result == DRFLAC_CRC_MISMATCH) {
- if (!drflac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFrame.header)) {
- DRFLAC_FREE(pFlac);
- return NULL;
- }
- continue;
- } else {
- DRFLAC_FREE(pFlac);
- return NULL;
- }
- }
- } while (1);
- }
+ shift0_4 = vdupq_n_s32(shift0);
+ shift1_4 = vdupq_n_s32(shift1);
- return pFlac;
-}
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t side;
+ int32x4_t right;
+ int32x4_t left;
+ side = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), shift0_4);
+ right = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), shift1_4);
+ left = vaddq_s32(right, side);
+ left = vshrq_n_s32(left, 16);
+ right = vshrq_n_s32(right, 16);
-#ifndef DR_FLAC_NO_STDIO
-#include <stdio.h>
+ drflac__vst2q_s16(pOutputSamples + i*8, vzip_s16(vmovn_s32(left), vmovn_s32(right)));
+ }
-static size_t drflac__on_read_stdio(void* pUserData, void* bufferOut, size_t bytesToRead)
-{
- return fread(bufferOut, 1, bytesToRead, (FILE*)pUserData);
-}
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 side = pInputSamples0[i] << shift0;
+ drflac_int32 right = pInputSamples1[i] << shift1;
+ drflac_int32 left = right + side;
-static drflac_bool32 drflac__on_seek_stdio(void* pUserData, int offset, drflac_seek_origin origin)
-{
- drflac_assert(offset >= 0); /* <-- Never seek backwards. */
+ left >>= 16;
+ right >>= 16;
- return fseek((FILE*)pUserData, offset, (origin == drflac_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0;
+ pOutputSamples[i*2+0] = (drflac_int16)left;
+ pOutputSamples[i*2+1] = (drflac_int16)right;
+ }
}
+#endif
-static FILE* drflac__fopen(const char* filename)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_right_side(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
{
- FILE* pFile;
-#if defined(_MSC_VER) && _MSC_VER >= 1400
- if (fopen_s(&pFile, filename, "rb") != 0) {
- return NULL;
- }
+#if defined(DRFLAC_SUPPORT_SSE2)
+ if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s16__decode_right_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s16__decode_right_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#endif
+ {
+ /* Scalar fallback. */
+#if 0
+ drflac_read_pcm_frames_s16__decode_right_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
#else
- pFile = fopen(filename, "rb");
- if (pFile == NULL) {
- return NULL;
- }
+ drflac_read_pcm_frames_s16__decode_right_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
#endif
-
- return pFile;
+ }
}
-drflac* drflac_open_file(const char* filename)
+#if 0
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_mid_side__reference(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
{
- drflac* pFlac;
- FILE* pFile;
+ for (drflac_uint64 i = 0; i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
- pFile = drflac__fopen(filename);
- if (pFile == NULL) {
- return NULL;
- }
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
- pFlac = drflac_open(drflac__on_read_stdio, drflac__on_seek_stdio, (void*)pFile);
- if (pFlac == NULL) {
- fclose(pFile);
- return NULL;
+ pOutputSamples[i*2+0] = (drflac_int16)((((mid + side) >> 1) << unusedBitsPerSample) >> 16);
+ pOutputSamples[i*2+1] = (drflac_int16)((((mid - side) >> 1) << unusedBitsPerSample) >> 16);
}
-
- return pFlac;
}
+#endif
-drflac* drflac_open_file_with_metadata(const char* filename, drflac_meta_proc onMeta, void* pUserData)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_mid_side__scalar(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
{
- drflac* pFlac;
- FILE* pFile;
-
- pFile = drflac__fopen(filename);
- if (pFile == NULL) {
- return NULL;
- }
-
- pFlac = drflac_open_with_metadata_private(drflac__on_read_stdio, drflac__on_seek_stdio, onMeta, drflac_container_unknown, (void*)pFile, pUserData);
- if (pFlac == NULL) {
- fclose(pFile);
- return pFlac;
- }
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
- return pFlac;
-}
-#endif /* DR_FLAC_NO_STDIO */
+ int shift = unusedBitsPerSample;
+ if (shift > 0) {
+ shift -= 1;
+ for (i = 0; i < frameCount4; ++i) {
+ drflac_int32 temp0L;
+ drflac_int32 temp1L;
+ drflac_int32 temp2L;
+ drflac_int32 temp3L;
+ drflac_int32 temp0R;
+ drflac_int32 temp1R;
+ drflac_int32 temp2R;
+ drflac_int32 temp3R;
+
+ drflac_int32 mid0 = pInputSamples0[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid1 = pInputSamples0[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid2 = pInputSamples0[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid3 = pInputSamples0[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+
+ drflac_int32 side0 = pInputSamples1[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side1 = pInputSamples1[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side2 = pInputSamples1[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side3 = pInputSamples1[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
-static size_t drflac__on_read_memory(void* pUserData, void* bufferOut, size_t bytesToRead)
-{
- drflac__memory_stream* memoryStream = (drflac__memory_stream*)pUserData;
- size_t bytesRemaining;
+ mid0 = (((drflac_uint32)mid0) << 1) | (side0 & 0x01);
+ mid1 = (((drflac_uint32)mid1) << 1) | (side1 & 0x01);
+ mid2 = (((drflac_uint32)mid2) << 1) | (side2 & 0x01);
+ mid3 = (((drflac_uint32)mid3) << 1) | (side3 & 0x01);
- drflac_assert(memoryStream != NULL);
- drflac_assert(memoryStream->dataSize >= memoryStream->currentReadPos);
+ temp0L = ((mid0 + side0) << shift);
+ temp1L = ((mid1 + side1) << shift);
+ temp2L = ((mid2 + side2) << shift);
+ temp3L = ((mid3 + side3) << shift);
- bytesRemaining = memoryStream->dataSize - memoryStream->currentReadPos;
- if (bytesToRead > bytesRemaining) {
- bytesToRead = bytesRemaining;
- }
+ temp0R = ((mid0 - side0) << shift);
+ temp1R = ((mid1 - side1) << shift);
+ temp2R = ((mid2 - side2) << shift);
+ temp3R = ((mid3 - side3) << shift);
- if (bytesToRead > 0) {
- drflac_copy_memory(bufferOut, memoryStream->data + memoryStream->currentReadPos, bytesToRead);
- memoryStream->currentReadPos += bytesToRead;
- }
+ temp0L >>= 16;
+ temp1L >>= 16;
+ temp2L >>= 16;
+ temp3L >>= 16;
+
+ temp0R >>= 16;
+ temp1R >>= 16;
+ temp2R >>= 16;
+ temp3R >>= 16;
+
+ pOutputSamples[i*8+0] = (drflac_int16)temp0L;
+ pOutputSamples[i*8+1] = (drflac_int16)temp0R;
+ pOutputSamples[i*8+2] = (drflac_int16)temp1L;
+ pOutputSamples[i*8+3] = (drflac_int16)temp1R;
+ pOutputSamples[i*8+4] = (drflac_int16)temp2L;
+ pOutputSamples[i*8+5] = (drflac_int16)temp2R;
+ pOutputSamples[i*8+6] = (drflac_int16)temp3L;
+ pOutputSamples[i*8+7] = (drflac_int16)temp3R;
+ }
+ } else {
+ for (i = 0; i < frameCount4; ++i) {
+ drflac_int32 temp0L;
+ drflac_int32 temp1L;
+ drflac_int32 temp2L;
+ drflac_int32 temp3L;
+ drflac_int32 temp0R;
+ drflac_int32 temp1R;
+ drflac_int32 temp2R;
+ drflac_int32 temp3R;
+
+ drflac_int32 mid0 = pInputSamples0[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid1 = pInputSamples0[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid2 = pInputSamples0[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid3 = pInputSamples0[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+
+ drflac_int32 side0 = pInputSamples1[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side1 = pInputSamples1[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side2 = pInputSamples1[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side3 = pInputSamples1[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
- return bytesToRead;
-}
+ mid0 = (((drflac_uint32)mid0) << 1) | (side0 & 0x01);
+ mid1 = (((drflac_uint32)mid1) << 1) | (side1 & 0x01);
+ mid2 = (((drflac_uint32)mid2) << 1) | (side2 & 0x01);
+ mid3 = (((drflac_uint32)mid3) << 1) | (side3 & 0x01);
-static drflac_bool32 drflac__on_seek_memory(void* pUserData, int offset, drflac_seek_origin origin)
-{
- drflac__memory_stream* memoryStream = (drflac__memory_stream*)pUserData;
+ temp0L = ((mid0 + side0) >> 1);
+ temp1L = ((mid1 + side1) >> 1);
+ temp2L = ((mid2 + side2) >> 1);
+ temp3L = ((mid3 + side3) >> 1);
- drflac_assert(memoryStream != NULL);
- drflac_assert(offset >= 0); /* <-- Never seek backwards. */
+ temp0R = ((mid0 - side0) >> 1);
+ temp1R = ((mid1 - side1) >> 1);
+ temp2R = ((mid2 - side2) >> 1);
+ temp3R = ((mid3 - side3) >> 1);
- if (offset > (drflac_int64)memoryStream->dataSize) {
- return DRFLAC_FALSE;
- }
+ temp0L >>= 16;
+ temp1L >>= 16;
+ temp2L >>= 16;
+ temp3L >>= 16;
- if (origin == drflac_seek_origin_current) {
- if (memoryStream->currentReadPos + offset <= memoryStream->dataSize) {
- memoryStream->currentReadPos += offset;
- } else {
- return DRFLAC_FALSE; /* Trying to seek too far forward. */
- }
- } else {
- if ((drflac_uint32)offset <= memoryStream->dataSize) {
- memoryStream->currentReadPos = offset;
- } else {
- return DRFLAC_FALSE; /* Trying to seek too far forward. */
+ temp0R >>= 16;
+ temp1R >>= 16;
+ temp2R >>= 16;
+ temp3R >>= 16;
+
+ pOutputSamples[i*8+0] = (drflac_int16)temp0L;
+ pOutputSamples[i*8+1] = (drflac_int16)temp0R;
+ pOutputSamples[i*8+2] = (drflac_int16)temp1L;
+ pOutputSamples[i*8+3] = (drflac_int16)temp1R;
+ pOutputSamples[i*8+4] = (drflac_int16)temp2L;
+ pOutputSamples[i*8+5] = (drflac_int16)temp2R;
+ pOutputSamples[i*8+6] = (drflac_int16)temp3L;
+ pOutputSamples[i*8+7] = (drflac_int16)temp3R;
}
}
- return DRFLAC_TRUE;
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
+
+ pOutputSamples[i*2+0] = (drflac_int16)((((mid + side) >> 1) << unusedBitsPerSample) >> 16);
+ pOutputSamples[i*2+1] = (drflac_int16)((((mid - side) >> 1) << unusedBitsPerSample) >> 16);
+ }
}
-drflac* drflac_open_memory(const void* data, size_t dataSize)
+#if defined(DRFLAC_SUPPORT_SSE2)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_mid_side__sse2(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
{
- drflac__memory_stream memoryStream;
- drflac* pFlac;
+ drflac_uint64 i;
+ drflac_uint64 frameCount4;
+ drflac_int32 shift;
- memoryStream.data = (const unsigned char*)data;
- memoryStream.dataSize = dataSize;
- memoryStream.currentReadPos = 0;
- pFlac = drflac_open(drflac__on_read_memory, drflac__on_seek_memory, &memoryStream);
- if (pFlac == NULL) {
- return NULL;
- }
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
- pFlac->memoryStream = memoryStream;
+ frameCount4 = frameCount >> 2;
- /* This is an awful hack... */
-#ifndef DR_FLAC_NO_OGG
- if (pFlac->container == drflac_container_ogg)
- {
- drflac_oggbs* oggbs = (drflac_oggbs*)pFlac->_oggbs;
- oggbs->pUserData = &pFlac->memoryStream;
- }
- else
-#endif
- {
- pFlac->bs.pUserData = &pFlac->memoryStream;
- }
+ shift = unusedBitsPerSample;
+ if (shift == 0) {
+ for (i = 0; i < frameCount4; ++i) {
+ __m128i mid;
+ __m128i side;
+ __m128i left;
+ __m128i right;
- return pFlac;
-}
+ mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
-drflac* drflac_open_memory_with_metadata(const void* data, size_t dataSize, drflac_meta_proc onMeta, void* pUserData)
-{
- drflac__memory_stream memoryStream;
- drflac* pFlac;
+ mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01)));
- memoryStream.data = (const unsigned char*)data;
- memoryStream.dataSize = dataSize;
- memoryStream.currentReadPos = 0;
- pFlac = drflac_open_with_metadata_private(drflac__on_read_memory, drflac__on_seek_memory, onMeta, drflac_container_unknown, &memoryStream, pUserData);
- if (pFlac == NULL) {
- return NULL;
- }
+ left = _mm_srai_epi32(_mm_add_epi32(mid, side), 1);
+ right = _mm_srai_epi32(_mm_sub_epi32(mid, side), 1);
- pFlac->memoryStream = memoryStream;
+ left = _mm_srai_epi32(left, 16);
+ right = _mm_srai_epi32(right, 16);
- /* This is an awful hack... */
-#ifndef DR_FLAC_NO_OGG
- if (pFlac->container == drflac_container_ogg)
- {
- drflac_oggbs* oggbs = (drflac_oggbs*)pFlac->_oggbs;
- oggbs->pUserData = &pFlac->memoryStream;
- }
- else
-#endif
- {
- pFlac->bs.pUserData = &pFlac->memoryStream;
- }
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), drflac__mm_packs_interleaved_epi32(left, right));
+ }
- return pFlac;
-}
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
+ pOutputSamples[i*2+0] = (drflac_int16)(((mid + side) >> 1) >> 16);
+ pOutputSamples[i*2+1] = (drflac_int16)(((mid - side) >> 1) >> 16);
+ }
+ } else {
+ shift -= 1;
+ for (i = 0; i < frameCount4; ++i) {
+ __m128i mid;
+ __m128i side;
+ __m128i left;
+ __m128i right;
-drflac* drflac_open(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData)
-{
- return drflac_open_with_metadata_private(onRead, onSeek, NULL, drflac_container_unknown, pUserData, pUserData);
-}
-drflac* drflac_open_relaxed(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_container container, void* pUserData)
-{
- return drflac_open_with_metadata_private(onRead, onSeek, NULL, container, pUserData, pUserData);
-}
+ mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
-drflac* drflac_open_with_metadata(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, void* pUserData)
-{
- return drflac_open_with_metadata_private(onRead, onSeek, onMeta, drflac_container_unknown, pUserData, pUserData);
-}
-drflac* drflac_open_with_metadata_relaxed(drflac_read_proc onRead, drflac_seek_proc onSeek, drflac_meta_proc onMeta, drflac_container container, void* pUserData)
-{
- return drflac_open_with_metadata_private(onRead, onSeek, onMeta, container, pUserData, pUserData);
-}
+ mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01)));
-void drflac_close(drflac* pFlac)
-{
- if (pFlac == NULL) {
- return;
- }
+ left = _mm_slli_epi32(_mm_add_epi32(mid, side), shift);
+ right = _mm_slli_epi32(_mm_sub_epi32(mid, side), shift);
-#ifndef DR_FLAC_NO_STDIO
- /*
- If we opened the file with drflac_open_file() we will want to close the file handle. We can know whether or not drflac_open_file()
- was used by looking at the callbacks.
- */
- if (pFlac->bs.onRead == drflac__on_read_stdio) {
- fclose((FILE*)pFlac->bs.pUserData);
- }
+ left = _mm_srai_epi32(left, 16);
+ right = _mm_srai_epi32(right, 16);
-#ifndef DR_FLAC_NO_OGG
- /* Need to clean up Ogg streams a bit differently due to the way the bit streaming is chained. */
- if (pFlac->container == drflac_container_ogg) {
- drflac_oggbs* oggbs = (drflac_oggbs*)pFlac->_oggbs;
- drflac_assert(pFlac->bs.onRead == drflac__on_read_ogg);
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), drflac__mm_packs_interleaved_epi32(left, right));
+ }
- if (oggbs->onRead == drflac__on_read_stdio) {
- fclose((FILE*)oggbs->pUserData);
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
+
+ pOutputSamples[i*2+0] = (drflac_int16)(((mid + side) << shift) >> 16);
+ pOutputSamples[i*2+1] = (drflac_int16)(((mid - side) << shift) >> 16);
}
}
-#endif
-#endif
-
- DRFLAC_FREE(pFlac);
}
+#endif
-drflac_uint64 drflac__read_s32__misaligned(drflac* pFlac, drflac_uint64 samplesToRead, drflac_int32* bufferOut)
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_mid_side__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
{
- unsigned int channelCount = drflac__get_channel_count_from_channel_assignment(pFlac->currentFrame.header.channelAssignment);
- drflac_uint64 samplesRead;
+ drflac_uint64 i;
+ drflac_uint64 frameCount4;
+ int shift;
+ int32x4_t wbpsShift0_4; /* wbps = Wasted Bits Per Sample */
+ int32x4_t wbpsShift1_4; /* wbps = Wasted Bits Per Sample */
- /* We should never be calling this when the number of samples to read is >= the sample count. */
- drflac_assert(samplesToRead < channelCount);
- drflac_assert(pFlac->currentFrame.samplesRemaining > 0 && samplesToRead <= pFlac->currentFrame.samplesRemaining);
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
- samplesRead = 0;
- while (samplesToRead > 0) {
- drflac_uint64 totalSamplesInFrame = pFlac->currentFrame.header.blockSize * channelCount;
- drflac_uint64 samplesReadFromFrameSoFar = totalSamplesInFrame - pFlac->currentFrame.samplesRemaining;
- drflac_uint64 channelIndex = samplesReadFromFrameSoFar % channelCount;
- drflac_uint64 nextSampleInFrame = samplesReadFromFrameSoFar / channelCount;
- int decodedSample = 0;
+ frameCount4 = frameCount >> 2;
- switch (pFlac->currentFrame.header.channelAssignment)
- {
- case DRFLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE:
- {
- if (channelIndex == 0) {
- decodedSample = (int)((drflac_uint32)pFlac->currentFrame.subframes[channelIndex + 0].pDecodedSamples[nextSampleInFrame] << pFlac->currentFrame.subframes[channelIndex + 0].wastedBitsPerSample);
- } else {
- int side = (int)((drflac_uint32)pFlac->currentFrame.subframes[channelIndex + 0].pDecodedSamples[nextSampleInFrame] << pFlac->currentFrame.subframes[channelIndex + 0].wastedBitsPerSample);
- int left = (int)((drflac_uint32)pFlac->currentFrame.subframes[channelIndex - 1].pDecodedSamples[nextSampleInFrame] << pFlac->currentFrame.subframes[channelIndex - 1].wastedBitsPerSample);
- decodedSample = left - side;
- }
- } break;
+ wbpsShift0_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ wbpsShift1_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
- case DRFLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE:
- {
- if (channelIndex == 0) {
- int side = (int)((drflac_uint32)pFlac->currentFrame.subframes[channelIndex + 0].pDecodedSamples[nextSampleInFrame] << pFlac->currentFrame.subframes[channelIndex + 0].wastedBitsPerSample);
- int right = (int)((drflac_uint32)pFlac->currentFrame.subframes[channelIndex + 1].pDecodedSamples[nextSampleInFrame] << pFlac->currentFrame.subframes[channelIndex + 1].wastedBitsPerSample);
- decodedSample = side + right;
- } else {
- decodedSample = (int)((drflac_uint32)pFlac->currentFrame.subframes[channelIndex + 0].pDecodedSamples[nextSampleInFrame] << pFlac->currentFrame.subframes[channelIndex + 0].wastedBitsPerSample);
- }
- } break;
+ shift = unusedBitsPerSample;
+ if (shift == 0) {
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t mid;
+ int32x4_t side;
+ int32x4_t left;
+ int32x4_t right;
- case DRFLAC_CHANNEL_ASSIGNMENT_MID_SIDE:
- {
- int mid;
- int side;
- if (channelIndex == 0) {
- mid = (int)((drflac_uint32)pFlac->currentFrame.subframes[channelIndex + 0].pDecodedSamples[nextSampleInFrame] << pFlac->currentFrame.subframes[channelIndex + 0].wastedBitsPerSample);
- side = (int)((drflac_uint32)pFlac->currentFrame.subframes[channelIndex + 1].pDecodedSamples[nextSampleInFrame] << pFlac->currentFrame.subframes[channelIndex + 1].wastedBitsPerSample);
-
- mid = (((unsigned int)mid) << 1) | (side & 0x01);
- decodedSample = (mid + side) >> 1;
- } else {
- mid = (int)((drflac_uint32)pFlac->currentFrame.subframes[channelIndex - 1].pDecodedSamples[nextSampleInFrame] << pFlac->currentFrame.subframes[channelIndex - 1].wastedBitsPerSample);
- side = (int)((drflac_uint32)pFlac->currentFrame.subframes[channelIndex + 0].pDecodedSamples[nextSampleInFrame] << pFlac->currentFrame.subframes[channelIndex + 0].wastedBitsPerSample);
+ mid = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), wbpsShift0_4);
+ side = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), wbpsShift1_4);
- mid = (((unsigned int)mid) << 1) | (side & 0x01);
- decodedSample = (mid - side) >> 1;
- }
- } break;
+ mid = vorrq_s32(vshlq_n_s32(mid, 1), vandq_s32(side, vdupq_n_s32(1)));
- case DRFLAC_CHANNEL_ASSIGNMENT_INDEPENDENT:
- default:
- {
- decodedSample = (int)((drflac_uint32)pFlac->currentFrame.subframes[channelIndex + 0].pDecodedSamples[nextSampleInFrame] << pFlac->currentFrame.subframes[channelIndex + 0].wastedBitsPerSample);
- } break;
+ left = vshrq_n_s32(vaddq_s32(mid, side), 1);
+ right = vshrq_n_s32(vsubq_s32(mid, side), 1);
+
+ left = vshrq_n_s32(left, 16);
+ right = vshrq_n_s32(right, 16);
+
+ drflac__vst2q_s16(pOutputSamples + i*8, vzip_s16(vmovn_s32(left), vmovn_s32(right)));
}
- decodedSample = (int)((drflac_uint32)decodedSample << (32 - pFlac->bitsPerSample));
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
- if (bufferOut) {
- *bufferOut++ = decodedSample;
+ pOutputSamples[i*2+0] = (drflac_int16)(((mid + side) >> 1) >> 16);
+ pOutputSamples[i*2+1] = (drflac_int16)(((mid - side) >> 1) >> 16);
}
+ } else {
+ int32x4_t shift4;
- samplesRead += 1;
- pFlac->currentFrame.samplesRemaining -= 1;
- samplesToRead -= 1;
- }
+ shift -= 1;
+ shift4 = vdupq_n_s32(shift);
- return samplesRead;
-}
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t mid;
+ int32x4_t side;
+ int32x4_t left;
+ int32x4_t right;
-drflac_uint64 drflac_read_s32(drflac* pFlac, drflac_uint64 samplesToRead, drflac_int32* bufferOut)
-{
- drflac_uint64 samplesRead;
+ mid = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), wbpsShift0_4);
+ side = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), wbpsShift1_4);
- /* Note that is allowed to be null, in which case this will act like a seek. */
- if (pFlac == NULL || samplesToRead == 0) {
- return 0;
- }
+ mid = vorrq_s32(vshlq_n_s32(mid, 1), vandq_s32(side, vdupq_n_s32(1)));
- if (bufferOut == NULL) {
- return drflac__seek_forward_by_samples(pFlac, samplesToRead);
- }
+ left = vshlq_s32(vaddq_s32(mid, side), shift4);
+ right = vshlq_s32(vsubq_s32(mid, side), shift4);
- samplesRead = 0;
- while (samplesToRead > 0) {
- /* If we've run out of samples in this frame, go to the next. */
- if (pFlac->currentFrame.samplesRemaining == 0) {
- if (!drflac__read_and_decode_next_flac_frame(pFlac)) {
- break; /* Couldn't read the next frame, so just break from the loop and return. */
- }
- } else {
- /* Here is where we grab the samples and interleave them. */
- unsigned int channelCount = drflac__get_channel_count_from_channel_assignment(pFlac->currentFrame.header.channelAssignment);
- drflac_uint64 totalSamplesInFrame = pFlac->currentFrame.header.blockSize * channelCount;
- drflac_uint64 samplesReadFromFrameSoFar = totalSamplesInFrame - pFlac->currentFrame.samplesRemaining;
- drflac_uint64 misalignedSampleCount = samplesReadFromFrameSoFar % channelCount;
- drflac_uint64 alignedSampleCountPerChannel;
- drflac_uint64 firstAlignedSampleInFrame;
- unsigned int unusedBitsPerSample;
- drflac_uint64 alignedSamplesRead;
+ left = vshrq_n_s32(left, 16);
+ right = vshrq_n_s32(right, 16);
- if (misalignedSampleCount > 0) {
- drflac_uint64 misalignedSamplesRead = drflac__read_s32__misaligned(pFlac, misalignedSampleCount, bufferOut);
- samplesRead += misalignedSamplesRead;
- samplesReadFromFrameSoFar += misalignedSamplesRead;
- bufferOut += misalignedSamplesRead;
- samplesToRead -= misalignedSamplesRead;
- pFlac->currentSample += misalignedSamplesRead;
- }
+ drflac__vst2q_s16(pOutputSamples + i*8, vzip_s16(vmovn_s32(left), vmovn_s32(right)));
+ }
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
- alignedSampleCountPerChannel = samplesToRead / channelCount;
- if (alignedSampleCountPerChannel > pFlac->currentFrame.samplesRemaining / channelCount) {
- alignedSampleCountPerChannel = pFlac->currentFrame.samplesRemaining / channelCount;
- }
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
- firstAlignedSampleInFrame = samplesReadFromFrameSoFar / channelCount;
- unusedBitsPerSample = 32 - pFlac->bitsPerSample;
+ pOutputSamples[i*2+0] = (drflac_int16)(((mid + side) << shift) >> 16);
+ pOutputSamples[i*2+1] = (drflac_int16)(((mid - side) << shift) >> 16);
+ }
+ }
+}
+#endif
- switch (pFlac->currentFrame.header.channelAssignment)
- {
- case DRFLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE:
- {
- drflac_uint64 i;
- const drflac_int32* pDecodedSamples0 = pFlac->currentFrame.subframes[0].pDecodedSamples + firstAlignedSampleInFrame;
- const drflac_int32* pDecodedSamples1 = pFlac->currentFrame.subframes[1].pDecodedSamples + firstAlignedSampleInFrame;
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_mid_side(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
+{
+#if defined(DRFLAC_SUPPORT_SSE2)
+ if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s16__decode_mid_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s16__decode_mid_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#endif
+ {
+ /* Scalar fallback. */
+#if 0
+ drflac_read_pcm_frames_s16__decode_mid_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#else
+ drflac_read_pcm_frames_s16__decode_mid_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#endif
+ }
+}
- for (i = 0; i < alignedSampleCountPerChannel; ++i) {
- int left = (int)((drflac_uint32)pDecodedSamples0[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample));
- int side = (int)((drflac_uint32)pDecodedSamples1[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample));
- int right = left - side;
- bufferOut[i*2+0] = left;
- bufferOut[i*2+1] = right;
- }
- } break;
+#if 0
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_independent_stereo__reference(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
+{
+ for (drflac_uint64 i = 0; i < frameCount; ++i) {
+ pOutputSamples[i*2+0] = (drflac_int16)((pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample)) >> 16);
+ pOutputSamples[i*2+1] = (drflac_int16)((pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample)) >> 16);
+ }
+}
+#endif
- case DRFLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE:
- {
- drflac_uint64 i;
- const drflac_int32* pDecodedSamples0 = pFlac->currentFrame.subframes[0].pDecodedSamples + firstAlignedSampleInFrame;
- const drflac_int32* pDecodedSamples1 = pFlac->currentFrame.subframes[1].pDecodedSamples + firstAlignedSampleInFrame;
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_independent_stereo__scalar(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
+{
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
- for (i = 0; i < alignedSampleCountPerChannel; ++i) {
- int side = (int)((drflac_uint32)pDecodedSamples0[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample));
- int right = (int)((drflac_uint32)pDecodedSamples1[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample));
- int left = right + side;
+ int shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ int shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
- bufferOut[i*2+0] = left;
- bufferOut[i*2+1] = right;
- }
- } break;
+ for (i = 0; i < frameCount4; ++i) {
+ drflac_int32 tempL0 = pInputSamples0[i*4+0] << shift0;
+ drflac_int32 tempL1 = pInputSamples0[i*4+1] << shift0;
+ drflac_int32 tempL2 = pInputSamples0[i*4+2] << shift0;
+ drflac_int32 tempL3 = pInputSamples0[i*4+3] << shift0;
+
+ drflac_int32 tempR0 = pInputSamples1[i*4+0] << shift1;
+ drflac_int32 tempR1 = pInputSamples1[i*4+1] << shift1;
+ drflac_int32 tempR2 = pInputSamples1[i*4+2] << shift1;
+ drflac_int32 tempR3 = pInputSamples1[i*4+3] << shift1;
+
+ tempL0 >>= 16;
+ tempL1 >>= 16;
+ tempL2 >>= 16;
+ tempL3 >>= 16;
+
+ tempR0 >>= 16;
+ tempR1 >>= 16;
+ tempR2 >>= 16;
+ tempR3 >>= 16;
+
+ pOutputSamples[i*8+0] = (drflac_int16)tempL0;
+ pOutputSamples[i*8+1] = (drflac_int16)tempR0;
+ pOutputSamples[i*8+2] = (drflac_int16)tempL1;
+ pOutputSamples[i*8+3] = (drflac_int16)tempR1;
+ pOutputSamples[i*8+4] = (drflac_int16)tempL2;
+ pOutputSamples[i*8+5] = (drflac_int16)tempR2;
+ pOutputSamples[i*8+6] = (drflac_int16)tempL3;
+ pOutputSamples[i*8+7] = (drflac_int16)tempR3;
+ }
- case DRFLAC_CHANNEL_ASSIGNMENT_MID_SIDE:
- {
- drflac_uint64 i;
- const drflac_int32* pDecodedSamples0 = pFlac->currentFrame.subframes[0].pDecodedSamples + firstAlignedSampleInFrame;
- const drflac_int32* pDecodedSamples1 = pFlac->currentFrame.subframes[1].pDecodedSamples + firstAlignedSampleInFrame;
-
- for (i = 0; i < alignedSampleCountPerChannel; ++i) {
- int mid = (int)((drflac_uint32)pDecodedSamples0[i] << pFlac->currentFrame.subframes[0].wastedBitsPerSample);
- int side = (int)((drflac_uint32)pDecodedSamples1[i] << pFlac->currentFrame.subframes[1].wastedBitsPerSample);
-
- mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
-
- bufferOut[i*2+0] = (drflac_int32)((drflac_uint32)((mid + side) >> 1) << (unusedBitsPerSample));
- bufferOut[i*2+1] = (drflac_int32)((drflac_uint32)((mid - side) >> 1) << (unusedBitsPerSample));
- }
- } break;
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ pOutputSamples[i*2+0] = (drflac_int16)((pInputSamples0[i] << shift0) >> 16);
+ pOutputSamples[i*2+1] = (drflac_int16)((pInputSamples1[i] << shift1) >> 16);
+ }
+}
- case DRFLAC_CHANNEL_ASSIGNMENT_INDEPENDENT:
- default:
- {
- if (pFlac->currentFrame.header.channelAssignment == 1) /* 1 = Stereo */
- {
- /* Stereo optimized inner loop unroll. */
- drflac_uint64 i;
- const drflac_int32* pDecodedSamples0 = pFlac->currentFrame.subframes[0].pDecodedSamples + firstAlignedSampleInFrame;
- const drflac_int32* pDecodedSamples1 = pFlac->currentFrame.subframes[1].pDecodedSamples + firstAlignedSampleInFrame;
-
- for (i = 0; i < alignedSampleCountPerChannel; ++i) {
- bufferOut[i*2+0] = (drflac_int32)((drflac_uint32)pDecodedSamples0[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample));
- bufferOut[i*2+1] = (drflac_int32)((drflac_uint32)pDecodedSamples1[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample));
- }
- }
- else
- {
- /* Generic interleaving. */
- drflac_uint64 i;
- for (i = 0; i < alignedSampleCountPerChannel; ++i) {
- unsigned int j;
- for (j = 0; j < channelCount; ++j) {
- bufferOut[(i*channelCount)+j] = (drflac_int32)((drflac_uint32)(pFlac->currentFrame.subframes[j].pDecodedSamples[firstAlignedSampleInFrame + i]) << (unusedBitsPerSample + pFlac->currentFrame.subframes[j].wastedBitsPerSample));
- }
- }
- }
- } break;
- }
+#if defined(DRFLAC_SUPPORT_SSE2)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_independent_stereo__sse2(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
+{
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
- alignedSamplesRead = alignedSampleCountPerChannel * channelCount;
- samplesRead += alignedSamplesRead;
- samplesReadFromFrameSoFar += alignedSamplesRead;
- bufferOut += alignedSamplesRead;
- samplesToRead -= alignedSamplesRead;
- pFlac->currentSample += alignedSamplesRead;
- pFlac->currentFrame.samplesRemaining -= (unsigned int)alignedSamplesRead;
+ drflac_int32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ drflac_int32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+ for (i = 0; i < frameCount4; ++i) {
+ __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0);
+ __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1);
- /* At this point we may still have some excess samples left to read. */
- if (samplesToRead > 0 && pFlac->currentFrame.samplesRemaining > 0) {
- drflac_uint64 excessSamplesRead = 0;
- if (samplesToRead < pFlac->currentFrame.samplesRemaining) {
- excessSamplesRead = drflac__read_s32__misaligned(pFlac, samplesToRead, bufferOut);
- } else {
- excessSamplesRead = drflac__read_s32__misaligned(pFlac, pFlac->currentFrame.samplesRemaining, bufferOut);
- }
+ left = _mm_srai_epi32(left, 16);
+ right = _mm_srai_epi32(right, 16);
- samplesRead += excessSamplesRead;
- samplesReadFromFrameSoFar += excessSamplesRead;
- bufferOut += excessSamplesRead;
- samplesToRead -= excessSamplesRead;
- pFlac->currentSample += excessSamplesRead;
- }
- }
+ /* At this point we have results. We can now pack and interleave these into a single __m128i object and then store the in the output buffer. */
+ _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), drflac__mm_packs_interleaved_epi32(left, right));
}
- return samplesRead;
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ pOutputSamples[i*2+0] = (drflac_int16)((pInputSamples0[i] << shift0) >> 16);
+ pOutputSamples[i*2+1] = (drflac_int16)((pInputSamples1[i] << shift1) >> 16);
+ }
}
+#endif
-drflac_uint64 drflac_read_pcm_frames_s32(drflac* pFlac, drflac_uint64 framesToRead, drflac_int32* pBufferOut)
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_independent_stereo__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
{
-#if defined(_MSC_VER) && !defined(__clang__)
- #pragma warning(push)
- #pragma warning(disable:4996) /* was declared deprecated */
-#elif defined(__GNUC__) || defined(__clang__)
- #pragma GCC diagnostic push
- #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-#endif
- return drflac_read_s32(pFlac, framesToRead*pFlac->channels, pBufferOut) / pFlac->channels;
-#if defined(_MSC_VER) && !defined(__clang__)
- #pragma warning(pop)
-#elif defined(__GNUC__) || defined(__clang__)
- #pragma GCC diagnostic pop
-#endif
-}
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
+ drflac_int32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ drflac_int32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
-drflac_uint64 drflac_read_s16(drflac* pFlac, drflac_uint64 samplesToRead, drflac_int16* pBufferOut)
-{
- /* This reads samples in 2 passes and can probably be optimized. */
- drflac_uint64 totalSamplesRead = 0;
+ int32x4_t shift0_4 = vdupq_n_s32(shift0);
+ int32x4_t shift1_4 = vdupq_n_s32(shift1);
-#if defined(_MSC_VER) && !defined(__clang__)
- #pragma warning(push)
- #pragma warning(disable:4996) /* was declared deprecated */
-#elif defined(__GNUC__) || defined(__clang__)
- #pragma GCC diagnostic push
- #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-#endif
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t left;
+ int32x4_t right;
- while (samplesToRead > 0) {
- drflac_uint64 i;
- drflac_int32 samples32[4096];
- drflac_uint64 samplesJustRead = drflac_read_s32(pFlac, (samplesToRead > 4096) ? 4096 : samplesToRead, samples32);
- if (samplesJustRead == 0) {
- break; /* Reached the end. */
- }
+ left = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), shift0_4);
+ right = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), shift1_4);
- /* s32 -> s16 */
- for (i = 0; i < samplesJustRead; ++i) {
- pBufferOut[i] = (drflac_int16)(samples32[i] >> 16);
- }
+ left = vshrq_n_s32(left, 16);
+ right = vshrq_n_s32(right, 16);
- totalSamplesRead += samplesJustRead;
- samplesToRead -= samplesJustRead;
- pBufferOut += samplesJustRead;
+ drflac__vst2q_s16(pOutputSamples + i*8, vzip_s16(vmovn_s32(left), vmovn_s32(right)));
}
-#if defined(_MSC_VER) && !defined(__clang__)
- #pragma warning(pop)
-#elif defined(__GNUC__) || defined(__clang__)
- #pragma GCC diagnostic pop
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ pOutputSamples[i*2+0] = (drflac_int16)((pInputSamples0[i] << shift0) >> 16);
+ pOutputSamples[i*2+1] = (drflac_int16)((pInputSamples1[i] << shift1) >> 16);
+ }
+}
#endif
- return totalSamplesRead;
+static DRFLAC_INLINE void drflac_read_pcm_frames_s16__decode_independent_stereo(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, drflac_int16* pOutputSamples)
+{
+#if defined(DRFLAC_SUPPORT_SSE2)
+ if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s16__decode_independent_stereo__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_s16__decode_independent_stereo__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
+#endif
+ {
+ /* Scalar fallback. */
+#if 0
+ drflac_read_pcm_frames_s16__decode_independent_stereo__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#else
+ drflac_read_pcm_frames_s16__decode_independent_stereo__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+#endif
+ }
}
drflac_uint64 drflac_read_pcm_frames_s16(drflac* pFlac, drflac_uint64 framesToRead, drflac_int16* pBufferOut)
{
- /* This reads samples in 2 passes and can probably be optimized. */
- drflac_uint64 totalPCMFramesRead = 0;
+ drflac_uint64 framesRead;
+ drflac_int32 unusedBitsPerSample;
+
+ if (pFlac == NULL || framesToRead == 0) {
+ return 0;
+ }
+
+ if (pBufferOut == NULL) {
+ return drflac__seek_forward_by_pcm_frames(pFlac, framesToRead);
+ }
+
+ unusedBitsPerSample = 32 - pFlac->bitsPerSample;
+ framesRead = 0;
while (framesToRead > 0) {
- drflac_uint64 iFrame;
- drflac_int32 samples32[4096];
- drflac_uint64 framesJustRead = drflac_read_pcm_frames_s32(pFlac, (framesToRead > 4096/pFlac->channels) ? 4096/pFlac->channels : framesToRead, samples32);
- if (framesJustRead == 0) {
- break; /* Reached the end. */
- }
-
- /* s32 -> s16 */
- for (iFrame = 0; iFrame < framesJustRead; ++iFrame) {
- drflac_uint32 iChannel;
- for (iChannel = 0; iChannel < pFlac->channels; ++iChannel) {
- drflac_uint64 iSample = iFrame*pFlac->channels + iChannel;
- pBufferOut[iSample] = (drflac_int16)(samples32[iSample] >> 16);
+ /* If we've run out of samples in this frame, go to the next. */
+ if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) {
+ if (!drflac__read_and_decode_next_flac_frame(pFlac)) {
+ break; /* Couldn't read the next frame, so just break from the loop and return. */
}
- }
+ } else {
+ unsigned int channelCount = drflac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment);
+ drflac_uint64 iFirstPCMFrame = pFlac->currentFLACFrame.header.blockSizeInPCMFrames - pFlac->currentFLACFrame.pcmFramesRemaining;
+ drflac_uint64 frameCountThisIteration = framesToRead;
- totalPCMFramesRead += framesJustRead;
- framesToRead -= framesJustRead;
- pBufferOut += framesJustRead * pFlac->channels;
- }
+ if (frameCountThisIteration > pFlac->currentFLACFrame.pcmFramesRemaining) {
+ frameCountThisIteration = pFlac->currentFLACFrame.pcmFramesRemaining;
+ }
- return totalPCMFramesRead;
-}
+ if (channelCount == 2) {
+ const drflac_int32* pDecodedSamples0 = pFlac->currentFLACFrame.subframes[0].pSamplesS32 + iFirstPCMFrame;
+ const drflac_int32* pDecodedSamples1 = pFlac->currentFLACFrame.subframes[1].pSamplesS32 + iFirstPCMFrame;
+ switch (pFlac->currentFLACFrame.header.channelAssignment)
+ {
+ case DRFLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE:
+ {
+ drflac_read_pcm_frames_s16__decode_left_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
+ } break;
-drflac_uint64 drflac_read_f32(drflac* pFlac, drflac_uint64 samplesToRead, float* pBufferOut)
-{
- /* This reads samples in 2 passes and can probably be optimized. */
- drflac_uint64 totalSamplesRead = 0;
+ case DRFLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE:
+ {
+ drflac_read_pcm_frames_s16__decode_right_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
+ } break;
-#if defined(_MSC_VER) && !defined(__clang__)
- #pragma warning(push)
- #pragma warning(disable:4996) /* was declared deprecated */
-#elif defined(__GNUC__) || defined(__clang__)
- #pragma GCC diagnostic push
- #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-#endif
+ case DRFLAC_CHANNEL_ASSIGNMENT_MID_SIDE:
+ {
+ drflac_read_pcm_frames_s16__decode_mid_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
+ } break;
- while (samplesToRead > 0) {
- drflac_uint64 i;
- drflac_int32 samples32[4096];
- drflac_uint64 samplesJustRead = drflac_read_s32(pFlac, (samplesToRead > 4096) ? 4096 : samplesToRead, samples32);
- if (samplesJustRead == 0) {
- break; /* Reached the end. */
- }
+ case DRFLAC_CHANNEL_ASSIGNMENT_INDEPENDENT:
+ default:
+ {
+ drflac_read_pcm_frames_s16__decode_independent_stereo(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
+ } break;
+ }
+ } else {
+ /* Generic interleaving. */
+ drflac_uint64 i;
+ for (i = 0; i < frameCountThisIteration; ++i) {
+ unsigned int j;
+ for (j = 0; j < channelCount; ++j) {
+ drflac_int32 sampleS32 = (drflac_int32)((drflac_uint32)(pFlac->currentFLACFrame.subframes[j].pSamplesS32[iFirstPCMFrame + i]) << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[j].wastedBitsPerSample));
+ pBufferOut[(i*channelCount)+j] = (drflac_int16)(sampleS32 >> 16);
+ }
+ }
+ }
- /* s32 -> f32 */
- for (i = 0; i < samplesJustRead; ++i) {
- pBufferOut[i] = (float)(samples32[i] / 2147483648.0);
+ framesRead += frameCountThisIteration;
+ pBufferOut += frameCountThisIteration * channelCount;
+ framesToRead -= frameCountThisIteration;
+ pFlac->currentPCMFrame += frameCountThisIteration;
+ pFlac->currentFLACFrame.pcmFramesRemaining -= (drflac_uint32)frameCountThisIteration;
}
-
- totalSamplesRead += samplesJustRead;
- samplesToRead -= samplesJustRead;
- pBufferOut += samplesJustRead;
}
-#if defined(_MSC_VER) && !defined(__clang__)
- #pragma warning(pop)
-#elif defined(__GNUC__) || defined(__clang__)
- #pragma GCC diagnostic pop
-#endif
-
- return totalSamplesRead;
+ return framesRead;
}
+
#if 0
static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_left_side__reference(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, float* pOutputSamples)
{
drflac_uint64 i;
for (i = 0; i < frameCount; ++i) {
- int left = pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample);
- int side = pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample);
- int right = left - side;
+ drflac_int32 left = pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ drflac_int32 side = pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+ drflac_int32 right = left - side;
pOutputSamples[i*2+0] = (float)(left / 2147483648.0);
pOutputSamples[i*2+1] = (float)(right / 2147483648.0);
@@ -7135,8 +9390,8 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_left_side__scalar(d
float factor = 1 / 2147483648.0;
- drflac_int32 shift0 = unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- drflac_int32 shift1 = unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
for (i = 0; i < frameCount4; ++i) {
drflac_int32 left0 = pInputSamples0[i*4+0] << shift0;
drflac_int32 left1 = pInputSamples0[i*4+1] << shift0;
@@ -7164,9 +9419,9 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_left_side__scalar(d
}
for (i = (frameCount4 << 2); i < frameCount; ++i) {
- int left = pInputSamples0[i] << shift0;
- int side = pInputSamples1[i] << shift1;
- int right = left - side;
+ drflac_int32 left = pInputSamples0[i] << shift0;
+ drflac_int32 side = pInputSamples1[i] << shift1;
+ drflac_int32 right = left - side;
pOutputSamples[i*2+0] = (float)(left * factor);
pOutputSamples[i*2+1] = (float)(right * factor);
@@ -7177,43 +9432,84 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_left_side__scalar(d
static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_left_side__sse2(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, float* pOutputSamples)
{
drflac_uint64 frameCount4;
- __m128 factor;
- int shift0;
- int shift1;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
drflac_uint64 i;
+ __m128 factor;
- drflac_assert(pFlac->bitsPerSample <= 24);
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
frameCount4 = frameCount >> 2;
factor = _mm_set1_ps(1.0f / 8388608.0f);
- shift0 = (unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample) - 8;
- shift1 = (unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample) - 8;
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8;
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8;
for (i = 0; i < frameCount4; ++i) {
- __m128i inputSample0 = _mm_loadu_si128((const __m128i*)pInputSamples0 + i);
- __m128i inputSample1 = _mm_loadu_si128((const __m128i*)pInputSamples1 + i);
-
- __m128i left = _mm_slli_epi32(inputSample0, shift0);
- __m128i side = _mm_slli_epi32(inputSample1, shift1);
+ __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0);
+ __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1);
__m128i right = _mm_sub_epi32(left, side);
__m128 leftf = _mm_mul_ps(_mm_cvtepi32_ps(left), factor);
__m128 rightf = _mm_mul_ps(_mm_cvtepi32_ps(right), factor);
- pOutputSamples[i*8+0] = ((float*)&leftf)[0];
- pOutputSamples[i*8+1] = ((float*)&rightf)[0];
- pOutputSamples[i*8+2] = ((float*)&leftf)[1];
- pOutputSamples[i*8+3] = ((float*)&rightf)[1];
- pOutputSamples[i*8+4] = ((float*)&leftf)[2];
- pOutputSamples[i*8+5] = ((float*)&rightf)[2];
- pOutputSamples[i*8+6] = ((float*)&leftf)[3];
- pOutputSamples[i*8+7] = ((float*)&rightf)[3];
+ _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf));
+ _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf));
}
for (i = (frameCount4 << 2); i < frameCount; ++i) {
- int left = pInputSamples0[i] << shift0;
- int side = pInputSamples1[i] << shift1;
- int right = left - side;
+ drflac_int32 left = pInputSamples0[i] << shift0;
+ drflac_int32 side = pInputSamples1[i] << shift1;
+ drflac_int32 right = left - side;
+
+ pOutputSamples[i*2+0] = (float)(left / 8388608.0f);
+ pOutputSamples[i*2+1] = (float)(right / 8388608.0f);
+ }
+}
+#endif
+
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_left_side__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, float* pOutputSamples)
+{
+ drflac_uint64 frameCount4;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
+ drflac_uint64 i;
+ float32x4_t factor4;
+ int32x4_t shift0_4;
+ int32x4_t shift1_4;
+
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
+
+ frameCount4 = frameCount >> 2;
+
+ factor4 = vdupq_n_f32(1.0f / 8388608.0f);
+
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8;
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8;
+
+ shift0_4 = vdupq_n_s32(shift0);
+ shift1_4 = vdupq_n_s32(shift1);
+
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t left;
+ int32x4_t side;
+ int32x4_t right;
+ float32x4_t leftf;
+ float32x4_t rightf;
+
+ left = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), shift0_4);
+ side = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), shift1_4);
+ right = vsubq_s32(left, side);
+ leftf = vmulq_f32(vcvtq_f32_s32(left), factor4);
+ rightf = vmulq_f32(vcvtq_f32_s32(right), factor4);
+
+ drflac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf));
+ }
+
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 left = pInputSamples0[i] << shift0;
+ drflac_int32 side = pInputSamples1[i] << shift1;
+ drflac_int32 right = left - side;
pOutputSamples[i*2+0] = (float)(left / 8388608.0f);
pOutputSamples[i*2+1] = (float)(right / 8388608.0f);
@@ -7227,6 +9523,10 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_left_side(drflac* p
if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
drflac_read_pcm_frames_f32__decode_left_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
} else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_f32__decode_left_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
#endif
{
/* Scalar fallback. */
@@ -7244,9 +9544,9 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_right_side__referen
{
drflac_uint64 i;
for (i = 0; i < frameCount; ++i) {
- int side = pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample);
- int right = pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample);
- int left = right + side;
+ drflac_int32 side = pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ drflac_int32 right = pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+ drflac_int32 left = right + side;
pOutputSamples[i*2+0] = (float)(left / 2147483648.0);
pOutputSamples[i*2+1] = (float)(right / 2147483648.0);
@@ -7261,8 +9561,8 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_right_side__scalar(
float factor = 1 / 2147483648.0;
- drflac_int32 shift0 = unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- drflac_int32 shift1 = unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
for (i = 0; i < frameCount4; ++i) {
drflac_int32 side0 = pInputSamples0[i*4+0] << shift0;
drflac_int32 side1 = pInputSamples0[i*4+1] << shift0;
@@ -7290,9 +9590,9 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_right_side__scalar(
}
for (i = (frameCount4 << 2); i < frameCount; ++i) {
- int side = pInputSamples0[i] << shift0;
- int right = pInputSamples1[i] << shift1;
- int left = right + side;
+ drflac_int32 side = pInputSamples0[i] << shift0;
+ drflac_int32 right = pInputSamples1[i] << shift1;
+ drflac_int32 left = right + side;
pOutputSamples[i*2+0] = (float)(left * factor);
pOutputSamples[i*2+1] = (float)(right * factor);
@@ -7303,43 +9603,84 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_right_side__scalar(
static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_right_side__sse2(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, float* pOutputSamples)
{
drflac_uint64 frameCount4;
- __m128 factor;
- int shift0;
- int shift1;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
drflac_uint64 i;
+ __m128 factor;
- drflac_assert(pFlac->bitsPerSample <= 24);
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
frameCount4 = frameCount >> 2;
factor = _mm_set1_ps(1.0f / 8388608.0f);
- shift0 = (unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample) - 8;
- shift1 = (unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample) - 8;
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8;
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8;
for (i = 0; i < frameCount4; ++i) {
- __m128i inputSample0 = _mm_loadu_si128((const __m128i*)pInputSamples0 + i);
- __m128i inputSample1 = _mm_loadu_si128((const __m128i*)pInputSamples1 + i);
-
- __m128i side = _mm_slli_epi32(inputSample0, shift0);
- __m128i right = _mm_slli_epi32(inputSample1, shift1);
+ __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0);
+ __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1);
__m128i left = _mm_add_epi32(right, side);
__m128 leftf = _mm_mul_ps(_mm_cvtepi32_ps(left), factor);
__m128 rightf = _mm_mul_ps(_mm_cvtepi32_ps(right), factor);
- pOutputSamples[i*8+0] = ((float*)&leftf)[0];
- pOutputSamples[i*8+1] = ((float*)&rightf)[0];
- pOutputSamples[i*8+2] = ((float*)&leftf)[1];
- pOutputSamples[i*8+3] = ((float*)&rightf)[1];
- pOutputSamples[i*8+4] = ((float*)&leftf)[2];
- pOutputSamples[i*8+5] = ((float*)&rightf)[2];
- pOutputSamples[i*8+6] = ((float*)&leftf)[3];
- pOutputSamples[i*8+7] = ((float*)&rightf)[3];
+ _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf));
+ _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf));
+ }
+
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 side = pInputSamples0[i] << shift0;
+ drflac_int32 right = pInputSamples1[i] << shift1;
+ drflac_int32 left = right + side;
+
+ pOutputSamples[i*2+0] = (float)(left / 8388608.0f);
+ pOutputSamples[i*2+1] = (float)(right / 8388608.0f);
+ }
+}
+#endif
+
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_right_side__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, float* pOutputSamples)
+{
+ drflac_uint64 frameCount4;
+ drflac_int32 shift0;
+ drflac_int32 shift1;
+ drflac_uint64 i;
+ float32x4_t factor4;
+ int32x4_t shift0_4;
+ int32x4_t shift1_4;
+
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
+
+ frameCount4 = frameCount >> 2;
+
+ factor4 = vdupq_n_f32(1.0f / 8388608.0f);
+
+ shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8;
+ shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8;
+
+ shift0_4 = vdupq_n_s32(shift0);
+ shift1_4 = vdupq_n_s32(shift1);
+
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t side;
+ int32x4_t right;
+ int32x4_t left;
+ float32x4_t leftf;
+ float32x4_t rightf;
+
+ side = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), shift0_4);
+ right = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), shift1_4);
+ left = vaddq_s32(right, side);
+ leftf = vmulq_f32(vcvtq_f32_s32(left), factor4);
+ rightf = vmulq_f32(vcvtq_f32_s32(right), factor4);
+
+ drflac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf));
}
for (i = (frameCount4 << 2); i < frameCount; ++i) {
- int side = pInputSamples0[i] << shift0;
- int right = pInputSamples1[i] << shift1;
- int left = right + side;
+ drflac_int32 side = pInputSamples0[i] << shift0;
+ drflac_int32 right = pInputSamples1[i] << shift1;
+ drflac_int32 left = right + side;
pOutputSamples[i*2+0] = (float)(left / 8388608.0f);
pOutputSamples[i*2+1] = (float)(right / 8388608.0f);
@@ -7353,6 +9694,10 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_right_side(drflac*
if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
drflac_read_pcm_frames_f32__decode_right_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
} else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_f32__decode_right_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
#endif
{
/* Scalar fallback. */
@@ -7369,9 +9714,9 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_right_side(drflac*
static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_mid_side__reference(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, float* pOutputSamples)
{
for (drflac_uint64 i = 0; i < frameCount; ++i) {
- int mid = pInputSamples0[i] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- int side = pInputSamples1[i] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
-
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
pOutputSamples[i*2+0] = (float)((((mid + side) >> 1) << (unusedBitsPerSample)) / 2147483648.0);
@@ -7391,24 +9736,24 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_mid_side__scalar(dr
if (shift > 0) {
shift -= 1;
for (i = 0; i < frameCount4; ++i) {
- int temp0L;
- int temp1L;
- int temp2L;
- int temp3L;
- int temp0R;
- int temp1R;
- int temp2R;
- int temp3R;
-
- int mid0 = pInputSamples0[i*4+0] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- int mid1 = pInputSamples0[i*4+1] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- int mid2 = pInputSamples0[i*4+2] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- int mid3 = pInputSamples0[i*4+3] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
-
- int side0 = pInputSamples1[i*4+0] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
- int side1 = pInputSamples1[i*4+1] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
- int side2 = pInputSamples1[i*4+2] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
- int side3 = pInputSamples1[i*4+3] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 temp0L;
+ drflac_int32 temp1L;
+ drflac_int32 temp2L;
+ drflac_int32 temp3L;
+ drflac_int32 temp0R;
+ drflac_int32 temp1R;
+ drflac_int32 temp2R;
+ drflac_int32 temp3R;
+
+ drflac_int32 mid0 = pInputSamples0[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid1 = pInputSamples0[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid2 = pInputSamples0[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid3 = pInputSamples0[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+
+ drflac_int32 side0 = pInputSamples1[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side1 = pInputSamples1[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side2 = pInputSamples1[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side3 = pInputSamples1[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
mid0 = (((drflac_uint32)mid0) << 1) | (side0 & 0x01);
mid1 = (((drflac_uint32)mid1) << 1) | (side1 & 0x01);
@@ -7436,24 +9781,24 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_mid_side__scalar(dr
}
} else {
for (i = 0; i < frameCount4; ++i) {
- int temp0L;
- int temp1L;
- int temp2L;
- int temp3L;
- int temp0R;
- int temp1R;
- int temp2R;
- int temp3R;
-
- int mid0 = pInputSamples0[i*4+0] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- int mid1 = pInputSamples0[i*4+1] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- int mid2 = pInputSamples0[i*4+2] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- int mid3 = pInputSamples0[i*4+3] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
-
- int side0 = pInputSamples1[i*4+0] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
- int side1 = pInputSamples1[i*4+1] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
- int side2 = pInputSamples1[i*4+2] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
- int side3 = pInputSamples1[i*4+3] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 temp0L;
+ drflac_int32 temp1L;
+ drflac_int32 temp2L;
+ drflac_int32 temp3L;
+ drflac_int32 temp0R;
+ drflac_int32 temp1R;
+ drflac_int32 temp2R;
+ drflac_int32 temp3R;
+
+ drflac_int32 mid0 = pInputSamples0[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid1 = pInputSamples0[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid2 = pInputSamples0[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 mid3 = pInputSamples0[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+
+ drflac_int32 side0 = pInputSamples1[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side1 = pInputSamples1[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side2 = pInputSamples1[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+ drflac_int32 side3 = pInputSamples1[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
mid0 = (((drflac_uint32)mid0) << 1) | (side0 & 0x01);
mid1 = (((drflac_uint32)mid1) << 1) | (side1 & 0x01);
@@ -7482,9 +9827,9 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_mid_side__scalar(dr
}
for (i = (frameCount4 << 2); i < frameCount; ++i) {
- int mid = pInputSamples0[i] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- int side = pInputSamples1[i] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
-
+ int mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ int side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
pOutputSamples[i*2+0] = (float)((((mid + side) >> 1) << unusedBitsPerSample) * factor);
@@ -7498,10 +9843,10 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_mid_side__sse2(drfl
drflac_uint64 i;
drflac_uint64 frameCount4;
float factor;
- int shift;
+ drflac_int32 shift;
__m128 factor128;
- drflac_assert(pFlac->bitsPerSample <= 24);
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
frameCount4 = frameCount >> 2;
@@ -7511,52 +9856,40 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_mid_side__sse2(drfl
shift = unusedBitsPerSample - 8;
if (shift == 0) {
for (i = 0; i < frameCount4; ++i) {
+ __m128i mid;
+ __m128i side;
__m128i tempL;
__m128i tempR;
__m128 leftf;
__m128 rightf;
- __m128i inputSample0 = _mm_loadu_si128((const __m128i*)pInputSamples0 + i);
- __m128i inputSample1 = _mm_loadu_si128((const __m128i*)pInputSamples1 + i);
-
- __m128i mid = _mm_slli_epi32(inputSample0, pFlac->currentFrame.subframes[0].wastedBitsPerSample);
- __m128i side = _mm_slli_epi32(inputSample1, pFlac->currentFrame.subframes[1].wastedBitsPerSample);
+ mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
- mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01)));
+ mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01)));
- tempL = _mm_add_epi32(mid, side);
- tempR = _mm_sub_epi32(mid, side);
-
- /* Signed bit shift. */
- tempL = _mm_or_si128(_mm_srli_epi32(tempL, 1), _mm_and_si128(tempL, _mm_set1_epi32(0x80000000)));
- tempR = _mm_or_si128(_mm_srli_epi32(tempR, 1), _mm_and_si128(tempR, _mm_set1_epi32(0x80000000)));
+ tempL = _mm_srai_epi32(_mm_add_epi32(mid, side), 1);
+ tempR = _mm_srai_epi32(_mm_sub_epi32(mid, side), 1);
leftf = _mm_mul_ps(_mm_cvtepi32_ps(tempL), factor128);
rightf = _mm_mul_ps(_mm_cvtepi32_ps(tempR), factor128);
- pOutputSamples[i*8+0] = ((float*)&leftf)[0];
- pOutputSamples[i*8+1] = ((float*)&rightf)[0];
- pOutputSamples[i*8+2] = ((float*)&leftf)[1];
- pOutputSamples[i*8+3] = ((float*)&rightf)[1];
- pOutputSamples[i*8+4] = ((float*)&leftf)[2];
- pOutputSamples[i*8+5] = ((float*)&rightf)[2];
- pOutputSamples[i*8+6] = ((float*)&leftf)[3];
- pOutputSamples[i*8+7] = ((float*)&rightf)[3];
+ _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf));
+ _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf));
}
for (i = (frameCount4 << 2); i < frameCount; ++i) {
- int mid = pInputSamples0[i] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- int side = pInputSamples1[i] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
-
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
pOutputSamples[i*2+0] = (float)(((mid + side) >> 1) * factor);
pOutputSamples[i*2+1] = (float)(((mid - side) >> 1) * factor);
}
} else {
+ shift -= 1;
for (i = 0; i < frameCount4; ++i) {
- __m128i inputSample0;
- __m128i inputSample1;
__m128i mid;
__m128i side;
__m128i tempL;
@@ -7564,43 +9897,124 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_mid_side__sse2(drfl
__m128 leftf;
__m128 rightf;
- inputSample0 = _mm_loadu_si128((const __m128i*)pInputSamples0 + i);
- inputSample1 = _mm_loadu_si128((const __m128i*)pInputSamples1 + i);
-
- mid = _mm_slli_epi32(inputSample0, pFlac->currentFrame.subframes[0].wastedBitsPerSample);
- side = _mm_slli_epi32(inputSample1, pFlac->currentFrame.subframes[1].wastedBitsPerSample);
+ mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
- mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01)));
+ mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01)));
- tempL = _mm_slli_epi32(_mm_srli_epi32(_mm_add_epi32(mid, side), 1), shift);
- tempR = _mm_slli_epi32(_mm_srli_epi32(_mm_sub_epi32(mid, side), 1), shift);
+ tempL = _mm_slli_epi32(_mm_add_epi32(mid, side), shift);
+ tempR = _mm_slli_epi32(_mm_sub_epi32(mid, side), shift);
leftf = _mm_mul_ps(_mm_cvtepi32_ps(tempL), factor128);
rightf = _mm_mul_ps(_mm_cvtepi32_ps(tempR), factor128);
- pOutputSamples[i*8+0] = ((float*)&leftf)[0];
- pOutputSamples[i*8+1] = ((float*)&rightf)[0];
- pOutputSamples[i*8+2] = ((float*)&leftf)[1];
- pOutputSamples[i*8+3] = ((float*)&rightf)[1];
- pOutputSamples[i*8+4] = ((float*)&leftf)[2];
- pOutputSamples[i*8+5] = ((float*)&rightf)[2];
- pOutputSamples[i*8+6] = ((float*)&leftf)[3];
- pOutputSamples[i*8+7] = ((float*)&rightf)[3];
+ _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf));
+ _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf));
}
for (i = (frameCount4 << 2); i < frameCount; ++i) {
- int mid = pInputSamples0[i] << pFlac->currentFrame.subframes[0].wastedBitsPerSample;
- int side = pInputSamples1[i] << pFlac->currentFrame.subframes[1].wastedBitsPerSample;
-
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
- pOutputSamples[i*2+0] = (float)((((mid + side) >> 1) << shift) * factor);
- pOutputSamples[i*2+1] = (float)((((mid - side) >> 1) << shift) * factor);
+ pOutputSamples[i*2+0] = (float)(((mid + side) << shift) * factor);
+ pOutputSamples[i*2+1] = (float)(((mid - side) << shift) * factor);
}
}
}
#endif
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_mid_side__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, float* pOutputSamples)
+{
+ drflac_uint64 i;
+ drflac_uint64 frameCount4;
+ float factor;
+ drflac_int32 shift;
+ float32x4_t factor4;
+ int32x4_t shift4;
+ int32x4_t wbps0_4; /* Wasted Bits Per Sample */
+ int32x4_t wbps1_4; /* Wasted Bits Per Sample */
+
+ DRFLAC_ASSERT(pFlac->bitsPerSample <= 24);
+
+ frameCount4 = frameCount >> 2;
+
+ factor = 1.0f / 8388608.0f;
+ factor4 = vdupq_n_f32(factor);
+
+ wbps0_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ wbps1_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
+
+ shift = unusedBitsPerSample - 8;
+ if (shift == 0) {
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t lefti;
+ int32x4_t righti;
+ float32x4_t leftf;
+ float32x4_t rightf;
+
+ int32x4_t mid = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), wbps0_4);
+ int32x4_t side = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), wbps1_4);
+
+ mid = vorrq_s32(vshlq_n_s32(mid, 1), vandq_s32(side, vdupq_n_s32(1)));
+
+ lefti = vshrq_n_s32(vaddq_s32(mid, side), 1);
+ righti = vshrq_n_s32(vsubq_s32(mid, side), 1);
+
+ leftf = vmulq_f32(vcvtq_f32_s32(lefti), factor4);
+ rightf = vmulq_f32(vcvtq_f32_s32(righti), factor4);
+
+ drflac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf));
+ }
+
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
+
+ pOutputSamples[i*2+0] = (float)(((mid + side) >> 1) * factor);
+ pOutputSamples[i*2+1] = (float)(((mid - side) >> 1) * factor);
+ }
+ } else {
+ shift -= 1;
+ shift4 = vdupq_n_s32(shift);
+ for (i = 0; i < frameCount4; ++i) {
+ int32x4_t mid;
+ int32x4_t side;
+ int32x4_t lefti;
+ int32x4_t righti;
+ float32x4_t leftf;
+ float32x4_t rightf;
+
+ mid = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), wbps0_4);
+ side = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), wbps1_4);
+
+ mid = vorrq_s32(vshlq_n_s32(mid, 1), vandq_s32(side, vdupq_n_s32(1)));
+
+ lefti = vshlq_s32(vaddq_s32(mid, side), shift4);
+ righti = vshlq_s32(vsubq_s32(mid, side), shift4);
+
+ leftf = vmulq_f32(vcvtq_f32_s32(lefti), factor4);
+ rightf = vmulq_f32(vcvtq_f32_s32(righti), factor4);
+
+ drflac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf));
+ }
+
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ drflac_int32 mid = pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample;
+ drflac_int32 side = pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample;
+
+ mid = (((drflac_uint32)mid) << 1) | (side & 0x01);
+
+ pOutputSamples[i*2+0] = (float)(((mid + side) << shift) * factor);
+ pOutputSamples[i*2+1] = (float)(((mid - side) << shift) * factor);
+ }
+ }
+}
+#endif
static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_mid_side(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, float* pOutputSamples)
{
@@ -7608,6 +10022,10 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_mid_side(drflac* pF
if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
drflac_read_pcm_frames_f32__decode_mid_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
} else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_f32__decode_mid_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
#endif
{
/* Scalar fallback. */
@@ -7623,8 +10041,8 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_mid_side(drflac* pF
static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_independent_stereo__reference(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, float* pOutputSamples)
{
for (drflac_uint64 i = 0; i < frameCount; ++i) {
- pOutputSamples[i*2+0] = (float)((pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample)) / 2147483648.0);
- pOutputSamples[i*2+1] = (float)((pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample)) / 2147483648.0);
+ pOutputSamples[i*2+0] = (float)((pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample)) / 2147483648.0);
+ pOutputSamples[i*2+1] = (float)((pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample)) / 2147483648.0);
}
}
#endif
@@ -7636,19 +10054,19 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_independent_stereo_
float factor = 1 / 2147483648.0;
- int shift0 = (unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample);
- int shift1 = (unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample);
+ drflac_int32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample);
+ drflac_int32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample);
for (i = 0; i < frameCount4; ++i) {
- int tempL0 = pInputSamples0[i*4+0] << shift0;
- int tempL1 = pInputSamples0[i*4+1] << shift0;
- int tempL2 = pInputSamples0[i*4+2] << shift0;
- int tempL3 = pInputSamples0[i*4+3] << shift0;
+ drflac_int32 tempL0 = pInputSamples0[i*4+0] << shift0;
+ drflac_int32 tempL1 = pInputSamples0[i*4+1] << shift0;
+ drflac_int32 tempL2 = pInputSamples0[i*4+2] << shift0;
+ drflac_int32 tempL3 = pInputSamples0[i*4+3] << shift0;
- int tempR0 = pInputSamples1[i*4+0] << shift1;
- int tempR1 = pInputSamples1[i*4+1] << shift1;
- int tempR2 = pInputSamples1[i*4+2] << shift1;
- int tempR3 = pInputSamples1[i*4+3] << shift1;
+ drflac_int32 tempR0 = pInputSamples1[i*4+0] << shift1;
+ drflac_int32 tempR1 = pInputSamples1[i*4+1] << shift1;
+ drflac_int32 tempR2 = pInputSamples1[i*4+2] << shift1;
+ drflac_int32 tempR3 = pInputSamples1[i*4+3] << shift1;
pOutputSamples[i*8+0] = (float)(tempL0 * factor);
pOutputSamples[i*8+1] = (float)(tempR0 * factor);
@@ -7675,27 +10093,60 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_independent_stereo_
float factor = 1.0f / 8388608.0f;
__m128 factor128 = _mm_set1_ps(1.0f / 8388608.0f);
- int shift0 = (unusedBitsPerSample + pFlac->currentFrame.subframes[0].wastedBitsPerSample) - 8;
- int shift1 = (unusedBitsPerSample + pFlac->currentFrame.subframes[1].wastedBitsPerSample) - 8;
+ drflac_int32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8;
+ drflac_int32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8;
+
+ for (i = 0; i < frameCount4; ++i) {
+ __m128i lefti;
+ __m128i righti;
+ __m128 leftf;
+ __m128 rightf;
+
+ lefti = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0);
+ righti = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1);
+
+ leftf = _mm_mul_ps(_mm_cvtepi32_ps(lefti), factor128);
+ rightf = _mm_mul_ps(_mm_cvtepi32_ps(righti), factor128);
+
+ _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf));
+ _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf));
+ }
+
+ for (i = (frameCount4 << 2); i < frameCount; ++i) {
+ pOutputSamples[i*2+0] = (float)((pInputSamples0[i] << shift0) * factor);
+ pOutputSamples[i*2+1] = (float)((pInputSamples1[i] << shift1) * factor);
+ }
+}
+#endif
+
+#if defined(DRFLAC_SUPPORT_NEON)
+static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_independent_stereo__neon(drflac* pFlac, drflac_uint64 frameCount, drflac_int32 unusedBitsPerSample, const drflac_int32* pInputSamples0, const drflac_int32* pInputSamples1, float* pOutputSamples)
+{
+ drflac_uint64 i;
+ drflac_uint64 frameCount4 = frameCount >> 2;
+
+ float factor = 1.0f / 8388608.0f;
+ float32x4_t factor4 = vdupq_n_f32(factor);
+
+ drflac_int32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8;
+ drflac_int32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8;
+
+ int32x4_t shift0_4 = vdupq_n_s32(shift0);
+ int32x4_t shift1_4 = vdupq_n_s32(shift1);
for (i = 0; i < frameCount4; ++i) {
- __m128i inputSample0 = _mm_loadu_si128((const __m128i*)pInputSamples0 + i);
- __m128i inputSample1 = _mm_loadu_si128((const __m128i*)pInputSamples1 + i);
+ int32x4_t lefti;
+ int32x4_t righti;
+ float32x4_t leftf;
+ float32x4_t rightf;
- __m128i i32L = _mm_slli_epi32(inputSample0, shift0);
- __m128i i32R = _mm_slli_epi32(inputSample1, shift1);
+ lefti = vshlq_s32(vld1q_s32(pInputSamples0 + i*4), shift0_4);
+ righti = vshlq_s32(vld1q_s32(pInputSamples1 + i*4), shift1_4);
- __m128 f32L = _mm_mul_ps(_mm_cvtepi32_ps(i32L), factor128);
- __m128 f32R = _mm_mul_ps(_mm_cvtepi32_ps(i32R), factor128);
+ leftf = vmulq_f32(vcvtq_f32_s32(lefti), factor4);
+ rightf = vmulq_f32(vcvtq_f32_s32(righti), factor4);
- pOutputSamples[i*8+0] = ((float*)&f32L)[0];
- pOutputSamples[i*8+1] = ((float*)&f32R)[0];
- pOutputSamples[i*8+2] = ((float*)&f32L)[1];
- pOutputSamples[i*8+3] = ((float*)&f32R)[1];
- pOutputSamples[i*8+4] = ((float*)&f32L)[2];
- pOutputSamples[i*8+5] = ((float*)&f32R)[2];
- pOutputSamples[i*8+6] = ((float*)&f32L)[3];
- pOutputSamples[i*8+7] = ((float*)&f32R)[3];
+ drflac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf));
}
for (i = (frameCount4 << 2); i < frameCount; ++i) {
@@ -7711,6 +10162,10 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_independent_stereo(
if (drflac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) {
drflac_read_pcm_frames_f32__decode_independent_stereo__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
} else
+#elif defined(DRFLAC_SUPPORT_NEON)
+ if (drflac__gIsNEONSupported && pFlac->bitsPerSample <= 24) {
+ drflac_read_pcm_frames_f32__decode_independent_stereo__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples);
+ } else
#endif
{
/* Scalar fallback. */
@@ -7725,6 +10180,7 @@ static DRFLAC_INLINE void drflac_read_pcm_frames_f32__decode_independent_stereo(
drflac_uint64 drflac_read_pcm_frames_f32(drflac* pFlac, drflac_uint64 framesToRead, float* pBufferOut)
{
drflac_uint64 framesRead;
+ drflac_int32 unusedBitsPerSample;
if (pFlac == NULL || framesToRead == 0) {
return 0;
@@ -7734,31 +10190,29 @@ drflac_uint64 drflac_read_pcm_frames_f32(drflac* pFlac, drflac_uint64 framesToRe
return drflac__seek_forward_by_pcm_frames(pFlac, framesToRead);
}
+ unusedBitsPerSample = 32 - pFlac->bitsPerSample;
+
framesRead = 0;
while (framesToRead > 0) {
/* If we've run out of samples in this frame, go to the next. */
- if (pFlac->currentFrame.samplesRemaining == 0) {
+ if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) {
if (!drflac__read_and_decode_next_flac_frame(pFlac)) {
break; /* Couldn't read the next frame, so just break from the loop and return. */
}
} else {
- unsigned int channelCount = drflac__get_channel_count_from_channel_assignment(pFlac->currentFrame.header.channelAssignment);
- drflac_uint64 totalFramesInPacket = pFlac->currentFrame.header.blockSize;
- drflac_uint64 framesReadFromPacketSoFar = totalFramesInPacket - (pFlac->currentFrame.samplesRemaining/channelCount);
- drflac_uint64 iFirstPCMFrame = framesReadFromPacketSoFar;
- drflac_int32 unusedBitsPerSample = 32 - pFlac->bitsPerSample;
+ unsigned int channelCount = drflac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment);
+ drflac_uint64 iFirstPCMFrame = pFlac->currentFLACFrame.header.blockSizeInPCMFrames - pFlac->currentFLACFrame.pcmFramesRemaining;
drflac_uint64 frameCountThisIteration = framesToRead;
- drflac_uint64 samplesReadThisIteration;
- if (frameCountThisIteration > pFlac->currentFrame.samplesRemaining / channelCount) {
- frameCountThisIteration = pFlac->currentFrame.samplesRemaining / channelCount;
+ if (frameCountThisIteration > pFlac->currentFLACFrame.pcmFramesRemaining) {
+ frameCountThisIteration = pFlac->currentFLACFrame.pcmFramesRemaining;
}
if (channelCount == 2) {
- const drflac_int32* pDecodedSamples0 = pFlac->currentFrame.subframes[0].pDecodedSamples + iFirstPCMFrame;
- const drflac_int32* pDecodedSamples1 = pFlac->currentFrame.subframes[1].pDecodedSamples + iFirstPCMFrame;
+ const drflac_int32* pDecodedSamples0 = pFlac->currentFLACFrame.subframes[0].pSamplesS32 + iFirstPCMFrame;
+ const drflac_int32* pDecodedSamples1 = pFlac->currentFLACFrame.subframes[1].pSamplesS32 + iFirstPCMFrame;
- switch (pFlac->currentFrame.header.channelAssignment)
+ switch (pFlac->currentFLACFrame.header.channelAssignment)
{
case DRFLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE:
{
@@ -7769,7 +10223,7 @@ drflac_uint64 drflac_read_pcm_frames_f32(drflac* pFlac, drflac_uint64 framesToRe
{
drflac_read_pcm_frames_f32__decode_right_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
} break;
-
+
case DRFLAC_CHANNEL_ASSIGNMENT_MID_SIDE:
{
drflac_read_pcm_frames_f32__decode_mid_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
@@ -7778,145 +10232,79 @@ drflac_uint64 drflac_read_pcm_frames_f32(drflac* pFlac, drflac_uint64 framesToRe
case DRFLAC_CHANNEL_ASSIGNMENT_INDEPENDENT:
default:
{
- drflac_read_pcm_frames_f32__decode_independent_stereo(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
- } break;
- }
- } else {
- /* Generic interleaving. */
- drflac_uint64 i;
- for (i = 0; i < frameCountThisIteration; ++i) {
- unsigned int j;
- for (j = 0; j < channelCount; ++j) {
- pBufferOut[(i*channelCount)+j] = (float)(((pFlac->currentFrame.subframes[j].pDecodedSamples[iFirstPCMFrame + i]) << (unusedBitsPerSample + pFlac->currentFrame.subframes[j].wastedBitsPerSample)) / 2147483648.0);
- }
- }
- }
-
- samplesReadThisIteration = frameCountThisIteration * channelCount;
- framesRead += frameCountThisIteration;
- framesReadFromPacketSoFar += frameCountThisIteration;
- pBufferOut += samplesReadThisIteration;
- framesToRead -= frameCountThisIteration;
- pFlac->currentSample += samplesReadThisIteration;
- pFlac->currentFrame.samplesRemaining -= (unsigned int)samplesReadThisIteration;
- }
- }
-
- return framesRead;
-}
-
-drflac_bool32 drflac_seek_to_sample(drflac* pFlac, drflac_uint64 sampleIndex)
-{
- if (pFlac == NULL) {
- return DRFLAC_FALSE;
- }
-
- /*
- If we don't know where the first frame begins then we can't seek. This will happen when the STREAMINFO block was not present
- when the decoder was opened.
- */
- if (pFlac->firstFramePos == 0) {
- return DRFLAC_FALSE;
- }
-
- if (sampleIndex == 0) {
- pFlac->currentSample = 0;
- return drflac__seek_to_first_frame(pFlac);
- } else {
- drflac_bool32 wasSuccessful = DRFLAC_FALSE;
-
- /* Clamp the sample to the end. */
- if (sampleIndex >= pFlac->totalSampleCount) {
- sampleIndex = pFlac->totalSampleCount - 1;
- }
-
- /* If the target sample and the current sample are in the same frame we just move the position forward. */
- if (sampleIndex > pFlac->currentSample) {
- /* Forward. */
- drflac_uint32 offset = (drflac_uint32)(sampleIndex - pFlac->currentSample);
- if (pFlac->currentFrame.samplesRemaining > offset) {
- pFlac->currentFrame.samplesRemaining -= offset;
- pFlac->currentSample = sampleIndex;
- return DRFLAC_TRUE;
- }
- } else {
- /* Backward. */
- drflac_uint32 offsetAbs = (drflac_uint32)(pFlac->currentSample - sampleIndex);
- drflac_uint32 currentFrameSampleCount = pFlac->currentFrame.header.blockSize * drflac__get_channel_count_from_channel_assignment(pFlac->currentFrame.header.channelAssignment);
- drflac_uint32 currentFrameSamplesConsumed = (drflac_uint32)(currentFrameSampleCount - pFlac->currentFrame.samplesRemaining);
- if (currentFrameSamplesConsumed > offsetAbs) {
- pFlac->currentFrame.samplesRemaining += offsetAbs;
- pFlac->currentSample = sampleIndex;
- return DRFLAC_TRUE;
- }
- }
-
- /*
- Different techniques depending on encapsulation. Using the native FLAC seektable with Ogg encapsulation is a bit awkward so
- we'll instead use Ogg's natural seeking facility.
- */
-#ifndef DR_FLAC_NO_OGG
- if (pFlac->container == drflac_container_ogg)
- {
- wasSuccessful = drflac_ogg__seek_to_sample(pFlac, sampleIndex);
- }
- else
-#endif
- {
- /* First try seeking via the seek table. If this fails, fall back to a brute force seek which is much slower. */
- wasSuccessful = drflac__seek_to_sample__seek_table(pFlac, sampleIndex);
- if (!wasSuccessful) {
- wasSuccessful = drflac__seek_to_sample__brute_force(pFlac, sampleIndex);
+ drflac_read_pcm_frames_f32__decode_independent_stereo(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut);
+ } break;
+ }
+ } else {
+ /* Generic interleaving. */
+ drflac_uint64 i;
+ for (i = 0; i < frameCountThisIteration; ++i) {
+ unsigned int j;
+ for (j = 0; j < channelCount; ++j) {
+ pBufferOut[(i*channelCount)+j] = (float)((drflac_uint64)((pFlac->currentFLACFrame.subframes[j].pSamplesS32[iFirstPCMFrame + i]) << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[j].wastedBitsPerSample)) / 2147483648.0);
+ }
+ }
}
- }
- pFlac->currentSample = sampleIndex;
- return wasSuccessful;
+ framesRead += frameCountThisIteration;
+ pBufferOut += frameCountThisIteration * channelCount;
+ framesToRead -= frameCountThisIteration;
+ pFlac->currentPCMFrame += frameCountThisIteration;
+ pFlac->currentFLACFrame.pcmFramesRemaining -= (unsigned int)frameCountThisIteration;
+ }
}
+
+ return framesRead;
}
+
drflac_bool32 drflac_seek_to_pcm_frame(drflac* pFlac, drflac_uint64 pcmFrameIndex)
{
if (pFlac == NULL) {
return DRFLAC_FALSE;
}
+ /* Don't do anything if we're already on the seek point. */
+ if (pFlac->currentPCMFrame == pcmFrameIndex) {
+ return DRFLAC_TRUE;
+ }
+
/*
If we don't know where the first frame begins then we can't seek. This will happen when the STREAMINFO block was not present
when the decoder was opened.
*/
- if (pFlac->firstFramePos == 0) {
+ if (pFlac->firstFLACFramePosInBytes == 0) {
return DRFLAC_FALSE;
}
if (pcmFrameIndex == 0) {
- pFlac->currentSample = 0;
+ pFlac->currentPCMFrame = 0;
return drflac__seek_to_first_frame(pFlac);
} else {
drflac_bool32 wasSuccessful = DRFLAC_FALSE;
/* Clamp the sample to the end. */
- if (pcmFrameIndex >= pFlac->totalPCMFrameCount) {
- pcmFrameIndex = pFlac->totalPCMFrameCount - 1;
+ if (pcmFrameIndex > pFlac->totalPCMFrameCount) {
+ pcmFrameIndex = pFlac->totalPCMFrameCount;
}
/* If the target sample and the current sample are in the same frame we just move the position forward. */
- if (pcmFrameIndex*pFlac->channels > pFlac->currentSample) {
+ if (pcmFrameIndex > pFlac->currentPCMFrame) {
/* Forward. */
- drflac_uint32 offset = (drflac_uint32)(pcmFrameIndex*pFlac->channels - pFlac->currentSample);
- if (pFlac->currentFrame.samplesRemaining > offset) {
- pFlac->currentFrame.samplesRemaining -= offset;
- pFlac->currentSample = pcmFrameIndex*pFlac->channels;
+ drflac_uint32 offset = (drflac_uint32)(pcmFrameIndex - pFlac->currentPCMFrame);
+ if (pFlac->currentFLACFrame.pcmFramesRemaining > offset) {
+ pFlac->currentFLACFrame.pcmFramesRemaining -= offset;
+ pFlac->currentPCMFrame = pcmFrameIndex;
return DRFLAC_TRUE;
}
} else {
/* Backward. */
- drflac_uint32 offsetAbs = (drflac_uint32)(pFlac->currentSample - pcmFrameIndex*pFlac->channels);
- drflac_uint32 currentFrameSampleCount = pFlac->currentFrame.header.blockSize * drflac__get_channel_count_from_channel_assignment(pFlac->currentFrame.header.channelAssignment);
- drflac_uint32 currentFrameSamplesConsumed = (drflac_uint32)(currentFrameSampleCount - pFlac->currentFrame.samplesRemaining);
- if (currentFrameSamplesConsumed > offsetAbs) {
- pFlac->currentFrame.samplesRemaining += offsetAbs;
- pFlac->currentSample = pcmFrameIndex*pFlac->channels;
+ drflac_uint32 offsetAbs = (drflac_uint32)(pFlac->currentPCMFrame - pcmFrameIndex);
+ drflac_uint32 currentFLACFramePCMFrameCount = pFlac->currentFLACFrame.header.blockSizeInPCMFrames;
+ drflac_uint32 currentFLACFramePCMFramesConsumed = currentFLACFramePCMFrameCount - pFlac->currentFLACFrame.pcmFramesRemaining;
+ if (currentFLACFramePCMFramesConsumed > offsetAbs) {
+ pFlac->currentFLACFrame.pcmFramesRemaining += offsetAbs;
+ pFlac->currentPCMFrame = pcmFrameIndex;
return DRFLAC_TRUE;
}
}
@@ -7928,19 +10316,30 @@ drflac_bool32 drflac_seek_to_pcm_frame(drflac* pFlac, drflac_uint64 pcmFrameInde
#ifndef DR_FLAC_NO_OGG
if (pFlac->container == drflac_container_ogg)
{
- wasSuccessful = drflac_ogg__seek_to_sample(pFlac, pcmFrameIndex*pFlac->channels);
+ wasSuccessful = drflac_ogg__seek_to_pcm_frame(pFlac, pcmFrameIndex);
}
else
#endif
{
/* First try seeking via the seek table. If this fails, fall back to a brute force seek which is much slower. */
- wasSuccessful = drflac__seek_to_sample__seek_table(pFlac, pcmFrameIndex*pFlac->channels);
- if (!wasSuccessful) {
- wasSuccessful = drflac__seek_to_sample__brute_force(pFlac, pcmFrameIndex*pFlac->channels);
+ if (/*!wasSuccessful && */!pFlac->_noSeekTableSeek) {
+ wasSuccessful = drflac__seek_to_pcm_frame__seek_table(pFlac, pcmFrameIndex);
+ }
+
+#if !defined(DR_FLAC_NO_CRC)
+ /* Fall back to binary search if seek table seeking fails. This requires the length of the stream to be known. */
+ if (!wasSuccessful && !pFlac->_noBinarySearchSeek && pFlac->totalPCMFrameCount > 0) {
+ wasSuccessful = drflac__seek_to_pcm_frame__binary_search(pFlac, pcmFrameIndex);
+ }
+#endif
+
+ /* Fall back to brute force if all else fails. */
+ if (!wasSuccessful && !pFlac->_noBruteForceSeek) {
+ wasSuccessful = drflac__seek_to_pcm_frame__brute_force(pFlac, pcmFrameIndex);
}
}
- pFlac->currentSample = pcmFrameIndex*pFlac->channels;
+ pFlac->currentPCMFrame = pcmFrameIndex;
return wasSuccessful;
}
}
@@ -7967,7 +10366,7 @@ static type* drflac__full_read_and_close_ ## extension (drflac* pFlac, unsigned
type* pSampleData = NULL; \
drflac_uint64 totalPCMFrameCount; \
\
- drflac_assert(pFlac != NULL); \
+ DRFLAC_ASSERT(pFlac != NULL); \
\
totalPCMFrameCount = pFlac->totalPCMFrameCount; \
\
@@ -7976,7 +10375,7 @@ static type* drflac__full_read_and_close_ ## extension (drflac* pFlac, unsigned
drflac_uint64 pcmFramesRead; \
size_t sampleDataBufferSize = sizeof(buffer); \
\
- pSampleData = (type*)DRFLAC_MALLOC(sampleDataBufferSize); \
+ pSampleData = (type*)drflac__malloc_from_callbacks(sampleDataBufferSize, &pFlac->allocationCallbacks); \
if (pSampleData == NULL) { \
goto on_error; \
} \
@@ -7984,31 +10383,33 @@ static type* drflac__full_read_and_close_ ## extension (drflac* pFlac, unsigned
while ((pcmFramesRead = (drflac_uint64)drflac_read_pcm_frames_##extension(pFlac, sizeof(buffer)/sizeof(buffer[0])/pFlac->channels, buffer)) > 0) { \
if (((totalPCMFrameCount + pcmFramesRead) * pFlac->channels * sizeof(type)) > sampleDataBufferSize) { \
type* pNewSampleData; \
+ size_t newSampleDataBufferSize; \
\
- sampleDataBufferSize *= 2; \
- pNewSampleData = (type*)DRFLAC_REALLOC(pSampleData, sampleDataBufferSize); \
+ newSampleDataBufferSize = sampleDataBufferSize * 2; \
+ pNewSampleData = (type*)drflac__realloc_from_callbacks(pSampleData, newSampleDataBufferSize, sampleDataBufferSize, &pFlac->allocationCallbacks); \
if (pNewSampleData == NULL) { \
- DRFLAC_FREE(pSampleData); \
+ drflac__free_from_callbacks(pSampleData, &pFlac->allocationCallbacks); \
goto on_error; \
} \
\
+ sampleDataBufferSize = newSampleDataBufferSize; \
pSampleData = pNewSampleData; \
} \
\
- drflac_copy_memory(pSampleData + (totalPCMFrameCount*pFlac->channels), buffer, (size_t)(pcmFramesRead*pFlac->channels*sizeof(type))); \
+ DRFLAC_COPY_MEMORY(pSampleData + (totalPCMFrameCount*pFlac->channels), buffer, (size_t)(pcmFramesRead*pFlac->channels*sizeof(type))); \
totalPCMFrameCount += pcmFramesRead; \
} \
\
/* At this point everything should be decoded, but we just want to fill the unused part buffer with silence - need to \
protect those ears from random noise! */ \
- drflac_zero_memory(pSampleData + (totalPCMFrameCount*pFlac->channels), (size_t)(sampleDataBufferSize - totalPCMFrameCount*pFlac->channels*sizeof(type))); \
+ DRFLAC_ZERO_MEMORY(pSampleData + (totalPCMFrameCount*pFlac->channels), (size_t)(sampleDataBufferSize - totalPCMFrameCount*pFlac->channels*sizeof(type))); \
} else { \
drflac_uint64 dataSize = totalPCMFrameCount*pFlac->channels*sizeof(type); \
if (dataSize > DRFLAC_SIZE_MAX) { \
goto on_error; /* The decoded data is too big. */ \
} \
\
- pSampleData = (type*)DRFLAC_MALLOC((size_t)dataSize); /* <-- Safe cast as per the check above. */ \
+ pSampleData = (type*)drflac__malloc_from_callbacks((size_t)dataSize, &pFlac->allocationCallbacks); /* <-- Safe cast as per the check above. */ \
if (pSampleData == NULL) { \
goto on_error; \
} \
@@ -8032,7 +10433,7 @@ DRFLAC_DEFINE_FULL_READ_AND_CLOSE(s32, drflac_int32)
DRFLAC_DEFINE_FULL_READ_AND_CLOSE(s16, drflac_int16)
DRFLAC_DEFINE_FULL_READ_AND_CLOSE(f32, float)
-drflac_int32* drflac_open_and_read_pcm_frames_s32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalPCMFrameCountOut)
+drflac_int32* drflac_open_and_read_pcm_frames_s32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalPCMFrameCountOut, const drflac_allocation_callbacks* pAllocationCallbacks)
{
drflac* pFlac;
@@ -8046,7 +10447,7 @@ drflac_int32* drflac_open_and_read_pcm_frames_s32(drflac_read_proc onRead, drfla
*totalPCMFrameCountOut = 0;
}
- pFlac = drflac_open(onRead, onSeek, pUserData);
+ pFlac = drflac_open(onRead, onSeek, pUserData, pAllocationCallbacks);
if (pFlac == NULL) {
return NULL;
}
@@ -8054,44 +10455,7 @@ drflac_int32* drflac_open_and_read_pcm_frames_s32(drflac_read_proc onRead, drfla
return drflac__full_read_and_close_s32(pFlac, channelsOut, sampleRateOut, totalPCMFrameCountOut);
}
-drflac_int32* drflac_open_and_decode_s32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalSampleCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drflac_uint64 totalPCMFrameCount;
- drflac_int32* pResult;
-
- if (channelsOut) {
- *channelsOut = 0;
- }
- if (sampleRateOut) {
- *sampleRateOut = 0;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = 0;
- }
-
- pResult = drflac_open_and_read_pcm_frames_s32(onRead, onSeek, pUserData, &channels, &sampleRate, &totalPCMFrameCount);
- if (pResult == NULL) {
- return NULL;
- }
-
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = totalPCMFrameCount * channels;
- }
-
- return pResult;
-}
-
-
-
-drflac_int16* drflac_open_and_read_pcm_frames_s16(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalPCMFrameCountOut)
+drflac_int16* drflac_open_and_read_pcm_frames_s16(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalPCMFrameCountOut, const drflac_allocation_callbacks* pAllocationCallbacks)
{
drflac* pFlac;
@@ -8105,7 +10469,7 @@ drflac_int16* drflac_open_and_read_pcm_frames_s16(drflac_read_proc onRead, drfla
*totalPCMFrameCountOut = 0;
}
- pFlac = drflac_open(onRead, onSeek, pUserData);
+ pFlac = drflac_open(onRead, onSeek, pUserData, pAllocationCallbacks);
if (pFlac == NULL) {
return NULL;
}
@@ -8113,43 +10477,7 @@ drflac_int16* drflac_open_and_read_pcm_frames_s16(drflac_read_proc onRead, drfla
return drflac__full_read_and_close_s16(pFlac, channelsOut, sampleRateOut, totalPCMFrameCountOut);
}
-drflac_int16* drflac_open_and_decode_s16(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalSampleCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drflac_uint64 totalPCMFrameCount;
- drflac_int16* pResult;
-
- if (channelsOut) {
- *channelsOut = 0;
- }
- if (sampleRateOut) {
- *sampleRateOut = 0;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = 0;
- }
-
- pResult = drflac_open_and_read_pcm_frames_s16(onRead, onSeek, pUserData, &channels, &sampleRate, &totalPCMFrameCount);
- if (pResult == NULL) {
- return NULL;
- }
-
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = totalPCMFrameCount * channels;
- }
-
- return pResult;
-}
-
-
-float* drflac_open_and_read_pcm_frames_f32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalPCMFrameCountOut)
+float* drflac_open_and_read_pcm_frames_f32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalPCMFrameCountOut, const drflac_allocation_callbacks* pAllocationCallbacks)
{
drflac* pFlac;
@@ -8163,7 +10491,7 @@ float* drflac_open_and_read_pcm_frames_f32(drflac_read_proc onRead, drflac_seek_
*totalPCMFrameCountOut = 0;
}
- pFlac = drflac_open(onRead, onSeek, pUserData);
+ pFlac = drflac_open(onRead, onSeek, pUserData, pAllocationCallbacks);
if (pFlac == NULL) {
return NULL;
}
@@ -8171,43 +10499,8 @@ float* drflac_open_and_read_pcm_frames_f32(drflac_read_proc onRead, drflac_seek_
return drflac__full_read_and_close_f32(pFlac, channelsOut, sampleRateOut, totalPCMFrameCountOut);
}
-float* drflac_open_and_decode_f32(drflac_read_proc onRead, drflac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalSampleCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drflac_uint64 totalPCMFrameCount;
- float* pResult;
-
- if (channelsOut) {
- *channelsOut = 0;
- }
- if (sampleRateOut) {
- *sampleRateOut = 0;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = 0;
- }
-
- pResult = drflac_open_and_read_pcm_frames_f32(onRead, onSeek, pUserData, &channels, &sampleRate, &totalPCMFrameCount);
- if (pResult == NULL) {
- return NULL;
- }
-
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = totalPCMFrameCount * channels;
- }
-
- return pResult;
-}
-
#ifndef DR_FLAC_NO_STDIO
-drflac_int32* drflac_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount)
+drflac_int32* drflac_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks)
{
drflac* pFlac;
@@ -8221,7 +10514,7 @@ drflac_int32* drflac_open_file_and_read_pcm_frames_s32(const char* filename, uns
*totalPCMFrameCount = 0;
}
- pFlac = drflac_open_file(filename);
+ pFlac = drflac_open_file(filename, pAllocationCallbacks);
if (pFlac == NULL) {
return NULL;
}
@@ -8229,43 +10522,7 @@ drflac_int32* drflac_open_file_and_read_pcm_frames_s32(const char* filename, uns
return drflac__full_read_and_close_s32(pFlac, channels, sampleRate, totalPCMFrameCount);
}
-drflac_int32* drflac_open_and_decode_file_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalSampleCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drflac_uint64 totalPCMFrameCount;
- drflac_int32* pResult;
-
- if (channelsOut) {
- *channelsOut = 0;
- }
- if (sampleRateOut) {
- *sampleRateOut = 0;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = 0;
- }
-
- pResult = drflac_open_file_and_read_pcm_frames_s32(filename, &channels, &sampleRate, &totalPCMFrameCount);
- if (pResult == NULL) {
- return NULL;
- }
-
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = totalPCMFrameCount * channels;
- }
-
- return pResult;
-}
-
-
-drflac_int16* drflac_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount)
+drflac_int16* drflac_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks)
{
drflac* pFlac;
@@ -8279,7 +10536,7 @@ drflac_int16* drflac_open_file_and_read_pcm_frames_s16(const char* filename, uns
*totalPCMFrameCount = 0;
}
- pFlac = drflac_open_file(filename);
+ pFlac = drflac_open_file(filename, pAllocationCallbacks);
if (pFlac == NULL) {
return NULL;
}
@@ -8287,43 +10544,7 @@ drflac_int16* drflac_open_file_and_read_pcm_frames_s16(const char* filename, uns
return drflac__full_read_and_close_s16(pFlac, channels, sampleRate, totalPCMFrameCount);
}
-drflac_int16* drflac_open_and_decode_file_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalSampleCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drflac_uint64 totalPCMFrameCount;
- drflac_int16* pResult;
-
- if (channelsOut) {
- *channelsOut = 0;
- }
- if (sampleRateOut) {
- *sampleRateOut = 0;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = 0;
- }
-
- pResult = drflac_open_file_and_read_pcm_frames_s16(filename, &channels, &sampleRate, &totalPCMFrameCount);
- if (pResult == NULL) {
- return NULL;
- }
-
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = totalPCMFrameCount * channels;
- }
-
- return pResult;
-}
-
-
-float* drflac_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount)
+float* drflac_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks)
{
drflac* pFlac;
@@ -8337,51 +10558,16 @@ float* drflac_open_file_and_read_pcm_frames_f32(const char* filename, unsigned i
*totalPCMFrameCount = 0;
}
- pFlac = drflac_open_file(filename);
+ pFlac = drflac_open_file(filename, pAllocationCallbacks);
if (pFlac == NULL) {
return NULL;
}
return drflac__full_read_and_close_f32(pFlac, channels, sampleRate, totalPCMFrameCount);
}
-
-float* drflac_open_and_decode_file_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalSampleCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drflac_uint64 totalPCMFrameCount;
- float* pResult;
-
- if (channelsOut) {
- *channelsOut = 0;
- }
- if (sampleRateOut) {
- *sampleRateOut = 0;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = 0;
- }
-
- pResult = drflac_open_file_and_read_pcm_frames_f32(filename, &channels, &sampleRate, &totalPCMFrameCount);
- if (pResult == NULL) {
- return NULL;
- }
-
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = totalPCMFrameCount * channels;
- }
-
- return pResult;
-}
#endif
-drflac_int32* drflac_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount)
+drflac_int32* drflac_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks)
{
drflac* pFlac;
@@ -8395,7 +10581,7 @@ drflac_int32* drflac_open_memory_and_read_pcm_frames_s32(const void* data, size_
*totalPCMFrameCount = 0;
}
- pFlac = drflac_open_memory(data, dataSize);
+ pFlac = drflac_open_memory(data, dataSize, pAllocationCallbacks);
if (pFlac == NULL) {
return NULL;
}
@@ -8403,43 +10589,7 @@ drflac_int32* drflac_open_memory_and_read_pcm_frames_s32(const void* data, size_
return drflac__full_read_and_close_s32(pFlac, channels, sampleRate, totalPCMFrameCount);
}
-drflac_int32* drflac_open_and_decode_memory_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalSampleCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drflac_uint64 totalPCMFrameCount;
- drflac_int32* pResult;
-
- if (channelsOut) {
- *channelsOut = 0;
- }
- if (sampleRateOut) {
- *sampleRateOut = 0;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = 0;
- }
-
- pResult = drflac_open_memory_and_read_pcm_frames_s32(data, dataSize, &channels, &sampleRate, &totalPCMFrameCount);
- if (pResult == NULL) {
- return NULL;
- }
-
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = totalPCMFrameCount * channels;
- }
-
- return pResult;
-}
-
-
-drflac_int16* drflac_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount)
+drflac_int16* drflac_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks)
{
drflac* pFlac;
@@ -8453,7 +10603,7 @@ drflac_int16* drflac_open_memory_and_read_pcm_frames_s16(const void* data, size_
*totalPCMFrameCount = 0;
}
- pFlac = drflac_open_memory(data, dataSize);
+ pFlac = drflac_open_memory(data, dataSize, pAllocationCallbacks);
if (pFlac == NULL) {
return NULL;
}
@@ -8461,43 +10611,7 @@ drflac_int16* drflac_open_memory_and_read_pcm_frames_s16(const void* data, size_
return drflac__full_read_and_close_s16(pFlac, channels, sampleRate, totalPCMFrameCount);
}
-drflac_int16* drflac_open_and_decode_memory_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalSampleCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drflac_uint64 totalPCMFrameCount;
- drflac_int16* pResult;
-
- if (channelsOut) {
- *channelsOut = 0;
- }
- if (sampleRateOut) {
- *sampleRateOut = 0;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = 0;
- }
-
- pResult = drflac_open_memory_and_read_pcm_frames_s16(data, dataSize, &channels, &sampleRate, &totalPCMFrameCount);
- if (pResult == NULL) {
- return NULL;
- }
-
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = totalPCMFrameCount * channels;
- }
-
- return pResult;
-}
-
-
-float* drflac_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount)
+float* drflac_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drflac_uint64* totalPCMFrameCount, const drflac_allocation_callbacks* pAllocationCallbacks)
{
drflac* pFlac;
@@ -8511,7 +10625,7 @@ float* drflac_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataS
*totalPCMFrameCount = 0;
}
- pFlac = drflac_open_memory(data, dataSize);
+ pFlac = drflac_open_memory(data, dataSize, pAllocationCallbacks);
if (pFlac == NULL) {
return NULL;
}
@@ -8519,45 +10633,14 @@ float* drflac_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataS
return drflac__full_read_and_close_f32(pFlac, channels, sampleRate, totalPCMFrameCount);
}
-float* drflac_open_and_decode_memory_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drflac_uint64* totalSampleCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drflac_uint64 totalPCMFrameCount;
- float* pResult;
-
- if (channelsOut) {
- *channelsOut = 0;
- }
- if (sampleRateOut) {
- *sampleRateOut = 0;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = 0;
- }
-
- pResult = drflac_open_memory_and_read_pcm_frames_f32(data, dataSize, &channels, &sampleRate, &totalPCMFrameCount);
- if (pResult == NULL) {
- return NULL;
- }
-
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalSampleCountOut) {
- *totalSampleCountOut = totalPCMFrameCount * channels;
- }
-
- return pResult;
-}
-
-void drflac_free(void* pSampleDataReturnedByOpenAndDecode)
+void drflac_free(void* p, const drflac_allocation_callbacks* pAllocationCallbacks)
{
- DRFLAC_FREE(pSampleDataReturnedByOpenAndDecode);
+ if (pAllocationCallbacks != NULL) {
+ drflac__free_from_callbacks(p, pAllocationCallbacks);
+ } else {
+ drflac__free_default(p, NULL);
+ }
}
@@ -8577,7 +10660,7 @@ const char* drflac_next_vorbis_comment(drflac_vorbis_comment_iterator* pIter, dr
{
drflac_int32 length;
const char* pComment;
-
+
/* Safety. */
if (pCommentLengthOut) {
*pCommentLengthOut = 0;
@@ -8631,7 +10714,7 @@ drflac_bool32 drflac_next_cuesheet_track(drflac_cuesheet_track_iterator* pIter,
offsetLo = drflac__be2host_32(*(const drflac_uint32*)pRunningData); pRunningData += 4;
cuesheetTrack.offset = offsetLo | (offsetHi << 32);
cuesheetTrack.trackNumber = pRunningData[0]; pRunningData += 1;
- drflac_copy_memory(cuesheetTrack.ISRC, pRunningData, sizeof(cuesheetTrack.ISRC)); pRunningData += 12;
+ DRFLAC_COPY_MEMORY(cuesheetTrack.ISRC, pRunningData, sizeof(cuesheetTrack.ISRC)); pRunningData += 12;
cuesheetTrack.isAudio = (pRunningData[0] & 0x80) != 0;
cuesheetTrack.preEmphasis = (pRunningData[0] & 0x40) != 0; pRunningData += 14;
cuesheetTrack.indexCount = pRunningData[0]; pRunningData += 1;
@@ -8656,6 +10739,82 @@ drflac_bool32 drflac_next_cuesheet_track(drflac_cuesheet_track_iterator* pIter,
/*
REVISION HISTORY
================
+v0.12.6 - 2020-03-07
+ - Fix compilation error with Visual Studio .NET 2003.
+
+v0.12.5 - 2020-01-30
+ - Silence some static analysis warnings.
+
+v0.12.4 - 2020-01-29
+ - Silence some static analysis warnings.
+
+v0.12.3 - 2019-12-02
+ - Fix some warnings when compiling with GCC and the -Og flag.
+ - Fix a crash in out-of-memory situations.
+ - Fix potential integer overflow bug.
+ - Fix some static analysis warnings.
+ - Fix a possible crash when using custom memory allocators without a custom realloc() implementation.
+ - Fix a bug with binary search seeking where the bits per sample is not a multiple of 8.
+
+v0.12.2 - 2019-10-07
+ - Internal code clean up.
+
+v0.12.1 - 2019-09-29
+ - Fix some Clang Static Analyzer warnings.
+ - Fix an unused variable warning.
+
+v0.12.0 - 2019-09-23
+ - API CHANGE: Add support for user defined memory allocation routines. This system allows the program to specify their own memory allocation
+ routines with a user data pointer for client-specific contextual data. This adds an extra parameter to the end of the following APIs:
+ - drflac_open()
+ - drflac_open_relaxed()
+ - drflac_open_with_metadata()
+ - drflac_open_with_metadata_relaxed()
+ - drflac_open_file()
+ - drflac_open_file_with_metadata()
+ - drflac_open_memory()
+ - drflac_open_memory_with_metadata()
+ - drflac_open_and_read_pcm_frames_s32()
+ - drflac_open_and_read_pcm_frames_s16()
+ - drflac_open_and_read_pcm_frames_f32()
+ - drflac_open_file_and_read_pcm_frames_s32()
+ - drflac_open_file_and_read_pcm_frames_s16()
+ - drflac_open_file_and_read_pcm_frames_f32()
+ - drflac_open_memory_and_read_pcm_frames_s32()
+ - drflac_open_memory_and_read_pcm_frames_s16()
+ - drflac_open_memory_and_read_pcm_frames_f32()
+ Set this extra parameter to NULL to use defaults which is the same as the previous behaviour. Setting this NULL will use
+ DRFLAC_MALLOC, DRFLAC_REALLOC and DRFLAC_FREE.
+ - Remove deprecated APIs:
+ - drflac_read_s32()
+ - drflac_read_s16()
+ - drflac_read_f32()
+ - drflac_seek_to_sample()
+ - drflac_open_and_decode_s32()
+ - drflac_open_and_decode_s16()
+ - drflac_open_and_decode_f32()
+ - drflac_open_and_decode_file_s32()
+ - drflac_open_and_decode_file_s16()
+ - drflac_open_and_decode_file_f32()
+ - drflac_open_and_decode_memory_s32()
+ - drflac_open_and_decode_memory_s16()
+ - drflac_open_and_decode_memory_f32()
+ - Remove drflac.totalSampleCount which is now replaced with drflac.totalPCMFrameCount. You can emulate drflac.totalSampleCount
+ by doing pFlac->totalPCMFrameCount*pFlac->channels.
+ - Rename drflac.currentFrame to drflac.currentFLACFrame to remove ambiguity with PCM frames.
+ - Fix errors when seeking to the end of a stream.
+ - Optimizations to seeking.
+ - SSE improvements and optimizations.
+ - ARM NEON optimizations.
+ - Optimizations to drflac_read_pcm_frames_s16().
+ - Optimizations to drflac_read_pcm_frames_s32().
+
+v0.11.10 - 2019-06-26
+ - Fix a compiler error.
+
+v0.11.9 - 2019-06-16
+ - Silence some ThreadSanitizer warnings.
+
v0.11.8 - 2019-05-21
- Fix warnings.
@@ -8668,7 +10827,7 @@ v0.11.6 - 2019-05-05
- Change license to choice of public domain or MIT-0.
v0.11.5 - 2019-04-19
- - Fix a compiler error with GCC.
+ - Fix a compiler error with GCC.
v0.11.4 - 2019-04-17
- Fix some warnings with GCC when compiling with -std=c99.
@@ -8683,7 +10842,7 @@ v0.11.1 - 2019-02-17
- Fix a potential bug with seeking.
v0.11.0 - 2018-12-16
- - API CHANGE: Deprecated drflac_read_s32(), drflac_read_s16() and drflac_read_f32() and replaced them with
+ - API CHANGE: Deprecated drflac_read_s32(), drflac_read_s16() and drflac_read_f32() and replaced them with
drflac_read_pcm_frames_s32(), drflac_read_pcm_frames_s16() and drflac_read_pcm_frames_f32(). The new APIs take
and return PCM frame counts instead of sample counts. To upgrade you will need to change the input count by
dividing it by the channel count, and then do the same with the return value.
@@ -8893,7 +11052,7 @@ For more information, please refer to
===============================================================================
ALTERNATIVE 2 - MIT No Attribution
===============================================================================
-Copyright 2018 David Reid
+Copyright 2020 David Reid
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
diff --git a/client/miniaudio/dr_mp3.h b/client/miniaudio/dr_mp3.h
index 26aeec56..c876c064 100644
--- a/client/miniaudio/dr_mp3.h
+++ b/client/miniaudio/dr_mp3.h
@@ -1,6 +1,6 @@
/*
MP3 audio decoder. Choice of public domain or MIT-0. See license statements at the end of this file.
-dr_mp3 - v0.4.4 - 2019-05-06
+dr_mp3 - v0.5.6 - 2020-02-12
David Reid - mackron@gmail.com
@@ -8,6 +8,71 @@ Based off minimp3 (https://github.com/lieff/minimp3) which is where the real wor
differences between minimp3 and dr_mp3.
*/
+/*
+RELEASE NOTES - v0.5.0
+=======================
+Version 0.5.0 has breaking API changes.
+
+Improved Client-Defined Memory Allocation
+-----------------------------------------
+The main change with this release is the addition of a more flexible way of implementing custom memory allocation routines. The
+existing system of DRMP3_MALLOC, DRMP3_REALLOC and DRMP3_FREE are still in place and will be used by default when no custom
+allocation callbacks are specified.
+
+To use the new system, you pass in a pointer to a drmp3_allocation_callbacks object to drmp3_init() and family, like this:
+
+ void* my_malloc(size_t sz, void* pUserData)
+ {
+ return malloc(sz);
+ }
+ void* my_realloc(void* p, size_t sz, void* pUserData)
+ {
+ return realloc(p, sz);
+ }
+ void my_free(void* p, void* pUserData)
+ {
+ free(p);
+ }
+
+ ...
+
+ drmp3_allocation_callbacks allocationCallbacks;
+ allocationCallbacks.pUserData = &myData;
+ allocationCallbacks.onMalloc = my_malloc;
+ allocationCallbacks.onRealloc = my_realloc;
+ allocationCallbacks.onFree = my_free;
+ drmp3_init_file(&mp3, "my_file.mp3", NULL, &allocationCallbacks);
+
+The advantage of this new system is that it allows you to specify user data which will be passed in to the allocation routines.
+
+Passing in null for the allocation callbacks object will cause dr_mp3 to use defaults which is the same as DRMP3_MALLOC,
+DRMP3_REALLOC and DRMP3_FREE and the equivalent of how it worked in previous versions.
+
+Every API that opens a drmp3 object now takes this extra parameter. These include the following:
+
+ drmp3_init()
+ drmp3_init_file()
+ drmp3_init_memory()
+ drmp3_open_and_read_pcm_frames_f32()
+ drmp3_open_and_read_pcm_frames_s16()
+ drmp3_open_memory_and_read_pcm_frames_f32()
+ drmp3_open_memory_and_read_pcm_frames_s16()
+ drmp3_open_file_and_read_pcm_frames_f32()
+ drmp3_open_file_and_read_pcm_frames_s16()
+
+Renamed APIs
+------------
+The following APIs have been renamed for consistency with other dr_* libraries and to make it clear that they return PCM frame
+counts rather than sample counts.
+
+ drmp3_open_and_read_f32() -> drmp3_open_and_read_pcm_frames_f32()
+ drmp3_open_and_read_s16() -> drmp3_open_and_read_pcm_frames_s16()
+ drmp3_open_memory_and_read_f32() -> drmp3_open_memory_and_read_pcm_frames_f32()
+ drmp3_open_memory_and_read_s16() -> drmp3_open_memory_and_read_pcm_frames_s16()
+ drmp3_open_file_and_read_f32() -> drmp3_open_file_and_read_pcm_frames_f32()
+ drmp3_open_file_and_read_s16() -> drmp3_open_file_and_read_pcm_frames_s16()
+*/
+
/*
USAGE
=====
@@ -42,8 +107,8 @@ You do not need to do any annoying memory management when reading PCM frames - t
any number of PCM frames in each call to drmp3_read_pcm_frames_f32() and it will return as many PCM frames as it can, up to the
requested amount.
-You can also decode an entire file in one go with drmp3_open_and_read_f32(), drmp3_open_memory_and_read_f32() and
-drmp3_open_file_and_read_f32().
+You can also decode an entire file in one go with drmp3_open_and_read_pcm_frames_f32(), drmp3_open_memory_and_read_pcm_frames_f32() and
+drmp3_open_file_and_read_pcm_frames_f32().
OPTIONS
@@ -95,13 +160,22 @@ typedef drmp3_uint32 drmp3_bool32;
#define DRMP3_MAX_SAMPLES_PER_FRAME (DRMP3_MAX_PCM_FRAMES_PER_MP3_FRAME*2)
#ifdef _MSC_VER
-#define DRMP3_INLINE __forceinline
-#else
-#ifdef __GNUC__
-#define DRMP3_INLINE __inline__ __attribute__((always_inline))
+ #define DRMP3_INLINE __forceinline
+#elif defined(__GNUC__)
+ /*
+ I've had a bug report where GCC is emitting warnings about functions possibly not being inlineable. This warning happens when
+ the __attribute__((always_inline)) attribute is defined without an "inline" statement. I think therefore there must be some
+ case where "__inline__" is not always defined, thus the compiler emitting these warnings. When using -std=c89 or -ansi on the
+ command line, we cannot use the "inline" keyword and instead need to use "__inline__". In an attempt to work around this issue
+ I am using "__inline__" only when we're compiling in strict ANSI mode.
+ */
+ #if defined(__STRICT_ANSI__)
+ #define DRMP3_INLINE __inline__ __attribute__((always_inline))
+ #else
+ #define DRMP3_INLINE inline __attribute__((always_inline))
+ #endif
#else
-#define DRMP3_INLINE
-#endif
+ #define DRMP3_INLINE
#endif
/*
@@ -229,6 +303,14 @@ will be either drmp3_seek_origin_start or drmp3_seek_origin_current.
*/
typedef drmp3_bool32 (* drmp3_seek_proc)(void* pUserData, int offset, drmp3_seek_origin origin);
+typedef struct
+{
+ void* pUserData;
+ void* (* onMalloc)(size_t sz, void* pUserData);
+ void* (* onRealloc)(void* p, size_t sz, void* pUserData);
+ void (* onFree)(void* p, void* pUserData);
+} drmp3_allocation_callbacks;
+
typedef struct
{
drmp3_uint32 outputChannels;
@@ -244,6 +326,7 @@ typedef struct
drmp3_read_proc onRead;
drmp3_seek_proc onSeek;
void* pUserData;
+ drmp3_allocation_callbacks allocationCallbacks;
drmp3_uint32 mp3FrameChannels; /* The number of channels in the currently loaded MP3 frame. Internal use only. */
drmp3_uint32 mp3FrameSampleRate; /* The sample rate of the currently loaded MP3 frame. Internal use only. */
drmp3_uint32 pcmFramesConsumedInMP3Frame;
@@ -279,7 +362,7 @@ Close the loader with drmp3_uninit().
See also: drmp3_init_file(), drmp3_init_memory(), drmp3_uninit()
*/
-drmp3_bool32 drmp3_init(drmp3* pMP3, drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, const drmp3_config* pConfig);
+drmp3_bool32 drmp3_init(drmp3* pMP3, drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, const drmp3_config* pConfig, const drmp3_allocation_callbacks* pAllocationCallbacks);
/*
Initializes an MP3 decoder from a block of memory.
@@ -289,7 +372,7 @@ the lifetime of the drmp3 object.
The buffer should contain the contents of the entire MP3 file.
*/
-drmp3_bool32 drmp3_init_memory(drmp3* pMP3, const void* pData, size_t dataSize, const drmp3_config* pConfig);
+drmp3_bool32 drmp3_init_memory(drmp3* pMP3, const void* pData, size_t dataSize, const drmp3_config* pConfig, const drmp3_allocation_callbacks* pAllocationCallbacks);
#ifndef DR_MP3_NO_STDIO
/*
@@ -299,7 +382,7 @@ This holds the internal FILE object until drmp3_uninit() is called. Keep this in
objects because the operating system may restrict the number of file handles an application can have open at
any given time.
*/
-drmp3_bool32 drmp3_init_file(drmp3* pMP3, const char* filePath, const drmp3_config* pConfig);
+drmp3_bool32 drmp3_init_file(drmp3* pMP3, const char* filePath, const drmp3_config* pConfig, const drmp3_allocation_callbacks* pAllocationCallbacks);
#endif
/*
@@ -377,21 +460,21 @@ pConfig is both an input and output. On input it contains what you want. On outp
Free the returned pointer with drmp3_free().
*/
-float* drmp3_open_and_read_f32(drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount);
-drmp3_int16* drmp3_open_and_read_s16(drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount);
+float* drmp3_open_and_read_pcm_frames_f32(drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks);
+drmp3_int16* drmp3_open_and_read_pcm_frames_s16(drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks);
-float* drmp3_open_memory_and_read_f32(const void* pData, size_t dataSize, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount);
-drmp3_int16* drmp3_open_memory_and_read_s16(const void* pData, size_t dataSize, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount);
+float* drmp3_open_memory_and_read_pcm_frames_f32(const void* pData, size_t dataSize, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks);
+drmp3_int16* drmp3_open_memory_and_read_pcm_frames_s16(const void* pData, size_t dataSize, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks);
#ifndef DR_MP3_NO_STDIO
-float* drmp3_open_file_and_read_f32(const char* filePath, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount);
-drmp3_int16* drmp3_open_file_and_read_s16(const char* filePath, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount);
+float* drmp3_open_file_and_read_pcm_frames_f32(const char* filePath, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks);
+drmp3_int16* drmp3_open_file_and_read_pcm_frames_s16(const char* filePath, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks);
#endif
/*
Frees any memory that was allocated by a public drmp3 API.
*/
-void drmp3_free(void* p);
+void drmp3_free(void* p, const drmp3_allocation_callbacks* pAllocationCallbacks);
#ifdef __cplusplus
}
@@ -536,6 +619,7 @@ static int drmp3_have_simd()
}
#elif defined(__ARM_NEON) || defined(__aarch64__)
#include
+#define DRMP3_HAVE_SSE 0
#define DRMP3_HAVE_SIMD 1
#define DRMP3_VSTORE vst1q_f32
#define DRMP3_VLD vld1q_f32
@@ -553,6 +637,7 @@ static int drmp3_have_simd()
return 1;
}
#else
+#define DRMP3_HAVE_SSE 0
#define DRMP3_HAVE_SIMD 0
#ifdef DR_MP3_ONLY_SIMD
#error DR_MP3_ONLY_SIMD used, but SSE/NEON not enabled
@@ -738,7 +823,7 @@ static void drmp3_L12_read_scalefactors(drmp3_bs *bs, drmp3_uint8 *pba, drmp3_ui
if (mask & m)
{
int b = drmp3_bs_get_bits(bs, 6);
- s = g_deq_L12[ba*3 - 6 + b % 3]*(1 << 21 >> b/3);
+ s = g_deq_L12[ba*3 - 6 + b % 3]*(int)(1 << 21 >> b/3);
}
*scf++ = s;
}
@@ -1143,41 +1228,72 @@ static void drmp3_L3_huffman(float *dst, drmp3_bs *bs, const drmp3_L3_gr_info *g
int sfb_cnt = gr_info->region_count[ireg++];
const drmp3_int16 *codebook = tabs + tabindex[tab_num];
int linbits = g_linbits[tab_num];
- do
+ if (linbits)
{
- np = *sfb++ / 2;
- pairs_to_decode = DRMP3_MIN(big_val_cnt, np);
- one = *scf++;
do
{
- int j, w = 5;
- int leaf = codebook[DRMP3_PEEK_BITS(w)];
- while (leaf < 0)
+ np = *sfb++ / 2;
+ pairs_to_decode = DRMP3_MIN(big_val_cnt, np);
+ one = *scf++;
+ do
{
- DRMP3_FLUSH_BITS(w);
- w = leaf & 7;
- leaf = codebook[DRMP3_PEEK_BITS(w) - (leaf >> 3)];
- }
- DRMP3_FLUSH_BITS(leaf >> 8);
+ int j, w = 5;
+ int leaf = codebook[DRMP3_PEEK_BITS(w)];
+ while (leaf < 0)
+ {
+ DRMP3_FLUSH_BITS(w);
+ w = leaf & 7;
+ leaf = codebook[DRMP3_PEEK_BITS(w) - (leaf >> 3)];
+ }
+ DRMP3_FLUSH_BITS(leaf >> 8);
- for (j = 0; j < 2; j++, dst++, leaf >>= 4)
+ for (j = 0; j < 2; j++, dst++, leaf >>= 4)
+ {
+ int lsb = leaf & 0x0F;
+ if (lsb == 15)
+ {
+ lsb += DRMP3_PEEK_BITS(linbits);
+ DRMP3_FLUSH_BITS(linbits);
+ DRMP3_CHECK_BITS;
+ *dst = one*drmp3_L3_pow_43(lsb)*((drmp3_int32)bs_cache < 0 ? -1: 1);
+ } else
+ {
+ *dst = g_drmp3_pow43[16 + lsb - 16*(bs_cache >> 31)]*one;
+ }
+ DRMP3_FLUSH_BITS(lsb ? 1 : 0);
+ }
+ DRMP3_CHECK_BITS;
+ } while (--pairs_to_decode);
+ } while ((big_val_cnt -= np) > 0 && --sfb_cnt >= 0);
+ } else
+ {
+ do
+ {
+ np = *sfb++ / 2;
+ pairs_to_decode = DRMP3_MIN(big_val_cnt, np);
+ one = *scf++;
+ do
{
- int lsb = leaf & 0x0F;
- if (lsb == 15 && linbits)
+ int j, w = 5;
+ int leaf = codebook[DRMP3_PEEK_BITS(w)];
+ while (leaf < 0)
{
- lsb += DRMP3_PEEK_BITS(linbits);
- DRMP3_FLUSH_BITS(linbits);
- DRMP3_CHECK_BITS;
- *dst = one*drmp3_L3_pow_43(lsb)*((drmp3_int32)bs_cache < 0 ? -1: 1);
- } else
+ DRMP3_FLUSH_BITS(w);
+ w = leaf & 7;
+ leaf = codebook[DRMP3_PEEK_BITS(w) - (leaf >> 3)];
+ }
+ DRMP3_FLUSH_BITS(leaf >> 8);
+
+ for (j = 0; j < 2; j++, dst++, leaf >>= 4)
{
+ int lsb = leaf & 0x0F;
*dst = g_drmp3_pow43[16 + lsb - 16*(bs_cache >> 31)]*one;
+ DRMP3_FLUSH_BITS(lsb ? 1 : 0);
}
- DRMP3_FLUSH_BITS(lsb ? 1 : 0);
- }
- DRMP3_CHECK_BITS;
- } while (--pairs_to_decode);
- } while ((big_val_cnt -= np) > 0 && --sfb_cnt >= 0);
+ DRMP3_CHECK_BITS;
+ } while (--pairs_to_decode);
+ } while ((big_val_cnt -= np) > 0 && --sfb_cnt >= 0);
+ }
}
for (np = 1 - big_val_cnt;; dst += 4)
@@ -2021,7 +2137,7 @@ static int drmp3d_find_frame(const drmp3_uint8 *mp3, int mp3_bytes, int *free_fo
}
}
*ptr_frame_bytes = 0;
- return i;
+ return mp3_bytes;
}
void drmp3dec_init(drmp3dec *dec)
@@ -2126,59 +2242,56 @@ int drmp3dec_decode_frame(drmp3dec *dec, const unsigned char *mp3, int mp3_bytes
void drmp3dec_f32_to_s16(const float *in, drmp3_int16 *out, int num_samples)
{
- if(num_samples > 0)
- {
- int i = 0;
+ int i = 0;
#if DRMP3_HAVE_SIMD
- int aligned_count = num_samples & ~7;
- for(; i < aligned_count; i+=8)
- {
- static const drmp3_f4 g_scale = { 32768.0f, 32768.0f, 32768.0f, 32768.0f };
- drmp3_f4 a = DRMP3_VMUL(DRMP3_VLD(&in[i ]), g_scale);
- drmp3_f4 b = DRMP3_VMUL(DRMP3_VLD(&in[i+4]), g_scale);
+ int aligned_count = num_samples & ~7;
+ for(; i < aligned_count; i+=8)
+ {
+ drmp3_f4 scale = DRMP3_VSET(32768.0f);
+ drmp3_f4 a = DRMP3_VMUL(DRMP3_VLD(&in[i ]), scale);
+ drmp3_f4 b = DRMP3_VMUL(DRMP3_VLD(&in[i+4]), scale);
#if DRMP3_HAVE_SSE
- static const drmp3_f4 g_max = { 32767.0f, 32767.0f, 32767.0f, 32767.0f };
- static const drmp3_f4 g_min = { -32768.0f, -32768.0f, -32768.0f, -32768.0f };
- __m128i pcm8 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(a, g_max), g_min)),
- _mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(b, g_max), g_min)));
- out[i ] = (drmp3_int16)_mm_extract_epi16(pcm8, 0);
- out[i+1] = (drmp3_int16)_mm_extract_epi16(pcm8, 1);
- out[i+2] = (drmp3_int16)_mm_extract_epi16(pcm8, 2);
- out[i+3] = (drmp3_int16)_mm_extract_epi16(pcm8, 3);
- out[i+4] = (drmp3_int16)_mm_extract_epi16(pcm8, 4);
- out[i+5] = (drmp3_int16)_mm_extract_epi16(pcm8, 5);
- out[i+6] = (drmp3_int16)_mm_extract_epi16(pcm8, 6);
- out[i+7] = (drmp3_int16)_mm_extract_epi16(pcm8, 7);
+ drmp3_f4 s16max = DRMP3_VSET( 32767.0f);
+ drmp3_f4 s16min = DRMP3_VSET(-32768.0f);
+ __m128i pcm8 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(a, s16max), s16min)),
+ _mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(b, s16max), s16min)));
+ out[i ] = (drmp3_int16)_mm_extract_epi16(pcm8, 0);
+ out[i+1] = (drmp3_int16)_mm_extract_epi16(pcm8, 1);
+ out[i+2] = (drmp3_int16)_mm_extract_epi16(pcm8, 2);
+ out[i+3] = (drmp3_int16)_mm_extract_epi16(pcm8, 3);
+ out[i+4] = (drmp3_int16)_mm_extract_epi16(pcm8, 4);
+ out[i+5] = (drmp3_int16)_mm_extract_epi16(pcm8, 5);
+ out[i+6] = (drmp3_int16)_mm_extract_epi16(pcm8, 6);
+ out[i+7] = (drmp3_int16)_mm_extract_epi16(pcm8, 7);
#else
- int16x4_t pcma, pcmb;
- a = DRMP3_VADD(a, DRMP3_VSET(0.5f));
- b = DRMP3_VADD(b, DRMP3_VSET(0.5f));
- pcma = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(a), vreinterpretq_s32_u32(vcltq_f32(a, DRMP3_VSET(0)))));
- pcmb = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(b), vreinterpretq_s32_u32(vcltq_f32(b, DRMP3_VSET(0)))));
- vst1_lane_s16(out+i , pcma, 0);
- vst1_lane_s16(out+i+1, pcma, 1);
- vst1_lane_s16(out+i+2, pcma, 2);
- vst1_lane_s16(out+i+3, pcma, 3);
- vst1_lane_s16(out+i+4, pcmb, 0);
- vst1_lane_s16(out+i+5, pcmb, 1);
- vst1_lane_s16(out+i+6, pcmb, 2);
- vst1_lane_s16(out+i+7, pcmb, 3);
+ int16x4_t pcma, pcmb;
+ a = DRMP3_VADD(a, DRMP3_VSET(0.5f));
+ b = DRMP3_VADD(b, DRMP3_VSET(0.5f));
+ pcma = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(a), vreinterpretq_s32_u32(vcltq_f32(a, DRMP3_VSET(0)))));
+ pcmb = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(b), vreinterpretq_s32_u32(vcltq_f32(b, DRMP3_VSET(0)))));
+ vst1_lane_s16(out+i , pcma, 0);
+ vst1_lane_s16(out+i+1, pcma, 1);
+ vst1_lane_s16(out+i+2, pcma, 2);
+ vst1_lane_s16(out+i+3, pcma, 3);
+ vst1_lane_s16(out+i+4, pcmb, 0);
+ vst1_lane_s16(out+i+5, pcmb, 1);
+ vst1_lane_s16(out+i+6, pcmb, 2);
+ vst1_lane_s16(out+i+7, pcmb, 3);
#endif
- }
+ }
#endif
- for(; i < num_samples; i++)
+ for(; i < num_samples; i++)
+ {
+ float sample = in[i] * 32768.0f;
+ if (sample >= 32766.5)
+ out[i] = (drmp3_int16) 32767;
+ else if (sample <= -32767.5)
+ out[i] = (drmp3_int16)-32768;
+ else
{
- float sample = in[i] * 32768.0f;
- if (sample >= 32766.5)
- out[i] = (drmp3_int16) 32767;
- else if (sample <= -32767.5)
- out[i] = (drmp3_int16)-32768;
- else
- {
- short s = (drmp3_int16)(sample + .5f);
- s -= (s < 0); /* away from zero, to be compliant */
- out[i] = s;
- }
+ short s = (drmp3_int16)(sample + .5f);
+ s -= (s < 0); /* away from zero, to be compliant */
+ out[i] = s;
}
}
}
@@ -2210,7 +2323,7 @@ void drmp3dec_f32_to_s16(const float *in, drmp3_int16 *out, int num_samples)
/* Standard library stuff. */
#ifndef DRMP3_ASSERT
#include
-#define DRMP3_ASSERT(expression) assert(expression)
+#define DRMP3_ASSERT(expression) assert(expression)
#endif
#ifndef DRMP3_COPY_MEMORY
#define DRMP3_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz))
@@ -2229,13 +2342,6 @@ void drmp3dec_f32_to_s16(const float *in, drmp3_int16 *out, int num_samples)
#define DRMP3_FREE(p) free((p))
#endif
-#define drmp3_assert DRMP3_ASSERT
-#define drmp3_copy_memory DRMP3_COPY_MEMORY
-#define drmp3_zero_memory DRMP3_ZERO_MEMORY
-#define drmp3_zero_object DRMP3_ZERO_OBJECT
-#define drmp3_malloc DRMP3_MALLOC
-#define drmp3_realloc DRMP3_REALLOC
-
#define drmp3_countof(x) (sizeof(x) / sizeof(x[0]))
#define drmp3_max(x, y) (((x) > (y)) ? (x) : (y))
#define drmp3_min(x, y) (((x) < (y)) ? (x) : (y))
@@ -2255,10 +2361,109 @@ static void drmp3_blend_f32(float* pOut, float* pInA, float* pInB, float factor,
}
}
+
+static void* drmp3__malloc_default(size_t sz, void* pUserData)
+{
+ (void)pUserData;
+ return DRMP3_MALLOC(sz);
+}
+
+static void* drmp3__realloc_default(void* p, size_t sz, void* pUserData)
+{
+ (void)pUserData;
+ return DRMP3_REALLOC(p, sz);
+}
+
+static void drmp3__free_default(void* p, void* pUserData)
+{
+ (void)pUserData;
+ DRMP3_FREE(p);
+}
+
+
+#if 0 /* Unused, but leaving here in case I need to add it again later. */
+static void* drmp3__malloc_from_callbacks(size_t sz, const drmp3_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pAllocationCallbacks == NULL) {
+ return NULL;
+ }
+
+ if (pAllocationCallbacks->onMalloc != NULL) {
+ return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData);
+ }
+
+ /* Try using realloc(). */
+ if (pAllocationCallbacks->onRealloc != NULL) {
+ return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData);
+ }
+
+ return NULL;
+}
+#endif
+
+static void* drmp3__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const drmp3_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pAllocationCallbacks == NULL) {
+ return NULL;
+ }
+
+ if (pAllocationCallbacks->onRealloc != NULL) {
+ return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData);
+ }
+
+ /* Try emulating realloc() in terms of malloc()/free(). */
+ if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) {
+ void* p2;
+
+ p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData);
+ if (p2 == NULL) {
+ return NULL;
+ }
+
+ if (p != NULL) {
+ DRMP3_COPY_MEMORY(p2, p, szOld);
+ pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
+ }
+
+ return p2;
+ }
+
+ return NULL;
+}
+
+static void drmp3__free_from_callbacks(void* p, const drmp3_allocation_callbacks* pAllocationCallbacks)
+{
+ if (p == NULL || pAllocationCallbacks == NULL) {
+ return;
+ }
+
+ if (pAllocationCallbacks->onFree != NULL) {
+ pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
+ }
+}
+
+
+drmp3_allocation_callbacks drmp3_copy_allocation_callbacks_or_defaults(const drmp3_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pAllocationCallbacks != NULL) {
+ /* Copy. */
+ return *pAllocationCallbacks;
+ } else {
+ /* Defaults. */
+ drmp3_allocation_callbacks allocationCallbacks;
+ allocationCallbacks.pUserData = NULL;
+ allocationCallbacks.onMalloc = drmp3__malloc_default;
+ allocationCallbacks.onRealloc = drmp3__realloc_default;
+ allocationCallbacks.onFree = drmp3__free_default;
+ return allocationCallbacks;
+ }
+}
+
+
void drmp3_src_cache_init(drmp3_src* pSRC, drmp3_src_cache* pCache)
{
- drmp3_assert(pSRC != NULL);
- drmp3_assert(pCache != NULL);
+ DRMP3_ASSERT(pSRC != NULL);
+ DRMP3_ASSERT(pCache != NULL);
pCache->pSRC = pSRC;
pCache->cachedFrameCount = 0;
@@ -2270,11 +2475,11 @@ drmp3_uint64 drmp3_src_cache_read_frames(drmp3_src_cache* pCache, drmp3_uint64 f
drmp3_uint32 channels;
drmp3_uint64 totalFramesRead = 0;
- drmp3_assert(pCache != NULL);
- drmp3_assert(pCache->pSRC != NULL);
- drmp3_assert(pCache->pSRC->onRead != NULL);
- drmp3_assert(frameCount > 0);
- drmp3_assert(pFramesOut != NULL);
+ DRMP3_ASSERT(pCache != NULL);
+ DRMP3_ASSERT(pCache->pSRC != NULL);
+ DRMP3_ASSERT(pCache->pSRC->onRead != NULL);
+ DRMP3_ASSERT(frameCount > 0);
+ DRMP3_ASSERT(pFramesOut != NULL);
channels = pCache->pSRC->config.channels;
@@ -2287,7 +2492,7 @@ drmp3_uint64 drmp3_src_cache_read_frames(drmp3_src_cache* pCache, drmp3_uint64 f
framesToReadFromMemory = framesRemainingInMemory;
}
- drmp3_copy_memory(pFramesOut, pCache->pCachedFrames + pCache->iNextFrame*channels, (drmp3_uint32)(framesToReadFromMemory * channels * sizeof(float)));
+ DRMP3_COPY_MEMORY(pFramesOut, pCache->pCachedFrames + pCache->iNextFrame*channels, (drmp3_uint32)(framesToReadFromMemory * channels * sizeof(float)));
pCache->iNextFrame += (drmp3_uint32)framesToReadFromMemory;
totalFramesRead += framesToReadFromMemory;
@@ -2298,7 +2503,7 @@ drmp3_uint64 drmp3_src_cache_read_frames(drmp3_src_cache* pCache, drmp3_uint64 f
/* At this point there are still more frames to read from the client, so we'll need to reload the cache with fresh data. */
- drmp3_assert(frameCount > 0);
+ DRMP3_ASSERT(frameCount > 0);
pFramesOut += framesToReadFromMemory * channels;
pCache->iNextFrame = 0;
@@ -2331,7 +2536,7 @@ drmp3_bool32 drmp3_src_init(const drmp3_src_config* pConfig, drmp3_src_read_proc
return DRMP3_FALSE;
}
- drmp3_zero_object(pSRC);
+ DRMP3_ZERO_OBJECT(pSRC);
if (pConfig == NULL || onRead == NULL) {
return DRMP3_FALSE;
@@ -2414,9 +2619,9 @@ drmp3_uint64 drmp3_src_read_frames(drmp3_src* pSRC, drmp3_uint64 frameCount, voi
drmp3_uint64 drmp3_src_read_frames_passthrough(drmp3_src* pSRC, drmp3_uint64 frameCount, void* pFramesOut, drmp3_bool32 flush)
{
- drmp3_assert(pSRC != NULL);
- drmp3_assert(frameCount > 0);
- drmp3_assert(pFramesOut != NULL);
+ DRMP3_ASSERT(pSRC != NULL);
+ DRMP3_ASSERT(frameCount > 0);
+ DRMP3_ASSERT(pFramesOut != NULL);
(void)flush; /* Passthrough need not care about flushing. */
return pSRC->onRead(pSRC, frameCount, pFramesOut, pSRC->pUserData);
@@ -2427,9 +2632,9 @@ drmp3_uint64 drmp3_src_read_frames_linear(drmp3_src* pSRC, drmp3_uint64 frameCou
double factor;
drmp3_uint64 totalFramesRead;
- drmp3_assert(pSRC != NULL);
- drmp3_assert(frameCount > 0);
- drmp3_assert(pFramesOut != NULL);
+ DRMP3_ASSERT(pSRC != NULL);
+ DRMP3_ASSERT(frameCount > 0);
+ DRMP3_ASSERT(pFramesOut != NULL);
/* For linear SRC, the bin is only 2 frames: 1 prior, 1 future. */
@@ -2518,7 +2723,7 @@ static size_t drmp3__on_read(drmp3* pMP3, void* pBufferOut, size_t bytesToRead)
static drmp3_bool32 drmp3__on_seek(drmp3* pMP3, int offset, drmp3_seek_origin origin)
{
- drmp3_assert(offset >= 0);
+ DRMP3_ASSERT(offset >= 0);
if (!pMP3->onSeek(pMP3->pUserData, offset, origin)) {
return DRMP3_FALSE;
@@ -2572,8 +2777,8 @@ static drmp3_uint64 drmp3_read_src(drmp3_src* pSRC, drmp3_uint64 frameCount, voi
float* pFramesOutF = (float*)pFramesOut;
drmp3_uint64 totalFramesRead = 0;
- drmp3_assert(pMP3 != NULL);
- drmp3_assert(pMP3->onRead != NULL);
+ DRMP3_ASSERT(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3->onRead != NULL);
while (frameCount > 0) {
/* Read from the in-memory buffer first. */
@@ -2638,7 +2843,7 @@ static drmp3_uint64 drmp3_read_src(drmp3_src* pSRC, drmp3_uint64 frameCount, voi
break;
}
- drmp3_assert(pMP3->pcmFramesRemainingInMP3Frame == 0);
+ DRMP3_ASSERT(pMP3->pcmFramesRemainingInMP3Frame == 0);
/*
At this point we have exhausted our in-memory buffer so we need to re-fill. Note that the sample rate may have changed
@@ -2655,7 +2860,7 @@ static drmp3_uint64 drmp3_read_src(drmp3_src* pSRC, drmp3_uint64 frameCount, voi
static drmp3_bool32 drmp3_init_src(drmp3* pMP3)
{
drmp3_src_config srcConfig;
- drmp3_zero_object(&srcConfig);
+ DRMP3_ZERO_OBJECT(&srcConfig);
srcConfig.sampleRateIn = DR_MP3_DEFAULT_SAMPLE_RATE;
srcConfig.sampleRateOut = pMP3->sampleRate;
srcConfig.channels = pMP3->channels;
@@ -2672,8 +2877,8 @@ static drmp3_uint32 drmp3_decode_next_frame_ex(drmp3* pMP3, drmp3d_sample_t* pPC
{
drmp3_uint32 pcmFramesRead = 0;
- drmp3_assert(pMP3 != NULL);
- drmp3_assert(pMP3->onRead != NULL);
+ DRMP3_ASSERT(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3->onRead != NULL);
if (pMP3->atEnd) {
return 0;
@@ -2689,14 +2894,17 @@ static drmp3_uint32 drmp3_decode_next_frame_ex(drmp3* pMP3, drmp3d_sample_t* pPC
if (pMP3->dataCapacity < DRMP3_DATA_CHUNK_SIZE) {
drmp3_uint8* pNewData;
+ size_t newDataCap;
- pMP3->dataCapacity = DRMP3_DATA_CHUNK_SIZE;
- pNewData = (drmp3_uint8*)drmp3_realloc(pMP3->pData, pMP3->dataCapacity);
+ newDataCap = DRMP3_DATA_CHUNK_SIZE;
+
+ pNewData = (drmp3_uint8*)drmp3__realloc_from_callbacks(pMP3->pData, newDataCap, pMP3->dataCapacity, &pMP3->allocationCallbacks);
if (pNewData == NULL) {
return 0; /* Out of memory. */
}
pMP3->pData = pNewData;
+ pMP3->dataCapacity = newDataCap;
}
bytesRead = drmp3__on_read(pMP3, pMP3->pData + pMP3->dataSize, (pMP3->dataCapacity - pMP3->dataSize));
@@ -2753,16 +2961,19 @@ static drmp3_uint32 drmp3_decode_next_frame_ex(drmp3* pMP3, drmp3d_sample_t* pPC
/* Need more data. minimp3 recommends doing data submission in 16K chunks. */
if (pMP3->dataCapacity == pMP3->dataSize) {
+ /* No room. Expand. */
drmp3_uint8* pNewData;
+ size_t newDataCap;
- /* No room. Expand. */
- pMP3->dataCapacity += DRMP3_DATA_CHUNK_SIZE;
- pNewData = (drmp3_uint8*)drmp3_realloc(pMP3->pData, pMP3->dataCapacity);
+ newDataCap = pMP3->dataCapacity + DRMP3_DATA_CHUNK_SIZE;
+
+ pNewData = (drmp3_uint8*)drmp3__realloc_from_callbacks(pMP3->pData, newDataCap, pMP3->dataCapacity, &pMP3->allocationCallbacks);
if (pNewData == NULL) {
return 0; /* Out of memory. */
}
pMP3->pData = pNewData;
+ pMP3->dataCapacity = newDataCap;
}
/* Fill in a chunk. */
@@ -2781,7 +2992,7 @@ static drmp3_uint32 drmp3_decode_next_frame_ex(drmp3* pMP3, drmp3d_sample_t* pPC
static drmp3_uint32 drmp3_decode_next_frame(drmp3* pMP3)
{
- drmp3_assert(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3 != NULL);
return drmp3_decode_next_frame_ex(pMP3, (drmp3d_sample_t*)pMP3->pcmFrames, DRMP3_FALSE);
}
@@ -2790,7 +3001,7 @@ static drmp3_uint32 drmp3_seek_next_frame(drmp3* pMP3)
{
drmp3_uint32 pcmFrameCount;
- drmp3_assert(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3 != NULL);
pcmFrameCount = drmp3_decode_next_frame_ex(pMP3, NULL);
if (pcmFrameCount == 0) {
@@ -2806,12 +3017,12 @@ static drmp3_uint32 drmp3_seek_next_frame(drmp3* pMP3)
}
#endif
-drmp3_bool32 drmp3_init_internal(drmp3* pMP3, drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, const drmp3_config* pConfig)
+drmp3_bool32 drmp3_init_internal(drmp3* pMP3, drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, const drmp3_config* pConfig, const drmp3_allocation_callbacks* pAllocationCallbacks)
{
drmp3_config config;
- drmp3_assert(pMP3 != NULL);
- drmp3_assert(onRead != NULL);
+ DRMP3_ASSERT(pMP3 != NULL);
+ DRMP3_ASSERT(onRead != NULL);
/* This function assumes the output object has already been reset to 0. Do not do that here, otherwise things will break. */
drmp3dec_init(&pMP3->decoder);
@@ -2820,7 +3031,7 @@ drmp3_bool32 drmp3_init_internal(drmp3* pMP3, drmp3_read_proc onRead, drmp3_seek
if (pConfig != NULL) {
config = *pConfig;
} else {
- drmp3_zero_object(&config);
+ DRMP3_ZERO_OBJECT(&config);
}
pMP3->channels = config.outputChannels;
@@ -2835,6 +3046,11 @@ drmp3_bool32 drmp3_init_internal(drmp3* pMP3, drmp3_read_proc onRead, drmp3_seek
pMP3->onRead = onRead;
pMP3->onSeek = onSeek;
pMP3->pUserData = pUserData;
+ pMP3->allocationCallbacks = drmp3_copy_allocation_callbacks_or_defaults(pAllocationCallbacks);
+
+ if (pMP3->allocationCallbacks.onFree == NULL || (pMP3->allocationCallbacks.onMalloc == NULL && pMP3->allocationCallbacks.onRealloc == NULL)) {
+ return DRMP3_FALSE; /* Invalid allocation callbacks. */
+ }
/*
We need a sample rate converter for converting the sample rate from the MP3 frames to the requested output sample rate. Note that if
@@ -2853,14 +3069,14 @@ drmp3_bool32 drmp3_init_internal(drmp3* pMP3, drmp3_read_proc onRead, drmp3_seek
return DRMP3_TRUE;
}
-drmp3_bool32 drmp3_init(drmp3* pMP3, drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, const drmp3_config* pConfig)
+drmp3_bool32 drmp3_init(drmp3* pMP3, drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, const drmp3_config* pConfig, const drmp3_allocation_callbacks* pAllocationCallbacks)
{
if (pMP3 == NULL || onRead == NULL) {
return DRMP3_FALSE;
}
- drmp3_zero_object(pMP3);
- return drmp3_init_internal(pMP3, onRead, onSeek, pUserData, pConfig);
+ DRMP3_ZERO_OBJECT(pMP3);
+ return drmp3_init_internal(pMP3, onRead, onSeek, pUserData, pConfig, pAllocationCallbacks);
}
@@ -2869,8 +3085,8 @@ static size_t drmp3__on_read_memory(void* pUserData, void* pBufferOut, size_t by
drmp3* pMP3 = (drmp3*)pUserData;
size_t bytesRemaining;
- drmp3_assert(pMP3 != NULL);
- drmp3_assert(pMP3->memory.dataSize >= pMP3->memory.currentReadPos);
+ DRMP3_ASSERT(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3->memory.dataSize >= pMP3->memory.currentReadPos);
bytesRemaining = pMP3->memory.dataSize - pMP3->memory.currentReadPos;
if (bytesToRead > bytesRemaining) {
@@ -2878,7 +3094,7 @@ static size_t drmp3__on_read_memory(void* pUserData, void* pBufferOut, size_t by
}
if (bytesToRead > 0) {
- drmp3_copy_memory(pBufferOut, pMP3->memory.pData + pMP3->memory.currentReadPos, bytesToRead);
+ DRMP3_COPY_MEMORY(pBufferOut, pMP3->memory.pData + pMP3->memory.currentReadPos, bytesToRead);
pMP3->memory.currentReadPos += bytesToRead;
}
@@ -2889,7 +3105,7 @@ static drmp3_bool32 drmp3__on_seek_memory(void* pUserData, int byteOffset, drmp3
{
drmp3* pMP3 = (drmp3*)pUserData;
- drmp3_assert(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3 != NULL);
if (origin == drmp3_seek_origin_current) {
if (byteOffset > 0) {
@@ -2915,13 +3131,13 @@ static drmp3_bool32 drmp3__on_seek_memory(void* pUserData, int byteOffset, drmp3
return DRMP3_TRUE;
}
-drmp3_bool32 drmp3_init_memory(drmp3* pMP3, const void* pData, size_t dataSize, const drmp3_config* pConfig)
+drmp3_bool32 drmp3_init_memory(drmp3* pMP3, const void* pData, size_t dataSize, const drmp3_config* pConfig, const drmp3_allocation_callbacks* pAllocationCallbacks)
{
if (pMP3 == NULL) {
return DRMP3_FALSE;
}
- drmp3_zero_object(pMP3);
+ DRMP3_ZERO_OBJECT(pMP3);
if (pData == NULL || dataSize == 0) {
return DRMP3_FALSE;
@@ -2931,7 +3147,7 @@ drmp3_bool32 drmp3_init_memory(drmp3* pMP3, const void* pData, size_t dataSize,
pMP3->memory.dataSize = dataSize;
pMP3->memory.currentReadPos = 0;
- return drmp3_init_internal(pMP3, drmp3__on_read_memory, drmp3__on_seek_memory, pMP3, pConfig);
+ return drmp3_init_internal(pMP3, drmp3__on_read_memory, drmp3__on_seek_memory, pMP3, pConfig, pAllocationCallbacks);
}
@@ -2948,7 +3164,7 @@ static drmp3_bool32 drmp3__on_seek_stdio(void* pUserData, int offset, drmp3_seek
return fseek((FILE*)pUserData, offset, (origin == drmp3_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0;
}
-drmp3_bool32 drmp3_init_file(drmp3* pMP3, const char* filePath, const drmp3_config* pConfig)
+drmp3_bool32 drmp3_init_file(drmp3* pMP3, const char* filePath, const drmp3_config* pConfig, const drmp3_allocation_callbacks* pAllocationCallbacks)
{
FILE* pFile;
#if defined(_MSC_VER) && _MSC_VER >= 1400
@@ -2962,7 +3178,7 @@ drmp3_bool32 drmp3_init_file(drmp3* pMP3, const char* filePath, const drmp3_conf
}
#endif
- return drmp3_init(pMP3, drmp3__on_read_stdio, drmp3__on_seek_stdio, (void*)pFile, pConfig);
+ return drmp3_init(pMP3, drmp3__on_read_stdio, drmp3__on_seek_stdio, (void*)pFile, pConfig, pAllocationCallbacks);
}
#endif
@@ -2978,7 +3194,7 @@ void drmp3_uninit(drmp3* pMP3)
}
#endif
- drmp3_free(pMP3->pData);
+ drmp3__free_from_callbacks(pMP3->pData, &pMP3->allocationCallbacks);
}
drmp3_uint64 drmp3_read_pcm_frames_f32(drmp3* pMP3, drmp3_uint64 framesToRead, float* pBufferOut)
@@ -3051,7 +3267,7 @@ drmp3_uint64 drmp3_read_pcm_frames_s16(drmp3* pMP3, drmp3_uint64 framesToRead, d
void drmp3_reset(drmp3* pMP3)
{
- drmp3_assert(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3 != NULL);
pMP3->pcmFramesConsumedInMP3Frame = 0;
pMP3->pcmFramesRemainingInMP3Frame = 0;
@@ -3072,8 +3288,8 @@ void drmp3_reset(drmp3* pMP3)
drmp3_bool32 drmp3_seek_to_start_of_stream(drmp3* pMP3)
{
- drmp3_assert(pMP3 != NULL);
- drmp3_assert(pMP3->onSeek != NULL);
+ DRMP3_ASSERT(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3->onSeek != NULL);
/* Seek to the start of the stream to begin with. */
if (!drmp3__on_seek(pMP3, 0, drmp3_seek_origin_start)) {
@@ -3158,7 +3374,7 @@ drmp3_bool32 drmp3_seek_forward_by_pcm_frames__brute_force(drmp3* pMP3, drmp3_ui
drmp3_bool32 drmp3_seek_to_pcm_frame__brute_force(drmp3* pMP3, drmp3_uint64 frameIndex)
{
- drmp3_assert(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3 != NULL);
if (frameIndex == pMP3->currentPCMFrame) {
return DRMP3_TRUE;
@@ -3175,7 +3391,7 @@ drmp3_bool32 drmp3_seek_to_pcm_frame__brute_force(drmp3* pMP3, drmp3_uint64 fram
}
}
- drmp3_assert(frameIndex >= pMP3->currentPCMFrame);
+ DRMP3_ASSERT(frameIndex >= pMP3->currentPCMFrame);
return drmp3_seek_forward_by_pcm_frames__brute_force(pMP3, (frameIndex - pMP3->currentPCMFrame));
}
@@ -3183,7 +3399,7 @@ drmp3_bool32 drmp3_find_closest_seek_point(drmp3* pMP3, drmp3_uint64 frameIndex,
{
drmp3_uint32 iSeekPoint;
- drmp3_assert(pSeekPointIndex != NULL);
+ DRMP3_ASSERT(pSeekPointIndex != NULL);
*pSeekPointIndex = 0;
@@ -3210,9 +3426,9 @@ drmp3_bool32 drmp3_seek_to_pcm_frame__seek_table(drmp3* pMP3, drmp3_uint64 frame
drmp3_uint16 iMP3Frame;
drmp3_uint64 leftoverFrames;
- drmp3_assert(pMP3 != NULL);
- drmp3_assert(pMP3->pSeekPoints != NULL);
- drmp3_assert(pMP3->seekPointCount > 0);
+ DRMP3_ASSERT(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3->pSeekPoints != NULL);
+ DRMP3_ASSERT(pMP3->seekPointCount > 0);
/* If there is no prior seekpoint it means the target PCM frame comes before the first seek point. Just assume a seekpoint at the start of the file in this case. */
if (drmp3_find_closest_seek_point(pMP3, frameIndex, &priorSeekPointIndex)) {
@@ -3334,7 +3550,7 @@ drmp3_bool32 drmp3_get_mp3_and_pcm_frame_count(drmp3* pMP3, drmp3_uint64* pMP3Fr
}
srcRatio = (float)pMP3->mp3FrameSampleRate / (float)pMP3->sampleRate;
- drmp3_assert(srcRatio > 0);
+ DRMP3_ASSERT(srcRatio > 0);
pcmFramesInCurrentMP3FrameOutF = totalPCMFrameCountFractionalPart + (pcmFramesInCurrentMP3FrameIn / srcRatio);
pcmFramesInCurrentMP3FrameOut = (drmp3_uint32)pcmFramesInCurrentMP3FrameOutF;
@@ -3389,7 +3605,7 @@ void drmp3__accumulate_running_pcm_frame_count(drmp3* pMP3, drmp3_uint32 pcmFram
drmp3_uint32 pcmFrameCountOut;
srcRatio = (float)pMP3->mp3FrameSampleRate / (float)pMP3->sampleRate;
- drmp3_assert(srcRatio > 0);
+ DRMP3_ASSERT(srcRatio > 0);
pcmFrameCountOutF = *pRunningPCMFrameCountFractionalPart + (pcmFrameCountIn / srcRatio);
pcmFrameCountOut = (drmp3_uint32)pcmFrameCountOutF;
@@ -3467,7 +3683,7 @@ drmp3_bool32 drmp3_calculate_seek_points(drmp3* pMP3, drmp3_uint32* pSeekPointCo
drmp3_uint32 pcmFramesInCurrentMP3FrameIn;
/* The byte position of the next frame will be the stream's cursor position, minus whatever is sitting in the buffer. */
- drmp3_assert(pMP3->streamCursor >= pMP3->dataSize);
+ DRMP3_ASSERT(pMP3->streamCursor >= pMP3->dataSize);
mp3FrameInfo[iMP3Frame].bytePos = pMP3->streamCursor - pMP3->dataSize;
mp3FrameInfo[iMP3Frame].pcmFrameIndex = runningPCMFrameCount;
@@ -3570,7 +3786,7 @@ float* drmp3__full_read_and_close_f32(drmp3* pMP3, drmp3_config* pConfig, drmp3_
float* pFrames = NULL;
float temp[4096];
- drmp3_assert(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3 != NULL);
for (;;) {
drmp3_uint64 framesToReadRightNow = drmp3_countof(temp) / pMP3->channels;
@@ -3581,29 +3797,33 @@ float* drmp3__full_read_and_close_f32(drmp3* pMP3, drmp3_config* pConfig, drmp3_
/* Reallocate the output buffer if there's not enough room. */
if (framesCapacity < totalFramesRead + framesJustRead) {
+ drmp3_uint64 oldFramesBufferSize;
drmp3_uint64 newFramesBufferSize;
+ drmp3_uint64 newFramesCap;
float* pNewFrames;
- framesCapacity *= 2;
- if (framesCapacity < totalFramesRead + framesJustRead) {
- framesCapacity = totalFramesRead + framesJustRead;
+ newFramesCap = framesCapacity * 2;
+ if (newFramesCap < totalFramesRead + framesJustRead) {
+ newFramesCap = totalFramesRead + framesJustRead;
}
- newFramesBufferSize = framesCapacity*pMP3->channels*sizeof(float);
+ oldFramesBufferSize = framesCapacity * pMP3->channels * sizeof(float);
+ newFramesBufferSize = newFramesCap * pMP3->channels * sizeof(float);
if (newFramesBufferSize > DRMP3_SIZE_MAX) {
break;
}
- pNewFrames = (float*)drmp3_realloc(pFrames, (size_t)newFramesBufferSize);
+ pNewFrames = (float*)drmp3__realloc_from_callbacks(pFrames, (size_t)newFramesBufferSize, (size_t)oldFramesBufferSize, &pMP3->allocationCallbacks);
if (pNewFrames == NULL) {
- drmp3_free(pFrames);
+ drmp3__free_from_callbacks(pFrames, &pMP3->allocationCallbacks);
break;
}
pFrames = pNewFrames;
+ framesCapacity = newFramesCap;
}
- drmp3_copy_memory(pFrames + totalFramesRead*pMP3->channels, temp, (size_t)(framesJustRead*pMP3->channels*sizeof(float)));
+ DRMP3_COPY_MEMORY(pFrames + totalFramesRead*pMP3->channels, temp, (size_t)(framesJustRead*pMP3->channels*sizeof(float)));
totalFramesRead += framesJustRead;
/* If the number of frames we asked for is less that what we actually read it means we've reached the end. */
@@ -3633,7 +3853,7 @@ drmp3_int16* drmp3__full_read_and_close_s16(drmp3* pMP3, drmp3_config* pConfig,
drmp3_int16* pFrames = NULL;
drmp3_int16 temp[4096];
- drmp3_assert(pMP3 != NULL);
+ DRMP3_ASSERT(pMP3 != NULL);
for (;;) {
drmp3_uint64 framesToReadRightNow = drmp3_countof(temp) / pMP3->channels;
@@ -3645,28 +3865,32 @@ drmp3_int16* drmp3__full_read_and_close_s16(drmp3* pMP3, drmp3_config* pConfig,
/* Reallocate the output buffer if there's not enough room. */
if (framesCapacity < totalFramesRead + framesJustRead) {
drmp3_uint64 newFramesBufferSize;
+ drmp3_uint64 oldFramesBufferSize;
+ drmp3_uint64 newFramesCap;
drmp3_int16* pNewFrames;
- framesCapacity *= 2;
- if (framesCapacity < totalFramesRead + framesJustRead) {
- framesCapacity = totalFramesRead + framesJustRead;
+ newFramesCap = framesCapacity * 2;
+ if (newFramesCap < totalFramesRead + framesJustRead) {
+ newFramesCap = totalFramesRead + framesJustRead;
}
- newFramesBufferSize = framesCapacity*pMP3->channels*sizeof(drmp3_int16);
+ oldFramesBufferSize = framesCapacity * pMP3->channels * sizeof(drmp3_int16);
+ newFramesBufferSize = newFramesCap * pMP3->channels * sizeof(drmp3_int16);
if (newFramesBufferSize > DRMP3_SIZE_MAX) {
break;
}
- pNewFrames = (drmp3_int16*)drmp3_realloc(pFrames, (size_t)newFramesBufferSize);
+ pNewFrames = (drmp3_int16*)drmp3__realloc_from_callbacks(pFrames, (size_t)newFramesBufferSize, (size_t)oldFramesBufferSize, &pMP3->allocationCallbacks);
if (pNewFrames == NULL) {
- drmp3_free(pFrames);
+ drmp3__free_from_callbacks(pFrames, &pMP3->allocationCallbacks);
break;
}
pFrames = pNewFrames;
+ framesCapacity = newFramesCap;
}
- drmp3_copy_memory(pFrames + totalFramesRead*pMP3->channels, temp, (size_t)(framesJustRead*pMP3->channels*sizeof(drmp3_int16)));
+ DRMP3_COPY_MEMORY(pFrames + totalFramesRead*pMP3->channels, temp, (size_t)(framesJustRead*pMP3->channels*sizeof(drmp3_int16)));
totalFramesRead += framesJustRead;
/* If the number of frames we asked for is less that what we actually read it means we've reached the end. */
@@ -3690,20 +3914,20 @@ drmp3_int16* drmp3__full_read_and_close_s16(drmp3* pMP3, drmp3_config* pConfig,
}
-float* drmp3_open_and_read_f32(drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount)
+float* drmp3_open_and_read_pcm_frames_f32(drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks)
{
drmp3 mp3;
- if (!drmp3_init(&mp3, onRead, onSeek, pUserData, pConfig)) {
+ if (!drmp3_init(&mp3, onRead, onSeek, pUserData, pConfig, pAllocationCallbacks)) {
return NULL;
}
return drmp3__full_read_and_close_f32(&mp3, pConfig, pTotalFrameCount);
}
-drmp3_int16* drmp3_open_and_read_s16(drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount)
+drmp3_int16* drmp3_open_and_read_pcm_frames_s16(drmp3_read_proc onRead, drmp3_seek_proc onSeek, void* pUserData, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks)
{
drmp3 mp3;
- if (!drmp3_init(&mp3, onRead, onSeek, pUserData, pConfig)) {
+ if (!drmp3_init(&mp3, onRead, onSeek, pUserData, pConfig, pAllocationCallbacks)) {
return NULL;
}
@@ -3711,20 +3935,20 @@ drmp3_int16* drmp3_open_and_read_s16(drmp3_read_proc onRead, drmp3_seek_proc onS
}
-float* drmp3_open_memory_and_read_f32(const void* pData, size_t dataSize, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount)
+float* drmp3_open_memory_and_read_pcm_frames_f32(const void* pData, size_t dataSize, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks)
{
drmp3 mp3;
- if (!drmp3_init_memory(&mp3, pData, dataSize, pConfig)) {
+ if (!drmp3_init_memory(&mp3, pData, dataSize, pConfig, pAllocationCallbacks)) {
return NULL;
}
return drmp3__full_read_and_close_f32(&mp3, pConfig, pTotalFrameCount);
}
-drmp3_int16* drmp3_open_memory_and_read_s16(const void* pData, size_t dataSize, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount)
+drmp3_int16* drmp3_open_memory_and_read_pcm_frames_s16(const void* pData, size_t dataSize, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks)
{
drmp3 mp3;
- if (!drmp3_init_memory(&mp3, pData, dataSize, pConfig)) {
+ if (!drmp3_init_memory(&mp3, pData, dataSize, pConfig, pAllocationCallbacks)) {
return NULL;
}
@@ -3733,20 +3957,20 @@ drmp3_int16* drmp3_open_memory_and_read_s16(const void* pData, size_t dataSize,
#ifndef DR_MP3_NO_STDIO
-float* drmp3_open_file_and_read_f32(const char* filePath, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount)
+float* drmp3_open_file_and_read_pcm_frames_f32(const char* filePath, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks)
{
drmp3 mp3;
- if (!drmp3_init_file(&mp3, filePath, pConfig)) {
+ if (!drmp3_init_file(&mp3, filePath, pConfig, pAllocationCallbacks)) {
return NULL;
}
return drmp3__full_read_and_close_f32(&mp3, pConfig, pTotalFrameCount);
}
-drmp3_int16* drmp3_open_file_and_read_s16(const char* filePath, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount)
+drmp3_int16* drmp3_open_file_and_read_pcm_frames_s16(const char* filePath, drmp3_config* pConfig, drmp3_uint64* pTotalFrameCount, const drmp3_allocation_callbacks* pAllocationCallbacks)
{
drmp3 mp3;
- if (!drmp3_init_file(&mp3, filePath, pConfig)) {
+ if (!drmp3_init_file(&mp3, filePath, pConfig, pAllocationCallbacks)) {
return NULL;
}
@@ -3754,9 +3978,13 @@ drmp3_int16* drmp3_open_file_and_read_s16(const char* filePath, drmp3_config* pC
}
#endif
-void drmp3_free(void* p)
+void drmp3_free(void* p, const drmp3_allocation_callbacks* pAllocationCallbacks)
{
- DRMP3_FREE(p);
+ if (pAllocationCallbacks != NULL) {
+ drmp3__free_from_callbacks(p, pAllocationCallbacks);
+ } else {
+ drmp3__free_default(p, NULL);
+ }
}
#endif /*DR_MP3_IMPLEMENTATION*/
@@ -3779,6 +4007,53 @@ DIFFERENCES BETWEEN minimp3 AND dr_mp3
/*
REVISION HISTORY
================
+v0.5.6 - 2020-02-12
+ - Bring up to date with minimp3.
+
+v0.5.5 - 2020-01-29
+ - Fix a memory allocation bug in high level s16 decoding APIs.
+
+v0.5.4 - 2019-12-02
+ - Fix a possible null pointer dereference when using custom memory allocators for realloc().
+
+v0.5.3 - 2019-11-14
+ - Fix typos in documentation.
+
+v0.5.2 - 2019-11-02
+ - Bring up to date with minimp3.
+
+v0.5.1 - 2019-10-08
+ - Fix a warning with GCC.
+
+v0.5.0 - 2019-10-07
+ - API CHANGE: Add support for user defined memory allocation routines. This system allows the program to specify their own memory allocation
+ routines with a user data pointer for client-specific contextual data. This adds an extra parameter to the end of the following APIs:
+ - drmp3_init()
+ - drmp3_init_file()
+ - drmp3_init_memory()
+ - drmp3_open_and_read_pcm_frames_f32()
+ - drmp3_open_and_read_pcm_frames_s16()
+ - drmp3_open_memory_and_read_pcm_frames_f32()
+ - drmp3_open_memory_and_read_pcm_frames_s16()
+ - drmp3_open_file_and_read_pcm_frames_f32()
+ - drmp3_open_file_and_read_pcm_frames_s16()
+ - API CHANGE: Renamed the following APIs:
+ - drmp3_open_and_read_f32() -> drmp3_open_and_read_pcm_frames_f32()
+ - drmp3_open_and_read_s16() -> drmp3_open_and_read_pcm_frames_s16()
+ - drmp3_open_memory_and_read_f32() -> drmp3_open_memory_and_read_pcm_frames_f32()
+ - drmp3_open_memory_and_read_s16() -> drmp3_open_memory_and_read_pcm_frames_s16()
+ - drmp3_open_file_and_read_f32() -> drmp3_open_file_and_read_pcm_frames_f32()
+ - drmp3_open_file_and_read_s16() -> drmp3_open_file_and_read_pcm_frames_s16()
+
+v0.4.7 - 2019-07-28
+ - Fix a compiler error.
+
+v0.4.6 - 2019-06-14
+ - Fix a compiler error.
+
+v0.4.5 - 2019-06-06
+ - Bring up to date with minimp3.
+
v0.4.4 - 2019-05-06
- Fixes to the VC6 build.
@@ -3788,9 +4063,9 @@ v0.4.3 - 2019-05-05
DR_MP3_DEFAULT_CHANNELS or DR_MP3_DEFAULT_SAMPLE_RATE.
- Add s16 reading APIs
- drmp3_read_pcm_frames_s16
- - drmp3_open_memory_and_read_s16
- - drmp3_open_and_read_s16
- - drmp3_open_file_and_read_s16
+ - drmp3_open_memory_and_read_pcm_frames_s16
+ - drmp3_open_and_read_pcm_frames_s16
+ - drmp3_open_file_and_read_pcm_frames_s16
- Add drmp3_get_mp3_and_pcm_frame_count() to the public header section.
- Add support for C89.
- Change license to choice of public domain or MIT-0.
@@ -3805,9 +4080,9 @@ v0.4.0 - 2018-12-16
- API CHANGE: Rename some APIs:
- drmp3_read_f32 -> to drmp3_read_pcm_frames_f32
- drmp3_seek_to_frame -> drmp3_seek_to_pcm_frame
- - drmp3_open_and_decode_f32 -> drmp3_open_and_read_f32
- - drmp3_open_and_decode_memory_f32 -> drmp3_open_memory_and_read_f32
- - drmp3_open_and_decode_file_f32 -> drmp3_open_file_and_read_f32
+ - drmp3_open_and_decode_f32 -> drmp3_open_and_read_pcm_frames_f32
+ - drmp3_open_and_decode_memory_f32 -> drmp3_open_memory_and_read_pcm_frames_f32
+ - drmp3_open_and_decode_file_f32 -> drmp3_open_file_and_read_pcm_frames_f32
- Add drmp3_get_pcm_frame_count().
- Add drmp3_get_mp3_frame_count().
- Improve seeking performance.
@@ -3911,7 +4186,7 @@ For more information, please refer to
===============================================================================
ALTERNATIVE 2 - MIT No Attribution
===============================================================================
-Copyright 2018 David Reid
+Copyright 2020 David Reid
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
diff --git a/client/miniaudio/dr_wav.h b/client/miniaudio/dr_wav.h
index 7c48ac40..7166bbb9 100644
--- a/client/miniaudio/dr_wav.h
+++ b/client/miniaudio/dr_wav.h
@@ -1,43 +1,193 @@
/*
WAV audio loader and writer. Choice of public domain or MIT-0. See license statements at the end of this file.
-dr_wav - v0.9.2 - 2019-05-21
+dr_wav - v0.11.5 - 2020-03-07
David Reid - mackron@gmail.com
*/
/*
-DEPRECATED APIS
-===============
-Version 0.9.0 deprecated the per-sample reading and seeking APIs and replaced them with versions that work on the resolution
-of a PCM frame instead. For example, given a stereo WAV file, previously you would pass 2 to drwav_read_f32() to read one
-PCM frame, whereas now you would pass in 1 to drwav_read_pcm_frames_f32(). The old APIs would return the number of samples
-read, whereas now it will return the number of PCM frames. Below is a list of APIs that have been deprecated and their
-replacements.
-
- drwav_read() -> drwav_read_pcm_frames()
- drwav_read_s16() -> drwav_read_pcm_frames_s16()
- drwav_read_f32() -> drwav_read_pcm_frames_f32()
- drwav_read_s32() -> drwav_read_pcm_frames_s32()
- drwav_seek_to_sample() -> drwav_seek_to_pcm_frame()
- drwav_write() -> drwav_write_pcm_frames()
- drwav_open_and_read_s16() -> drwav_open_and_read_pcm_frames_s16()
- drwav_open_and_read_f32() -> drwav_open_and_read_pcm_frames_f32()
- drwav_open_and_read_s32() -> drwav_open_and_read_pcm_frames_s32()
- drwav_open_file_and_read_s16() -> drwav_open_file_and_read_pcm_frames_s16()
- drwav_open_file_and_read_f32() -> drwav_open_file_and_read_pcm_frames_f32()
- drwav_open_file_and_read_s32() -> drwav_open_file_and_read_pcm_frames_s32()
- drwav_open_memory_and_read_s16() -> drwav_open_memory_and_read_pcm_frames_s16()
- drwav_open_memory_and_read_f32() -> drwav_open_memory_and_read_pcm_frames_f32()
- drwav_open_memory_and_read_s32() -> drwav_open_memory_and_read_pcm_frames_s32()
- drwav::totalSampleCount -> drwav::totalPCMFrameCount
-
-Rationale:
- 1) Most programs will want to read in multiples of the channel count which demands a per-frame reading API. Per-sample
- reading just adds complexity and maintenance costs for no practical benefit.
- 2) This is consistent with my other decoders - dr_flac and dr_mp3.
-
-These APIs will be removed completely in version 0.10.0. You can continue to use drwav_read_raw() if you need per-sample
-reading.
+RELEASE NOTES - v0.11.0
+=======================
+Version 0.11.0 has breaking API changes.
+
+Improved Client-Defined Memory Allocation
+-----------------------------------------
+The main change with this release is the addition of a more flexible way of implementing custom memory allocation routines. The
+existing system of DRWAV_MALLOC, DRWAV_REALLOC and DRWAV_FREE are still in place and will be used by default when no custom
+allocation callbacks are specified.
+
+To use the new system, you pass in a pointer to a drwav_allocation_callbacks object to drwav_init() and family, like this:
+
+ void* my_malloc(size_t sz, void* pUserData)
+ {
+ return malloc(sz);
+ }
+ void* my_realloc(void* p, size_t sz, void* pUserData)
+ {
+ return realloc(p, sz);
+ }
+ void my_free(void* p, void* pUserData)
+ {
+ free(p);
+ }
+
+ ...
+
+ drwav_allocation_callbacks allocationCallbacks;
+ allocationCallbacks.pUserData = &myData;
+ allocationCallbacks.onMalloc = my_malloc;
+ allocationCallbacks.onRealloc = my_realloc;
+ allocationCallbacks.onFree = my_free;
+ drwav_init_file(&wav, "my_file.wav", &allocationCallbacks);
+
+The advantage of this new system is that it allows you to specify user data which will be passed in to the allocation routines.
+
+Passing in null for the allocation callbacks object will cause dr_wav to use defaults which is the same as DRWAV_MALLOC,
+DRWAV_REALLOC and DRWAV_FREE and the equivalent of how it worked in previous versions.
+
+Every API that opens a drwav object now takes this extra parameter. These include the following:
+
+ drwav_init()
+ drwav_init_ex()
+ drwav_init_file()
+ drwav_init_file_ex()
+ drwav_init_file_w()
+ drwav_init_file_w_ex()
+ drwav_init_memory()
+ drwav_init_memory_ex()
+ drwav_init_write()
+ drwav_init_write_sequential()
+ drwav_init_write_sequential_pcm_frames()
+ drwav_init_file_write()
+ drwav_init_file_write_sequential()
+ drwav_init_file_write_sequential_pcm_frames()
+ drwav_init_file_write_w()
+ drwav_init_file_write_sequential_w()
+ drwav_init_file_write_sequential_pcm_frames_w()
+ drwav_init_memory_write()
+ drwav_init_memory_write_sequential()
+ drwav_init_memory_write_sequential_pcm_frames()
+ drwav_open_and_read_pcm_frames_s16()
+ drwav_open_and_read_pcm_frames_f32()
+ drwav_open_and_read_pcm_frames_s32()
+ drwav_open_file_and_read_pcm_frames_s16()
+ drwav_open_file_and_read_pcm_frames_f32()
+ drwav_open_file_and_read_pcm_frames_s32()
+ drwav_open_file_and_read_pcm_frames_s16_w()
+ drwav_open_file_and_read_pcm_frames_f32_w()
+ drwav_open_file_and_read_pcm_frames_s32_w()
+ drwav_open_memory_and_read_pcm_frames_s16()
+ drwav_open_memory_and_read_pcm_frames_f32()
+ drwav_open_memory_and_read_pcm_frames_s32()
+
+Endian Improvements
+-------------------
+Previously, the following APIs returned little-endian audio data. These now return native-endian data. This improves compatibility
+on big-endian architectures.
+
+ drwav_read_pcm_frames()
+ drwav_read_pcm_frames_s16()
+ drwav_read_pcm_frames_s32()
+ drwav_read_pcm_frames_f32()
+ drwav_open_and_read_pcm_frames_s16()
+ drwav_open_and_read_pcm_frames_s32()
+ drwav_open_and_read_pcm_frames_f32()
+ drwav_open_file_and_read_pcm_frames_s16()
+ drwav_open_file_and_read_pcm_frames_s32()
+ drwav_open_file_and_read_pcm_frames_f32()
+ drwav_open_file_and_read_pcm_frames_s16_w()
+ drwav_open_file_and_read_pcm_frames_s32_w()
+ drwav_open_file_and_read_pcm_frames_f32_w()
+ drwav_open_memory_and_read_pcm_frames_s16()
+ drwav_open_memory_and_read_pcm_frames_s32()
+ drwav_open_memory_and_read_pcm_frames_f32()
+
+APIs have been added to give you explicit control over whether or not audio data is read or written in big- or little-endian byte
+order:
+
+ drwav_read_pcm_frames_le()
+ drwav_read_pcm_frames_be()
+ drwav_read_pcm_frames_s16le()
+ drwav_read_pcm_frames_s16be()
+ drwav_read_pcm_frames_f32le()
+ drwav_read_pcm_frames_f32be()
+ drwav_read_pcm_frames_s32le()
+ drwav_read_pcm_frames_s32be()
+ drwav_write_pcm_frames_le()
+ drwav_write_pcm_frames_be()
+
+Removed APIs
+------------
+The following APIs were deprecated in version 0.10.0 and have now been removed:
+
+ drwav_open()
+ drwav_open_ex()
+ drwav_open_write()
+ drwav_open_write_sequential()
+ drwav_open_file()
+ drwav_open_file_ex()
+ drwav_open_file_write()
+ drwav_open_file_write_sequential()
+ drwav_open_memory()
+ drwav_open_memory_ex()
+ drwav_open_memory_write()
+ drwav_open_memory_write_sequential()
+ drwav_close()
+
+
+
+RELEASE NOTES - v0.10.0
+=======================
+Version 0.10.0 has breaking API changes. There are no significant bug fixes in this release, so if you are affected you do
+not need to upgrade.
+
+Removed APIs
+------------
+The following APIs were deprecated in version 0.9.0 and have been completely removed in version 0.10.0:
+
+ drwav_read()
+ drwav_read_s16()
+ drwav_read_f32()
+ drwav_read_s32()
+ drwav_seek_to_sample()
+ drwav_write()
+ drwav_open_and_read_s16()
+ drwav_open_and_read_f32()
+ drwav_open_and_read_s32()
+ drwav_open_file_and_read_s16()
+ drwav_open_file_and_read_f32()
+ drwav_open_file_and_read_s32()
+ drwav_open_memory_and_read_s16()
+ drwav_open_memory_and_read_f32()
+ drwav_open_memory_and_read_s32()
+ drwav::totalSampleCount
+
+See release notes for version 0.9.0 at the bottom of this file for replacement APIs.
+
+Deprecated APIs
+---------------
+The following APIs have been deprecated. There is a confusing and completely arbitrary difference between drwav_init*() and
+drwav_open*(), where drwav_init*() initializes a pre-allocated drwav object, whereas drwav_open*() will first allocate a
+drwav object on the heap and then initialize it. drwav_open*() has been deprecated which means you must now use a pre-
+allocated drwav object with drwav_init*(). If you need the previous functionality, you can just do a malloc() followed by
+a call to one of the drwav_init*() APIs.
+
+ drwav_open()
+ drwav_open_ex()
+ drwav_open_write()
+ drwav_open_write_sequential()
+ drwav_open_file()
+ drwav_open_file_ex()
+ drwav_open_file_write()
+ drwav_open_file_write_sequential()
+ drwav_open_memory()
+ drwav_open_memory_ex()
+ drwav_open_memory_write()
+ drwav_open_memory_write_sequential()
+ drwav_close()
+
+These APIs will be removed completely in a future version. The rationale for this change is to remove confusion between the
+two different ways to initialize a drwav object.
*/
/*
@@ -55,24 +205,13 @@ like the following to read audio data:
// Error opening WAV file.
}
- drwav_int32* pDecodedInterleavedSamples = malloc(wav.totalPCMFrameCount * wav.channels * sizeof(drwav_int32));
- size_t numberOfSamplesActuallyDecoded = drwav_read_pcm_frames_s32(&wav, wav.totalPCMFrameCount, pDecodedInterleavedSamples);
+ drwav_int32* pDecodedInterleavedPCMFrames = malloc(wav.totalPCMFrameCount * wav.channels * sizeof(drwav_int32));
+ size_t numberOfSamplesActuallyDecoded = drwav_read_pcm_frames_s32(&wav, wav.totalPCMFrameCount, pDecodedInterleavedPCMFrames);
...
drwav_uninit(&wav);
-You can also use drwav_open() to allocate and initialize the loader for you:
-
- drwav* pWav = drwav_open_file("my_song.wav");
- if (pWav == NULL) {
- // Error opening WAV file.
- }
-
- ...
-
- drwav_close(pWav);
-
If you just want to quickly open and read the audio data in a single operation you can do something like this:
unsigned int channels;
@@ -90,7 +229,7 @@ If you just want to quickly open and read the audio data in a single operation y
The examples above use versions of the API that convert the audio data to a consistent format (32-bit signed PCM, in
this case), but you can still output the audio data in its internal format (see notes below for supported formats):
- size_t samplesRead = drwav_read_pcm_frames(&wav, wav.totalPCMFrameCount, pDecodedInterleavedSamples);
+ size_t framesRead = drwav_read_pcm_frames(&wav, wav.totalPCMFrameCount, pDecodedInterleavedPCMFrames);
You can also read the raw bytes of audio data, which could be useful if dr_wav does not have native support for
a particular data format:
@@ -99,7 +238,7 @@ a particular data format:
dr_wav can also be used to output WAV files. This does not currently support compressed formats. To use this, look at
-drwav_open_write(), drwav_open_file_write(), etc. Use drwav_write_pcm_frames() to write samples, or drwav_write_raw()
+drwav_init_write(), drwav_init_file_write(), etc. Use drwav_write_pcm_frames() to write samples, or drwav_write_raw()
to write raw data in the "data" chunk.
drwav_data_format format;
@@ -108,11 +247,11 @@ to write raw data in the "data" chunk.
format.channels = 2;
format.sampleRate = 44100;
format.bitsPerSample = 16;
- drwav* pWav = drwav_open_file_write("data/recording.wav", &format);
+ drwav_init_file_write(&wav, "data/recording.wav", &format, NULL);
...
- drwav_uint64 samplesWritten = drwav_write_pcm_frames(pWav, frameCount, pSamples);
+ drwav_uint64 framesWritten = drwav_write_pcm_frames(pWav, frameCount, pSamples);
dr_wav has seamless support the Sony Wave64 format. The decoder will automatically detect it and it should Just Work
@@ -127,7 +266,7 @@ OPTIONS
Disables conversion APIs such as drwav_read_pcm_frames_f32() and drwav_s16_to_f32().
#define DR_WAV_NO_STDIO
- Disables drwav_open_file(), drwav_open_file_write(), etc.
+ Disables APIs that initialize a decoder from a file such as drwav_init_file(), drwav_init_file_write(), etc.
@@ -184,6 +323,14 @@ typedef drwav_uint32 drwav_bool32;
extern "C" {
#endif
+typedef drwav_int32 drwav_result;
+#define DRWAV_SUCCESS 0
+#define DRWAV_ERROR -1
+#define DRWAV_INVALID_ARGS -2
+#define DRWAV_INVALID_OPERATION -3
+#define DRWAV_INVALID_FILE -100
+#define DRWAV_EOF -101
+
/* Common data formats. */
#define DR_WAVE_FORMAT_PCM 0x1
#define DR_WAVE_FORMAT_ADPCM 0x2
@@ -234,7 +381,7 @@ typedef struct
/*
Callback for when data is read. Return value is the number of bytes actually read.
-pUserData [in] The user data that was passed to drwav_init(), drwav_open() and family.
+pUserData [in] The user data that was passed to drwav_init() and family.
pBufferOut [out] The output buffer.
bytesToRead [in] The number of bytes to read.
@@ -248,7 +395,7 @@ typedef size_t (* drwav_read_proc)(void* pUserData, void* pBufferOut, size_t byt
/*
Callback for when data is written. Returns value is the number of bytes actually written.
-pUserData [in] The user data that was passed to drwav_init_write(), drwav_open_write() and family.
+pUserData [in] The user data that was passed to drwav_init_write() and family.
pData [out] A pointer to the data to write.
bytesToWrite [in] The number of bytes to write.
@@ -261,7 +408,7 @@ typedef size_t (* drwav_write_proc)(void* pUserData, const void* pData, size_t b
/*
Callback for when data needs to be seeked.
-pUserData [in] The user data that was passed to drwav_init(), drwav_open() and family.
+pUserData [in] The user data that was passed to drwav_init() and family.
offset [in] The number of bytes to move, relative to the origin. Will never be negative.
origin [in] The origin of the seek - the current position or the start of the stream.
@@ -273,12 +420,12 @@ will be either drwav_seek_origin_start or drwav_seek_origin_current.
typedef drwav_bool32 (* drwav_seek_proc)(void* pUserData, int offset, drwav_seek_origin origin);
/*
-Callback for when drwav_init_ex/drwav_open_ex finds a chunk.
+Callback for when drwav_init_ex() finds a chunk.
-pChunkUserData [in] The user data that was passed to the pChunkUserData parameter of drwav_init_ex(), drwav_open_ex() and family.
+pChunkUserData [in] The user data that was passed to the pChunkUserData parameter of drwav_init_ex() and family.
onRead [in] A pointer to the function to call when reading.
onSeek [in] A pointer to the function to call when seeking.
-pReadSeekUserData [in] The user data that was passed to the pReadSeekUserData parameter of drwav_init_ex(), drwav_open_ex() and family.
+pReadSeekUserData [in] The user data that was passed to the pReadSeekUserData parameter of drwav_init_ex() and family.
pChunkHeader [in] A pointer to an object containing basic header information about the chunk. Use this to identify the chunk.
Returns the number of bytes read + seeked.
@@ -290,7 +437,15 @@ You must not attempt to read beyond the boundary of the chunk.
*/
typedef drwav_uint64 (* drwav_chunk_proc)(void* pChunkUserData, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pReadSeekUserData, const drwav_chunk_header* pChunkHeader);
-/* Structure for internal use. Only used for loaders opened with drwav_open_memory(). */
+typedef struct
+{
+ void* pUserData;
+ void* (* onMalloc)(size_t sz, void* pUserData);
+ void* (* onRealloc)(void* p, size_t sz, void* pUserData);
+ void (* onFree)(void* p, void* pUserData);
+} drwav_allocation_callbacks;
+
+/* Structure for internal use. Only used for loaders opened with drwav_init_memory(). */
typedef struct
{
const drwav_uint8* data;
@@ -298,7 +453,7 @@ typedef struct
size_t currentReadPos;
} drwav__memory_stream;
-/* Structure for internal use. Only used for writers opened with drwav_open_memory_write(). */
+/* Structure for internal use. Only used for writers opened with drwav_init_memory_write(). */
typedef struct
{
void** ppData;
@@ -395,6 +550,9 @@ typedef struct
/* The user data to pass to callbacks. */
void* pUserData;
+ /* Allocation callbacks. */
+ drwav_allocation_callbacks allocationCallbacks;
+
/* Whether or not the WAV file is formatted as a standard RIFF file or W64. */
drwav_container container;
@@ -443,14 +601,14 @@ typedef struct
drwav_smpl smpl;
- /* A hack to avoid a DRWAV_MALLOC() when opening a decoder with drwav_open_memory(). */
+ /* A hack to avoid a DRWAV_MALLOC() when opening a decoder with drwav_init_memory(). */
drwav__memory_stream memoryStream;
drwav__memory_stream_write memoryStreamWrite;
/* Generic data for compressed formats. This data is shared across all block-compressed formats. */
struct
{
- drwav_uint64 iCurrentSample; /* The index of the next sample that will be read by drwav_read_*(). This is used with "totalSampleCount" to ensure we don't read excess samples at the end of the last block. */
+ drwav_uint64 iCurrentPCMFrame; /* The index of the next PCM frame that will be read by drwav_read_*(). This is used with "totalPCMFrameCount" to ensure we don't read excess samples at the end of the last block. */
} compressed;
/* Microsoft ADPCM specific data. */
@@ -459,9 +617,9 @@ typedef struct
drwav_uint32 bytesRemainingInBlock;
drwav_uint16 predictor[2];
drwav_int32 delta[2];
- drwav_int32 cachedSamples[4]; /* Samples are stored in this cache during decoding. */
- drwav_uint32 cachedSampleCount;
- drwav_int32 prevSamples[2][2]; /* The previous 2 samples for each channel (2 channels at most). */
+ drwav_int32 cachedFrames[4]; /* Samples are stored in this cache during decoding. */
+ drwav_uint32 cachedFrameCount;
+ drwav_int32 prevFrames[2][2]; /* The previous 2 samples for each channel (2 channels at most). */
} msadpcm;
/* IMA ADPCM specific data. */
@@ -470,17 +628,14 @@ typedef struct
drwav_uint32 bytesRemainingInBlock;
drwav_int32 predictor[2];
drwav_int32 stepIndex[2];
- drwav_int32 cachedSamples[16]; /* Samples are stored in this cache during decoding. */
- drwav_uint32 cachedSampleCount;
+ drwav_int32 cachedFrames[16]; /* Samples are stored in this cache during decoding. */
+ drwav_uint32 cachedFrameCount;
} ima;
-
-
- drwav_uint64 totalSampleCount; /* <-- DEPRECATED. Will be removed in a future version. */
} drwav;
/*
-Initializes a pre-allocated drwav object.
+Initializes a pre-allocated drwav object for reading.
pWav [out] A pointer to the drwav object being initialized.
onRead [in] The function to call when data needs to be read from the client.
@@ -497,9 +652,6 @@ Close the loader with drwav_uninit().
This is the lowest level function for initializing a WAV file. You can also use drwav_init_file() and drwav_init_memory()
to open the stream from a file or from a block of memory respectively.
-If you want dr_wav to manage the memory allocation for you, consider using drwav_open() instead. This will allocate
-a drwav object on the heap and return a pointer to it.
-
Possible values for flags:
DRWAV_SEQUENTIAL: Never perform a backwards seek while loading. This disables the chunk callback and will cause this function
to return as soon as the data chunk is found. Any chunks after the data chunk will be ignored.
@@ -511,8 +663,8 @@ after the function returns.
See also: drwav_init_file(), drwav_init_memory(), drwav_uninit()
*/
-drwav_bool32 drwav_init(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData);
-drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags);
+drwav_bool32 drwav_init(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks);
/*
Initializes a pre-allocated drwav object for writing.
@@ -525,78 +677,37 @@ Returns true if successful; false otherwise.
Close the writer with drwav_uninit().
-This is the lowest level function for initializing a WAV file. You can also use drwav_init_file() and drwav_init_memory()
+This is the lowest level function for initializing a WAV file. You can also use drwav_init_file_write() and drwav_init_memory_write()
to open the stream from a file or from a block of memory respectively.
If the total sample count is known, you can use drwav_init_write_sequential(). This avoids the need for dr_wav to perform
a post-processing step for storing the total sample count and the size of the data chunk which requires a backwards seek.
-If you want dr_wav to manage the memory allocation for you, consider using drwav_open() instead. This will allocate
-a drwav object on the heap and return a pointer to it.
-
See also: drwav_init_file_write(), drwav_init_memory_write(), drwav_uninit()
*/
-drwav_bool32 drwav_init_write(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData);
-drwav_bool32 drwav_init_write_sequential(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData);
+drwav_bool32 drwav_init_write(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_write_sequential(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_write_sequential_pcm_frames(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks);
/*
-Uninitializes the given drwav object.
-
-Use this only for objects initialized with drwav_init().
-*/
-void drwav_uninit(drwav* pWav);
-
-
-/*
-Opens a wav file using the given callbacks.
-
-onRead [in] The function to call when data needs to be read from the client.
-onSeek [in] The function to call when the read position of the client data needs to move.
-pUserData [in, optional] A pointer to application defined data that will be passed to onRead and onSeek.
-
-Returns null on error.
-
-Close the loader with drwav_close().
-
-You can also use drwav_open_file() and drwav_open_memory() to open the stream from a file or from a block of
-memory respectively.
-
-This is different from drwav_init() in that it will allocate the drwav object for you via DRWAV_MALLOC() before
-initializing it.
-
-See also: drwav_init(), drwav_open_file(), drwav_open_memory(), drwav_close()
-*/
-drwav* drwav_open(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData);
-drwav* drwav_open_ex(drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags);
-
-/*
-Opens a wav file for writing using the given callbacks.
-
-onWrite [in] The function to call when data needs to be written.
-onSeek [in] The function to call when the write position needs to move.
-pUserData [in, optional] A pointer to application defined data that will be passed to onWrite and onSeek.
-
-Returns null on error.
+Utility function to determine the target size of the entire data to be written (including all headers and chunks).
-Close the loader with drwav_close().
+Returns the target size in bytes.
-You can also use drwav_open_file_write() and drwav_open_memory_write() to open the stream from a file or from a block
-of memory respectively.
+Useful if the application needs to know the size to allocate.
-This is different from drwav_init_write() in that it will allocate the drwav object for you via DRWAV_MALLOC() before
-initializing it.
+Only writing to the RIFF chunk and one data chunk is currently supported.
-See also: drwav_open_file_write(), drwav_open_memory_write(), drwav_close()
+See also: drwav_init_write(), drwav_init_file_write(), drwav_init_memory_write()
*/
-drwav* drwav_open_write(const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData);
-drwav* drwav_open_write_sequential(const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData);
+drwav_uint64 drwav_target_write_size_bytes(drwav_data_format const *format, drwav_uint64 totalSampleCount);
/*
-Uninitializes and deletes the the given drwav object.
+Uninitializes the given drwav object.
-Use this only for objects created with drwav_open().
+Use this only for objects initialized with drwav_init*() functions (drwav_init(), drwav_init_ex(), drwav_init_write(), drwav_init_write_sequential()).
*/
-void drwav_close(drwav* pWav);
+drwav_result drwav_uninit(drwav* pWav);
/*
@@ -613,18 +724,20 @@ Returns the number of bytes actually read.
size_t drwav_read_raw(drwav* pWav, size_t bytesToRead, void* pBufferOut);
/*
-Reads a chunk of audio data in the native internal format.
+Reads up to the specified number of PCM frames from the WAV file.
-This is typically the most efficient way to retrieve audio data, but it does not do any format
-conversions which means you'll need to convert the data manually if required.
+The output data will be in the file's internal format, converted to native-endian byte order. Use
+drwav_read_pcm_frames_s16/f32/s32() to read data in a specific format.
If the return value is less than it means the end of the file has been reached or
-you have requested more samples than can possibly fit in the output buffer.
+you have requested more PCM frames than can possibly fit in the output buffer.
This function will only work when sample data is of a fixed size and uncompressed. If you are
-using a compressed format consider using drwav_read_raw() or drwav_read_pcm_frames_s16/s32/f32/etc().
+using a compressed format consider using drwav_read_raw() or drwav_read_pcm_frames_s16/s32/f32().
*/
drwav_uint64 drwav_read_pcm_frames(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut);
+drwav_uint64 drwav_read_pcm_frames_le(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut);
+drwav_uint64 drwav_read_pcm_frames_be(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut);
/*
Seeks to the given PCM frame.
@@ -645,8 +758,13 @@ size_t drwav_write_raw(drwav* pWav, size_t bytesToWrite, const void* pData);
Writes PCM frames.
Returns the number of PCM frames written.
+
+Input samples need to be in native-endian byte order. On big-endian architectures the input data will be converted to
+little-endian. Use drwav_write_raw() to write raw audio data without performing any conversion.
*/
drwav_uint64 drwav_write_pcm_frames(drwav* pWav, drwav_uint64 framesToWrite, const void* pData);
+drwav_uint64 drwav_write_pcm_frames_le(drwav* pWav, drwav_uint64 framesToWrite, const void* pData);
+drwav_uint64 drwav_write_pcm_frames_be(drwav* pWav, drwav_uint64 framesToWrite, const void* pData);
/* Conversion Utilities */
@@ -660,6 +778,8 @@ Returns the number of PCM frames actually read.
If the return value is less than it means the end of the file has been reached.
*/
drwav_uint64 drwav_read_pcm_frames_s16(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut);
+drwav_uint64 drwav_read_pcm_frames_s16le(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut);
+drwav_uint64 drwav_read_pcm_frames_s16be(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut);
/* Low-level function for converting unsigned 8-bit PCM samples to signed 16-bit PCM samples. */
void drwav_u8_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount);
@@ -691,6 +811,8 @@ Returns the number of PCM frames actually read.
If the return value is less than it means the end of the file has been reached.
*/
drwav_uint64 drwav_read_pcm_frames_f32(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut);
+drwav_uint64 drwav_read_pcm_frames_f32le(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut);
+drwav_uint64 drwav_read_pcm_frames_f32be(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut);
/* Low-level function for converting unsigned 8-bit PCM samples to IEEE 32-bit floating point samples. */
void drwav_u8_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount);
@@ -722,6 +844,8 @@ Returns the number of PCM frames actually read.
If the return value is less than it means the end of the file has been reached.
*/
drwav_uint64 drwav_read_pcm_frames_s32(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut);
+drwav_uint64 drwav_read_pcm_frames_s32le(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut);
+drwav_uint64 drwav_read_pcm_frames_s32be(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut);
/* Low-level function for converting unsigned 8-bit PCM samples to signed 32-bit PCM samples. */
void drwav_u8_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount);
@@ -751,14 +875,16 @@ void drwav_mulaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sample
#ifndef DR_WAV_NO_STDIO
/*
-Helper for initializing a wave file using stdio.
+Helper for initializing a wave file for reading using stdio.
This holds the internal FILE object until drwav_uninit() is called. Keep this in mind if you're caching drwav
objects because the operating system may restrict the number of file handles an application can have open at
any given time.
*/
-drwav_bool32 drwav_init_file(drwav* pWav, const char* filename);
-drwav_bool32 drwav_init_file_ex(drwav* pWav, const char* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags);
+drwav_bool32 drwav_init_file(drwav* pWav, const char* filename, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_file_ex(drwav* pWav, const char* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_file_w(drwav* pWav, const wchar_t* filename, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_file_ex_w(drwav* pWav, const wchar_t* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks);
/*
Helper for initializing a wave file for writing using stdio.
@@ -767,29 +893,12 @@ This holds the internal FILE object until drwav_uninit() is called. Keep this in
objects because the operating system may restrict the number of file handles an application can have open at
any given time.
*/
-drwav_bool32 drwav_init_file_write(drwav* pWav, const char* filename, const drwav_data_format* pFormat);
-drwav_bool32 drwav_init_file_write_sequential(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount);
-
-/*
-Helper for opening a wave file using stdio.
-
-This holds the internal FILE object until drwav_close() is called. Keep this in mind if you're caching drwav
-objects because the operating system may restrict the number of file handles an application can have open at
-any given time.
-*/
-drwav* drwav_open_file(const char* filename);
-drwav* drwav_open_file_ex(const char* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags);
-
-/*
-Helper for opening a wave file for writing using stdio.
-
-This holds the internal FILE object until drwav_close() is called. Keep this in mind if you're caching drwav
-objects because the operating system may restrict the number of file handles an application can have open at
-any given time.
-*/
-drwav* drwav_open_file_write(const char* filename, const drwav_data_format* pFormat);
-drwav* drwav_open_file_write_sequential(const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount);
-
+drwav_bool32 drwav_init_file_write(drwav* pWav, const char* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_file_write_sequential(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_file_write_sequential_pcm_frames(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_file_write_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_file_write_sequential_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_file_write_sequential_pcm_frames_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks);
#endif /* DR_WAV_NO_STDIO */
/*
@@ -800,8 +909,8 @@ the lifetime of the drwav object.
The buffer should contain the contents of the entire wave file, not just the sample data.
*/
-drwav_bool32 drwav_init_memory(drwav* pWav, const void* data, size_t dataSize);
-drwav_bool32 drwav_init_memory_ex(drwav* pWav, const void* data, size_t dataSize, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags);
+drwav_bool32 drwav_init_memory(drwav* pWav, const void* data, size_t dataSize, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_memory_ex(drwav* pWav, const void* data, size_t dataSize, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks);
/*
Helper for initializing a writer which outputs data to a memory buffer.
@@ -811,75 +920,45 @@ dr_wav will manage the memory allocations, however it is up to the caller to fre
The buffer will remain allocated even after drwav_uninit() is called. Indeed, the buffer should not be
considered valid until after drwav_uninit() has been called anyway.
*/
-drwav_bool32 drwav_init_memory_write(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat);
-drwav_bool32 drwav_init_memory_write_sequential(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount);
+drwav_bool32 drwav_init_memory_write(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_memory_write_sequential(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_bool32 drwav_init_memory_write_sequential_pcm_frames(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks);
-/*
-Helper for opening a loader from a pre-allocated memory buffer.
-This does not create a copy of the data. It is up to the application to ensure the buffer remains valid for
-the lifetime of the drwav object.
+#ifndef DR_WAV_NO_CONVERSION_API
+/*
+Opens and reads an entire wav file in a single operation.
-The buffer should contain the contents of the entire wave file, not just the sample data.
+The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer.
*/
-drwav* drwav_open_memory(const void* data, size_t dataSize);
-drwav* drwav_open_memory_ex(const void* data, size_t dataSize, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags);
-
+drwav_int16* drwav_open_and_read_pcm_frames_s16(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
+float* drwav_open_and_read_pcm_frames_f32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_int32* drwav_open_and_read_pcm_frames_s32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
+#ifndef DR_WAV_NO_STDIO
/*
-Helper for opening a writer which outputs data to a memory buffer.
-
-dr_wav will manage the memory allocations, however it is up to the caller to free the data with drwav_free().
+Opens and decodes an entire wav file in a single operation.
-The buffer will remain allocated even after drwav_close() is called. Indeed, the buffer should not be
-considered valid until after drwav_close() has been called anyway.
+The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer.
*/
-drwav* drwav_open_memory_write(void** ppData, size_t* pDataSize, const drwav_data_format* pFormat);
-drwav* drwav_open_memory_write_sequential(void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount);
-
-
-#ifndef DR_WAV_NO_CONVERSION_API
-/* Opens and reads a wav file in a single operation. */
-drwav_int16* drwav_open_and_read_pcm_frames_s16(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount);
-float* drwav_open_and_read_pcm_frames_f32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount);
-drwav_int32* drwav_open_and_read_pcm_frames_s32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount);
-#ifndef DR_WAV_NO_STDIO
-/* Opens and decodes a wav file in a single operation. */
-drwav_int16* drwav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount);
-float* drwav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount);
-drwav_int32* drwav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount);
+drwav_int16* drwav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
+float* drwav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_int32* drwav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_int16* drwav_open_file_and_read_pcm_frames_s16_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
+float* drwav_open_file_and_read_pcm_frames_f32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_int32* drwav_open_file_and_read_pcm_frames_s32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
#endif
+/*
+Opens and decodes an entire wav file from a block of memory in a single operation.
-/* Opens and decodes a wav file from a block of memory in a single operation. */
-drwav_int16* drwav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount);
-float* drwav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount);
-drwav_int32* drwav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount);
+The return value is a heap-allocated buffer containing the audio data. Use drwav_free() to free the buffer.
+*/
+drwav_int16* drwav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
+float* drwav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
+drwav_int32* drwav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks);
#endif
/* Frees data that was allocated internally by dr_wav. */
-void drwav_free(void* pDataReturnedByOpenAndRead);
-
-
-/* DEPRECATED APIS */
-drwav_uint64 drwav_read(drwav* pWav, drwav_uint64 samplesToRead, void* pBufferOut);
-drwav_uint64 drwav_read_s16(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut);
-drwav_uint64 drwav_read_f32(drwav* pWav, drwav_uint64 samplesToRead, float* pBufferOut);
-drwav_uint64 drwav_read_s32(drwav* pWav, drwav_uint64 samplesToRead, drwav_int32* pBufferOut);
-drwav_bool32 drwav_seek_to_sample(drwav* pWav, drwav_uint64 sample);
-drwav_uint64 drwav_write(drwav* pWav, drwav_uint64 samplesToWrite, const void* pData);
-#ifndef DR_WAV_NO_CONVERSION_API
-drwav_int16* drwav_open_and_read_s16(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount);
-float* drwav_open_and_read_f32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount);
-drwav_int32* drwav_open_and_read_s32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount);
-#ifndef DR_WAV_NO_STDIO
-drwav_int16* drwav_open_memory_and_read_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount);
-float* drwav_open_file_and_read_f32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount);
-drwav_int32* drwav_open_file_and_read_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount);
-#endif
-drwav_int16* drwav_open_memory_and_read_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount);
-float* drwav_open_memory_and_read_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount);
-drwav_int32* drwav_open_memory_and_read_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount);
-#endif
-
+void drwav_free(void* p, const drwav_allocation_callbacks* pAllocationCallbacks);
#ifdef __cplusplus
}
@@ -901,6 +980,7 @@ drwav_int32* drwav_open_memory_and_read_s32(const void* data, size_t dataSize, u
#ifndef DR_WAV_NO_STDIO
#include
+#include
#endif
/* Standard library stuff. */
@@ -930,28 +1010,34 @@ drwav_int32* drwav_open_memory_and_read_s32(const void* data, size_t dataSize, u
#define drwav_max(a, b) (((a) > (b)) ? (a) : (b))
#define drwav_clamp(x, lo, hi) (drwav_max((lo), drwav_min((hi), (x))))
-#define drwav_assert DRWAV_ASSERT
-#define drwav_copy_memory DRWAV_COPY_MEMORY
-#define drwav_zero_memory DRWAV_ZERO_MEMORY
-
-typedef drwav_int32 drwav_result;
-#define DRWAV_SUCCESS 0
-#define DRWAV_ERROR -1
-#define DRWAV_INVALID_ARGS -2
-#define DRWAV_INVALID_OPERATION -3
-#define DRWAV_INVALID_FILE -100
-#define DRWAV_EOF -101
-
#define DRWAV_MAX_SIMD_VECTOR_SIZE 64 /* 64 for AVX-512 in the future. */
+/* CPU architecture. */
+#if defined(__x86_64__) || defined(_M_X64)
+ #define DRWAV_X64
+#elif defined(__i386) || defined(_M_IX86)
+ #define DRWAV_X86
+#elif defined(__arm__) || defined(_M_ARM)
+ #define DRWAV_ARM
+#endif
+
#ifdef _MSC_VER
-#define DRWAV_INLINE __forceinline
-#else
-#ifdef __GNUC__
-#define DRWAV_INLINE __inline__ __attribute__((always_inline))
+ #define DRWAV_INLINE __forceinline
+#elif defined(__GNUC__)
+ /*
+ I've had a bug report where GCC is emitting warnings about functions possibly not being inlineable. This warning happens when
+ the __attribute__((always_inline)) attribute is defined without an "inline" statement. I think therefore there must be some
+ case where "__inline__" is not always defined, thus the compiler emitting these warnings. When using -std=c89 or -ansi on the
+ command line, we cannot use the "inline" keyword and instead need to use "__inline__". In an attempt to work around this issue
+ I am using "__inline__" only when we're compiling in strict ANSI mode.
+ */
+ #if defined(__STRICT_ANSI__)
+ #define DRWAV_INLINE __inline__ __attribute__((always_inline))
+ #else
+ #define DRWAV_INLINE inline __attribute__((always_inline))
+ #endif
#else
-#define DRWAV_INLINE
-#endif
+ #define DRWAV_INLINE
#endif
#if defined(SIZE_MAX)
@@ -964,6 +1050,46 @@ typedef drwav_int32 drwav_result;
#endif
#endif
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+ #define DRWAV_HAS_BYTESWAP16_INTRINSIC
+ #define DRWAV_HAS_BYTESWAP32_INTRINSIC
+ #define DRWAV_HAS_BYTESWAP64_INTRINSIC
+#elif defined(__clang__)
+ #if defined(__has_builtin)
+ #if __has_builtin(__builtin_bswap16)
+ #define DRWAV_HAS_BYTESWAP16_INTRINSIC
+ #endif
+ #if __has_builtin(__builtin_bswap32)
+ #define DRWAV_HAS_BYTESWAP32_INTRINSIC
+ #endif
+ #if __has_builtin(__builtin_bswap64)
+ #define DRWAV_HAS_BYTESWAP64_INTRINSIC
+ #endif
+ #endif
+#elif defined(__GNUC__)
+ #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+ #define DRWAV_HAS_BYTESWAP32_INTRINSIC
+ #define DRWAV_HAS_BYTESWAP64_INTRINSIC
+ #endif
+ #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
+ #define DRWAV_HAS_BYTESWAP16_INTRINSIC
+ #endif
+#endif
+
+/*
+These limits are used for basic validation when initializing the decoder. If you exceed these limits, first of all: what on Earth are
+you doing?! (Let me know, I'd be curious!) Second, you can adjust these by #define-ing them before the dr_wav implementation.
+*/
+#ifndef DRWAV_MAX_SAMPLE_RATE
+#define DRWAV_MAX_SAMPLE_RATE 384000
+#endif
+#ifndef DRWAV_MAX_CHANNELS
+#define DRWAV_MAX_CHANNELS 256
+#endif
+#ifndef DRWAV_MAX_BITS_PER_SAMPLE
+#define DRWAV_MAX_BITS_PER_SAMPLE 64
+#endif
+
static const drwav_uint8 drwavGUID_W64_RIFF[16] = {0x72,0x69,0x66,0x66, 0x2E,0x91, 0xCF,0x11, 0xA5,0xD6, 0x28,0xDB,0x04,0xC1,0x00,0x00}; /* 66666972-912E-11CF-A5D6-28DB04C10000 */
static const drwav_uint8 drwavGUID_W64_WAVE[16] = {0x77,0x61,0x76,0x65, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; /* 65766177-ACF3-11D3-8CD1-00C04F8EDB8A */
static const drwav_uint8 drwavGUID_W64_JUNK[16] = {0x6A,0x75,0x6E,0x6B, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; /* 6B6E756A-ACF3-11D3-8CD1-00C04F8EDB8A */
@@ -974,14 +1100,14 @@ static const drwav_uint8 drwavGUID_W64_SMPL[16] = {0x73,0x6D,0x70,0x6C, 0xF3,0xA
static DRWAV_INLINE drwav_bool32 drwav__guid_equal(const drwav_uint8 a[16], const drwav_uint8 b[16])
{
- const drwav_uint32* a32 = (const drwav_uint32*)a;
- const drwav_uint32* b32 = (const drwav_uint32*)b;
+ int i;
+ for (i = 0; i < 16; i += 1) {
+ if (a[i] != b[i]) {
+ return DRWAV_FALSE;
+ }
+ }
- return
- a32[0] == b32[0] &&
- a32[1] == b32[1] &&
- a32[2] == b32[2] &&
- a32[3] == b32[3];
+ return DRWAV_TRUE;
}
static DRWAV_INLINE drwav_bool32 drwav__fourcc_equal(const unsigned char* a, const char* b)
@@ -997,8 +1123,14 @@ static DRWAV_INLINE drwav_bool32 drwav__fourcc_equal(const unsigned char* a, con
static DRWAV_INLINE int drwav__is_little_endian()
{
+#if defined(DRWAV_X86) || defined(DRWAV_X64)
+ return DRWAV_TRUE;
+#elif defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN
+ return DRWAV_TRUE;
+#else
int n = 1;
return (*(char*)&n) == 1;
+#endif
}
static DRWAV_INLINE unsigned short drwav__bytes_to_u16(const unsigned char* data)
@@ -1032,138 +1164,483 @@ static DRWAV_INLINE void drwav__bytes_to_guid(const unsigned char* data, drwav_u
}
-static DRWAV_INLINE drwav_bool32 drwav__is_compressed_format_tag(drwav_uint16 formatTag)
+static DRWAV_INLINE drwav_uint16 drwav__bswap16(drwav_uint16 n)
{
- return
- formatTag == DR_WAVE_FORMAT_ADPCM ||
- formatTag == DR_WAVE_FORMAT_DVI_ADPCM;
+#ifdef DRWAV_HAS_BYTESWAP16_INTRINSIC
+ #if defined(_MSC_VER)
+ return _byteswap_ushort(n);
+ #elif defined(__GNUC__) || defined(__clang__)
+ return __builtin_bswap16(n);
+ #else
+ #error "This compiler does not support the byte swap intrinsic."
+ #endif
+#else
+ return ((n & 0xFF00) >> 8) |
+ ((n & 0x00FF) << 8);
+#endif
}
-drwav_uint64 drwav_read_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut);
-drwav_uint64 drwav_read_s16__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut);
-drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData);
-drwav* drwav_open_write__internal(const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData);
+static DRWAV_INLINE drwav_uint32 drwav__bswap32(drwav_uint32 n)
+{
+#ifdef DRWAV_HAS_BYTESWAP32_INTRINSIC
+ #if defined(_MSC_VER)
+ return _byteswap_ulong(n);
+ #elif defined(__GNUC__) || defined(__clang__)
+ #if defined(DRWAV_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(DRWAV_64BIT) /* <-- 64-bit inline assembly has not been tested, so disabling for now. */
+ /* Inline assembly optimized implementation for ARM. In my testing, GCC does not generate optimized code with __builtin_bswap32(). */
+ drwav_uint32 r;
+ __asm__ __volatile__ (
+ #if defined(DRWAV_64BIT)
+ "rev %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(n) /* <-- This is untested. If someone in the community could test this, that would be appreciated! */
+ #else
+ "rev %[out], %[in]" : [out]"=r"(r) : [in]"r"(n)
+ #endif
+ );
+ return r;
+ #else
+ return __builtin_bswap32(n);
+ #endif
+ #else
+ #error "This compiler does not support the byte swap intrinsic."
+ #endif
+#else
+ return ((n & 0xFF000000) >> 24) |
+ ((n & 0x00FF0000) >> 8) |
+ ((n & 0x0000FF00) << 8) |
+ ((n & 0x000000FF) << 24);
+#endif
+}
-static drwav_result drwav__read_chunk_header(drwav_read_proc onRead, void* pUserData, drwav_container container, drwav_uint64* pRunningBytesReadOut, drwav_chunk_header* pHeaderOut)
+static DRWAV_INLINE drwav_uint64 drwav__bswap64(drwav_uint64 n)
{
- if (container == drwav_container_riff) {
- unsigned char sizeInBytes[4];
+#ifdef DRWAV_HAS_BYTESWAP64_INTRINSIC
+ #if defined(_MSC_VER)
+ return _byteswap_uint64(n);
+ #elif defined(__GNUC__) || defined(__clang__)
+ return __builtin_bswap64(n);
+ #else
+ #error "This compiler does not support the byte swap intrinsic."
+ #endif
+#else
+ return ((n & (drwav_uint64)0xFF00000000000000) >> 56) |
+ ((n & (drwav_uint64)0x00FF000000000000) >> 40) |
+ ((n & (drwav_uint64)0x0000FF0000000000) >> 24) |
+ ((n & (drwav_uint64)0x000000FF00000000) >> 8) |
+ ((n & (drwav_uint64)0x00000000FF000000) << 8) |
+ ((n & (drwav_uint64)0x0000000000FF0000) << 24) |
+ ((n & (drwav_uint64)0x000000000000FF00) << 40) |
+ ((n & (drwav_uint64)0x00000000000000FF) << 56);
+#endif
+}
- if (onRead(pUserData, pHeaderOut->id.fourcc, 4) != 4) {
- return DRWAV_EOF;
- }
- if (onRead(pUserData, sizeInBytes, 4) != 4) {
- return DRWAV_INVALID_FILE;
- }
+static DRWAV_INLINE drwav_int16 drwav__bswap_s16(drwav_int16 n)
+{
+ return (drwav_int16)drwav__bswap16((drwav_uint16)n);
+}
- pHeaderOut->sizeInBytes = drwav__bytes_to_u32(sizeInBytes);
- pHeaderOut->paddingSize = (unsigned int)(pHeaderOut->sizeInBytes % 2);
- *pRunningBytesReadOut += 8;
- } else {
- unsigned char sizeInBytes[8];
+static DRWAV_INLINE void drwav__bswap_samples_s16(drwav_int16* pSamples, drwav_uint64 sampleCount)
+{
+ drwav_uint64 iSample;
+ for (iSample = 0; iSample < sampleCount; iSample += 1) {
+ pSamples[iSample] = drwav__bswap_s16(pSamples[iSample]);
+ }
+}
- if (onRead(pUserData, pHeaderOut->id.guid, 16) != 16) {
- return DRWAV_EOF;
- }
- if (onRead(pUserData, sizeInBytes, 8) != 8) {
- return DRWAV_INVALID_FILE;
- }
+static DRWAV_INLINE void drwav__bswap_s24(drwav_uint8* p)
+{
+ drwav_uint8 t;
+ t = p[0];
+ p[0] = p[2];
+ p[2] = t;
+}
- pHeaderOut->sizeInBytes = drwav__bytes_to_u64(sizeInBytes) - 24; /* <-- Subtract 24 because w64 includes the size of the header. */
- pHeaderOut->paddingSize = (unsigned int)(pHeaderOut->sizeInBytes % 8);
- *pRunningBytesReadOut += 24;
+static DRWAV_INLINE void drwav__bswap_samples_s24(drwav_uint8* pSamples, drwav_uint64 sampleCount)
+{
+ drwav_uint64 iSample;
+ for (iSample = 0; iSample < sampleCount; iSample += 1) {
+ drwav_uint8* pSample = pSamples + (iSample*3);
+ drwav__bswap_s24(pSample);
}
+}
- return DRWAV_SUCCESS;
+
+static DRWAV_INLINE drwav_int32 drwav__bswap_s32(drwav_int32 n)
+{
+ return (drwav_int32)drwav__bswap32((drwav_uint32)n);
}
-static drwav_bool32 drwav__seek_forward(drwav_seek_proc onSeek, drwav_uint64 offset, void* pUserData)
+static DRWAV_INLINE void drwav__bswap_samples_s32(drwav_int32* pSamples, drwav_uint64 sampleCount)
{
- drwav_uint64 bytesRemainingToSeek = offset;
- while (bytesRemainingToSeek > 0) {
- if (bytesRemainingToSeek > 0x7FFFFFFF) {
- if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_current)) {
- return DRWAV_FALSE;
- }
- bytesRemainingToSeek -= 0x7FFFFFFF;
- } else {
- if (!onSeek(pUserData, (int)bytesRemainingToSeek, drwav_seek_origin_current)) {
- return DRWAV_FALSE;
- }
- bytesRemainingToSeek = 0;
- }
+ drwav_uint64 iSample;
+ for (iSample = 0; iSample < sampleCount; iSample += 1) {
+ pSamples[iSample] = drwav__bswap_s32(pSamples[iSample]);
}
+}
- return DRWAV_TRUE;
+
+static DRWAV_INLINE float drwav__bswap_f32(float n)
+{
+ union {
+ drwav_uint32 i;
+ float f;
+ } x;
+ x.f = n;
+ x.i = drwav__bswap32(x.i);
+
+ return x.f;
}
-static drwav_bool32 drwav__seek_from_start(drwav_seek_proc onSeek, drwav_uint64 offset, void* pUserData)
+static DRWAV_INLINE void drwav__bswap_samples_f32(float* pSamples, drwav_uint64 sampleCount)
{
- if (offset <= 0x7FFFFFFF) {
- return onSeek(pUserData, (int)offset, drwav_seek_origin_start);
+ drwav_uint64 iSample;
+ for (iSample = 0; iSample < sampleCount; iSample += 1) {
+ pSamples[iSample] = drwav__bswap_f32(pSamples[iSample]);
}
+}
- /* Larger than 32-bit seek. */
- if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_start)) {
- return DRWAV_FALSE;
- }
- offset -= 0x7FFFFFFF;
- for (;;) {
- if (offset <= 0x7FFFFFFF) {
- return onSeek(pUserData, (int)offset, drwav_seek_origin_current);
- }
+static DRWAV_INLINE double drwav__bswap_f64(double n)
+{
+ union {
+ drwav_uint64 i;
+ double f;
+ } x;
+ x.f = n;
+ x.i = drwav__bswap64(x.i);
- if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_current)) {
- return DRWAV_FALSE;
- }
- offset -= 0x7FFFFFFF;
- }
+ return x.f;
+}
- /* Should never get here. */
- /*return DRWAV_TRUE; */
+static DRWAV_INLINE void drwav__bswap_samples_f64(double* pSamples, drwav_uint64 sampleCount)
+{
+ drwav_uint64 iSample;
+ for (iSample = 0; iSample < sampleCount; iSample += 1) {
+ pSamples[iSample] = drwav__bswap_f64(pSamples[iSample]);
+ }
}
-static drwav_bool32 drwav__read_fmt(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, drwav_container container, drwav_uint64* pRunningBytesReadOut, drwav_fmt* fmtOut)
+static DRWAV_INLINE void drwav__bswap_samples_pcm(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample)
{
- drwav_chunk_header header;
- unsigned char fmt[16];
-
- if (drwav__read_chunk_header(onRead, pUserData, container, pRunningBytesReadOut, &header) != DRWAV_SUCCESS) {
- return DRWAV_FALSE;
+ /* Assumes integer PCM. Floating point PCM is done in drwav__bswap_samples_ieee(). */
+ switch (bytesPerSample)
+ {
+ case 2: /* s16, s12 (loosely packed) */
+ {
+ drwav__bswap_samples_s16((drwav_int16*)pSamples, sampleCount);
+ } break;
+ case 3: /* s24 */
+ {
+ drwav__bswap_samples_s24((drwav_uint8*)pSamples, sampleCount);
+ } break;
+ case 4: /* s32 */
+ {
+ drwav__bswap_samples_s32((drwav_int32*)pSamples, sampleCount);
+ } break;
+ default:
+ {
+ /* Unsupported format. */
+ DRWAV_ASSERT(DRWAV_FALSE);
+ } break;
+ }
+}
+
+static DRWAV_INLINE void drwav__bswap_samples_ieee(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample)
+{
+ switch (bytesPerSample)
+ {
+ #if 0 /* Contributions welcome for f16 support. */
+ case 2: /* f16 */
+ {
+ drwav__bswap_samples_f16((drwav_float16*)pSamples, sampleCount);
+ } break;
+ #endif
+ case 4: /* f32 */
+ {
+ drwav__bswap_samples_f32((float*)pSamples, sampleCount);
+ } break;
+ case 8: /* f64 */
+ {
+ drwav__bswap_samples_f64((double*)pSamples, sampleCount);
+ } break;
+ default:
+ {
+ /* Unsupported format. */
+ DRWAV_ASSERT(DRWAV_FALSE);
+ } break;
}
+}
+static DRWAV_INLINE void drwav__bswap_samples(void* pSamples, drwav_uint64 sampleCount, drwav_uint32 bytesPerSample, drwav_uint16 format)
+{
+ switch (format)
+ {
+ case DR_WAVE_FORMAT_PCM:
+ {
+ drwav__bswap_samples_pcm(pSamples, sampleCount, bytesPerSample);
+ } break;
- /* Skip non-fmt chunks. */
- while ((container == drwav_container_riff && !drwav__fourcc_equal(header.id.fourcc, "fmt ")) || (container == drwav_container_w64 && !drwav__guid_equal(header.id.guid, drwavGUID_W64_FMT))) {
- if (!drwav__seek_forward(onSeek, header.sizeInBytes + header.paddingSize, pUserData)) {
- return DRWAV_FALSE;
- }
- *pRunningBytesReadOut += header.sizeInBytes + header.paddingSize;
+ case DR_WAVE_FORMAT_IEEE_FLOAT:
+ {
+ drwav__bswap_samples_ieee(pSamples, sampleCount, bytesPerSample);
+ } break;
- /* Try the next header. */
- if (drwav__read_chunk_header(onRead, pUserData, container, pRunningBytesReadOut, &header) != DRWAV_SUCCESS) {
- return DRWAV_FALSE;
- }
+ case DR_WAVE_FORMAT_ALAW:
+ case DR_WAVE_FORMAT_MULAW:
+ {
+ drwav__bswap_samples_s16((drwav_int16*)pSamples, sampleCount);
+ } break;
+
+ case DR_WAVE_FORMAT_ADPCM:
+ case DR_WAVE_FORMAT_DVI_ADPCM:
+ default:
+ {
+ /* Unsupported format. */
+ DRWAV_ASSERT(DRWAV_FALSE);
+ } break;
}
+}
- /* Validation. */
- if (container == drwav_container_riff) {
- if (!drwav__fourcc_equal(header.id.fourcc, "fmt ")) {
- return DRWAV_FALSE;
- }
- } else {
- if (!drwav__guid_equal(header.id.guid, drwavGUID_W64_FMT)) {
- return DRWAV_FALSE;
- }
- }
+static void* drwav__malloc_default(size_t sz, void* pUserData)
+{
+ (void)pUserData;
+ return DRWAV_MALLOC(sz);
+}
+static void* drwav__realloc_default(void* p, size_t sz, void* pUserData)
+{
+ (void)pUserData;
+ return DRWAV_REALLOC(p, sz);
+}
- if (onRead(pUserData, fmt, sizeof(fmt)) != sizeof(fmt)) {
+static void drwav__free_default(void* p, void* pUserData)
+{
+ (void)pUserData;
+ DRWAV_FREE(p);
+}
+
+
+static void* drwav__malloc_from_callbacks(size_t sz, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pAllocationCallbacks == NULL) {
+ return NULL;
+ }
+
+ if (pAllocationCallbacks->onMalloc != NULL) {
+ return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData);
+ }
+
+ /* Try using realloc(). */
+ if (pAllocationCallbacks->onRealloc != NULL) {
+ return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData);
+ }
+
+ return NULL;
+}
+
+static void* drwav__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pAllocationCallbacks == NULL) {
+ return NULL;
+ }
+
+ if (pAllocationCallbacks->onRealloc != NULL) {
+ return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData);
+ }
+
+ /* Try emulating realloc() in terms of malloc()/free(). */
+ if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) {
+ void* p2;
+
+ p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData);
+ if (p2 == NULL) {
+ return NULL;
+ }
+
+ if (p != NULL) {
+ DRWAV_COPY_MEMORY(p2, p, szOld);
+ pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
+ }
+
+ return p2;
+ }
+
+ return NULL;
+}
+
+static void drwav__free_from_callbacks(void* p, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (p == NULL || pAllocationCallbacks == NULL) {
+ return;
+ }
+
+ if (pAllocationCallbacks->onFree != NULL) {
+ pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData);
+ }
+}
+
+
+drwav_allocation_callbacks drwav_copy_allocation_callbacks_or_defaults(const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pAllocationCallbacks != NULL) {
+ /* Copy. */
+ return *pAllocationCallbacks;
+ } else {
+ /* Defaults. */
+ drwav_allocation_callbacks allocationCallbacks;
+ allocationCallbacks.pUserData = NULL;
+ allocationCallbacks.onMalloc = drwav__malloc_default;
+ allocationCallbacks.onRealloc = drwav__realloc_default;
+ allocationCallbacks.onFree = drwav__free_default;
+ return allocationCallbacks;
+ }
+}
+
+
+static DRWAV_INLINE drwav_bool32 drwav__is_compressed_format_tag(drwav_uint16 formatTag)
+{
+ return
+ formatTag == DR_WAVE_FORMAT_ADPCM ||
+ formatTag == DR_WAVE_FORMAT_DVI_ADPCM;
+}
+
+static unsigned int drwav__chunk_padding_size_riff(drwav_uint64 chunkSize)
+{
+ return (unsigned int)(chunkSize % 2);
+}
+
+static unsigned int drwav__chunk_padding_size_w64(drwav_uint64 chunkSize)
+{
+ return (unsigned int)(chunkSize % 8);
+}
+
+drwav_uint64 drwav_read_pcm_frames_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut);
+drwav_uint64 drwav_read_pcm_frames_s16__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut);
+drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount);
+
+static drwav_result drwav__read_chunk_header(drwav_read_proc onRead, void* pUserData, drwav_container container, drwav_uint64* pRunningBytesReadOut, drwav_chunk_header* pHeaderOut)
+{
+ if (container == drwav_container_riff) {
+ unsigned char sizeInBytes[4];
+
+ if (onRead(pUserData, pHeaderOut->id.fourcc, 4) != 4) {
+ return DRWAV_EOF;
+ }
+
+ if (onRead(pUserData, sizeInBytes, 4) != 4) {
+ return DRWAV_INVALID_FILE;
+ }
+
+ pHeaderOut->sizeInBytes = drwav__bytes_to_u32(sizeInBytes);
+ pHeaderOut->paddingSize = drwav__chunk_padding_size_riff(pHeaderOut->sizeInBytes);
+ *pRunningBytesReadOut += 8;
+ } else {
+ unsigned char sizeInBytes[8];
+
+ if (onRead(pUserData, pHeaderOut->id.guid, 16) != 16) {
+ return DRWAV_EOF;
+ }
+
+ if (onRead(pUserData, sizeInBytes, 8) != 8) {
+ return DRWAV_INVALID_FILE;
+ }
+
+ pHeaderOut->sizeInBytes = drwav__bytes_to_u64(sizeInBytes) - 24; /* <-- Subtract 24 because w64 includes the size of the header. */
+ pHeaderOut->paddingSize = drwav__chunk_padding_size_w64(pHeaderOut->sizeInBytes);
+ *pRunningBytesReadOut += 24;
+ }
+
+ return DRWAV_SUCCESS;
+}
+
+static drwav_bool32 drwav__seek_forward(drwav_seek_proc onSeek, drwav_uint64 offset, void* pUserData)
+{
+ drwav_uint64 bytesRemainingToSeek = offset;
+ while (bytesRemainingToSeek > 0) {
+ if (bytesRemainingToSeek > 0x7FFFFFFF) {
+ if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_current)) {
+ return DRWAV_FALSE;
+ }
+ bytesRemainingToSeek -= 0x7FFFFFFF;
+ } else {
+ if (!onSeek(pUserData, (int)bytesRemainingToSeek, drwav_seek_origin_current)) {
+ return DRWAV_FALSE;
+ }
+ bytesRemainingToSeek = 0;
+ }
+ }
+
+ return DRWAV_TRUE;
+}
+
+static drwav_bool32 drwav__seek_from_start(drwav_seek_proc onSeek, drwav_uint64 offset, void* pUserData)
+{
+ if (offset <= 0x7FFFFFFF) {
+ return onSeek(pUserData, (int)offset, drwav_seek_origin_start);
+ }
+
+ /* Larger than 32-bit seek. */
+ if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_start)) {
+ return DRWAV_FALSE;
+ }
+ offset -= 0x7FFFFFFF;
+
+ for (;;) {
+ if (offset <= 0x7FFFFFFF) {
+ return onSeek(pUserData, (int)offset, drwav_seek_origin_current);
+ }
+
+ if (!onSeek(pUserData, 0x7FFFFFFF, drwav_seek_origin_current)) {
+ return DRWAV_FALSE;
+ }
+ offset -= 0x7FFFFFFF;
+ }
+
+ /* Should never get here. */
+ /*return DRWAV_TRUE; */
+}
+
+
+static drwav_bool32 drwav__read_fmt(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, drwav_container container, drwav_uint64* pRunningBytesReadOut, drwav_fmt* fmtOut)
+{
+ drwav_chunk_header header;
+ unsigned char fmt[16];
+
+ if (drwav__read_chunk_header(onRead, pUserData, container, pRunningBytesReadOut, &header) != DRWAV_SUCCESS) {
+ return DRWAV_FALSE;
+ }
+
+
+ /* Skip non-fmt chunks. */
+ while ((container == drwav_container_riff && !drwav__fourcc_equal(header.id.fourcc, "fmt ")) || (container == drwav_container_w64 && !drwav__guid_equal(header.id.guid, drwavGUID_W64_FMT))) {
+ if (!drwav__seek_forward(onSeek, header.sizeInBytes + header.paddingSize, pUserData)) {
+ return DRWAV_FALSE;
+ }
+ *pRunningBytesReadOut += header.sizeInBytes + header.paddingSize;
+
+ /* Try the next header. */
+ if (drwav__read_chunk_header(onRead, pUserData, container, pRunningBytesReadOut, &header) != DRWAV_SUCCESS) {
+ return DRWAV_FALSE;
+ }
+ }
+
+
+ /* Validation. */
+ if (container == drwav_container_riff) {
+ if (!drwav__fourcc_equal(header.id.fourcc, "fmt ")) {
+ return DRWAV_FALSE;
+ }
+ } else {
+ if (!drwav__guid_equal(header.id.guid, drwavGUID_W64_FMT)) {
+ return DRWAV_FALSE;
+ }
+ }
+
+
+ if (onRead(pUserData, fmt, sizeof(fmt)) != sizeof(fmt)) {
return DRWAV_FALSE;
}
*pRunningBytesReadOut += sizeof(fmt);
@@ -1237,454 +1714,76 @@ static drwav_bool32 drwav__read_fmt(drwav_read_proc onRead, drwav_seek_proc onSe
}
-#ifndef DR_WAV_NO_STDIO
-FILE* drwav_fopen(const char* filePath, const char* openMode)
-{
- FILE* pFile;
-#if defined(_MSC_VER) && _MSC_VER >= 1400
- if (fopen_s(&pFile, filePath, openMode) != 0) {
- return DRWAV_FALSE;
- }
-#else
- pFile = fopen(filePath, openMode);
- if (pFile == NULL) {
- return DRWAV_FALSE;
- }
-#endif
-
- return pFile;
-}
-
-static size_t drwav__on_read_stdio(void* pUserData, void* pBufferOut, size_t bytesToRead)
+size_t drwav__on_read(drwav_read_proc onRead, void* pUserData, void* pBufferOut, size_t bytesToRead, drwav_uint64* pCursor)
{
- return fread(pBufferOut, 1, bytesToRead, (FILE*)pUserData);
-}
+ size_t bytesRead;
-static size_t drwav__on_write_stdio(void* pUserData, const void* pData, size_t bytesToWrite)
-{
- return fwrite(pData, 1, bytesToWrite, (FILE*)pUserData);
-}
+ DRWAV_ASSERT(onRead != NULL);
+ DRWAV_ASSERT(pCursor != NULL);
-static drwav_bool32 drwav__on_seek_stdio(void* pUserData, int offset, drwav_seek_origin origin)
-{
- return fseek((FILE*)pUserData, offset, (origin == drwav_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0;
+ bytesRead = onRead(pUserData, pBufferOut, bytesToRead);
+ *pCursor += bytesRead;
+ return bytesRead;
}
-drwav_bool32 drwav_init_file(drwav* pWav, const char* filename)
+drwav_bool32 drwav__on_seek(drwav_seek_proc onSeek, void* pUserData, int offset, drwav_seek_origin origin, drwav_uint64* pCursor)
{
- return drwav_init_file_ex(pWav, filename, NULL, NULL, 0);
-}
+ DRWAV_ASSERT(onSeek != NULL);
+ DRWAV_ASSERT(pCursor != NULL);
-drwav_bool32 drwav_init_file_ex(drwav* pWav, const char* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags)
-{
- FILE* pFile = drwav_fopen(filename, "rb");
- if (pFile == NULL) {
+ if (!onSeek(pUserData, offset, origin)) {
return DRWAV_FALSE;
}
- return drwav_init_ex(pWav, drwav__on_read_stdio, drwav__on_seek_stdio, onChunk, (void*)pFile, pChunkUserData, flags);
-}
-
-
-drwav_bool32 drwav_init_file_write__internal(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential)
-{
- FILE* pFile = drwav_fopen(filename, "wb");
- if (pFile == NULL) {
- return DRWAV_FALSE;
+ if (origin == drwav_seek_origin_start) {
+ *pCursor = offset;
+ } else {
+ *pCursor += offset;
}
- return drwav_init_write__internal(pWav, pFormat, totalSampleCount, isSequential, drwav__on_write_stdio, drwav__on_seek_stdio, (void*)pFile);
+ return DRWAV_TRUE;
}
-drwav_bool32 drwav_init_file_write(drwav* pWav, const char* filename, const drwav_data_format* pFormat)
-{
- return drwav_init_file_write__internal(pWav, filename, pFormat, 0, DRWAV_FALSE);
-}
-drwav_bool32 drwav_init_file_write_sequential(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount)
-{
- return drwav_init_file_write__internal(pWav, filename, pFormat, totalSampleCount, DRWAV_TRUE);
-}
-drwav* drwav_open_file(const char* filename)
+static drwav_uint32 drwav_get_bytes_per_pcm_frame(drwav* pWav)
{
- return drwav_open_file_ex(filename, NULL, NULL, 0);
+ /*
+ The bytes per frame is a bit ambiguous. It can be either be based on the bits per sample, or the block align. The way I'm doing it here
+ is that if the bits per sample is a multiple of 8, use floor(bitsPerSample*channels/8), otherwise fall back to the block align.
+ */
+ if ((pWav->bitsPerSample & 0x7) == 0) {
+ /* Bits per sample is a multiple of 8. */
+ return (pWav->bitsPerSample * pWav->fmt.channels) >> 3;
+ } else {
+ return pWav->fmt.blockAlign;
+ }
}
-drwav* drwav_open_file_ex(const char* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags)
-{
- FILE* pFile;
- drwav* pWav;
- pFile = drwav_fopen(filename, "rb");
- if (pFile == NULL) {
+drwav_bool32 drwav_preinit(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pReadSeekUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pWav == NULL || onRead == NULL || onSeek == NULL) {
return DRWAV_FALSE;
}
- pWav = drwav_open_ex(drwav__on_read_stdio, drwav__on_seek_stdio, onChunk, (void*)pFile, pChunkUserData, flags);
- if (pWav == NULL) {
- fclose(pFile);
- return NULL;
+ DRWAV_ZERO_MEMORY(pWav, sizeof(*pWav));
+ pWav->onRead = onRead;
+ pWav->onSeek = onSeek;
+ pWav->pUserData = pReadSeekUserData;
+ pWav->allocationCallbacks = drwav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks);
+
+ if (pWav->allocationCallbacks.onFree == NULL || (pWav->allocationCallbacks.onMalloc == NULL && pWav->allocationCallbacks.onRealloc == NULL)) {
+ return DRWAV_FALSE; /* Invalid allocation callbacks. */
}
- return pWav;
+ return DRWAV_TRUE;
}
-
-drwav* drwav_open_file_write__internal(const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential)
+drwav_bool32 drwav_init__internal(drwav* pWav, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags)
{
- FILE* pFile;
- drwav* pWav;
-
- pFile = drwav_fopen(filename, "wb");
- if (pFile == NULL) {
- return DRWAV_FALSE;
- }
+ /* This function assumes drwav_preinit() has been called beforehand. */
- pWav = drwav_open_write__internal(pFormat, totalSampleCount, isSequential, drwav__on_write_stdio, drwav__on_seek_stdio, (void*)pFile);
- if (pWav == NULL) {
- fclose(pFile);
- return NULL;
- }
-
- return pWav;
-}
-
-drwav* drwav_open_file_write(const char* filename, const drwav_data_format* pFormat)
-{
- return drwav_open_file_write__internal(filename, pFormat, 0, DRWAV_FALSE);
-}
-
-drwav* drwav_open_file_write_sequential(const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount)
-{
- return drwav_open_file_write__internal(filename, pFormat, totalSampleCount, DRWAV_TRUE);
-}
-#endif /* DR_WAV_NO_STDIO */
-
-
-static size_t drwav__on_read_memory(void* pUserData, void* pBufferOut, size_t bytesToRead)
-{
- drwav__memory_stream* memory = (drwav__memory_stream*)pUserData;
- size_t bytesRemaining;
-
- drwav_assert(memory != NULL);
- drwav_assert(memory->dataSize >= memory->currentReadPos);
-
- bytesRemaining = memory->dataSize - memory->currentReadPos;
- if (bytesToRead > bytesRemaining) {
- bytesToRead = bytesRemaining;
- }
-
- if (bytesToRead > 0) {
- DRWAV_COPY_MEMORY(pBufferOut, memory->data + memory->currentReadPos, bytesToRead);
- memory->currentReadPos += bytesToRead;
- }
-
- return bytesToRead;
-}
-
-static drwav_bool32 drwav__on_seek_memory(void* pUserData, int offset, drwav_seek_origin origin)
-{
- drwav__memory_stream* memory = (drwav__memory_stream*)pUserData;
- drwav_assert(memory != NULL);
-
- if (origin == drwav_seek_origin_current) {
- if (offset > 0) {
- if (memory->currentReadPos + offset > memory->dataSize) {
- return DRWAV_FALSE; /* Trying to seek too far forward. */
- }
- } else {
- if (memory->currentReadPos < (size_t)-offset) {
- return DRWAV_FALSE; /* Trying to seek too far backwards. */
- }
- }
-
- /* This will never underflow thanks to the clamps above. */
- memory->currentReadPos += offset;
- } else {
- if ((drwav_uint32)offset <= memory->dataSize) {
- memory->currentReadPos = offset;
- } else {
- return DRWAV_FALSE; /* Trying to seek too far forward. */
- }
- }
-
- return DRWAV_TRUE;
-}
-
-static size_t drwav__on_write_memory(void* pUserData, const void* pDataIn, size_t bytesToWrite)
-{
- drwav__memory_stream_write* memory = (drwav__memory_stream_write*)pUserData;
- size_t bytesRemaining;
-
- drwav_assert(memory != NULL);
- drwav_assert(memory->dataCapacity >= memory->currentWritePos);
-
- bytesRemaining = memory->dataCapacity - memory->currentWritePos;
- if (bytesRemaining < bytesToWrite) {
- /* Need to reallocate. */
- void* pNewData;
- size_t newDataCapacity = (memory->dataCapacity == 0) ? 256 : memory->dataCapacity * 2;
-
- /* If doubling wasn't enough, just make it the minimum required size to write the data. */
- if ((newDataCapacity - memory->currentWritePos) < bytesToWrite) {
- newDataCapacity = memory->currentWritePos + bytesToWrite;
- }
-
- pNewData = DRWAV_REALLOC(*memory->ppData, newDataCapacity);
- if (pNewData == NULL) {
- return 0;
- }
-
- *memory->ppData = pNewData;
- memory->dataCapacity = newDataCapacity;
- }
-
- DRWAV_COPY_MEMORY(((drwav_uint8*)(*memory->ppData)) + memory->currentWritePos, pDataIn, bytesToWrite);
-
- memory->currentWritePos += bytesToWrite;
- if (memory->dataSize < memory->currentWritePos) {
- memory->dataSize = memory->currentWritePos;
- }
-
- *memory->pDataSize = memory->dataSize;
-
- return bytesToWrite;
-}
-
-static drwav_bool32 drwav__on_seek_memory_write(void* pUserData, int offset, drwav_seek_origin origin)
-{
- drwav__memory_stream_write* memory = (drwav__memory_stream_write*)pUserData;
- drwav_assert(memory != NULL);
-
- if (origin == drwav_seek_origin_current) {
- if (offset > 0) {
- if (memory->currentWritePos + offset > memory->dataSize) {
- offset = (int)(memory->dataSize - memory->currentWritePos); /* Trying to seek too far forward. */
- }
- } else {
- if (memory->currentWritePos < (size_t)-offset) {
- offset = -(int)memory->currentWritePos; /* Trying to seek too far backwards. */
- }
- }
-
- /* This will never underflow thanks to the clamps above. */
- memory->currentWritePos += offset;
- } else {
- if ((drwav_uint32)offset <= memory->dataSize) {
- memory->currentWritePos = offset;
- } else {
- memory->currentWritePos = memory->dataSize; /* Trying to seek too far forward. */
- }
- }
-
- return DRWAV_TRUE;
-}
-
-drwav_bool32 drwav_init_memory(drwav* pWav, const void* data, size_t dataSize)
-{
- return drwav_init_memory_ex(pWav, data, dataSize, NULL, NULL, 0);
-}
-
-drwav_bool32 drwav_init_memory_ex(drwav* pWav, const void* data, size_t dataSize, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags)
-{
- drwav__memory_stream memoryStream;
-
- if (data == NULL || dataSize == 0) {
- return DRWAV_FALSE;
- }
-
- drwav_zero_memory(&memoryStream, sizeof(memoryStream));
- memoryStream.data = (const unsigned char*)data;
- memoryStream.dataSize = dataSize;
- memoryStream.currentReadPos = 0;
-
- if (!drwav_init_ex(pWav, drwav__on_read_memory, drwav__on_seek_memory, onChunk, (void*)&memoryStream, pChunkUserData, flags)) {
- return DRWAV_FALSE;
- }
-
- pWav->memoryStream = memoryStream;
- pWav->pUserData = &pWav->memoryStream;
- return DRWAV_TRUE;
-}
-
-
-drwav_bool32 drwav_init_memory_write__internal(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential)
-{
- drwav__memory_stream_write memoryStreamWrite;
-
- if (ppData == NULL) {
- return DRWAV_FALSE;
- }
-
- *ppData = NULL; /* Important because we're using realloc()! */
- *pDataSize = 0;
-
- drwav_zero_memory(&memoryStreamWrite, sizeof(memoryStreamWrite));
- memoryStreamWrite.ppData = ppData;
- memoryStreamWrite.pDataSize = pDataSize;
- memoryStreamWrite.dataSize = 0;
- memoryStreamWrite.dataCapacity = 0;
- memoryStreamWrite.currentWritePos = 0;
-
- if (!drwav_init_write__internal(pWav, pFormat, totalSampleCount, isSequential, drwav__on_write_memory, drwav__on_seek_memory_write, (void*)&memoryStreamWrite)) {
- return DRWAV_FALSE;
- }
-
- pWav->memoryStreamWrite = memoryStreamWrite;
- pWav->pUserData = &pWav->memoryStreamWrite;
- return DRWAV_TRUE;
-}
-
-drwav_bool32 drwav_init_memory_write(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat)
-{
- return drwav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, 0, DRWAV_FALSE);
-}
-
-drwav_bool32 drwav_init_memory_write_sequential(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount)
-{
- return drwav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, totalSampleCount, DRWAV_TRUE);
-}
-
-
-drwav* drwav_open_memory(const void* data, size_t dataSize)
-{
- return drwav_open_memory_ex(data, dataSize, NULL, NULL, 0);
-}
-
-drwav* drwav_open_memory_ex(const void* data, size_t dataSize, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags)
-{
- drwav__memory_stream memoryStream;
- drwav* pWav;
-
- if (data == NULL || dataSize == 0) {
- return NULL;
- }
-
- drwav_zero_memory(&memoryStream, sizeof(memoryStream));
- memoryStream.data = (const unsigned char*)data;
- memoryStream.dataSize = dataSize;
- memoryStream.currentReadPos = 0;
-
- pWav = drwav_open_ex(drwav__on_read_memory, drwav__on_seek_memory, onChunk, (void*)&memoryStream, pChunkUserData, flags);
- if (pWav == NULL) {
- return NULL;
- }
-
- pWav->memoryStream = memoryStream;
- pWav->pUserData = &pWav->memoryStream;
- return pWav;
-}
-
-
-drwav* drwav_open_memory_write__internal(void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential)
-{
- drwav__memory_stream_write memoryStreamWrite;
- drwav* pWav;
-
- if (ppData == NULL) {
- return NULL;
- }
-
- *ppData = NULL; /* Important because we're using realloc()! */
- *pDataSize = 0;
-
- drwav_zero_memory(&memoryStreamWrite, sizeof(memoryStreamWrite));
- memoryStreamWrite.ppData = ppData;
- memoryStreamWrite.pDataSize = pDataSize;
- memoryStreamWrite.dataSize = 0;
- memoryStreamWrite.dataCapacity = 0;
- memoryStreamWrite.currentWritePos = 0;
-
- pWav = drwav_open_write__internal(pFormat, totalSampleCount, isSequential, drwav__on_write_memory, drwav__on_seek_memory_write, (void*)&memoryStreamWrite);
- if (pWav == NULL) {
- return NULL;
- }
-
- pWav->memoryStreamWrite = memoryStreamWrite;
- pWav->pUserData = &pWav->memoryStreamWrite;
- return pWav;
-}
-
-drwav* drwav_open_memory_write(void** ppData, size_t* pDataSize, const drwav_data_format* pFormat)
-{
- return drwav_open_memory_write__internal(ppData, pDataSize, pFormat, 0, DRWAV_FALSE);
-}
-
-drwav* drwav_open_memory_write_sequential(void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount)
-{
- return drwav_open_memory_write__internal(ppData, pDataSize, pFormat, totalSampleCount, DRWAV_TRUE);
-}
-
-
-size_t drwav__on_read(drwav_read_proc onRead, void* pUserData, void* pBufferOut, size_t bytesToRead, drwav_uint64* pCursor)
-{
- size_t bytesRead;
-
- drwav_assert(onRead != NULL);
- drwav_assert(pCursor != NULL);
-
- bytesRead = onRead(pUserData, pBufferOut, bytesToRead);
- *pCursor += bytesRead;
- return bytesRead;
-}
-
-drwav_bool32 drwav__on_seek(drwav_seek_proc onSeek, void* pUserData, int offset, drwav_seek_origin origin, drwav_uint64* pCursor)
-{
- drwav_assert(onSeek != NULL);
- drwav_assert(pCursor != NULL);
-
- if (!onSeek(pUserData, offset, origin)) {
- return DRWAV_FALSE;
- }
-
- if (origin == drwav_seek_origin_start) {
- *pCursor = offset;
- } else {
- *pCursor += offset;
- }
-
- return DRWAV_TRUE;
-}
-
-
-static drwav_uint32 drwav_get_bytes_per_sample(drwav* pWav)
-{
- /*
- The number of bytes per sample is based on the bits per sample or the block align. We prioritize floor(bitsPerSample/8), but if
- this is zero or the bits per sample is not a multiple of 8 we need to fall back to the block align.
- */
- drwav_uint32 bytesPerSample = pWav->bitsPerSample >> 3;
- if (bytesPerSample == 0 || (pWav->bitsPerSample & 0x7) != 0) {
- bytesPerSample = pWav->fmt.blockAlign/pWav->fmt.channels;
- }
-
- return bytesPerSample;
-}
-
-static drwav_uint32 drwav_get_bytes_per_pcm_frame(drwav* pWav)
-{
- /*
- The number of bytes per frame is based on the bits per sample or the block align. We prioritize floor(bitsPerSample*channels/8), but if
- this is zero or the bits per frame is not a multiple of 8 we need to fall back to the block align.
- */
- drwav_uint32 bitsPerFrame = pWav->bitsPerSample * pWav->fmt.channels;
- drwav_uint32 bytesPerFrame = bitsPerFrame >> 3;
- if (bytesPerFrame == 0 || (bitsPerFrame & 0x7) != 0) {
- bytesPerFrame = pWav->fmt.blockAlign;
- }
-
- return bytesPerFrame;
-}
-
-
-drwav_bool32 drwav_init(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData)
-{
- return drwav_init_ex(pWav, onRead, onSeek, NULL, pUserData, NULL, 0);
-}
-
-drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags)
-{
drwav_uint64 cursor; /* <-- Keeps track of the byte position so we can seek to specific locations. */
drwav_bool32 sequential;
unsigned char riff[4];
@@ -1695,20 +1794,11 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
drwav_uint64 dataChunkSize;
drwav_uint64 chunkSize;
- if (onRead == NULL || onSeek == NULL) {
- return DRWAV_FALSE;
- }
-
cursor = 0;
sequential = (flags & DRWAV_SEQUENTIAL) != 0;
- drwav_zero_memory(pWav, sizeof(*pWav));
- pWav->onRead = onRead;
- pWav->onSeek = onSeek;
- pWav->pUserData = pReadSeekUserData;
-
/* The first 4 bytes should be the RIFF identifier. */
- if (drwav__on_read(onRead, pReadSeekUserData, riff, sizeof(riff), &cursor) != sizeof(riff)) {
+ if (drwav__on_read(pWav->onRead, pWav->pUserData, riff, sizeof(riff), &cursor) != sizeof(riff)) {
return DRWAV_FALSE;
}
@@ -1725,7 +1815,7 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
pWav->container = drwav_container_w64;
/* Check the rest of the GUID for validity. */
- if (drwav__on_read(onRead, pReadSeekUserData, riff2, sizeof(riff2), &cursor) != sizeof(riff2)) {
+ if (drwav__on_read(pWav->onRead, pWav->pUserData, riff2, sizeof(riff2), &cursor) != sizeof(riff2)) {
return DRWAV_FALSE;
}
@@ -1744,7 +1834,7 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
unsigned char wave[4];
/* RIFF/WAVE */
- if (drwav__on_read(onRead, pReadSeekUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) {
+ if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) {
return DRWAV_FALSE;
}
@@ -1752,7 +1842,7 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
return DRWAV_FALSE; /* Chunk size should always be at least 36 bytes. */
}
- if (drwav__on_read(onRead, pReadSeekUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) {
+ if (drwav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) {
return DRWAV_FALSE;
}
@@ -1764,7 +1854,7 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
drwav_uint8 wave[16];
/* W64 */
- if (drwav__on_read(onRead, pReadSeekUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) {
+ if (drwav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) {
return DRWAV_FALSE;
}
@@ -1772,7 +1862,7 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
return DRWAV_FALSE;
}
- if (drwav__on_read(onRead, pReadSeekUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) {
+ if (drwav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) {
return DRWAV_FALSE;
}
@@ -1783,13 +1873,16 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
/* The next bytes should be the "fmt " chunk. */
- if (!drwav__read_fmt(onRead, onSeek, pReadSeekUserData, pWav->container, &cursor, &fmt)) {
+ if (!drwav__read_fmt(pWav->onRead, pWav->onSeek, pWav->pUserData, pWav->container, &cursor, &fmt)) {
return DRWAV_FALSE; /* Failed to read the "fmt " chunk. */
}
/* Basic validation. */
- if (fmt.sampleRate == 0 || fmt.channels == 0 || fmt.bitsPerSample == 0 || fmt.blockAlign == 0) {
- return DRWAV_FALSE; /* Invalid channel count. Probably an invalid WAV file. */
+ if ((fmt.sampleRate == 0 || fmt.sampleRate > DRWAV_MAX_SAMPLE_RATE) ||
+ (fmt.channels == 0 || fmt.channels > DRWAV_MAX_CHANNELS) ||
+ (fmt.bitsPerSample == 0 || fmt.bitsPerSample > DRWAV_MAX_BITS_PER_SAMPLE) ||
+ fmt.blockAlign == 0) {
+ return DRWAV_FALSE; /* Probably an invalid WAV file. */
}
@@ -1814,11 +1907,10 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
dataChunkSize = 0;
/* The next chunk we care about is the "data" chunk. This is not necessarily the next chunk so we'll need to loop. */
- chunkSize = 0;
for (;;)
{
drwav_chunk_header header;
- drwav_result result = drwav__read_chunk_header(onRead, pReadSeekUserData, pWav->container, &cursor, &header);
+ drwav_result result = drwav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header);
if (result != DRWAV_SUCCESS) {
if (!foundDataChunk) {
return DRWAV_FALSE;
@@ -1829,14 +1921,14 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
/* Tell the client about this chunk. */
if (!sequential && onChunk != NULL) {
- drwav_uint64 callbackBytesRead = onChunk(pChunkUserData, onRead, onSeek, pReadSeekUserData, &header);
+ drwav_uint64 callbackBytesRead = onChunk(pChunkUserData, pWav->onRead, pWav->onSeek, pWav->pUserData, &header);
/*
dr_wav may need to read the contents of the chunk, so we now need to seek back to the position before
we called the callback.
*/
if (callbackBytesRead > 0) {
- if (!drwav__seek_from_start(onSeek, cursor, pReadSeekUserData)) {
+ if (!drwav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData)) {
return DRWAV_FALSE;
}
}
@@ -1872,7 +1964,7 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
if (pWav->container == drwav_container_riff) {
if (drwav__fourcc_equal(header.id.fourcc, "fact")) {
drwav_uint32 sampleCount;
- if (drwav__on_read(onRead, pReadSeekUserData, &sampleCount, 4, &cursor) != 4) {
+ if (drwav__on_read(pWav->onRead, pWav->pUserData, &sampleCount, 4, &cursor) != 4) {
return DRWAV_FALSE;
}
chunkSize -= 4;
@@ -1893,7 +1985,7 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
}
} else {
if (drwav__guid_equal(header.id.guid, drwavGUID_W64_FACT)) {
- if (drwav__on_read(onRead, pReadSeekUserData, &sampleCountFromFactChunk, 8, &cursor) != 8) {
+ if (drwav__on_read(pWav->onRead, pWav->pUserData, &sampleCountFromFactChunk, 8, &cursor) != 8) {
return DRWAV_FALSE;
}
chunkSize -= 8;
@@ -1909,7 +2001,7 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
if (drwav__fourcc_equal(header.id.fourcc, "smpl")) {
unsigned char smplHeaderData[36]; /* 36 = size of the smpl header section, not including the loop data. */
if (chunkSize >= sizeof(smplHeaderData)) {
- drwav_uint64 bytesJustRead = drwav__on_read(onRead, pReadSeekUserData, smplHeaderData, sizeof(smplHeaderData), &cursor);
+ drwav_uint64 bytesJustRead = drwav__on_read(pWav->onRead, pWav->pUserData, smplHeaderData, sizeof(smplHeaderData), &cursor);
chunkSize -= bytesJustRead;
if (bytesJustRead == sizeof(smplHeaderData)) {
@@ -1927,7 +2019,7 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
for (iLoop = 0; iLoop < pWav->smpl.numSampleLoops && iLoop < drwav_countof(pWav->smpl.loops); ++iLoop) {
unsigned char smplLoopData[24]; /* 24 = size of a loop section in the smpl chunk. */
- bytesJustRead = drwav__on_read(onRead, pReadSeekUserData, smplLoopData, sizeof(smplLoopData), &cursor);
+ bytesJustRead = drwav__on_read(pWav->onRead, pWav->pUserData, smplLoopData, sizeof(smplLoopData), &cursor);
chunkSize -= bytesJustRead;
if (bytesJustRead == sizeof(smplLoopData)) {
@@ -1957,7 +2049,7 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
/* Make sure we seek past the padding. */
chunkSize += header.paddingSize;
- if (!drwav__seek_forward(onSeek, chunkSize, pReadSeekUserData)) {
+ if (!drwav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData)) {
break;
}
cursor += chunkSize;
@@ -1974,7 +2066,7 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
/* We may have moved passed the data chunk. If so we need to move back. If running in sequential mode we can assume we are already sitting on the data chunk. */
if (!sequential) {
- if (!drwav__seek_from_start(onSeek, pWav->dataChunkDataPos, pReadSeekUserData)) {
+ if (!drwav__seek_from_start(pWav->onSeek, pWav->dataChunkDataPos, pWav->pUserData)) {
return DRWAV_FALSE;
}
cursor = pWav->dataChunkDataPos;
@@ -1997,12 +2089,33 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
pWav->totalPCMFrameCount = dataChunkSize / drwav_get_bytes_per_pcm_frame(pWav);
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) {
+ drwav_uint64 totalBlockHeaderSizeInBytes;
drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign;
- pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (6*pWav->channels))) * 2)) / fmt.channels; /* x2 because two samples per byte. */
+
+ /* Make sure any trailing partial block is accounted for. */
+ if ((blockCount * fmt.blockAlign) < dataChunkSize) {
+ blockCount += 1;
+ }
+
+ /* We decode two samples per byte. There will be blockCount headers in the data chunk. This is enough to know how to calculate the total PCM frame count. */
+ totalBlockHeaderSizeInBytes = blockCount * (6*fmt.channels);
+ pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels;
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) {
+ drwav_uint64 totalBlockHeaderSizeInBytes;
drwav_uint64 blockCount = dataChunkSize / fmt.blockAlign;
- pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (4*pWav->channels))) * 2) + (blockCount * pWav->channels)) / fmt.channels;
+
+ /* Make sure any trailing partial block is accounted for. */
+ if ((blockCount * fmt.blockAlign) < dataChunkSize) {
+ blockCount += 1;
+ }
+
+ /* We decode two samples per byte. There will be blockCount headers in the data chunk. This is enough to know how to calculate the total PCM frame count. */
+ totalBlockHeaderSizeInBytes = blockCount * (4*fmt.channels);
+ pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels;
+
+ /* The header includes a decoded sample for each channel which acts as the initial predictor sample. */
+ pWav->totalPCMFrameCount += blockCount;
}
}
@@ -2032,22 +2145,36 @@ drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc
}
#endif
- pWav->totalSampleCount = pWav->totalPCMFrameCount * pWav->channels;
-
return DRWAV_TRUE;
}
+drwav_bool32 drwav_init(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ return drwav_init_ex(pWav, onRead, onSeek, NULL, pUserData, NULL, 0, pAllocationCallbacks);
+}
+
+drwav_bool32 drwav_init_ex(drwav* pWav, drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (!drwav_preinit(pWav, onRead, onSeek, pReadSeekUserData, pAllocationCallbacks)) {
+ return DRWAV_FALSE;
+ }
+
+ return drwav_init__internal(pWav, onChunk, pChunkUserData, flags);
+}
+
-drwav_uint32 drwav_riff_chunk_size_riff(drwav_uint64 dataChunkSize)
+static drwav_uint32 drwav__riff_chunk_size_riff(drwav_uint64 dataChunkSize)
{
- if (dataChunkSize <= (0xFFFFFFFFUL - 36)) {
- return 36 + (drwav_uint32)dataChunkSize;
+ drwav_uint32 dataSubchunkPaddingSize = drwav__chunk_padding_size_riff(dataChunkSize);
+
+ if (dataChunkSize <= (0xFFFFFFFFUL - 36 - dataSubchunkPaddingSize)) {
+ return 36 + (drwav_uint32)(dataChunkSize + dataSubchunkPaddingSize);
} else {
return 0xFFFFFFFF;
}
}
-drwav_uint32 drwav_data_chunk_size_riff(drwav_uint64 dataChunkSize)
+static drwav_uint32 drwav__data_chunk_size_riff(drwav_uint64 dataChunkSize)
{
if (dataChunkSize <= 0xFFFFFFFFUL) {
return (drwav_uint32)dataChunkSize;
@@ -2056,28 +2183,22 @@ drwav_uint32 drwav_data_chunk_size_riff(drwav_uint64 dataChunkSize)
}
}
-drwav_uint64 drwav_riff_chunk_size_w64(drwav_uint64 dataChunkSize)
+static drwav_uint64 drwav__riff_chunk_size_w64(drwav_uint64 dataChunkSize)
{
- return 80 + 24 + dataChunkSize; /* +24 because W64 includes the size of the GUID and size fields. */
+ drwav_uint64 dataSubchunkPaddingSize = drwav__chunk_padding_size_w64(dataChunkSize);
+
+ return 80 + 24 + dataChunkSize + dataSubchunkPaddingSize; /* +24 because W64 includes the size of the GUID and size fields. */
}
-drwav_uint64 drwav_data_chunk_size_w64(drwav_uint64 dataChunkSize)
+static drwav_uint64 drwav__data_chunk_size_w64(drwav_uint64 dataChunkSize)
{
return 24 + dataChunkSize; /* +24 because W64 includes the size of the GUID and size fields. */
}
-drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData)
+drwav_bool32 drwav_preinit_write(drwav* pWav, const drwav_data_format* pFormat, drwav_bool32 isSequential, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
{
- size_t runningPos = 0;
- drwav_uint64 initialDataChunkSize = 0;
- drwav_uint64 chunkSizeFMT;
-
- if (pWav == NULL) {
- return DRWAV_FALSE;
- }
-
- if (onWrite == NULL) {
+ if (pWav == NULL || onWrite == NULL) {
return DRWAV_FALSE;
}
@@ -2085,7 +2206,6 @@ drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_data_format* pF
return DRWAV_FALSE; /* <-- onSeek is required when in non-sequential mode. */
}
-
/* Not currently supporting compressed formats. Will need to add support for the "fact" chunk before we enable this. */
if (pFormat->format == DR_WAVE_FORMAT_EXTENSIBLE) {
return DRWAV_FALSE;
@@ -2094,125 +2214,573 @@ drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_data_format* pF
return DRWAV_FALSE;
}
+ DRWAV_ZERO_MEMORY(pWav, sizeof(*pWav));
+ pWav->onWrite = onWrite;
+ pWav->onSeek = onSeek;
+ pWav->pUserData = pUserData;
+ pWav->allocationCallbacks = drwav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks);
+
+ if (pWav->allocationCallbacks.onFree == NULL || (pWav->allocationCallbacks.onMalloc == NULL && pWav->allocationCallbacks.onRealloc == NULL)) {
+ return DRWAV_FALSE; /* Invalid allocation callbacks. */
+ }
+
+ pWav->fmt.formatTag = (drwav_uint16)pFormat->format;
+ pWav->fmt.channels = (drwav_uint16)pFormat->channels;
+ pWav->fmt.sampleRate = pFormat->sampleRate;
+ pWav->fmt.avgBytesPerSec = (drwav_uint32)((pFormat->bitsPerSample * pFormat->sampleRate * pFormat->channels) / 8);
+ pWav->fmt.blockAlign = (drwav_uint16)((pFormat->channels * pFormat->bitsPerSample) / 8);
+ pWav->fmt.bitsPerSample = (drwav_uint16)pFormat->bitsPerSample;
+ pWav->fmt.extendedSize = 0;
+ pWav->isSequentialWrite = isSequential;
+
+ return DRWAV_TRUE;
+}
+
+drwav_bool32 drwav_init_write__internal(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount)
+{
+ /* The function assumes drwav_preinit_write() was called beforehand. */
+
+ size_t runningPos = 0;
+ drwav_uint64 initialDataChunkSize = 0;
+ drwav_uint64 chunkSizeFMT;
+
+ /*
+ The initial values for the "RIFF" and "data" chunks depends on whether or not we are initializing in sequential mode or not. In
+ sequential mode we set this to its final values straight away since they can be calculated from the total sample count. In non-
+ sequential mode we initialize it all to zero and fill it out in drwav_uninit() using a backwards seek.
+ */
+ if (pWav->isSequentialWrite) {
+ initialDataChunkSize = (totalSampleCount * pWav->fmt.bitsPerSample) / 8;
+
+ /*
+ The RIFF container has a limit on the number of samples; drwav does not allow it to be exceeded. There is no practical limit for Wave64,
+ so for the sake of simplicity no validation is done for that container.
+ */
+ if (pFormat->container == drwav_container_riff) {
+ if (initialDataChunkSize > (0xFFFFFFFFUL - 36)) {
+ return DRWAV_FALSE; /* Not enough room to store every sample. */
+ }
+ }
+ }
+
+ pWav->dataChunkDataSizeTargetWrite = initialDataChunkSize;
+
+
+ /* "RIFF" chunk. */
+ if (pFormat->container == drwav_container_riff) {
+ drwav_uint32 chunkSizeRIFF = 36 + (drwav_uint32)initialDataChunkSize; /* +36 = "RIFF"+[RIFF Chunk Size]+"WAVE" + [sizeof "fmt " chunk] */
+ runningPos += pWav->onWrite(pWav->pUserData, "RIFF", 4);
+ runningPos += pWav->onWrite(pWav->pUserData, &chunkSizeRIFF, 4);
+ runningPos += pWav->onWrite(pWav->pUserData, "WAVE", 4);
+ } else {
+ drwav_uint64 chunkSizeRIFF = 80 + 24 + initialDataChunkSize; /* +24 because W64 includes the size of the GUID and size fields. */
+ runningPos += pWav->onWrite(pWav->pUserData, drwavGUID_W64_RIFF, 16);
+ runningPos += pWav->onWrite(pWav->pUserData, &chunkSizeRIFF, 8);
+ runningPos += pWav->onWrite(pWav->pUserData, drwavGUID_W64_WAVE, 16);
+ }
+
+ /* "fmt " chunk. */
+ if (pFormat->container == drwav_container_riff) {
+ chunkSizeFMT = 16;
+ runningPos += pWav->onWrite(pWav->pUserData, "fmt ", 4);
+ runningPos += pWav->onWrite(pWav->pUserData, &chunkSizeFMT, 4);
+ } else {
+ chunkSizeFMT = 40;
+ runningPos += pWav->onWrite(pWav->pUserData, drwavGUID_W64_FMT, 16);
+ runningPos += pWav->onWrite(pWav->pUserData, &chunkSizeFMT, 8);
+ }
+
+ runningPos += pWav->onWrite(pWav->pUserData, &pWav->fmt.formatTag, 2);
+ runningPos += pWav->onWrite(pWav->pUserData, &pWav->fmt.channels, 2);
+ runningPos += pWav->onWrite(pWav->pUserData, &pWav->fmt.sampleRate, 4);
+ runningPos += pWav->onWrite(pWav->pUserData, &pWav->fmt.avgBytesPerSec, 4);
+ runningPos += pWav->onWrite(pWav->pUserData, &pWav->fmt.blockAlign, 2);
+ runningPos += pWav->onWrite(pWav->pUserData, &pWav->fmt.bitsPerSample, 2);
+
+ pWav->dataChunkDataPos = runningPos;
+
+ /* "data" chunk. */
+ if (pFormat->container == drwav_container_riff) {
+ drwav_uint32 chunkSizeDATA = (drwav_uint32)initialDataChunkSize;
+ runningPos += pWav->onWrite(pWav->pUserData, "data", 4);
+ runningPos += pWav->onWrite(pWav->pUserData, &chunkSizeDATA, 4);
+ } else {
+ drwav_uint64 chunkSizeDATA = 24 + initialDataChunkSize; /* +24 because W64 includes the size of the GUID and size fields. */
+ runningPos += pWav->onWrite(pWav->pUserData, drwavGUID_W64_DATA, 16);
+ runningPos += pWav->onWrite(pWav->pUserData, &chunkSizeDATA, 8);
+ }
+
+
+ /* Simple validation. */
+ if (pFormat->container == drwav_container_riff) {
+ if (runningPos != 20 + chunkSizeFMT + 8) {
+ return DRWAV_FALSE;
+ }
+ } else {
+ if (runningPos != 40 + chunkSizeFMT + 24) {
+ return DRWAV_FALSE;
+ }
+ }
+
+
+ /* Set some properties for the client's convenience. */
+ pWav->container = pFormat->container;
+ pWav->channels = (drwav_uint16)pFormat->channels;
+ pWav->sampleRate = pFormat->sampleRate;
+ pWav->bitsPerSample = (drwav_uint16)pFormat->bitsPerSample;
+ pWav->translatedFormatTag = (drwav_uint16)pFormat->format;
+
+ return DRWAV_TRUE;
+}
+
+
+drwav_bool32 drwav_init_write(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (!drwav_preinit_write(pWav, pFormat, DRWAV_FALSE, onWrite, onSeek, pUserData, pAllocationCallbacks)) {
+ return DRWAV_FALSE;
+ }
+
+ return drwav_init_write__internal(pWav, pFormat, 0); /* totalSampleCount is 0 (unknown) in non-sequential mode; the sizes are patched up in drwav_uninit(). */
+}
+
+drwav_bool32 drwav_init_write_sequential(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (!drwav_preinit_write(pWav, pFormat, DRWAV_TRUE, onWrite, NULL, pUserData, pAllocationCallbacks)) {
+ return DRWAV_FALSE;
+ }
+
+ return drwav_init_write__internal(pWav, pFormat, totalSampleCount); /* Sequential mode was already set via drwav_preinit_write() above. */
+}
+
+drwav_bool32 drwav_init_write_sequential_pcm_frames(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, drwav_write_proc onWrite, void* pUserData, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pFormat == NULL) {
+ return DRWAV_FALSE;
+ }
+
+ return drwav_init_write_sequential(pWav, pFormat, totalPCMFrameCount*pFormat->channels, onWrite, pUserData, pAllocationCallbacks);
+}
+
+drwav_uint64 drwav_target_write_size_bytes(drwav_data_format const *format, drwav_uint64 totalSampleCount)
+{
+ drwav_uint64 targetDataSizeBytes = (totalSampleCount * format->channels * format->bitsPerSample/8);
+ drwav_uint64 riffChunkSizeBytes;
+ drwav_uint64 fileSizeBytes;
+
+ if (format->container == drwav_container_riff) {
+ riffChunkSizeBytes = drwav__riff_chunk_size_riff(targetDataSizeBytes);
+ fileSizeBytes = (8 + riffChunkSizeBytes); /* +8 because WAV doesn't include the size of the ChunkID and ChunkSize fields. */
+ } else {
+ riffChunkSizeBytes = drwav__riff_chunk_size_w64(targetDataSizeBytes);
+ fileSizeBytes = riffChunkSizeBytes;
+ }
+
+ return fileSizeBytes;
+}
+
+
+#ifndef DR_WAV_NO_STDIO
+FILE* drwav_fopen(const char* filePath, const char* openMode)
+{
+ FILE* pFile;
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+ if (fopen_s(&pFile, filePath, openMode) != 0) {
+ return NULL;
+ }
+#else
+ pFile = fopen(filePath, openMode);
+ if (pFile == NULL) {
+ return NULL;
+ }
+#endif
+
+ return pFile;
+}
+
+FILE* drwav_wfopen(const wchar_t* pFilePath, const wchar_t* pOpenMode, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ FILE* pFile;
+
+#if defined(_WIN32)
+ (void)pAllocationCallbacks;
+ #if defined(_MSC_VER) && _MSC_VER >= 1400
+ if (_wfopen_s(&pFile, pFilePath, pOpenMode) != 0) {
+ return NULL;
+ }
+ #else
+ pFile = _wfopen(pFilePath, pOpenMode);
+ if (pFile == NULL) {
+ return NULL;
+ }
+ #endif
+#else
+ /*
+ Use fopen() on anything other than Windows. Requires a conversion. This is annoying because fopen() is locale specific. The only real way I can
+ think of to do this is with wcsrtombs(). Note that wcstombs() is apparently not thread-safe because it uses a static global mbstate_t object for
+ maintaining state. I've checked this with -std=c89 and it works, but if somebody gets a compiler error I'll look into improving compatibility.
+ */
+ {
+ mbstate_t mbs;
+ size_t lenMB;
+ const wchar_t* pFilePathTemp = pFilePath;
+ char* pFilePathMB = NULL;
+ const wchar_t* pOpenModeMBTemp = pOpenMode;
+ char pOpenModeMB[16];
+ drwav_allocation_callbacks allocationCallbacks;
+
+ allocationCallbacks = drwav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks);
+
+ /* Get the length first. */
+ DRWAV_ZERO_MEMORY(&mbs, sizeof(mbs));
+ lenMB = wcsrtombs(NULL, &pFilePathTemp, 0, &mbs);
+ if (lenMB == (size_t)-1) {
+ return NULL;
+ }
+
+ pFilePathMB = (char*)drwav__malloc_from_callbacks(lenMB + 1, &allocationCallbacks);
+ if (pFilePathMB == NULL) {
+ return NULL;
+ }
+
+ pFilePathTemp = pFilePath;
+ DRWAV_ZERO_MEMORY(&mbs, sizeof(mbs));
+ wcsrtombs(pFilePathMB, &pFilePathTemp, lenMB + 1, &mbs);
+
+ DRWAV_ZERO_MEMORY(&mbs, sizeof(mbs));
+ wcsrtombs(pOpenModeMB, &pOpenModeMBTemp, sizeof(pOpenModeMB), &mbs);
+
+ pFile = fopen(pFilePathMB, pOpenModeMB);
+
+ drwav__free_from_callbacks(pFilePathMB, &allocationCallbacks);
+ }
+#endif
+
+ return pFile;
+}
+
+
+static size_t drwav__on_read_stdio(void* pUserData, void* pBufferOut, size_t bytesToRead)
+{
+ return fread(pBufferOut, 1, bytesToRead, (FILE*)pUserData);
+}
+
+static size_t drwav__on_write_stdio(void* pUserData, const void* pData, size_t bytesToWrite)
+{
+ return fwrite(pData, 1, bytesToWrite, (FILE*)pUserData);
+}
+
+static drwav_bool32 drwav__on_seek_stdio(void* pUserData, int offset, drwav_seek_origin origin)
+{
+ return fseek((FILE*)pUserData, offset, (origin == drwav_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0;
+}
+
+drwav_bool32 drwav_init_file(drwav* pWav, const char* filename, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ return drwav_init_file_ex(pWav, filename, NULL, NULL, 0, pAllocationCallbacks);
+}
+
+
+drwav_bool32 drwav_init_file__internal_FILE(drwav* pWav, FILE* pFile, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (!drwav_preinit(pWav, drwav__on_read_stdio, drwav__on_seek_stdio, (void*)pFile, pAllocationCallbacks)) {
+ fclose(pFile);
+ return DRWAV_FALSE;
+ }
+
+ return drwav_init__internal(pWav, onChunk, pChunkUserData, flags);
+}
+
+drwav_bool32 drwav_init_file_ex(drwav* pWav, const char* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ FILE* pFile = drwav_fopen(filename, "rb");
+ if (pFile == NULL) {
+ return DRWAV_FALSE;
+ }
+
+ /* This takes ownership of the FILE* object. */
+ return drwav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks);
+}
+
+drwav_bool32 drwav_init_file_w(drwav* pWav, const wchar_t* filename, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ return drwav_init_file_ex_w(pWav, filename, NULL, NULL, 0, pAllocationCallbacks);
+}
+
+drwav_bool32 drwav_init_file_ex_w(drwav* pWav, const wchar_t* filename, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ FILE* pFile = drwav_wfopen(filename, L"rb", pAllocationCallbacks);
+ if (pFile == NULL) {
+ return DRWAV_FALSE;
+ }
+
+ /* This takes ownership of the FILE* object. */
+ return drwav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks);
+}
+
+
+drwav_bool32 drwav_init_file_write__internal_FILE(drwav* pWav, FILE* pFile, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (!drwav_preinit_write(pWav, pFormat, isSequential, drwav__on_write_stdio, drwav__on_seek_stdio, (void*)pFile, pAllocationCallbacks)) {
+ fclose(pFile);
+ return DRWAV_FALSE;
+ }
+
+ return drwav_init_write__internal(pWav, pFormat, totalSampleCount);
+}
+
+drwav_bool32 drwav_init_file_write__internal(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ FILE* pFile = drwav_fopen(filename, "wb");
+ if (pFile == NULL) {
+ return DRWAV_FALSE;
+ }
+
+ /* This takes ownership of the FILE* object. */
+ return drwav_init_file_write__internal_FILE(pWav, pFile, pFormat, totalSampleCount, isSequential, pAllocationCallbacks);
+}
+
+drwav_bool32 drwav_init_file_write_w__internal(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ FILE* pFile = drwav_wfopen(filename, L"wb", pAllocationCallbacks);
+ if (pFile == NULL) {
+ return DRWAV_FALSE;
+ }
+
+ /* This takes ownership of the FILE* object. */
+ return drwav_init_file_write__internal_FILE(pWav, pFile, pFormat, totalSampleCount, isSequential, pAllocationCallbacks);
+}
+
+drwav_bool32 drwav_init_file_write(drwav* pWav, const char* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ return drwav_init_file_write__internal(pWav, filename, pFormat, 0, DRWAV_FALSE, pAllocationCallbacks);
+}
+
+drwav_bool32 drwav_init_file_write_sequential(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ return drwav_init_file_write__internal(pWav, filename, pFormat, totalSampleCount, DRWAV_TRUE, pAllocationCallbacks);
+}
+
+drwav_bool32 drwav_init_file_write_sequential_pcm_frames(drwav* pWav, const char* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pFormat == NULL) {
+ return DRWAV_FALSE;
+ }
+
+ return drwav_init_file_write_sequential(pWav, filename, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks);
+}
+
+drwav_bool32 drwav_init_file_write_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ return drwav_init_file_write_w__internal(pWav, filename, pFormat, 0, DRWAV_FALSE, pAllocationCallbacks);
+}
+
+drwav_bool32 drwav_init_file_write_sequential_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ return drwav_init_file_write_w__internal(pWav, filename, pFormat, totalSampleCount, DRWAV_TRUE, pAllocationCallbacks);
+}
+
+drwav_bool32 drwav_init_file_write_sequential_pcm_frames_w(drwav* pWav, const wchar_t* filename, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (pFormat == NULL) {
+ return DRWAV_FALSE;
+ }
+
+ return drwav_init_file_write_sequential_w(pWav, filename, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks);
+}
+#endif /* DR_WAV_NO_STDIO */
+
+
+static size_t drwav__on_read_memory(void* pUserData, void* pBufferOut, size_t bytesToRead)
+{
+ drwav* pWav = (drwav*)pUserData;
+ size_t bytesRemaining;
+
+ DRWAV_ASSERT(pWav != NULL);
+ DRWAV_ASSERT(pWav->memoryStream.dataSize >= pWav->memoryStream.currentReadPos);
+
+ bytesRemaining = pWav->memoryStream.dataSize - pWav->memoryStream.currentReadPos;
+ if (bytesToRead > bytesRemaining) {
+ bytesToRead = bytesRemaining;
+ }
+
+ if (bytesToRead > 0) {
+ DRWAV_COPY_MEMORY(pBufferOut, pWav->memoryStream.data + pWav->memoryStream.currentReadPos, bytesToRead);
+ pWav->memoryStream.currentReadPos += bytesToRead;
+ }
+
+ return bytesToRead;
+}
+
+static drwav_bool32 drwav__on_seek_memory(void* pUserData, int offset, drwav_seek_origin origin)
+{
+ drwav* pWav = (drwav*)pUserData;
+ DRWAV_ASSERT(pWav != NULL);
+
+ if (origin == drwav_seek_origin_current) {
+ if (offset > 0) {
+ if (pWav->memoryStream.currentReadPos + offset > pWav->memoryStream.dataSize) {
+ return DRWAV_FALSE; /* Trying to seek too far forward. */
+ }
+ } else {
+ if (pWav->memoryStream.currentReadPos < (size_t)-offset) {
+ return DRWAV_FALSE; /* Trying to seek too far backwards. */
+ }
+ }
+
+ /* This will never underflow thanks to the clamps above. */
+ pWav->memoryStream.currentReadPos += offset;
+ } else {
+ if ((drwav_uint32)offset <= pWav->memoryStream.dataSize) {
+ pWav->memoryStream.currentReadPos = offset;
+ } else {
+ return DRWAV_FALSE; /* Trying to seek too far forward. */
+ }
+ }
+
+ return DRWAV_TRUE;
+}
+
+static size_t drwav__on_write_memory(void* pUserData, const void* pDataIn, size_t bytesToWrite)
+{
+ drwav* pWav = (drwav*)pUserData;
+ size_t bytesRemaining;
+
+ DRWAV_ASSERT(pWav != NULL);
+ DRWAV_ASSERT(pWav->memoryStreamWrite.dataCapacity >= pWav->memoryStreamWrite.currentWritePos);
+
+ bytesRemaining = pWav->memoryStreamWrite.dataCapacity - pWav->memoryStreamWrite.currentWritePos;
+ if (bytesRemaining < bytesToWrite) {
+ /* Need to reallocate. */
+ void* pNewData;
+ size_t newDataCapacity = (pWav->memoryStreamWrite.dataCapacity == 0) ? 256 : pWav->memoryStreamWrite.dataCapacity * 2;
+
+ /* If doubling wasn't enough, just make it the minimum required size to write the data. */
+ if ((newDataCapacity - pWav->memoryStreamWrite.currentWritePos) < bytesToWrite) {
+ newDataCapacity = pWav->memoryStreamWrite.currentWritePos + bytesToWrite;
+ }
+
+ pNewData = drwav__realloc_from_callbacks(*pWav->memoryStreamWrite.ppData, newDataCapacity, pWav->memoryStreamWrite.dataCapacity, &pWav->allocationCallbacks);
+ if (pNewData == NULL) {
+ return 0;
+ }
+
+ *pWav->memoryStreamWrite.ppData = pNewData;
+ pWav->memoryStreamWrite.dataCapacity = newDataCapacity;
+ }
+
+ DRWAV_COPY_MEMORY(((drwav_uint8*)(*pWav->memoryStreamWrite.ppData)) + pWav->memoryStreamWrite.currentWritePos, pDataIn, bytesToWrite);
+
+ pWav->memoryStreamWrite.currentWritePos += bytesToWrite;
+ if (pWav->memoryStreamWrite.dataSize < pWav->memoryStreamWrite.currentWritePos) {
+ pWav->memoryStreamWrite.dataSize = pWav->memoryStreamWrite.currentWritePos;
+ }
+
+ *pWav->memoryStreamWrite.pDataSize = pWav->memoryStreamWrite.dataSize;
- drwav_zero_memory(pWav, sizeof(*pWav));
- pWav->onWrite = onWrite;
- pWav->onSeek = onSeek;
- pWav->pUserData = pUserData;
- pWav->fmt.formatTag = (drwav_uint16)pFormat->format;
- pWav->fmt.channels = (drwav_uint16)pFormat->channels;
- pWav->fmt.sampleRate = pFormat->sampleRate;
- pWav->fmt.avgBytesPerSec = (drwav_uint32)((pFormat->bitsPerSample * pFormat->sampleRate * pFormat->channels) / 8);
- pWav->fmt.blockAlign = (drwav_uint16)((pFormat->channels * pFormat->bitsPerSample) / 8);
- pWav->fmt.bitsPerSample = (drwav_uint16)pFormat->bitsPerSample;
- pWav->fmt.extendedSize = 0;
- pWav->isSequentialWrite = isSequential;
+ return bytesToWrite;
+}
- /*
- The initial values for the "RIFF" and "data" chunks depends on whether or not we are initializing in sequential mode or not. In
- sequential mode we set this to its final values straight away since they can be calculated from the total sample count. In non-
- sequential mode we initialize it all to zero and fill it out in drwav_uninit() using a backwards seek.
- */
- if (isSequential) {
- initialDataChunkSize = (totalSampleCount * pWav->fmt.bitsPerSample) / 8;
+static drwav_bool32 drwav__on_seek_memory_write(void* pUserData, int offset, drwav_seek_origin origin)
+{
+ drwav* pWav = (drwav*)pUserData;
+ DRWAV_ASSERT(pWav != NULL);
- /*
- The RIFF container has a limit on the number of samples. drwav is not allowing this. There's no practical limits for Wave64
- so for the sake of simplicity I'm not doing any validation for that.
- */
- if (pFormat->container == drwav_container_riff) {
- if (initialDataChunkSize > (0xFFFFFFFFUL - 36)) {
- return DRWAV_FALSE; /* Not enough room to store every sample. */
+ if (origin == drwav_seek_origin_current) {
+ if (offset > 0) {
+ if (pWav->memoryStreamWrite.currentWritePos + offset > pWav->memoryStreamWrite.dataSize) {
+ offset = (int)(pWav->memoryStreamWrite.dataSize - pWav->memoryStreamWrite.currentWritePos); /* Trying to seek too far forward. */
+ }
+ } else {
+ if (pWav->memoryStreamWrite.currentWritePos < (size_t)-offset) {
+ offset = -(int)pWav->memoryStreamWrite.currentWritePos; /* Trying to seek too far backwards. */
}
}
- }
- pWav->dataChunkDataSizeTargetWrite = initialDataChunkSize;
+ /* This will never underflow thanks to the clamps above. */
+ pWav->memoryStreamWrite.currentWritePos += offset;
+ } else {
+ if ((drwav_uint32)offset <= pWav->memoryStreamWrite.dataSize) {
+ pWav->memoryStreamWrite.currentWritePos = offset;
+ } else {
+ pWav->memoryStreamWrite.currentWritePos = pWav->memoryStreamWrite.dataSize; /* Trying to seek too far forward. */
+ }
+ }
+
+ return DRWAV_TRUE;
+}
+drwav_bool32 drwav_init_memory(drwav* pWav, const void* data, size_t dataSize, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ return drwav_init_memory_ex(pWav, data, dataSize, NULL, NULL, 0, pAllocationCallbacks);
+}
- /* "RIFF" chunk. */
- if (pFormat->container == drwav_container_riff) {
- drwav_uint32 chunkSizeRIFF = 36 + (drwav_uint32)initialDataChunkSize; /* +36 = "RIFF"+[RIFF Chunk Size]+"WAVE" + [sizeof "fmt " chunk] */
- runningPos += pWav->onWrite(pUserData, "RIFF", 4);
- runningPos += pWav->onWrite(pUserData, &chunkSizeRIFF, 4);
- runningPos += pWav->onWrite(pUserData, "WAVE", 4);
- } else {
- drwav_uint64 chunkSizeRIFF = 80 + 24 + initialDataChunkSize; /* +24 because W64 includes the size of the GUID and size fields. */
- runningPos += pWav->onWrite(pUserData, drwavGUID_W64_RIFF, 16);
- runningPos += pWav->onWrite(pUserData, &chunkSizeRIFF, 8);
- runningPos += pWav->onWrite(pUserData, drwavGUID_W64_WAVE, 16);
+drwav_bool32 drwav_init_memory_ex(drwav* pWav, const void* data, size_t dataSize, drwav_chunk_proc onChunk, void* pChunkUserData, drwav_uint32 flags, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (data == NULL || dataSize == 0) {
+ return DRWAV_FALSE;
}
- /* "fmt " chunk. */
- if (pFormat->container == drwav_container_riff) {
- chunkSizeFMT = 16;
- runningPos += pWav->onWrite(pUserData, "fmt ", 4);
- runningPos += pWav->onWrite(pUserData, &chunkSizeFMT, 4);
- } else {
- chunkSizeFMT = 40;
- runningPos += pWav->onWrite(pUserData, drwavGUID_W64_FMT, 16);
- runningPos += pWav->onWrite(pUserData, &chunkSizeFMT, 8);
+ if (!drwav_preinit(pWav, drwav__on_read_memory, drwav__on_seek_memory, pWav, pAllocationCallbacks)) {
+ return DRWAV_FALSE;
}
- runningPos += pWav->onWrite(pUserData, &pWav->fmt.formatTag, 2);
- runningPos += pWav->onWrite(pUserData, &pWav->fmt.channels, 2);
- runningPos += pWav->onWrite(pUserData, &pWav->fmt.sampleRate, 4);
- runningPos += pWav->onWrite(pUserData, &pWav->fmt.avgBytesPerSec, 4);
- runningPos += pWav->onWrite(pUserData, &pWav->fmt.blockAlign, 2);
- runningPos += pWav->onWrite(pUserData, &pWav->fmt.bitsPerSample, 2);
+ pWav->memoryStream.data = (const unsigned char*)data;
+ pWav->memoryStream.dataSize = dataSize;
+ pWav->memoryStream.currentReadPos = 0;
- pWav->dataChunkDataPos = runningPos;
+ return drwav_init__internal(pWav, onChunk, pChunkUserData, flags);
+}
- /* "data" chunk. */
- if (pFormat->container == drwav_container_riff) {
- drwav_uint32 chunkSizeDATA = (drwav_uint32)initialDataChunkSize;
- runningPos += pWav->onWrite(pUserData, "data", 4);
- runningPos += pWav->onWrite(pUserData, &chunkSizeDATA, 4);
- } else {
- drwav_uint64 chunkSizeDATA = 24 + initialDataChunkSize; /* +24 because W64 includes the size of the GUID and size fields. */
- runningPos += pWav->onWrite(pUserData, drwavGUID_W64_DATA, 16);
- runningPos += pWav->onWrite(pUserData, &chunkSizeDATA, 8);
+
+drwav_bool32 drwav_init_memory_write__internal(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ if (ppData == NULL || pDataSize == NULL) {
+ return DRWAV_FALSE;
}
+ *ppData = NULL; /* Important because we're using realloc()! */
+ *pDataSize = 0;
- /* Simple validation. */
- if (pFormat->container == drwav_container_riff) {
- if (runningPos != 20 + chunkSizeFMT + 8) {
- return DRWAV_FALSE;
- }
- } else {
- if (runningPos != 40 + chunkSizeFMT + 24) {
- return DRWAV_FALSE;
- }
+ if (!drwav_preinit_write(pWav, pFormat, isSequential, drwav__on_write_memory, drwav__on_seek_memory_write, pWav, pAllocationCallbacks)) {
+ return DRWAV_FALSE;
}
-
+ pWav->memoryStreamWrite.ppData = ppData;
+ pWav->memoryStreamWrite.pDataSize = pDataSize;
+ pWav->memoryStreamWrite.dataSize = 0;
+ pWav->memoryStreamWrite.dataCapacity = 0;
+ pWav->memoryStreamWrite.currentWritePos = 0;
- /* Set some properties for the client's convenience. */
- pWav->container = pFormat->container;
- pWav->channels = (drwav_uint16)pFormat->channels;
- pWav->sampleRate = pFormat->sampleRate;
- pWav->bitsPerSample = (drwav_uint16)pFormat->bitsPerSample;
- pWav->translatedFormatTag = (drwav_uint16)pFormat->format;
-
- return DRWAV_TRUE;
+ return drwav_init_write__internal(pWav, pFormat, totalSampleCount);
}
+drwav_bool32 drwav_init_memory_write(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, const drwav_allocation_callbacks* pAllocationCallbacks)
+{
+ return drwav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, 0, DRWAV_FALSE, pAllocationCallbacks);
+}
-drwav_bool32 drwav_init_write(drwav* pWav, const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData)
+drwav_bool32 drwav_init_memory_write_sequential(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, const drwav_allocation_callbacks* pAllocationCallbacks)
{
- return drwav_init_write__internal(pWav, pFormat, 0, DRWAV_FALSE, onWrite, onSeek, pUserData); /* DRWAV_FALSE = Not Sequential */
+ return drwav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, totalSampleCount, DRWAV_TRUE, pAllocationCallbacks);
}
-drwav_bool32 drwav_init_write_sequential(drwav* pWav, const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData)
+drwav_bool32 drwav_init_memory_write_sequential_pcm_frames(drwav* pWav, void** ppData, size_t* pDataSize, const drwav_data_format* pFormat, drwav_uint64 totalPCMFrameCount, const drwav_allocation_callbacks* pAllocationCallbacks)
{
- return drwav_init_write__internal(pWav, pFormat, totalSampleCount, DRWAV_TRUE, onWrite, NULL, pUserData); /* DRWAV_TRUE = Sequential */
+ if (pFormat == NULL) {
+ return DRWAV_FALSE;
+ }
+
+ return drwav_init_memory_write_sequential(pWav, ppData, pDataSize, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks);
}
-void drwav_uninit(drwav* pWav)
+
+
+drwav_result drwav_uninit(drwav* pWav)
{
+ drwav_result result = DRWAV_SUCCESS;
+
if (pWav == NULL) {
- return;
+ return DRWAV_INVALID_ARGS;
}
/*
@@ -2223,16 +2791,11 @@ void drwav_uninit(drwav* pWav)
if (pWav->onWrite != NULL) {
drwav_uint32 paddingSize = 0;
- /* Validation for sequential mode. */
- if (pWav->isSequentialWrite) {
- drwav_assert(pWav->dataChunkDataSize == pWav->dataChunkDataSizeTargetWrite);
- }
-
/* Padding. Do not adjust pWav->dataChunkDataSize - this should not include the padding. */
if (pWav->container == drwav_container_riff) {
- paddingSize = (drwav_uint32)(pWav->dataChunkDataSize % 2);
+ paddingSize = drwav__chunk_padding_size_riff(pWav->dataChunkDataSize);
} else {
- paddingSize = (drwav_uint32)(pWav->dataChunkDataSize % 8);
+ paddingSize = drwav__chunk_padding_size_w64(pWav->dataChunkDataSize);
}
if (paddingSize > 0) {
@@ -2248,29 +2811,36 @@ void drwav_uninit(drwav* pWav)
if (pWav->container == drwav_container_riff) {
/* The "RIFF" chunk size. */
if (pWav->onSeek(pWav->pUserData, 4, drwav_seek_origin_start)) {
- drwav_uint32 riffChunkSize = drwav_riff_chunk_size_riff(pWav->dataChunkDataSize);
+ drwav_uint32 riffChunkSize = drwav__riff_chunk_size_riff(pWav->dataChunkDataSize);
pWav->onWrite(pWav->pUserData, &riffChunkSize, 4);
}
/* the "data" chunk size. */
if (pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos + 4, drwav_seek_origin_start)) {
- drwav_uint32 dataChunkSize = drwav_data_chunk_size_riff(pWav->dataChunkDataSize);
+ drwav_uint32 dataChunkSize = drwav__data_chunk_size_riff(pWav->dataChunkDataSize);
pWav->onWrite(pWav->pUserData, &dataChunkSize, 4);
}
} else {
/* The "RIFF" chunk size. */
if (pWav->onSeek(pWav->pUserData, 16, drwav_seek_origin_start)) {
- drwav_uint64 riffChunkSize = drwav_riff_chunk_size_w64(pWav->dataChunkDataSize);
+ drwav_uint64 riffChunkSize = drwav__riff_chunk_size_w64(pWav->dataChunkDataSize);
pWav->onWrite(pWav->pUserData, &riffChunkSize, 8);
}
/* The "data" chunk size. */
if (pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos + 16, drwav_seek_origin_start)) {
- drwav_uint64 dataChunkSize = drwav_data_chunk_size_w64(pWav->dataChunkDataSize);
+ drwav_uint64 dataChunkSize = drwav__data_chunk_size_w64(pWav->dataChunkDataSize);
pWav->onWrite(pWav->pUserData, &dataChunkSize, 8);
}
}
}
+
+ /* Validation for sequential mode. */
+ if (pWav->isSequentialWrite) {
+ if (pWav->dataChunkDataSize != pWav->dataChunkDataSizeTargetWrite) {
+ result = DRWAV_INVALID_FILE;
+ }
+ }
}
#ifndef DR_WAV_NO_STDIO
@@ -2282,60 +2852,10 @@ void drwav_uninit(drwav* pWav)
fclose((FILE*)pWav->pUserData);
}
#endif
-}
-
-
-drwav* drwav_open(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData)
-{
- return drwav_open_ex(onRead, onSeek, NULL, pUserData, NULL, 0);
-}
-
-drwav* drwav_open_ex(drwav_read_proc onRead, drwav_seek_proc onSeek, drwav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, drwav_uint32 flags)
-{
- drwav* pWav = (drwav*)DRWAV_MALLOC(sizeof(*pWav));
- if (pWav == NULL) {
- return NULL;
- }
-
- if (!drwav_init_ex(pWav, onRead, onSeek, onChunk, pReadSeekUserData, pChunkUserData, flags)) {
- DRWAV_FREE(pWav);
- return NULL;
- }
-
- return pWav;
-}
-
-
-drwav* drwav_open_write__internal(const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_bool32 isSequential, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData)
-{
- drwav* pWav = (drwav*)DRWAV_MALLOC(sizeof(*pWav));
- if (pWav == NULL) {
- return NULL;
- }
-
- if (!drwav_init_write__internal(pWav, pFormat, totalSampleCount, isSequential, onWrite, onSeek, pUserData)) {
- DRWAV_FREE(pWav);
- return NULL;
- }
-
- return pWav;
-}
-drwav* drwav_open_write(const drwav_data_format* pFormat, drwav_write_proc onWrite, drwav_seek_proc onSeek, void* pUserData)
-{
- return drwav_open_write__internal(pFormat, 0, DRWAV_FALSE, onWrite, onSeek, pUserData);
-}
-
-drwav* drwav_open_write_sequential(const drwav_data_format* pFormat, drwav_uint64 totalSampleCount, drwav_write_proc onWrite, void* pUserData)
-{
- return drwav_open_write__internal(pFormat, totalSampleCount, DRWAV_TRUE, onWrite, NULL, pUserData);
+ return result;
}
-void drwav_close(drwav* pWav)
-{
- drwav_uninit(pWav);
- DRWAV_FREE(pWav);
-}
size_t drwav_read_raw(drwav* pWav, size_t bytesToRead, void* pBufferOut)
@@ -2356,38 +2876,11 @@ size_t drwav_read_raw(drwav* pWav, size_t bytesToRead, void* pBufferOut)
return bytesRead;
}
-drwav_uint64 drwav_read(drwav* pWav, drwav_uint64 samplesToRead, void* pBufferOut)
-{
- drwav_uint32 bytesPerSample;
- size_t bytesRead;
-
- if (pWav == NULL || samplesToRead == 0 || pBufferOut == NULL) {
- return 0;
- }
-
- /* Cannot use this function for compressed formats. */
- if (drwav__is_compressed_format_tag(pWav->translatedFormatTag)) {
- return 0;
- }
-
- bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
- return 0;
- }
-
- /* Don't try to read more samples than can potentially fit in the output buffer. */
- if (samplesToRead * bytesPerSample > DRWAV_SIZE_MAX) {
- samplesToRead = DRWAV_SIZE_MAX / bytesPerSample;
- }
- bytesRead = drwav_read_raw(pWav, (size_t)(samplesToRead * bytesPerSample), pBufferOut);
- return bytesRead / bytesPerSample;
-}
-drwav_uint64 drwav_read_pcm_frames(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_le(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut)
{
drwav_uint32 bytesPerFrame;
- size_t bytesRead;
if (pWav == NULL || framesToRead == 0 || pBufferOut == NULL) {
return 0;
@@ -2408,10 +2901,28 @@ drwav_uint64 drwav_read_pcm_frames(drwav* pWav, drwav_uint64 framesToRead, void*
framesToRead = DRWAV_SIZE_MAX / bytesPerFrame;
}
- bytesRead = drwav_read_raw(pWav, (size_t)(framesToRead * bytesPerFrame), pBufferOut);
- return bytesRead / bytesPerFrame;
+ return drwav_read_raw(pWav, (size_t)(framesToRead * bytesPerFrame), pBufferOut) / bytesPerFrame;
}
+drwav_uint64 drwav_read_pcm_frames_be(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut)
+{
+ drwav_uint64 framesRead = drwav_read_pcm_frames_le(pWav, framesToRead, pBufferOut);
+ drwav__bswap_samples(pBufferOut, framesRead*pWav->channels, drwav_get_bytes_per_pcm_frame(pWav)/pWav->channels, pWav->translatedFormatTag);
+
+ return framesRead;
+}
+
+drwav_uint64 drwav_read_pcm_frames(drwav* pWav, drwav_uint64 framesToRead, void* pBufferOut)
+{
+ if (drwav__is_little_endian()) {
+ return drwav_read_pcm_frames_le(pWav, framesToRead, pBufferOut);
+ } else {
+ return drwav_read_pcm_frames_be(pWav, framesToRead, pBufferOut);
+ }
+}
+
+
+
drwav_bool32 drwav_seek_to_first_pcm_frame(drwav* pWav)
{
if (pWav->onWrite != NULL) {
@@ -2423,33 +2934,34 @@ drwav_bool32 drwav_seek_to_first_pcm_frame(drwav* pWav)
}
if (drwav__is_compressed_format_tag(pWav->translatedFormatTag)) {
- pWav->compressed.iCurrentSample = 0;
+ pWav->compressed.iCurrentPCMFrame = 0;
}
pWav->bytesRemaining = pWav->dataChunkDataSize;
return DRWAV_TRUE;
}
-drwav_bool32 drwav_seek_to_sample(drwav* pWav, drwav_uint64 sample)
+drwav_bool32 drwav_seek_to_pcm_frame(drwav* pWav, drwav_uint64 targetFrameIndex)
{
/* Seeking should be compatible with wave files > 2GB. */
- if (pWav->onWrite != NULL) {
- return DRWAV_FALSE; /* No seeking in write mode. */
+ if (pWav == NULL || pWav->onSeek == NULL) {
+ return DRWAV_FALSE;
}
- if (pWav == NULL || pWav->onSeek == NULL) {
+ /* No seeking in write mode. */
+ if (pWav->onWrite != NULL) {
return DRWAV_FALSE;
}
/* If there are no samples, just return DRWAV_TRUE without doing anything. */
- if (pWav->totalSampleCount == 0) {
+ if (pWav->totalPCMFrameCount == 0) {
return DRWAV_TRUE;
}
/* Make sure the sample is clamped. */
- if (sample >= pWav->totalSampleCount) {
- sample = pWav->totalSampleCount - 1;
+ if (targetFrameIndex >= pWav->totalPCMFrameCount) {
+ targetFrameIndex = pWav->totalPCMFrameCount - 1;
}
/*
@@ -2463,36 +2975,36 @@ drwav_bool32 drwav_seek_to_sample(drwav* pWav, drwav_uint64 sample)
If we're seeking forward it's simple - just keep reading samples until we hit the sample we're requesting. If we're seeking backwards,
we first need to seek back to the start and then just do the same thing as a forward seek.
*/
- if (sample < pWav->compressed.iCurrentSample) {
+ if (targetFrameIndex < pWav->compressed.iCurrentPCMFrame) {
if (!drwav_seek_to_first_pcm_frame(pWav)) {
return DRWAV_FALSE;
}
}
- if (sample > pWav->compressed.iCurrentSample) {
- drwav_uint64 offset = sample - pWav->compressed.iCurrentSample;
+ if (targetFrameIndex > pWav->compressed.iCurrentPCMFrame) {
+ drwav_uint64 offsetInFrames = targetFrameIndex - pWav->compressed.iCurrentPCMFrame;
drwav_int16 devnull[2048];
- while (offset > 0) {
- drwav_uint64 samplesRead = 0;
- drwav_uint64 samplesToRead = offset;
- if (samplesToRead > 2048) {
- samplesToRead = 2048;
+ while (offsetInFrames > 0) {
+ drwav_uint64 framesRead = 0;
+ drwav_uint64 framesToRead = offsetInFrames;
+ if (framesToRead > drwav_countof(devnull)/pWav->channels) {
+ framesToRead = drwav_countof(devnull)/pWav->channels;
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) {
- samplesRead = drwav_read_s16__msadpcm(pWav, samplesToRead, devnull);
+ framesRead = drwav_read_pcm_frames_s16__msadpcm(pWav, framesToRead, devnull);
} else if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) {
- samplesRead = drwav_read_s16__ima(pWav, samplesToRead, devnull);
+ framesRead = drwav_read_pcm_frames_s16__ima(pWav, framesToRead, devnull);
} else {
assert(DRWAV_FALSE); /* If this assertion is triggered it means I've implemented a new compressed format but forgot to add a branch for it here. */
}
- if (samplesRead != samplesToRead) {
+ if (framesRead != framesToRead) {
return DRWAV_FALSE;
}
- offset -= samplesRead;
+ offsetInFrames -= framesRead;
}
}
} else {
@@ -2502,10 +3014,10 @@ drwav_bool32 drwav_seek_to_sample(drwav* pWav, drwav_uint64 sample)
drwav_uint64 offset;
totalSizeInBytes = pWav->totalPCMFrameCount * drwav_get_bytes_per_pcm_frame(pWav);
- drwav_assert(totalSizeInBytes >= pWav->bytesRemaining);
+ DRWAV_ASSERT(totalSizeInBytes >= pWav->bytesRemaining);
currentBytePos = totalSizeInBytes - pWav->bytesRemaining;
- targetBytePos = sample * drwav_get_bytes_per_sample(pWav);
+ targetBytePos = targetFrameIndex * drwav_get_bytes_per_pcm_frame(pWav);
if (currentBytePos < targetBytePos) {
/* Offset forwards. */
@@ -2532,11 +3044,6 @@ drwav_bool32 drwav_seek_to_sample(drwav* pWav, drwav_uint64 sample)
return DRWAV_TRUE;
}
-drwav_bool32 drwav_seek_to_pcm_frame(drwav* pWav, drwav_uint64 targetFrameIndex)
-{
- return drwav_seek_to_sample(pWav, targetFrameIndex * pWav->channels);
-}
-
size_t drwav_write_raw(drwav* pWav, size_t bytesToWrite, const void* pData)
{
@@ -2552,29 +3059,31 @@ size_t drwav_write_raw(drwav* pWav, size_t bytesToWrite, const void* pData)
return bytesWritten;
}
-drwav_uint64 drwav_write(drwav* pWav, drwav_uint64 samplesToWrite, const void* pData)
+
+drwav_uint64 drwav_write_pcm_frames_le(drwav* pWav, drwav_uint64 framesToWrite, const void* pData)
{
drwav_uint64 bytesToWrite;
drwav_uint64 bytesWritten;
const drwav_uint8* pRunningData;
- if (pWav == NULL || samplesToWrite == 0 || pData == NULL) {
+ if (pWav == NULL || framesToWrite == 0 || pData == NULL) {
return 0;
}
- bytesToWrite = ((samplesToWrite * pWav->bitsPerSample) / 8);
+ bytesToWrite = ((framesToWrite * pWav->channels * pWav->bitsPerSample) / 8);
if (bytesToWrite > DRWAV_SIZE_MAX) {
return 0;
}
bytesWritten = 0;
pRunningData = (const drwav_uint8*)pData;
+
while (bytesToWrite > 0) {
size_t bytesJustWritten;
- drwav_uint64 bytesToWriteThisIteration = bytesToWrite;
- if (bytesToWriteThisIteration > DRWAV_SIZE_MAX) {
- bytesToWriteThisIteration = DRWAV_SIZE_MAX;
- }
+ drwav_uint64 bytesToWriteThisIteration;
+
+ bytesToWriteThisIteration = bytesToWrite;
+ DRWAV_ASSERT(bytesToWriteThisIteration <= DRWAV_SIZE_MAX); /* <-- This is checked above. */
bytesJustWritten = drwav_write_raw(pWav, (size_t)bytesToWriteThisIteration, pRunningData);
if (bytesJustWritten == 0) {
@@ -2586,49 +3095,108 @@ drwav_uint64 drwav_write(drwav* pWav, drwav_uint64 samplesToWrite, const void* p
pRunningData += bytesJustWritten;
}
- return (bytesWritten * 8) / pWav->bitsPerSample;
+ return (bytesWritten * 8) / pWav->bitsPerSample / pWav->channels;
}
-drwav_uint64 drwav_write_pcm_frames(drwav* pWav, drwav_uint64 framesToWrite, const void* pData)
+drwav_uint64 drwav_write_pcm_frames_be(drwav* pWav, drwav_uint64 framesToWrite, const void* pData)
{
- return drwav_write(pWav, framesToWrite * pWav->channels, pData) / pWav->channels;
+ drwav_uint64 bytesToWrite;
+ drwav_uint64 bytesWritten;
+ drwav_uint32 bytesPerSample;
+ const drwav_uint8* pRunningData;
+
+ if (pWav == NULL || framesToWrite == 0 || pData == NULL) {
+ return 0;
+ }
+
+ bytesToWrite = ((framesToWrite * pWav->channels * pWav->bitsPerSample) / 8);
+ if (bytesToWrite > DRWAV_SIZE_MAX) {
+ return 0;
+ }
+
+ bytesWritten = 0;
+ pRunningData = (const drwav_uint8*)pData;
+
+ bytesPerSample = drwav_get_bytes_per_pcm_frame(pWav) / pWav->channels;
+
+ while (bytesToWrite > 0) {
+ drwav_uint8 temp[4096];
+ drwav_uint32 sampleCount;
+ size_t bytesJustWritten;
+ drwav_uint64 bytesToWriteThisIteration;
+
+ bytesToWriteThisIteration = bytesToWrite;
+ DRWAV_ASSERT(bytesToWriteThisIteration <= DRWAV_SIZE_MAX); /* <-- This is checked above. */
+
+ /*
+ WAV files are always little-endian. We need to byte swap on big-endian architectures. Since our input buffer is read-only we need
+ to use an intermediary buffer for the conversion.
+ */
+ sampleCount = sizeof(temp)/bytesPerSample;
+
+ if (bytesToWriteThisIteration > ((drwav_uint64)sampleCount)*bytesPerSample) {
+ bytesToWriteThisIteration = ((drwav_uint64)sampleCount)*bytesPerSample;
+ }
+
+ DRWAV_COPY_MEMORY(temp, pRunningData, (size_t)bytesToWriteThisIteration);
+ drwav__bswap_samples(temp, sampleCount, bytesPerSample, pWav->translatedFormatTag);
+
+ bytesJustWritten = drwav_write_raw(pWav, (size_t)bytesToWriteThisIteration, temp);
+ if (bytesJustWritten == 0) {
+ break;
+ }
+
+ bytesToWrite -= bytesJustWritten;
+ bytesWritten += bytesJustWritten;
+ pRunningData += bytesJustWritten;
+ }
+
+ return (bytesWritten * 8) / pWav->bitsPerSample / pWav->channels;
}
+drwav_uint64 drwav_write_pcm_frames(drwav* pWav, drwav_uint64 framesToWrite, const void* pData)
+{
+ if (drwav__is_little_endian()) {
+ return drwav_write_pcm_frames_le(pWav, framesToWrite, pData);
+ } else {
+ return drwav_write_pcm_frames_be(pWav, framesToWrite, pData);
+ }
+}
-drwav_uint64 drwav_read_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s16__msadpcm(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
{
- drwav_uint64 totalSamplesRead = 0;
+ drwav_uint64 totalFramesRead = 0;
- drwav_assert(pWav != NULL);
- drwav_assert(samplesToRead > 0);
- drwav_assert(pBufferOut != NULL);
+ DRWAV_ASSERT(pWav != NULL);
+ DRWAV_ASSERT(framesToRead > 0);
+ DRWAV_ASSERT(pBufferOut != NULL);
/* TODO: Lots of room for optimization here. */
- while (samplesToRead > 0 && pWav->compressed.iCurrentSample < pWav->totalSampleCount) {
- /* If there are no cached samples we need to load a new block. */
- if (pWav->msadpcm.cachedSampleCount == 0 && pWav->msadpcm.bytesRemainingInBlock == 0) {
+ while (framesToRead > 0 && pWav->compressed.iCurrentPCMFrame < pWav->totalPCMFrameCount) {
+ /* If there are no cached frames we need to load a new block. */
+ if (pWav->msadpcm.cachedFrameCount == 0 && pWav->msadpcm.bytesRemainingInBlock == 0) {
if (pWav->channels == 1) {
/* Mono. */
drwav_uint8 header[7];
if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) {
- return totalSamplesRead;
+ return totalFramesRead;
}
pWav->msadpcm.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header);
- pWav->msadpcm.predictor[0] = header[0];
- pWav->msadpcm.delta[0] = drwav__bytes_to_s16(header + 1);
- pWav->msadpcm.prevSamples[0][1] = (drwav_int32)drwav__bytes_to_s16(header + 3);
- pWav->msadpcm.prevSamples[0][0] = (drwav_int32)drwav__bytes_to_s16(header + 5);
- pWav->msadpcm.cachedSamples[2] = pWav->msadpcm.prevSamples[0][0];
- pWav->msadpcm.cachedSamples[3] = pWav->msadpcm.prevSamples[0][1];
- pWav->msadpcm.cachedSampleCount = 2;
+ pWav->msadpcm.predictor[0] = header[0];
+ pWav->msadpcm.delta[0] = drwav__bytes_to_s16(header + 1);
+ pWav->msadpcm.prevFrames[0][1] = (drwav_int32)drwav__bytes_to_s16(header + 3);
+ pWav->msadpcm.prevFrames[0][0] = (drwav_int32)drwav__bytes_to_s16(header + 5);
+ pWav->msadpcm.cachedFrames[2] = pWav->msadpcm.prevFrames[0][0];
+ pWav->msadpcm.cachedFrames[3] = pWav->msadpcm.prevFrames[0][1];
+ pWav->msadpcm.cachedFrameCount = 2;
} else {
/* Stereo. */
drwav_uint8 header[14];
if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) {
- return totalSamplesRead;
+ return totalFramesRead;
}
pWav->msadpcm.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header);
@@ -2636,32 +3204,35 @@ drwav_uint64 drwav_read_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, dr
pWav->msadpcm.predictor[1] = header[1];
pWav->msadpcm.delta[0] = drwav__bytes_to_s16(header + 2);
pWav->msadpcm.delta[1] = drwav__bytes_to_s16(header + 4);
- pWav->msadpcm.prevSamples[0][1] = (drwav_int32)drwav__bytes_to_s16(header + 6);
- pWav->msadpcm.prevSamples[1][1] = (drwav_int32)drwav__bytes_to_s16(header + 8);
- pWav->msadpcm.prevSamples[0][0] = (drwav_int32)drwav__bytes_to_s16(header + 10);
- pWav->msadpcm.prevSamples[1][0] = (drwav_int32)drwav__bytes_to_s16(header + 12);
-
- pWav->msadpcm.cachedSamples[0] = pWav->msadpcm.prevSamples[0][0];
- pWav->msadpcm.cachedSamples[1] = pWav->msadpcm.prevSamples[1][0];
- pWav->msadpcm.cachedSamples[2] = pWav->msadpcm.prevSamples[0][1];
- pWav->msadpcm.cachedSamples[3] = pWav->msadpcm.prevSamples[1][1];
- pWav->msadpcm.cachedSampleCount = 4;
+ pWav->msadpcm.prevFrames[0][1] = (drwav_int32)drwav__bytes_to_s16(header + 6);
+ pWav->msadpcm.prevFrames[1][1] = (drwav_int32)drwav__bytes_to_s16(header + 8);
+ pWav->msadpcm.prevFrames[0][0] = (drwav_int32)drwav__bytes_to_s16(header + 10);
+ pWav->msadpcm.prevFrames[1][0] = (drwav_int32)drwav__bytes_to_s16(header + 12);
+
+ pWav->msadpcm.cachedFrames[0] = pWav->msadpcm.prevFrames[0][0];
+ pWav->msadpcm.cachedFrames[1] = pWav->msadpcm.prevFrames[1][0];
+ pWav->msadpcm.cachedFrames[2] = pWav->msadpcm.prevFrames[0][1];
+ pWav->msadpcm.cachedFrames[3] = pWav->msadpcm.prevFrames[1][1];
+ pWav->msadpcm.cachedFrameCount = 2;
}
}
/* Output anything that's cached. */
- while (samplesToRead > 0 && pWav->msadpcm.cachedSampleCount > 0 && pWav->compressed.iCurrentSample < pWav->totalSampleCount) {
- pBufferOut[0] = (drwav_int16)pWav->msadpcm.cachedSamples[drwav_countof(pWav->msadpcm.cachedSamples) - pWav->msadpcm.cachedSampleCount];
- pWav->msadpcm.cachedSampleCount -= 1;
-
- pBufferOut += 1;
- samplesToRead -= 1;
- totalSamplesRead += 1;
- pWav->compressed.iCurrentSample += 1;
+ while (framesToRead > 0 && pWav->msadpcm.cachedFrameCount > 0 && pWav->compressed.iCurrentPCMFrame < pWav->totalPCMFrameCount) {
+ drwav_uint32 iSample = 0;
+ for (iSample = 0; iSample < pWav->channels; iSample += 1) {
+ pBufferOut[iSample] = (drwav_int16)pWav->msadpcm.cachedFrames[(drwav_countof(pWav->msadpcm.cachedFrames) - (pWav->msadpcm.cachedFrameCount*pWav->channels)) + iSample];
+ }
+
+ pBufferOut += pWav->channels;
+ framesToRead -= 1;
+ totalFramesRead += 1;
+ pWav->compressed.iCurrentPCMFrame += 1;
+ pWav->msadpcm.cachedFrameCount -= 1;
}
- if (samplesToRead == 0) {
- return totalSamplesRead;
+ if (framesToRead == 0) {
+ return totalFramesRead;
}
@@ -2669,7 +3240,7 @@ drwav_uint64 drwav_read_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, dr
If there's nothing left in the cache, just go ahead and load more. If there's nothing left to load in the current block we just continue to the next
loop iteration which will trigger the loading of a new block.
*/
- if (pWav->msadpcm.cachedSampleCount == 0) {
+ if (pWav->msadpcm.cachedFrameCount == 0) {
if (pWav->msadpcm.bytesRemainingInBlock == 0) {
continue;
} else {
@@ -2685,7 +3256,7 @@ drwav_uint64 drwav_read_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, dr
drwav_int32 nibble1;
if (pWav->onRead(pWav->pUserData, &nibbles, 1) != 1) {
- return totalSamplesRead;
+ return totalFramesRead;
}
pWav->msadpcm.bytesRemainingInBlock -= 1;
@@ -2698,7 +3269,7 @@ drwav_uint64 drwav_read_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, dr
drwav_int32 newSample0;
drwav_int32 newSample1;
- newSample0 = ((pWav->msadpcm.prevSamples[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevSamples[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8;
+ newSample0 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8;
newSample0 += nibble0 * pWav->msadpcm.delta[0];
newSample0 = drwav_clamp(newSample0, -32768, 32767);
@@ -2707,11 +3278,11 @@ drwav_uint64 drwav_read_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, dr
pWav->msadpcm.delta[0] = 16;
}
- pWav->msadpcm.prevSamples[0][0] = pWav->msadpcm.prevSamples[0][1];
- pWav->msadpcm.prevSamples[0][1] = newSample0;
+ pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1];
+ pWav->msadpcm.prevFrames[0][1] = newSample0;
- newSample1 = ((pWav->msadpcm.prevSamples[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevSamples[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8;
+ newSample1 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8;
newSample1 += nibble1 * pWav->msadpcm.delta[0];
newSample1 = drwav_clamp(newSample1, -32768, 32767);
@@ -2720,20 +3291,20 @@ drwav_uint64 drwav_read_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, dr
pWav->msadpcm.delta[0] = 16;
}
- pWav->msadpcm.prevSamples[0][0] = pWav->msadpcm.prevSamples[0][1];
- pWav->msadpcm.prevSamples[0][1] = newSample1;
+ pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1];
+ pWav->msadpcm.prevFrames[0][1] = newSample1;
- pWav->msadpcm.cachedSamples[2] = newSample0;
- pWav->msadpcm.cachedSamples[3] = newSample1;
- pWav->msadpcm.cachedSampleCount = 2;
+ pWav->msadpcm.cachedFrames[2] = newSample0;
+ pWav->msadpcm.cachedFrames[3] = newSample1;
+ pWav->msadpcm.cachedFrameCount = 2;
} else {
/* Stereo. */
drwav_int32 newSample0;
drwav_int32 newSample1;
/* Left. */
- newSample0 = ((pWav->msadpcm.prevSamples[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevSamples[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8;
+ newSample0 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8;
newSample0 += nibble0 * pWav->msadpcm.delta[0];
newSample0 = drwav_clamp(newSample0, -32768, 32767);
@@ -2742,12 +3313,12 @@ drwav_uint64 drwav_read_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, dr
pWav->msadpcm.delta[0] = 16;
}
- pWav->msadpcm.prevSamples[0][0] = pWav->msadpcm.prevSamples[0][1];
- pWav->msadpcm.prevSamples[0][1] = newSample0;
+ pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1];
+ pWav->msadpcm.prevFrames[0][1] = newSample0;
/* Right. */
- newSample1 = ((pWav->msadpcm.prevSamples[1][1] * coeff1Table[pWav->msadpcm.predictor[1]]) + (pWav->msadpcm.prevSamples[1][0] * coeff2Table[pWav->msadpcm.predictor[1]])) >> 8;
+ newSample1 = ((pWav->msadpcm.prevFrames[1][1] * coeff1Table[pWav->msadpcm.predictor[1]]) + (pWav->msadpcm.prevFrames[1][0] * coeff2Table[pWav->msadpcm.predictor[1]])) >> 8;
newSample1 += nibble1 * pWav->msadpcm.delta[1];
newSample1 = drwav_clamp(newSample1, -32768, 32767);
@@ -2756,50 +3327,51 @@ drwav_uint64 drwav_read_s16__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, dr
pWav->msadpcm.delta[1] = 16;
}
- pWav->msadpcm.prevSamples[1][0] = pWav->msadpcm.prevSamples[1][1];
- pWav->msadpcm.prevSamples[1][1] = newSample1;
+ pWav->msadpcm.prevFrames[1][0] = pWav->msadpcm.prevFrames[1][1];
+ pWav->msadpcm.prevFrames[1][1] = newSample1;
- pWav->msadpcm.cachedSamples[2] = newSample0;
- pWav->msadpcm.cachedSamples[3] = newSample1;
- pWav->msadpcm.cachedSampleCount = 2;
+ pWav->msadpcm.cachedFrames[2] = newSample0;
+ pWav->msadpcm.cachedFrames[3] = newSample1;
+ pWav->msadpcm.cachedFrameCount = 1;
}
}
}
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_s16__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut)
+
+drwav_uint64 drwav_read_pcm_frames_s16__ima(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
{
- drwav_uint64 totalSamplesRead = 0;
+ drwav_uint64 totalFramesRead = 0;
- drwav_assert(pWav != NULL);
- drwav_assert(samplesToRead > 0);
- drwav_assert(pBufferOut != NULL);
+ DRWAV_ASSERT(pWav != NULL);
+ DRWAV_ASSERT(framesToRead > 0);
+ DRWAV_ASSERT(pBufferOut != NULL);
/* TODO: Lots of room for optimization here. */
- while (samplesToRead > 0 && pWav->compressed.iCurrentSample < pWav->totalSampleCount) {
+ while (framesToRead > 0 && pWav->compressed.iCurrentPCMFrame < pWav->totalPCMFrameCount) {
/* If there are no cached samples we need to load a new block. */
- if (pWav->ima.cachedSampleCount == 0 && pWav->ima.bytesRemainingInBlock == 0) {
+ if (pWav->ima.cachedFrameCount == 0 && pWav->ima.bytesRemainingInBlock == 0) {
if (pWav->channels == 1) {
/* Mono. */
drwav_uint8 header[4];
if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) {
- return totalSamplesRead;
+ return totalFramesRead;
}
pWav->ima.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header);
pWav->ima.predictor[0] = drwav__bytes_to_s16(header + 0);
pWav->ima.stepIndex[0] = header[2];
- pWav->ima.cachedSamples[drwav_countof(pWav->ima.cachedSamples) - 1] = pWav->ima.predictor[0];
- pWav->ima.cachedSampleCount = 1;
+ pWav->ima.cachedFrames[drwav_countof(pWav->ima.cachedFrames) - 1] = pWav->ima.predictor[0];
+ pWav->ima.cachedFrameCount = 1;
} else {
/* Stereo. */
drwav_uint8 header[8];
if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) {
- return totalSamplesRead;
+ return totalFramesRead;
}
pWav->ima.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header);
@@ -2808,32 +3380,35 @@ drwav_uint64 drwav_read_s16__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_
pWav->ima.predictor[1] = drwav__bytes_to_s16(header + 4);
pWav->ima.stepIndex[1] = header[6];
- pWav->ima.cachedSamples[drwav_countof(pWav->ima.cachedSamples) - 2] = pWav->ima.predictor[0];
- pWav->ima.cachedSamples[drwav_countof(pWav->ima.cachedSamples) - 1] = pWav->ima.predictor[1];
- pWav->ima.cachedSampleCount = 2;
+ pWav->ima.cachedFrames[drwav_countof(pWav->ima.cachedFrames) - 2] = pWav->ima.predictor[0];
+ pWav->ima.cachedFrames[drwav_countof(pWav->ima.cachedFrames) - 1] = pWav->ima.predictor[1];
+ pWav->ima.cachedFrameCount = 1;
}
}
/* Output anything that's cached. */
- while (samplesToRead > 0 && pWav->ima.cachedSampleCount > 0 && pWav->compressed.iCurrentSample < pWav->totalSampleCount) {
- pBufferOut[0] = (drwav_int16)pWav->ima.cachedSamples[drwav_countof(pWav->ima.cachedSamples) - pWav->ima.cachedSampleCount];
- pWav->ima.cachedSampleCount -= 1;
-
- pBufferOut += 1;
- samplesToRead -= 1;
- totalSamplesRead += 1;
- pWav->compressed.iCurrentSample += 1;
+ while (framesToRead > 0 && pWav->ima.cachedFrameCount > 0 && pWav->compressed.iCurrentPCMFrame < pWav->totalPCMFrameCount) {
+ drwav_uint32 iSample;
+ for (iSample = 0; iSample < pWav->channels; iSample += 1) {
+ pBufferOut[iSample] = (drwav_int16)pWav->ima.cachedFrames[(drwav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + iSample];
+ }
+
+ pBufferOut += pWav->channels;
+ framesToRead -= 1;
+ totalFramesRead += 1;
+ pWav->compressed.iCurrentPCMFrame += 1;
+ pWav->ima.cachedFrameCount -= 1;
}
- if (samplesToRead == 0) {
- return totalSamplesRead;
+ if (framesToRead == 0) {
+ return totalFramesRead;
}
/*
If there's nothing left in the cache, just go ahead and load more. If there's nothing left to load in the current block we just continue to the next
loop iteration which will trigger the loading of a new block.
*/
- if (pWav->ima.cachedSampleCount == 0) {
+ if (pWav->ima.cachedFrameCount == 0) {
if (pWav->ima.bytesRemainingInBlock == 0) {
continue;
} else {
@@ -2842,7 +3417,7 @@ drwav_uint64 drwav_read_s16__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_
-1, -1, -1, -1, 2, 4, 6, 8
};
- static drwav_int32 stepTable[89] = {
+ static drwav_int32 stepTable[89] = {
7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
@@ -2860,12 +3435,13 @@ drwav_uint64 drwav_read_s16__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_
From what I can tell with stereo streams, it looks like every 4 bytes (8 samples) is for one channel. So it goes 4 bytes for the
left channel, 4 bytes for the right channel.
*/
- pWav->ima.cachedSampleCount = 8 * pWav->channels;
+ pWav->ima.cachedFrameCount = 8;
for (iChannel = 0; iChannel < pWav->channels; ++iChannel) {
drwav_uint32 iByte;
drwav_uint8 nibbles[4];
if (pWav->onRead(pWav->pUserData, &nibbles, 4) != 4) {
- return totalSamplesRead;
+ pWav->ima.cachedFrameCount = 0;
+ return totalFramesRead;
}
pWav->ima.bytesRemainingInBlock -= 4;
@@ -2885,7 +3461,7 @@ drwav_uint64 drwav_read_s16__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_
predictor = drwav_clamp(predictor + diff, -32768, 32767);
pWav->ima.predictor[iChannel] = predictor;
pWav->ima.stepIndex[iChannel] = drwav_clamp(pWav->ima.stepIndex[iChannel] + indexTable[nibble0], 0, (drwav_int32)drwav_countof(stepTable)-1);
- pWav->ima.cachedSamples[(drwav_countof(pWav->ima.cachedSamples) - pWav->ima.cachedSampleCount) + (iByte*2+0)*pWav->channels + iChannel] = predictor;
+ pWav->ima.cachedFrames[(drwav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + (iByte*2+0)*pWav->channels + iChannel] = predictor;
step = stepTable[pWav->ima.stepIndex[iChannel]];
@@ -2900,14 +3476,14 @@ drwav_uint64 drwav_read_s16__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_
predictor = drwav_clamp(predictor + diff, -32768, 32767);
pWav->ima.predictor[iChannel] = predictor;
pWav->ima.stepIndex[iChannel] = drwav_clamp(pWav->ima.stepIndex[iChannel] + indexTable[nibble1], 0, (drwav_int32)drwav_countof(stepTable)-1);
- pWav->ima.cachedSamples[(drwav_countof(pWav->ima.cachedSamples) - pWav->ima.cachedSampleCount) + (iByte*2+1)*pWav->channels + iChannel] = predictor;
+ pWav->ima.cachedFrames[(drwav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + (iByte*2+1)*pWav->channels + iChannel] = predictor;
}
}
}
}
}
- return totalSamplesRead;
+ return totalFramesRead;
}
@@ -2992,7 +3568,7 @@ static void drwav__pcm_to_s16(drwav_int16* pOut, const unsigned char* pIn, size_
/* Anything more than 64 bits per sample is not supported. */
if (bytesPerSample > 8) {
- drwav_zero_memory(pOut, totalSampleCount * sizeof(*pOut));
+ DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut));
return;
}
@@ -3003,7 +3579,8 @@ static void drwav__pcm_to_s16(drwav_int16* pOut, const unsigned char* pIn, size_
unsigned int shift = (8 - bytesPerSample) * 8;
unsigned int j;
- for (j = 0; j < bytesPerSample && j < 8; j += 1) {
+ for (j = 0; j < bytesPerSample; j += 1) {
+ DRWAV_ASSERT(j < 8);
sample |= (drwav_uint64)(pIn[j]) << shift;
shift += 8;
}
@@ -3023,172 +3600,188 @@ static void drwav__ieee_to_s16(drwav_int16* pOut, const unsigned char* pIn, size
return;
} else {
/* Only supporting 32- and 64-bit float. Output silence in all other cases. Contributions welcome for 16-bit float. */
- drwav_zero_memory(pOut, totalSampleCount * sizeof(*pOut));
+ DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut));
return;
}
}
-drwav_uint64 drwav_read_s16__pcm(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s16__pcm(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
{
- drwav_uint32 bytesPerSample;
- drwav_uint64 totalSamplesRead;
+ drwav_uint32 bytesPerFrame;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
/* Fast path. */
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM && pWav->bitsPerSample == 16) {
- return drwav_read(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames(pWav, framesToRead, pBufferOut);
}
- bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav__pcm_to_s16(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample);
+ drwav__pcm_to_s16(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels), bytesPerFrame/pWav->channels);
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_s16__ieee(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s16__ieee(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
{
- drwav_uint64 totalSamplesRead;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
- drwav_uint32 bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav__ieee_to_s16(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample);
+ drwav__ieee_to_s16(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels), bytesPerFrame/pWav->channels);
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_s16__alaw(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s16__alaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
{
- drwav_uint64 totalSamplesRead;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
- drwav_uint32 bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav_alaw_to_s16(pBufferOut, sampleData, (size_t)samplesRead);
+ drwav_alaw_to_s16(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels));
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_s16__mulaw(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s16__mulaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
{
- drwav_uint64 totalSamplesRead;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
- drwav_uint32 bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav_mulaw_to_s16(pBufferOut, sampleData, (size_t)samplesRead);
+ drwav_mulaw_to_s16(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels));
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_s16(drwav* pWav, drwav_uint64 samplesToRead, drwav_int16* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s16(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
{
- if (pWav == NULL || samplesToRead == 0 || pBufferOut == NULL) {
+ if (pWav == NULL || framesToRead == 0 || pBufferOut == NULL) {
return 0;
}
/* Don't try to read more samples than can potentially fit in the output buffer. */
- if (samplesToRead * sizeof(drwav_int16) > DRWAV_SIZE_MAX) {
- samplesToRead = DRWAV_SIZE_MAX / sizeof(drwav_int16);
+ if (framesToRead * pWav->channels * sizeof(drwav_int16) > DRWAV_SIZE_MAX) {
+ framesToRead = DRWAV_SIZE_MAX / sizeof(drwav_int16) / pWav->channels;
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM) {
- return drwav_read_s16__pcm(pWav, samplesToRead, pBufferOut);
- }
-
- if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) {
- return drwav_read_s16__msadpcm(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_s16__pcm(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT) {
- return drwav_read_s16__ieee(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_s16__ieee(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ALAW) {
- return drwav_read_s16__alaw(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_s16__alaw(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_MULAW) {
- return drwav_read_s16__mulaw(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_s16__mulaw(pWav, framesToRead, pBufferOut);
+ }
+
+ if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) {
+ return drwav_read_pcm_frames_s16__msadpcm(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) {
- return drwav_read_s16__ima(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_s16__ima(pWav, framesToRead, pBufferOut);
+ }
+
+ return 0;
+}
+
+drwav_uint64 drwav_read_pcm_frames_s16le(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
+{
+ drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, framesToRead, pBufferOut);
+ if (!drwav__is_little_endian()) {
+ drwav__bswap_samples_s16(pBufferOut, framesRead*pWav->channels);
}
- return 0;
+ return framesRead;
}
-drwav_uint64 drwav_read_pcm_frames_s16(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s16be(drwav* pWav, drwav_uint64 framesToRead, drwav_int16* pBufferOut)
{
- return drwav_read_s16(pWav, framesToRead * pWav->channels, pBufferOut) / pWav->channels;
+ drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, framesToRead, pBufferOut);
+ if (drwav__is_little_endian()) {
+ drwav__bswap_samples_s16(pBufferOut, framesRead*pWav->channels);
+ }
+
+ return framesRead;
}
+
void drwav_u8_to_s16(drwav_int16* pOut, const drwav_uint8* pIn, size_t sampleCount)
{
int r;
@@ -3298,7 +3891,7 @@ static void drwav__pcm_to_f32(float* pOut, const unsigned char* pIn, size_t samp
/* Anything more than 64 bits per sample is not supported. */
if (bytesPerSample > 8) {
- drwav_zero_memory(pOut, sampleCount * sizeof(*pOut));
+ DRWAV_ZERO_MEMORY(pOut, sampleCount * sizeof(*pOut));
return;
}
@@ -3309,7 +3902,8 @@ static void drwav__pcm_to_f32(float* pOut, const unsigned char* pIn, size_t samp
unsigned int shift = (8 - bytesPerSample) * 8;
unsigned int j;
- for (j = 0; j < bytesPerSample && j < 8; j += 1) {
+ for (j = 0; j < bytesPerSample; j += 1) {
+ DRWAV_ASSERT(j < 8);
sample |= (drwav_uint64)(pIn[j]) << shift;
shift += 8;
}
@@ -3332,220 +3926,236 @@ static void drwav__ieee_to_f32(float* pOut, const unsigned char* pIn, size_t sam
return;
} else {
/* Only supporting 32- and 64-bit float. Output silence in all other cases. Contributions welcome for 16-bit float. */
- drwav_zero_memory(pOut, sampleCount * sizeof(*pOut));
+ DRWAV_ZERO_MEMORY(pOut, sampleCount * sizeof(*pOut));
return;
}
}
-drwav_uint64 drwav_read_f32__pcm(drwav* pWav, drwav_uint64 samplesToRead, float* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_f32__pcm(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
{
- drwav_uint64 totalSamplesRead;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
- drwav_uint32 bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav__pcm_to_f32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample);
- pBufferOut += samplesRead;
+ drwav__pcm_to_f32(pBufferOut, sampleData, (size_t)framesRead*pWav->channels, bytesPerFrame/pWav->channels);
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_f32__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, float* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_f32__msadpcm(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
{
/*
We're just going to borrow the implementation from the drwav_read_s16() since ADPCM is a little bit more complicated than other formats and I don't
want to duplicate that code.
*/
- drwav_uint64 totalSamplesRead = 0;
+ drwav_uint64 totalFramesRead = 0;
drwav_int16 samples16[2048];
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read_s16(pWav, drwav_min(samplesToRead, 2048), samples16);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, drwav_min(framesToRead, drwav_countof(samples16)/pWav->channels), samples16);
+ if (framesRead == 0) {
break;
}
- drwav_s16_to_f32(pBufferOut, samples16, (size_t)samplesRead); /* <-- Safe cast because we're clamping to 2048. */
+ drwav_s16_to_f32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); /* <-- Safe cast because we're clamping to 2048. */
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_f32__ima(drwav* pWav, drwav_uint64 samplesToRead, float* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_f32__ima(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
{
/*
We're just going to borrow the implementation from the drwav_read_s16() since IMA-ADPCM is a little bit more complicated than other formats and I don't
want to duplicate that code.
*/
- drwav_uint64 totalSamplesRead = 0;
+ drwav_uint64 totalFramesRead = 0;
drwav_int16 samples16[2048];
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read_s16(pWav, drwav_min(samplesToRead, 2048), samples16);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, drwav_min(framesToRead, drwav_countof(samples16)/pWav->channels), samples16);
+ if (framesRead == 0) {
break;
}
- drwav_s16_to_f32(pBufferOut, samples16, (size_t)samplesRead); /* <-- Safe cast because we're clamping to 2048. */
+ drwav_s16_to_f32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); /* <-- Safe cast because we're clamping to 2048. */
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_f32__ieee(drwav* pWav, drwav_uint64 samplesToRead, float* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_f32__ieee(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
{
- drwav_uint64 totalSamplesRead;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
- drwav_uint32 bytesPerSample;
+ drwav_uint32 bytesPerFrame;
/* Fast path. */
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT && pWav->bitsPerSample == 32) {
- return drwav_read(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames(pWav, framesToRead, pBufferOut);
}
- bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav__ieee_to_f32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample);
+ drwav__ieee_to_f32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels), bytesPerFrame/pWav->channels);
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_f32__alaw(drwav* pWav, drwav_uint64 samplesToRead, float* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_f32__alaw(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
{
- drwav_uint64 totalSamplesRead;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
- drwav_uint32 bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav_alaw_to_f32(pBufferOut, sampleData, (size_t)samplesRead);
+ drwav_alaw_to_f32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels));
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_f32__mulaw(drwav* pWav, drwav_uint64 samplesToRead, float* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_f32__mulaw(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
{
- drwav_uint64 totalSamplesRead;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
- drwav_uint32 bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav_mulaw_to_f32(pBufferOut, sampleData, (size_t)samplesRead);
+ drwav_mulaw_to_f32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels));
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_f32(drwav* pWav, drwav_uint64 samplesToRead, float* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_f32(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
{
- if (pWav == NULL || samplesToRead == 0 || pBufferOut == NULL) {
+ if (pWav == NULL || framesToRead == 0 || pBufferOut == NULL) {
return 0;
}
/* Don't try to read more samples than can potentially fit in the output buffer. */
- if (samplesToRead * sizeof(float) > DRWAV_SIZE_MAX) {
- samplesToRead = DRWAV_SIZE_MAX / sizeof(float);
+ if (framesToRead * pWav->channels * sizeof(float) > DRWAV_SIZE_MAX) {
+ framesToRead = DRWAV_SIZE_MAX / sizeof(float) / pWav->channels;
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM) {
- return drwav_read_f32__pcm(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_f32__pcm(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) {
- return drwav_read_f32__msadpcm(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_f32__msadpcm(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT) {
- return drwav_read_f32__ieee(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_f32__ieee(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ALAW) {
- return drwav_read_f32__alaw(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_f32__alaw(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_MULAW) {
- return drwav_read_f32__mulaw(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_f32__mulaw(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) {
- return drwav_read_f32__ima(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_f32__ima(pWav, framesToRead, pBufferOut);
}
return 0;
}
-drwav_uint64 drwav_read_pcm_frames_f32(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_f32le(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
+{
+ drwav_uint64 framesRead = drwav_read_pcm_frames_f32(pWav, framesToRead, pBufferOut);
+ if (!drwav__is_little_endian()) {
+ drwav__bswap_samples_f32(pBufferOut, framesRead*pWav->channels);
+ }
+
+ return framesRead;
+}
+
+drwav_uint64 drwav_read_pcm_frames_f32be(drwav* pWav, drwav_uint64 framesToRead, float* pBufferOut)
{
- return drwav_read_f32(pWav, framesToRead * pWav->channels, pBufferOut) / pWav->channels;
+ drwav_uint64 framesRead = drwav_read_pcm_frames_f32(pWav, framesToRead, pBufferOut);
+ if (drwav__is_little_endian()) {
+ drwav__bswap_samples_f32(pBufferOut, framesRead*pWav->channels);
+ }
+
+ return framesRead;
}
+
void drwav_u8_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount)
{
size_t i;
@@ -3566,7 +4176,11 @@ void drwav_u8_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount)
}
#else
for (i = 0; i < sampleCount; ++i) {
- *pOut++ = (pIn[i] / 255.0f) * 2 - 1;
+ float x = pIn[i];
+ x = x * 0.00784313725490196078f; /* 0..255 to 0..2 */
+ x = x - 1; /* 0..2 to -1..1 */
+
+ *pOut++ = x;
}
#endif
}
@@ -3580,7 +4194,7 @@ void drwav_s16_to_f32(float* pOut, const drwav_int16* pIn, size_t sampleCount)
}
for (i = 0; i < sampleCount; ++i) {
- *pOut++ = pIn[i] / 32768.0f;
+ *pOut++ = pIn[i] * 0.000030517578125f;
}
}
@@ -3593,12 +4207,8 @@ void drwav_s24_to_f32(float* pOut, const drwav_uint8* pIn, size_t sampleCount)
}
for (i = 0; i < sampleCount; ++i) {
- unsigned int s0 = pIn[i*3 + 0];
- unsigned int s1 = pIn[i*3 + 1];
- unsigned int s2 = pIn[i*3 + 2];
-
- int sample32 = (int)((s0 << 8) | (s1 << 16) | (s2 << 24));
- *pOut++ = (float)(sample32 / 2147483648.0);
+ double x = (double)(((drwav_int32)(((drwav_uint32)(pIn[i*3+0]) << 8) | ((drwav_uint32)(pIn[i*3+1]) << 16) | ((drwav_uint32)(pIn[i*3+2])) << 24)) >> 8);
+ *pOut++ = (float)(x * 0.00000011920928955078125);
}
}
@@ -3684,7 +4294,7 @@ static void drwav__pcm_to_s32(drwav_int32* pOut, const unsigned char* pIn, size_
/* Anything more than 64 bits per sample is not supported. */
if (bytesPerSample > 8) {
- drwav_zero_memory(pOut, totalSampleCount * sizeof(*pOut));
+ DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut));
return;
}
@@ -3695,7 +4305,8 @@ static void drwav__pcm_to_s32(drwav_int32* pOut, const unsigned char* pIn, size_
unsigned int shift = (8 - bytesPerSample) * 8;
unsigned int j;
- for (j = 0; j < bytesPerSample && j < 8; j += 1) {
+ for (j = 0; j < bytesPerSample; j += 1) {
+ DRWAV_ASSERT(j < 8);
sample |= (drwav_uint64)(pIn[j]) << shift;
shift += 8;
}
@@ -3715,222 +4326,238 @@ static void drwav__ieee_to_s32(drwav_int32* pOut, const unsigned char* pIn, size
return;
} else {
/* Only supporting 32- and 64-bit float. Output silence in all other cases. Contributions welcome for 16-bit float. */
- drwav_zero_memory(pOut, totalSampleCount * sizeof(*pOut));
+ DRWAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut));
return;
}
}
-drwav_uint64 drwav_read_s32__pcm(drwav* pWav, drwav_uint64 samplesToRead, drwav_int32* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s32__pcm(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut)
{
- drwav_uint64 totalSamplesRead;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
- drwav_uint32 bytesPerSample;
+ drwav_uint32 bytesPerFrame;
/* Fast path. */
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM && pWav->bitsPerSample == 32) {
- return drwav_read(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames(pWav, framesToRead, pBufferOut);
}
- bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav__pcm_to_s32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample);
+ drwav__pcm_to_s32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels), bytesPerFrame/pWav->channels);
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_s32__msadpcm(drwav* pWav, drwav_uint64 samplesToRead, drwav_int32* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s32__msadpcm(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut)
{
/*
We're just going to borrow the implementation from the drwav_read_s16() since ADPCM is a little bit more complicated than other formats and I don't
want to duplicate that code.
*/
- drwav_uint64 totalSamplesRead = 0;
+ drwav_uint64 totalFramesRead = 0;
drwav_int16 samples16[2048];
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read_s16(pWav, drwav_min(samplesToRead, 2048), samples16);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, drwav_min(framesToRead, drwav_countof(samples16)/pWav->channels), samples16);
+ if (framesRead == 0) {
break;
}
- drwav_s16_to_s32(pBufferOut, samples16, (size_t)samplesRead); /* <-- Safe cast because we're clamping to 2048. */
+ drwav_s16_to_s32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); /* <-- Safe cast because we're clamping to 2048. */
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_s32__ima(drwav* pWav, drwav_uint64 samplesToRead, drwav_int32* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s32__ima(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut)
{
/*
We're just going to borrow the implementation from the drwav_read_s16() since IMA-ADPCM is a little bit more complicated than other formats and I don't
want to duplicate that code.
*/
- drwav_uint64 totalSamplesRead = 0;
+ drwav_uint64 totalFramesRead = 0;
drwav_int16 samples16[2048];
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read_s16(pWav, drwav_min(samplesToRead, 2048), samples16);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames_s16(pWav, drwav_min(framesToRead, drwav_countof(samples16)/pWav->channels), samples16);
+ if (framesRead == 0) {
break;
}
- drwav_s16_to_s32(pBufferOut, samples16, (size_t)samplesRead); /* <-- Safe cast because we're clamping to 2048. */
+ drwav_s16_to_s32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); /* <-- Safe cast because we're clamping to 2048. */
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_s32__ieee(drwav* pWav, drwav_uint64 samplesToRead, drwav_int32* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s32__ieee(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut)
{
- drwav_uint64 totalSamplesRead;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
- drwav_uint32 bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav__ieee_to_s32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample);
+ drwav__ieee_to_s32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels), bytesPerFrame/pWav->channels);
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_s32__alaw(drwav* pWav, drwav_uint64 samplesToRead, drwav_int32* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s32__alaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut)
{
- drwav_uint64 totalSamplesRead;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
- drwav_uint32 bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav_alaw_to_s32(pBufferOut, sampleData, (size_t)samplesRead);
+ drwav_alaw_to_s32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels));
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_s32__mulaw(drwav* pWav, drwav_uint64 samplesToRead, drwav_int32* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s32__mulaw(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut)
{
- drwav_uint64 totalSamplesRead;
+ drwav_uint64 totalFramesRead;
unsigned char sampleData[4096];
- drwav_uint32 bytesPerSample = drwav_get_bytes_per_sample(pWav);
- if (bytesPerSample == 0) {
+ drwav_uint32 bytesPerFrame = drwav_get_bytes_per_pcm_frame(pWav);
+ if (bytesPerFrame == 0) {
return 0;
}
- totalSamplesRead = 0;
+ totalFramesRead = 0;
- while (samplesToRead > 0) {
- drwav_uint64 samplesRead = drwav_read(pWav, drwav_min(samplesToRead, sizeof(sampleData)/bytesPerSample), sampleData);
- if (samplesRead == 0) {
+ while (framesToRead > 0) {
+ drwav_uint64 framesRead = drwav_read_pcm_frames(pWav, drwav_min(framesToRead, sizeof(sampleData)/bytesPerFrame), sampleData);
+ if (framesRead == 0) {
break;
}
- drwav_mulaw_to_s32(pBufferOut, sampleData, (size_t)samplesRead);
+ drwav_mulaw_to_s32(pBufferOut, sampleData, (size_t)(framesRead*pWav->channels));
- pBufferOut += samplesRead;
- samplesToRead -= samplesRead;
- totalSamplesRead += samplesRead;
+ pBufferOut += framesRead*pWav->channels;
+ framesToRead -= framesRead;
+ totalFramesRead += framesRead;
}
- return totalSamplesRead;
+ return totalFramesRead;
}
-drwav_uint64 drwav_read_s32(drwav* pWav, drwav_uint64 samplesToRead, drwav_int32* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s32(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut)
{
- if (pWav == NULL || samplesToRead == 0 || pBufferOut == NULL) {
+ if (pWav == NULL || framesToRead == 0 || pBufferOut == NULL) {
return 0;
}
/* Don't try to read more samples than can potentially fit in the output buffer. */
- if (samplesToRead * sizeof(drwav_int32) > DRWAV_SIZE_MAX) {
- samplesToRead = DRWAV_SIZE_MAX / sizeof(drwav_int32);
+ if (framesToRead * pWav->channels * sizeof(drwav_int32) > DRWAV_SIZE_MAX) {
+ framesToRead = DRWAV_SIZE_MAX / sizeof(drwav_int32) / pWav->channels;
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_PCM) {
- return drwav_read_s32__pcm(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_s32__pcm(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ADPCM) {
- return drwav_read_s32__msadpcm(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_s32__msadpcm(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_IEEE_FLOAT) {
- return drwav_read_s32__ieee(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_s32__ieee(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_ALAW) {
- return drwav_read_s32__alaw(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_s32__alaw(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_MULAW) {
- return drwav_read_s32__mulaw(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_s32__mulaw(pWav, framesToRead, pBufferOut);
}
if (pWav->translatedFormatTag == DR_WAVE_FORMAT_DVI_ADPCM) {
- return drwav_read_s32__ima(pWav, samplesToRead, pBufferOut);
+ return drwav_read_pcm_frames_s32__ima(pWav, framesToRead, pBufferOut);
}
return 0;
}
-drwav_uint64 drwav_read_pcm_frames_s32(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut)
+drwav_uint64 drwav_read_pcm_frames_s32le(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut)
+{
+ drwav_uint64 framesRead = drwav_read_pcm_frames_s32(pWav, framesToRead, pBufferOut);
+ if (!drwav__is_little_endian()) {
+ drwav__bswap_samples_s32(pBufferOut, framesRead*pWav->channels);
+ }
+
+ return framesRead;
+}
+
+drwav_uint64 drwav_read_pcm_frames_s32be(drwav* pWav, drwav_uint64 framesToRead, drwav_int32* pBufferOut)
{
- return drwav_read_s32(pWav, framesToRead * pWav->channels, pBufferOut) / pWav->channels;
+ drwav_uint64 framesRead = drwav_read_pcm_frames_s32(pWav, framesToRead, pBufferOut);
+ if (drwav__is_little_endian()) {
+ drwav__bswap_samples_s32(pBufferOut, framesRead*pWav->channels);
+ }
+
+ return framesRead;
}
+
void drwav_u8_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sampleCount)
{
size_t i;
@@ -4029,29 +4656,29 @@ void drwav_mulaw_to_s32(drwav_int32* pOut, const drwav_uint8* pIn, size_t sample
-drwav_int16* drwav__read_and_close_s16(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
+drwav_int16* drwav__read_pcm_frames_and_close_s16(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount)
{
drwav_uint64 sampleDataSize;
drwav_int16* pSampleData;
- drwav_uint64 samplesRead;
+ drwav_uint64 framesRead;
- drwav_assert(pWav != NULL);
+ DRWAV_ASSERT(pWav != NULL);
- sampleDataSize = pWav->totalSampleCount * sizeof(drwav_int16);
+ sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(drwav_int16);
if (sampleDataSize > DRWAV_SIZE_MAX) {
drwav_uninit(pWav);
return NULL; /* File's too big. */
}
- pSampleData = (drwav_int16*)DRWAV_MALLOC((size_t)sampleDataSize); /* <-- Safe cast due to the check above. */
+ pSampleData = (drwav_int16*)drwav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); /* <-- Safe cast due to the check above. */
if (pSampleData == NULL) {
drwav_uninit(pWav);
return NULL; /* Failed to allocate memory. */
}
- samplesRead = drwav_read_s16(pWav, (size_t)pWav->totalSampleCount, pSampleData);
- if (samplesRead != pWav->totalSampleCount) {
- DRWAV_FREE(pSampleData);
+ framesRead = drwav_read_pcm_frames_s16(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData);
+ if (framesRead != pWav->totalPCMFrameCount) {
+ drwav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks);
drwav_uninit(pWav);
return NULL; /* There was an error reading the samples. */
}
@@ -4064,36 +4691,36 @@ drwav_int16* drwav__read_and_close_s16(drwav* pWav, unsigned int* channels, unsi
if (channels) {
*channels = pWav->channels;
}
- if (totalSampleCount) {
- *totalSampleCount = pWav->totalSampleCount;
+ if (totalFrameCount) {
+ *totalFrameCount = pWav->totalPCMFrameCount;
}
return pSampleData;
}
-float* drwav__read_and_close_f32(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
+float* drwav__read_pcm_frames_and_close_f32(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount)
{
drwav_uint64 sampleDataSize;
float* pSampleData;
- drwav_uint64 samplesRead;
+ drwav_uint64 framesRead;
- drwav_assert(pWav != NULL);
+ DRWAV_ASSERT(pWav != NULL);
- sampleDataSize = pWav->totalSampleCount * sizeof(float);
+ sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(float);
if (sampleDataSize > DRWAV_SIZE_MAX) {
drwav_uninit(pWav);
return NULL; /* File's too big. */
}
- pSampleData = (float*)DRWAV_MALLOC((size_t)sampleDataSize); /* <-- Safe cast due to the check above. */
+ pSampleData = (float*)drwav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); /* <-- Safe cast due to the check above. */
if (pSampleData == NULL) {
drwav_uninit(pWav);
return NULL; /* Failed to allocate memory. */
}
- samplesRead = drwav_read_f32(pWav, (size_t)pWav->totalSampleCount, pSampleData);
- if (samplesRead != pWav->totalSampleCount) {
- DRWAV_FREE(pSampleData);
+ framesRead = drwav_read_pcm_frames_f32(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData);
+ if (framesRead != pWav->totalPCMFrameCount) {
+ drwav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks);
drwav_uninit(pWav);
return NULL; /* There was an error reading the samples. */
}
@@ -4106,36 +4733,36 @@ float* drwav__read_and_close_f32(drwav* pWav, unsigned int* channels, unsigned i
if (channels) {
*channels = pWav->channels;
}
- if (totalSampleCount) {
- *totalSampleCount = pWav->totalSampleCount;
+ if (totalFrameCount) {
+ *totalFrameCount = pWav->totalPCMFrameCount;
}
return pSampleData;
}
-drwav_int32* drwav__read_and_close_s32(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
+drwav_int32* drwav__read_pcm_frames_and_close_s32(drwav* pWav, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalFrameCount)
{
drwav_uint64 sampleDataSize;
drwav_int32* pSampleData;
- drwav_uint64 samplesRead;
+ drwav_uint64 framesRead;
- drwav_assert(pWav != NULL);
+ DRWAV_ASSERT(pWav != NULL);
- sampleDataSize = pWav->totalSampleCount * sizeof(drwav_int32);
+ sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(drwav_int32);
if (sampleDataSize > DRWAV_SIZE_MAX) {
drwav_uninit(pWav);
return NULL; /* File's too big. */
}
- pSampleData = (drwav_int32*)DRWAV_MALLOC((size_t)sampleDataSize); /* <-- Safe cast due to the check above. */
+ pSampleData = (drwav_int32*)drwav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); /* <-- Safe cast due to the check above. */
if (pSampleData == NULL) {
drwav_uninit(pWav);
return NULL; /* Failed to allocate memory. */
}
- samplesRead = drwav_read_s32(pWav, (size_t)pWav->totalSampleCount, pSampleData);
- if (samplesRead != pWav->totalSampleCount) {
- DRWAV_FREE(pSampleData);
+ framesRead = drwav_read_pcm_frames_s32(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData);
+ if (framesRead != pWav->totalPCMFrameCount) {
+ drwav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks);
drwav_uninit(pWav);
return NULL; /* There was an error reading the samples. */
}
@@ -4148,41 +4775,18 @@ drwav_int32* drwav__read_and_close_s32(drwav* pWav, unsigned int* channels, unsi
if (channels) {
*channels = pWav->channels;
}
- if (totalSampleCount) {
- *totalSampleCount = pWav->totalSampleCount;
+ if (totalFrameCount) {
+ *totalFrameCount = pWav->totalPCMFrameCount;
}
return pSampleData;
}
-drwav_int16* drwav_open_and_read_s16(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
-{
- drwav wav;
-
- if (channels) {
- *channels = 0;
- }
- if (sampleRate) {
- *sampleRate = 0;
- }
- if (totalSampleCount) {
- *totalSampleCount = 0;
- }
-
- if (!drwav_init(&wav, onRead, onSeek, pUserData)) {
- return NULL;
- }
-
- return drwav__read_and_close_s16(&wav, channels, sampleRate, totalSampleCount);
-}
-drwav_int16* drwav_open_and_read_pcm_frames_s16(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut)
+drwav_int16* drwav_open_and_read_pcm_frames_s16(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
- unsigned int channels;
- unsigned int sampleRate;
- drwav_uint64 totalSampleCount;
- drwav_int16* result;
+ drwav wav;
if (channelsOut) {
*channelsOut = 0;
@@ -4194,52 +4798,17 @@ drwav_int16* drwav_open_and_read_pcm_frames_s16(drwav_read_proc onRead, drwav_se
*totalFrameCountOut = 0;
}
- result = drwav_open_and_read_s16(onRead, onSeek, pUserData, &channels, &sampleRate, &totalSampleCount);
- if (result == NULL) {
+ if (!drwav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) {
return NULL;
}
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalFrameCountOut) {
- *totalFrameCountOut = totalSampleCount / channels;
- }
-
- return result;
+ return drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
-float* drwav_open_and_read_f32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
+float* drwav_open_and_read_pcm_frames_f32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
drwav wav;
- if (sampleRate) {
- *sampleRate = 0;
- }
- if (channels) {
- *channels = 0;
- }
- if (totalSampleCount) {
- *totalSampleCount = 0;
- }
-
- if (!drwav_init(&wav, onRead, onSeek, pUserData)) {
- return NULL;
- }
-
- return drwav__read_and_close_f32(&wav, channels, sampleRate, totalSampleCount);
-}
-
-float* drwav_open_and_read_pcm_frames_f32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drwav_uint64 totalSampleCount;
- float* result;
-
if (channelsOut) {
*channelsOut = 0;
}
@@ -4250,52 +4819,17 @@ float* drwav_open_and_read_pcm_frames_f32(drwav_read_proc onRead, drwav_seek_pro
*totalFrameCountOut = 0;
}
- result = drwav_open_and_read_f32(onRead, onSeek, pUserData, &channels, &sampleRate, &totalSampleCount);
- if (result == NULL) {
+ if (!drwav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) {
return NULL;
}
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalFrameCountOut) {
- *totalFrameCountOut = totalSampleCount / channels;
- }
-
- return result;
+ return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
-drwav_int32* drwav_open_and_read_s32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
+drwav_int32* drwav_open_and_read_pcm_frames_s32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
drwav wav;
- if (sampleRate) {
- *sampleRate = 0;
- }
- if (channels) {
- *channels = 0;
- }
- if (totalSampleCount) {
- *totalSampleCount = 0;
- }
-
- if (!drwav_init(&wav, onRead, onSeek, pUserData)) {
- return NULL;
- }
-
- return drwav__read_and_close_s32(&wav, channels, sampleRate, totalSampleCount);
-}
-
-drwav_int32* drwav_open_and_read_pcm_frames_s32(drwav_read_proc onRead, drwav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drwav_uint64 totalSampleCount;
- drwav_int32* result;
-
if (channelsOut) {
*channelsOut = 0;
}
@@ -4306,53 +4840,18 @@ drwav_int32* drwav_open_and_read_pcm_frames_s32(drwav_read_proc onRead, drwav_se
*totalFrameCountOut = 0;
}
- result = drwav_open_and_read_s32(onRead, onSeek, pUserData, &channels, &sampleRate, &totalSampleCount);
- if (result == NULL) {
+ if (!drwav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) {
return NULL;
}
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalFrameCountOut) {
- *totalFrameCountOut = totalSampleCount / channels;
- }
-
- return result;
+ return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
#ifndef DR_WAV_NO_STDIO
-drwav_int16* drwav_open_file_and_read_s16(const char* filename, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
+drwav_int16* drwav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
drwav wav;
- if (sampleRate) {
- *sampleRate = 0;
- }
- if (channels) {
- *channels = 0;
- }
- if (totalSampleCount) {
- *totalSampleCount = 0;
- }
-
- if (!drwav_init_file(&wav, filename)) {
- return NULL;
- }
-
- return drwav__read_and_close_s16(&wav, channels, sampleRate, totalSampleCount);
-}
-
-drwav_int16* drwav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drwav_uint64 totalSampleCount;
- drwav_int16* result;
-
if (channelsOut) {
*channelsOut = 0;
}
@@ -4363,52 +4862,17 @@ drwav_int16* drwav_open_file_and_read_pcm_frames_s16(const char* filename, unsig
*totalFrameCountOut = 0;
}
- result = drwav_open_file_and_read_s16(filename, &channels, &sampleRate, &totalSampleCount);
- if (result == NULL) {
+ if (!drwav_init_file(&wav, filename, pAllocationCallbacks)) {
return NULL;
}
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalFrameCountOut) {
- *totalFrameCountOut = totalSampleCount / channels;
- }
-
- return result;
+ return drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
-float* drwav_open_file_and_read_f32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
+float* drwav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
drwav wav;
- if (sampleRate) {
- *sampleRate = 0;
- }
- if (channels) {
- *channels = 0;
- }
- if (totalSampleCount) {
- *totalSampleCount = 0;
- }
-
- if (!drwav_init_file(&wav, filename)) {
- return NULL;
- }
-
- return drwav__read_and_close_f32(&wav, channels, sampleRate, totalSampleCount);
-}
-
-float* drwav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drwav_uint64 totalSampleCount;
- float* result;
-
if (channelsOut) {
*channelsOut = 0;
}
@@ -4419,52 +4883,17 @@ float* drwav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned in
*totalFrameCountOut = 0;
}
- result = drwav_open_file_and_read_f32(filename, &channels, &sampleRate, &totalSampleCount);
- if (result == NULL) {
+ if (!drwav_init_file(&wav, filename, pAllocationCallbacks)) {
return NULL;
}
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalFrameCountOut) {
- *totalFrameCountOut = totalSampleCount / channels;
- }
-
- return result;
+ return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
-drwav_int32* drwav_open_file_and_read_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
+drwav_int32* drwav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
drwav wav;
- if (sampleRate) {
- *sampleRate = 0;
- }
- if (channels) {
- *channels = 0;
- }
- if (totalSampleCount) {
- *totalSampleCount = 0;
- }
-
- if (!drwav_init_file(&wav, filename)) {
- return NULL;
- }
-
- return drwav__read_and_close_s32(&wav, channels, sampleRate, totalSampleCount);
-}
-
-drwav_int32* drwav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut)
-{
- unsigned int channels;
- unsigned int sampleRate;
- drwav_uint64 totalSampleCount;
- drwav_int32* result;
-
if (channelsOut) {
*channelsOut = 0;
}
@@ -4475,108 +4904,81 @@ drwav_int32* drwav_open_file_and_read_pcm_frames_s32(const char* filename, unsig
*totalFrameCountOut = 0;
}
- result = drwav_open_file_and_read_s32(filename, &channels, &sampleRate, &totalSampleCount);
- if (result == NULL) {
+ if (!drwav_init_file(&wav, filename, pAllocationCallbacks)) {
return NULL;
}
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalFrameCountOut) {
- *totalFrameCountOut = totalSampleCount / channels;
- }
-
- return result;
+ return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
-#endif
-drwav_int16* drwav_open_memory_and_read_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
+
+drwav_int16* drwav_open_file_and_read_pcm_frames_s16_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
drwav wav;
- if (sampleRate) {
- *sampleRate = 0;
+ if (sampleRateOut) {
+ *sampleRateOut = 0;
}
- if (channels) {
- *channels = 0;
+ if (channelsOut) {
+ *channelsOut = 0;
}
- if (totalSampleCount) {
- *totalSampleCount = 0;
+ if (totalFrameCountOut) {
+ *totalFrameCountOut = 0;
}
- if (!drwav_init_memory(&wav, data, dataSize)) {
+ if (!drwav_init_file_w(&wav, filename, pAllocationCallbacks)) {
return NULL;
}
- return drwav__read_and_close_s16(&wav, channels, sampleRate, totalSampleCount);
+ return drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
-drwav_int16* drwav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut)
+float* drwav_open_file_and_read_pcm_frames_f32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
- unsigned int channels;
- unsigned int sampleRate;
- drwav_uint64 totalSampleCount;
- drwav_int16* result;
+ drwav wav;
- if (channelsOut) {
- *channelsOut = 0;
- }
if (sampleRateOut) {
*sampleRateOut = 0;
}
+ if (channelsOut) {
+ *channelsOut = 0;
+ }
if (totalFrameCountOut) {
*totalFrameCountOut = 0;
}
- result = drwav_open_memory_and_read_s16(data, dataSize, &channels, &sampleRate, &totalSampleCount);
- if (result == NULL) {
+ if (!drwav_init_file_w(&wav, filename, pAllocationCallbacks)) {
return NULL;
}
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalFrameCountOut) {
- *totalFrameCountOut = totalSampleCount / channels;
- }
-
- return result;
+ return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
-float* drwav_open_memory_and_read_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
+drwav_int32* drwav_open_file_and_read_pcm_frames_s32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
drwav wav;
- if (sampleRate) {
- *sampleRate = 0;
+ if (sampleRateOut) {
+ *sampleRateOut = 0;
}
- if (channels) {
- *channels = 0;
+ if (channelsOut) {
+ *channelsOut = 0;
}
- if (totalSampleCount) {
- *totalSampleCount = 0;
+ if (totalFrameCountOut) {
+ *totalFrameCountOut = 0;
}
- if (!drwav_init_memory(&wav, data, dataSize)) {
+ if (!drwav_init_file_w(&wav, filename, pAllocationCallbacks)) {
return NULL;
}
- return drwav__read_and_close_f32(&wav, channels, sampleRate, totalSampleCount);
+ return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
+#endif
-float* drwav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut)
+drwav_int16* drwav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
- unsigned int channels;
- unsigned int sampleRate;
- drwav_uint64 totalSampleCount;
- float* result;
+ drwav wav;
if (channelsOut) {
*channelsOut = 0;
@@ -4588,51 +4990,37 @@ float* drwav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSi
*totalFrameCountOut = 0;
}
- result = drwav_open_memory_and_read_f32(data, dataSize, &channels, &sampleRate, &totalSampleCount);
- if (result == NULL) {
+ if (!drwav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) {
return NULL;
}
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalFrameCountOut) {
- *totalFrameCountOut = totalSampleCount / channels;
- }
-
- return result;
+ return drwav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
-drwav_int32* drwav_open_memory_and_read_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, drwav_uint64* totalSampleCount)
+float* drwav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
drwav wav;
- if (sampleRate) {
- *sampleRate = 0;
+ if (channelsOut) {
+ *channelsOut = 0;
}
- if (channels) {
- *channels = 0;
+ if (sampleRateOut) {
+ *sampleRateOut = 0;
}
- if (totalSampleCount) {
- *totalSampleCount = 0;
+ if (totalFrameCountOut) {
+ *totalFrameCountOut = 0;
}
- if (!drwav_init_memory(&wav, data, dataSize)) {
+ if (!drwav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) {
return NULL;
}
- return drwav__read_and_close_s32(&wav, channels, sampleRate, totalSampleCount);
+ return drwav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
-drwav_int32* drwav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut)
+drwav_int32* drwav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, drwav_uint64* totalFrameCountOut, const drwav_allocation_callbacks* pAllocationCallbacks)
{
- unsigned int channels;
- unsigned int sampleRate;
- drwav_uint64 totalSampleCount;
- drwav_int32* result;
+ drwav wav;
if (channelsOut) {
*channelsOut = 0;
@@ -4644,37 +5032,148 @@ drwav_int32* drwav_open_memory_and_read_pcm_frames_s32(const void* data, size_t
*totalFrameCountOut = 0;
}
- result = drwav_open_memory_and_read_s32(data, dataSize, &channels, &sampleRate, &totalSampleCount);
- if (result == NULL) {
+ if (!drwav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) {
return NULL;
}
- if (channelsOut) {
- *channelsOut = channels;
- }
- if (sampleRateOut) {
- *sampleRateOut = sampleRate;
- }
- if (totalFrameCountOut) {
- *totalFrameCountOut = totalSampleCount / channels;
- }
-
- return result;
+ return drwav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
}
#endif /* DR_WAV_NO_CONVERSION_API */
-void drwav_free(void* pDataReturnedByOpenAndRead)
+void drwav_free(void* p, const drwav_allocation_callbacks* pAllocationCallbacks)
{
- DRWAV_FREE(pDataReturnedByOpenAndRead);
+ if (pAllocationCallbacks != NULL) {
+ drwav__free_from_callbacks(p, pAllocationCallbacks);
+ } else {
+ drwav__free_default(p, NULL);
+ }
}
#endif /* DR_WAV_IMPLEMENTATION */
-
/*
REVISION HISTORY
================
+v0.11.5 - 2020-03-07
+ - Fix compilation error with Visual Studio .NET 2003.
+
+v0.11.4 - 2020-01-29
+ - Fix some static analysis warnings.
+ - Fix a bug when reading f32 samples from an A-law encoded stream.
+
+v0.11.3 - 2020-01-12
+ - Minor changes to some f32 format conversion routines.
+ - Minor bug fix for ADPCM conversion when end of file is reached.
+
+v0.11.2 - 2019-12-02
+ - Fix a possible crash when using custom memory allocators without a custom realloc() implementation.
+ - Fix an integer overflow bug.
+ - Fix a null pointer dereference bug.
+ - Add limits to sample rate, channels and bits per sample to tighten up some validation.
+
+v0.11.1 - 2019-10-07
+ - Internal code clean up.
+
+v0.11.0 - 2019-10-06
+ - API CHANGE: Add support for user defined memory allocation routines. This system allows the program to specify their own memory allocation
+ routines with a user data pointer for client-specific contextual data. This adds an extra parameter to the end of the following APIs:
+ - drwav_init()
+ - drwav_init_ex()
+ - drwav_init_file()
+ - drwav_init_file_ex()
+ - drwav_init_file_w()
+ - drwav_init_file_w_ex()
+ - drwav_init_memory()
+ - drwav_init_memory_ex()
+ - drwav_init_write()
+ - drwav_init_write_sequential()
+ - drwav_init_write_sequential_pcm_frames()
+ - drwav_init_file_write()
+ - drwav_init_file_write_sequential()
+ - drwav_init_file_write_sequential_pcm_frames()
+ - drwav_init_file_write_w()
+ - drwav_init_file_write_sequential_w()
+ - drwav_init_file_write_sequential_pcm_frames_w()
+ - drwav_init_memory_write()
+ - drwav_init_memory_write_sequential()
+ - drwav_init_memory_write_sequential_pcm_frames()
+ - drwav_open_and_read_pcm_frames_s16()
+ - drwav_open_and_read_pcm_frames_f32()
+ - drwav_open_and_read_pcm_frames_s32()
+ - drwav_open_file_and_read_pcm_frames_s16()
+ - drwav_open_file_and_read_pcm_frames_f32()
+ - drwav_open_file_and_read_pcm_frames_s32()
+ - drwav_open_file_and_read_pcm_frames_s16_w()
+ - drwav_open_file_and_read_pcm_frames_f32_w()
+ - drwav_open_file_and_read_pcm_frames_s32_w()
+ - drwav_open_memory_and_read_pcm_frames_s16()
+ - drwav_open_memory_and_read_pcm_frames_f32()
+ - drwav_open_memory_and_read_pcm_frames_s32()
+ Set this extra parameter to NULL to use defaults which is the same as the previous behaviour. Setting this NULL will use
+ DRWAV_MALLOC, DRWAV_REALLOC and DRWAV_FREE.
+ - Add support for reading and writing PCM frames in an explicit endianness. New APIs:
+ - drwav_read_pcm_frames_le()
+ - drwav_read_pcm_frames_be()
+ - drwav_read_pcm_frames_s16le()
+ - drwav_read_pcm_frames_s16be()
+ - drwav_read_pcm_frames_f32le()
+ - drwav_read_pcm_frames_f32be()
+ - drwav_read_pcm_frames_s32le()
+ - drwav_read_pcm_frames_s32be()
+ - drwav_write_pcm_frames_le()
+ - drwav_write_pcm_frames_be()
+ - Remove deprecated APIs.
+ - API CHANGE: The following APIs now return native-endian data. Previously they returned little-endian data.
+ - drwav_read_pcm_frames()
+ - drwav_read_pcm_frames_s16()
+ - drwav_read_pcm_frames_s32()
+ - drwav_read_pcm_frames_f32()
+ - drwav_open_and_read_pcm_frames_s16()
+ - drwav_open_and_read_pcm_frames_s32()
+ - drwav_open_and_read_pcm_frames_f32()
+ - drwav_open_file_and_read_pcm_frames_s16()
+ - drwav_open_file_and_read_pcm_frames_s32()
+ - drwav_open_file_and_read_pcm_frames_f32()
+ - drwav_open_file_and_read_pcm_frames_s16_w()
+ - drwav_open_file_and_read_pcm_frames_s32_w()
+ - drwav_open_file_and_read_pcm_frames_f32_w()
+ - drwav_open_memory_and_read_pcm_frames_s16()
+ - drwav_open_memory_and_read_pcm_frames_s32()
+ - drwav_open_memory_and_read_pcm_frames_f32()
+
+v0.10.1 - 2019-08-31
+ - Correctly handle partial trailing ADPCM blocks.
+
+v0.10.0 - 2019-08-04
+ - Remove deprecated APIs.
+ - Add wchar_t variants for file loading APIs:
+ drwav_init_file_w()
+ drwav_init_file_ex_w()
+ drwav_init_file_write_w()
+ drwav_init_file_write_sequential_w()
+ - Add drwav_target_write_size_bytes() which calculates the total size in bytes of a WAV file given a format and sample count.
+ - Add APIs for specifying the PCM frame count instead of the sample count when opening in sequential write mode:
+ drwav_init_write_sequential_pcm_frames()
+ drwav_init_file_write_sequential_pcm_frames()
+ drwav_init_file_write_sequential_pcm_frames_w()
+ drwav_init_memory_write_sequential_pcm_frames()
+ - Deprecate drwav_open*() and drwav_close():
+ drwav_open()
+ drwav_open_ex()
+ drwav_open_write()
+ drwav_open_write_sequential()
+ drwav_open_file()
+ drwav_open_file_ex()
+ drwav_open_file_write()
+ drwav_open_file_write_sequential()
+ drwav_open_memory()
+ drwav_open_memory_ex()
+ drwav_open_memory_write()
+ drwav_open_memory_write_sequential()
+ drwav_close()
+ - Minor documentation updates.
+
v0.9.2 - 2019-05-21
- Fix warnings.
@@ -4855,7 +5354,7 @@ For more information, please refer to
===============================================================================
ALTERNATIVE 2 - MIT No Attribution
===============================================================================
-Copyright 2018 David Reid
+Copyright 2020 David Reid
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
diff --git a/client/miniaudio/jar_mod.h b/client/miniaudio/jar_mod.h
deleted file mode 100644
index 6d5b465f..00000000
--- a/client/miniaudio/jar_mod.h
+++ /dev/null
@@ -1,1600 +0,0 @@
-// jar_mod.h - v0.01 - public domain C0 - Joshua Reisenauer
-//
-// HISTORY:
-//
-// v0.01 2016-03-12 Setup
-//
-//
-// USAGE:
-//
-// In ONE source file, put:
-//
-// #define JAR_MOD_IMPLEMENTATION
-// #include "jar_mod.h"
-//
-// Other source files should just include jar_mod.h
-//
-// SAMPLE CODE:
-// jar_mod_context_t modctx;
-// short samplebuff[4096];
-// bool bufferFull = false;
-// int intro_load(void)
-// {
-// jar_mod_init(&modctx);
-// jar_mod_load_file(&modctx, "file.mod");
-// return 1;
-// }
-// int intro_unload(void)
-// {
-// jar_mod_unload(&modctx);
-// return 1;
-// }
-// int intro_tick(long counter)
-// {
-// if(!bufferFull)
-// {
-// jar_mod_fillbuffer(&modctx, samplebuff, 4096, 0);
-// bufferFull=true;
-// }
-// if(IsKeyDown(KEY_ENTER))
-// return 1;
-// return 0;
-// }
-//
-//
-// LISCENSE:
-//
-// Written by: Jean-François DEL NERO (http://hxc2001.com/) free.fr>
-// Adapted to jar_mod by: Joshua Adam Reisenauer
-// This program is free software. It comes without any warranty, to the
-// extent permitted by applicable law. You can redistribute it and/or
-// modify it under the terms of the Do What The Fuck You Want To Public
-// License, Version 2, as published by Sam Hocevar. See
-// http://sam.zoy.org/wtfpl/COPYING for more details.
-///////////////////////////////////////////////////////////////////////////////////
-// HxCMOD Core API:
-// -------------------------------------------
-// int jar_mod_init(jar_mod_context_t * modctx)
-//
-// - Initialize the jar_mod_context_t buffer. Must be called before doing anything else.
-// Return 1 if success. 0 in case of error.
-// -------------------------------------------
-// mulong jar_mod_load_file(jar_mod_context_t * modctx, const char* filename)
-//
-// - "Load" a MOD from file, context must already be initialized.
-// Return size of file in bytes.
-// -------------------------------------------
-// void jar_mod_fillbuffer( jar_mod_context_t * modctx, short * outbuffer, unsigned long nbsample, jar_mod_tracker_buffer_state * trkbuf )
-//
-// - Generate and return the next samples chunk to outbuffer.
-// nbsample specify the number of stereo 16bits samples you want.
-// The output format is by default signed 48000Hz 16-bit Stereo PCM samples, otherwise it is changed with jar_mod_setcfg().
-// The output buffer size in bytes must be equal to ( nbsample * 2 * channels ).
-// The optional trkbuf parameter can be used to get detailed status of the player. Put NULL/0 is unused.
-// -------------------------------------------
-// void jar_mod_unload( jar_mod_context_t * modctx )
-// - "Unload" / clear the player status.
-// -------------------------------------------
-///////////////////////////////////////////////////////////////////////////////////
-
-
-#ifndef INCLUDE_JAR_MOD_H
-#define INCLUDE_JAR_MOD_H
-
-#include
-#include
-#include // comment this line out if you have bool defined somewhere else
-#include // For memset()
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-
-// Basic type
-typedef unsigned char muchar;
-typedef unsigned short muint;
-typedef short mint;
-typedef unsigned long mulong;
-
-#define NUMMAXCHANNELS 32
-#define MAXNOTES 12*12
-#define DEFAULT_SAMPLE_RATE 48000
-//
-// MOD file structures
-//
-
-#pragma pack(1)
-
-typedef struct {
- muchar name[22];
- muint length;
- muchar finetune;
- muchar volume;
- muint reppnt;
- muint replen;
-} sample;
-
-typedef struct {
- muchar sampperiod;
- muchar period;
- muchar sampeffect;
- muchar effect;
-} note;
-
-typedef struct {
- muchar title[20];
- sample samples[31];
- muchar length; // length of tablepos
- muchar protracker;
- muchar patterntable[128];
- muchar signature[4];
- muchar speed;
-} module;
-
-#pragma pack()
-
-//
-// HxCMod Internal structures
-//
-typedef struct {
- char* sampdata;
- muint sampnum;
- muint length;
- muint reppnt;
- muint replen;
- mulong samppos;
- muint period;
- muchar volume;
- mulong ticks;
- muchar effect;
- muchar parameffect;
- muint effect_code;
- mint decalperiod;
- mint portaspeed;
- mint portaperiod;
- mint vibraperiod;
- mint Arpperiods[3];
- muchar ArpIndex;
- mint oldk;
- muchar volumeslide;
- muchar vibraparam;
- muchar vibrapointeur;
- muchar finetune;
- muchar cut_param;
- muint patternloopcnt;
- muint patternloopstartpoint;
-} channel;
-
-typedef struct {
- module song;
- char* sampledata[31];
- note* patterndata[128];
-
- mulong playrate;
- muint tablepos;
- muint patternpos;
- muint patterndelay;
- muint jump_loop_effect;
- muchar bpm;
- mulong patternticks;
- mulong patterntickse;
- mulong patternticksaim;
- mulong sampleticksconst;
- mulong samplenb;
- channel channels[NUMMAXCHANNELS];
- muint number_of_channels;
- muint fullperiod[MAXNOTES * 8];
- muint mod_loaded;
- mint last_r_sample;
- mint last_l_sample;
- mint stereo;
- mint stereo_separation;
- mint bits;
- mint filter;
-
- muchar *modfile; // the raw mod file
- mulong modfilesize;
- muint loopcount;
-} jar_mod_context_t;
-
-//
-// Player states structures
-//
-typedef struct track_state_
-{
- unsigned char instrument_number;
- unsigned short cur_period;
- unsigned char cur_volume;
- unsigned short cur_effect;
- unsigned short cur_parameffect;
-}track_state;
-
-typedef struct tracker_state_
-{
- int number_of_tracks;
- int bpm;
- int speed;
- int cur_pattern;
- int cur_pattern_pos;
- int cur_pattern_table_pos;
- unsigned int buf_index;
- track_state tracks[32];
-}tracker_state;
-
-typedef struct tracker_state_instrument_
-{
- char name[22];
- int active;
-}tracker_state_instrument;
-
-typedef struct jar_mod_tracker_buffer_state_
-{
- int nb_max_of_state;
- int nb_of_state;
- int cur_rd_index;
- int sample_step;
- char name[64];
- tracker_state_instrument instruments[31];
- tracker_state * track_state_buf;
-}jar_mod_tracker_buffer_state;
-
-
-
-bool jar_mod_init(jar_mod_context_t * modctx);
-bool jar_mod_setcfg(jar_mod_context_t * modctx, int samplerate, int bits, int stereo, int stereo_separation, int filter);
-void jar_mod_fillbuffer(jar_mod_context_t * modctx, short * outbuffer, unsigned long nbsample, jar_mod_tracker_buffer_state * trkbuf);
-void jar_mod_unload(jar_mod_context_t * modctx);
-mulong jar_mod_load_file(jar_mod_context_t * modctx, const char* filename);
-mulong jar_mod_current_samples(jar_mod_context_t * modctx);
-mulong jar_mod_max_samples(jar_mod_context_t * modctx);
-void jar_mod_seek_start(jar_mod_context_t * ctx);
-
-#ifdef __cplusplus
-}
-#endif
-//--------------------------------------------------------------------
-
-
-
-//-------------------------------------------------------------------------------
-#ifdef JAR_MOD_IMPLEMENTATION
-
-// Effects list
-#define EFFECT_ARPEGGIO 0x0 // Supported
-#define EFFECT_PORTAMENTO_UP 0x1 // Supported
-#define EFFECT_PORTAMENTO_DOWN 0x2 // Supported
-#define EFFECT_TONE_PORTAMENTO 0x3 // Supported
-#define EFFECT_VIBRATO 0x4 // Supported
-#define EFFECT_VOLSLIDE_TONEPORTA 0x5 // Supported
-#define EFFECT_VOLSLIDE_VIBRATO 0x6 // Supported
-#define EFFECT_VOLSLIDE_TREMOLO 0x7 // - TO BE DONE -
-#define EFFECT_SET_PANNING 0x8 // - TO BE DONE -
-#define EFFECT_SET_OFFSET 0x9 // Supported
-#define EFFECT_VOLUME_SLIDE 0xA // Supported
-#define EFFECT_JUMP_POSITION 0xB // Supported
-#define EFFECT_SET_VOLUME 0xC // Supported
-#define EFFECT_PATTERN_BREAK 0xD // Supported
-
-#define EFFECT_EXTENDED 0xE
-#define EFFECT_E_FINE_PORTA_UP 0x1 // Supported
-#define EFFECT_E_FINE_PORTA_DOWN 0x2 // Supported
-#define EFFECT_E_GLISSANDO_CTRL 0x3 // - TO BE DONE -
-#define EFFECT_E_VIBRATO_WAVEFORM 0x4 // - TO BE DONE -
-#define EFFECT_E_SET_FINETUNE 0x5 // - TO BE DONE -
-#define EFFECT_E_PATTERN_LOOP 0x6 // Supported
-#define EFFECT_E_TREMOLO_WAVEFORM 0x7 // - TO BE DONE -
-#define EFFECT_E_SET_PANNING_2 0x8 // - TO BE DONE -
-#define EFFECT_E_RETRIGGER_NOTE 0x9 // - TO BE DONE -
-#define EFFECT_E_FINE_VOLSLIDE_UP 0xA // Supported
-#define EFFECT_E_FINE_VOLSLIDE_DOWN 0xB // Supported
-#define EFFECT_E_NOTE_CUT 0xC // Supported
-#define EFFECT_E_NOTE_DELAY 0xD // - TO BE DONE -
-#define EFFECT_E_PATTERN_DELAY 0xE // Supported
-#define EFFECT_E_INVERT_LOOP 0xF // - TO BE DONE -
-#define EFFECT_SET_SPEED 0xF0 // Supported
-#define EFFECT_SET_TEMPO 0xF2 // Supported
-
-#define PERIOD_TABLE_LENGTH MAXNOTES
-#define FULL_PERIOD_TABLE_LENGTH ( PERIOD_TABLE_LENGTH * 8 )
-
-static const short periodtable[]=
-{
- 27392, 25856, 24384, 23040, 21696, 20480, 19328, 18240, 17216, 16256, 15360, 14496,
- 13696, 12928, 12192, 11520, 10848, 10240, 9664, 9120, 8606, 8128, 7680, 7248,
- 6848, 6464, 6096, 5760, 5424, 5120, 4832, 4560, 4304, 4064, 3840, 3624,
- 3424, 3232, 3048, 2880, 2712, 2560, 2416, 2280, 2152, 2032, 1920, 1812,
- 1712, 1616, 1524, 1440, 1356, 1280, 1208, 1140, 1076, 1016, 960, 906,
- 856, 808, 762, 720, 678, 640, 604, 570, 538, 508, 480, 453,
- 428, 404, 381, 360, 339, 320, 302, 285, 269, 254, 240, 226,
- 214, 202, 190, 180, 170, 160, 151, 143, 135, 127, 120, 113,
- 107, 101, 95, 90, 85, 80, 75, 71, 67, 63, 60, 56,
- 53, 50, 47, 45, 42, 40, 37, 35, 33, 31, 30, 28,
- 27, 25, 24, 22, 21, 20, 19, 18, 17, 16, 15, 14,
- 13, 13, 12, 11, 11, 10, 9, 9, 8, 8, 7, 7
-};
-
-static const short sintable[]={
- 0, 24, 49, 74, 97, 120, 141,161,
- 180, 197, 212, 224, 235, 244, 250,253,
- 255, 253, 250, 244, 235, 224, 212,197,
- 180, 161, 141, 120, 97, 74, 49, 24
-};
-
-typedef struct modtype_
-{
- unsigned char signature[5];
- int numberofchannels;
-}modtype;
-
-modtype modlist[]=
-{
- { "M!K!",4},
- { "M.K.",4},
- { "FLT4",4},
- { "FLT8",8},
- { "4CHN",4},
- { "6CHN",6},
- { "8CHN",8},
- { "10CH",10},
- { "12CH",12},
- { "14CH",14},
- { "16CH",16},
- { "18CH",18},
- { "20CH",20},
- { "22CH",22},
- { "24CH",24},
- { "26CH",26},
- { "28CH",28},
- { "30CH",30},
- { "32CH",32},
- { "",0}
-};
-
-///////////////////////////////////////////////////////////////////////////////////
-
-static void memcopy( void * dest, void *source, unsigned long size )
-{
- unsigned long i;
- unsigned char * d,*s;
-
- d=(unsigned char*)dest;
- s=(unsigned char*)source;
- for(i=0;i= mod->fullperiod[i])
- {
- return i;
- }
- }
-
- return MAXNOTES;
-}
-
-static void worknote( note * nptr, channel * cptr, char t, jar_mod_context_t * mod )
-{
- muint sample, period, effect, operiod;
- muint curnote, arpnote;
-
- sample = (nptr->sampperiod & 0xF0) | (nptr->sampeffect >> 4);
- period = ((nptr->sampperiod & 0xF) << 8) | nptr->period;
- effect = ((nptr->sampeffect & 0xF) << 8) | nptr->effect;
-
- operiod = cptr->period;
-
- if ( period || sample )
- {
- if( sample && sample < 32 )
- {
- cptr->sampnum = sample - 1;
- }
-
- if( period || sample )
- {
- cptr->sampdata = (char *) mod->sampledata[cptr->sampnum];
- cptr->length = mod->song.samples[cptr->sampnum].length;
- cptr->reppnt = mod->song.samples[cptr->sampnum].reppnt;
- cptr->replen = mod->song.samples[cptr->sampnum].replen;
-
- cptr->finetune = (mod->song.samples[cptr->sampnum].finetune)&0xF;
-
- if(effect>>8!=4 && effect>>8!=6)
- {
- cptr->vibraperiod=0;
- cptr->vibrapointeur=0;
- }
- }
-
- if( (sample != 0) && ( (effect>>8) != EFFECT_VOLSLIDE_TONEPORTA ) )
- {
- cptr->volume = mod->song.samples[cptr->sampnum].volume;
- cptr->volumeslide = 0;
- }
-
- if( ( (effect>>8) != EFFECT_TONE_PORTAMENTO && (effect>>8)!=EFFECT_VOLSLIDE_TONEPORTA) )
- {
- if (period!=0)
- cptr->samppos = 0;
- }
-
- cptr->decalperiod = 0;
- if( period )
- {
- if(cptr->finetune)
- {
- if( cptr->finetune <= 7 )
- {
- period = mod->fullperiod[getnote(mod,period,0) + cptr->finetune];
- }
- else
- {
- period = mod->fullperiod[getnote(mod,period,0) - (16 - (cptr->finetune)) ];
- }
- }
-
- cptr->period = period;
- }
-
- }
-
- cptr->effect = 0;
- cptr->parameffect = 0;
- cptr->effect_code = effect;
-
- switch (effect >> 8)
- {
- case EFFECT_ARPEGGIO:
- /*
- [0]: Arpeggio
- Where [0][x][y] means "play note, note+x semitones, note+y
- semitones, then return to original note". The fluctuations are
- carried out evenly spaced in one pattern division. They are usually
- used to simulate chords, but this doesn't work too well. They are
- also used to produce heavy vibrato. A major chord is when x=4, y=7.
- A minor chord is when x=3, y=7.
- */
-
- if(effect&0xff)
- {
- cptr->effect = EFFECT_ARPEGGIO;
- cptr->parameffect = effect&0xff;
-
- cptr->ArpIndex = 0;
-
- curnote = getnote(mod,cptr->period,cptr->finetune);
-
- cptr->Arpperiods[0] = cptr->period;
-
- arpnote = curnote + (((cptr->parameffect>>4)&0xF)*8);
- if( arpnote >= FULL_PERIOD_TABLE_LENGTH )
- arpnote = FULL_PERIOD_TABLE_LENGTH - 1;
-
- cptr->Arpperiods[1] = mod->fullperiod[arpnote];
-
- arpnote = curnote + (((cptr->parameffect)&0xF)*8);
- if( arpnote >= FULL_PERIOD_TABLE_LENGTH )
- arpnote = FULL_PERIOD_TABLE_LENGTH - 1;
-
- cptr->Arpperiods[2] = mod->fullperiod[arpnote];
- }
- break;
-
- case EFFECT_PORTAMENTO_UP:
- /*
- [1]: Slide up
- Where [1][x][y] means "smoothly decrease the period of current
- sample by x*16+y after each tick in the division". The
- ticks/division are set with the 'set speed' effect (see below). If
- the period of the note being played is z, then the final period
- will be z - (x*16 + y)*(ticks - 1). As the slide rate depends on
- the speed, changing the speed will change the slide. You cannot
- slide beyond the note B3 (period 113).
- */
-
- cptr->effect = EFFECT_PORTAMENTO_UP;
- cptr->parameffect = effect&0xff;
- break;
-
- case EFFECT_PORTAMENTO_DOWN:
- /*
- [2]: Slide down
- Where [2][x][y] means "smoothly increase the period of current
- sample by x*16+y after each tick in the division". Similar to [1],
- but lowers the pitch. You cannot slide beyond the note C1 (period
- 856).
- */
-
- cptr->effect = EFFECT_PORTAMENTO_DOWN;
- cptr->parameffect = effect&0xff;
- break;
-
- case EFFECT_TONE_PORTAMENTO:
- /*
- [3]: Slide to note
- Where [3][x][y] means "smoothly change the period of current sample
- by x*16+y after each tick in the division, never sliding beyond
- current period". The period-length in this channel's division is a
- parameter to this effect, and hence is not played. Sliding to a
- note is similar to effects [1] and [2], but the slide will not go
- beyond the given period, and the direction is implied by that
- period. If x and y are both 0, then the old slide will continue.
- */
-
- cptr->effect = EFFECT_TONE_PORTAMENTO;
- if( (effect&0xff) != 0 )
- {
- cptr->portaspeed = (short)(effect&0xff);
- }
-
- if(period!=0)
- {
- cptr->portaperiod = period;
- cptr->period = operiod;
- }
- break;
-
- case EFFECT_VIBRATO:
- /*
- [4]: Vibrato
- Where [4][x][y] means "oscillate the sample pitch using a
- particular waveform with amplitude y/16 semitones, such that (x *
- ticks)/64 cycles occur in the division". The waveform is set using
- effect [14][4]. By placing vibrato effects on consecutive
- divisions, the vibrato effect can be maintained. If either x or y
- are 0, then the old vibrato values will be used.
- */
-
- cptr->effect = EFFECT_VIBRATO;
- if( ( effect & 0x0F ) != 0 ) // Depth continue or change ?
- cptr->vibraparam = (cptr->vibraparam & 0xF0) | ( effect & 0x0F );
- if( ( effect & 0xF0 ) != 0 ) // Speed continue or change ?
- cptr->vibraparam = (cptr->vibraparam & 0x0F) | ( effect & 0xF0 );
-
- break;
-
- case EFFECT_VOLSLIDE_TONEPORTA:
- /*
- [5]: Continue 'Slide to note', but also do Volume slide
- Where [5][x][y] means "either slide the volume up x*(ticks - 1) or
- slide the volume down y*(ticks - 1), at the same time as continuing
- the last 'Slide to note'". It is illegal for both x and y to be
- non-zero. You cannot slide outside the volume range 0..64. The
- period-length in this channel's division is a parameter to this
- effect, and hence is not played.
- */
-
- if( period != 0 )
- {
- cptr->portaperiod = period;
- cptr->period = operiod;
- }
-
- cptr->effect = EFFECT_VOLSLIDE_TONEPORTA;
- if( ( effect & 0xFF ) != 0 )
- cptr->volumeslide = ( effect & 0xFF );
-
- break;
-
- case EFFECT_VOLSLIDE_VIBRATO:
- /*
- [6]: Continue 'Vibrato', but also do Volume slide
- Where [6][x][y] means "either slide the volume up x*(ticks - 1) or
- slide the volume down y*(ticks - 1), at the same time as continuing
- the last 'Vibrato'". It is illegal for both x and y to be non-zero.
- You cannot slide outside the volume range 0..64.
- */
-
- cptr->effect = EFFECT_VOLSLIDE_VIBRATO;
- if( (effect & 0xFF) != 0 )
- cptr->volumeslide = (effect & 0xFF);
- break;
-
- case EFFECT_SET_OFFSET:
- /*
- [9]: Set sample offset
- Where [9][x][y] means "play the sample from offset x*4096 + y*256".
- The offset is measured in words. If no sample is given, yet one is
- still playing on this channel, it should be retriggered to the new
- offset using the current volume.
- */
-
- cptr->samppos = ((effect>>4) * 4096) + ((effect&0xF)*256);
-
- break;
-
- case EFFECT_VOLUME_SLIDE:
- /*
- [10]: Volume slide
- Where [10][x][y] means "either slide the volume up x*(ticks - 1) or
- slide the volume down y*(ticks - 1)". If both x and y are non-zero,
- then the y value is ignored (assumed to be 0). You cannot slide
- outside the volume range 0..64.
- */
-
- cptr->effect = EFFECT_VOLUME_SLIDE;
- cptr->volumeslide = (effect & 0xFF);
- break;
-
- case EFFECT_JUMP_POSITION:
- /*
- [11]: Position Jump
- Where [11][x][y] means "stop the pattern after this division, and
- continue the song at song-position x*16+y". This shifts the
- 'pattern-cursor' in the pattern table (see above). Legal values for
- x*16+y are from 0 to 127.
- */
-
- mod->tablepos = (effect & 0xFF);
- if(mod->tablepos >= mod->song.length)
- {
- mod->tablepos = 0;
- }
- mod->patternpos = 0;
- mod->jump_loop_effect = 1;
-
- break;
-
- case EFFECT_SET_VOLUME:
- /*
- [12]: Set volume
- Where [12][x][y] means "set current sample's volume to x*16+y".
- Legal volumes are 0..64.
- */
-
- cptr->volume = (effect & 0xFF);
- break;
-
- case EFFECT_PATTERN_BREAK:
- /*
- [13]: Pattern Break
- Where [13][x][y] means "stop the pattern after this division, and
- continue the song at the next pattern at division x*10+y" (the 10
- is not a typo). Legal divisions are from 0 to 63 (note Protracker
- exception above).
- */
-
- mod->patternpos = ( ((effect>>4)&0xF)*10 + (effect&0xF) ) * mod->number_of_channels;
- mod->jump_loop_effect = 1;
- mod->tablepos++;
- if(mod->tablepos >= mod->song.length)
- {
- mod->tablepos = 0;
- }
-
- break;
-
- case EFFECT_EXTENDED:
- switch( (effect>>4) & 0xF )
- {
- case EFFECT_E_FINE_PORTA_UP:
- /*
- [14][1]: Fineslide up
- Where [14][1][x] means "decrement the period of the current sample
- by x". The incrementing takes place at the beginning of the
- division, and hence there is no actual sliding. You cannot slide
- beyond the note B3 (period 113).
- */
-
- cptr->period -= (effect & 0xF);
- if( cptr->period < 113 )
- cptr->period = 113;
- break;
-
- case EFFECT_E_FINE_PORTA_DOWN:
- /*
- [14][2]: Fineslide down
- Where [14][2][x] means "increment the period of the current sample
- by x". Similar to [14][1] but shifts the pitch down. You cannot
- slide beyond the note C1 (period 856).
- */
-
- cptr->period += (effect & 0xF);
- if( cptr->period > 856 )
- cptr->period = 856;
- break;
-
- case EFFECT_E_FINE_VOLSLIDE_UP:
- /*
- [14][10]: Fine volume slide up
- Where [14][10][x] means "increment the volume of the current sample
- by x". The incrementing takes place at the beginning of the
- division, and hence there is no sliding. You cannot slide beyond
- volume 64.
- */
-
- cptr->volume += (effect & 0xF);
- if( cptr->volume>64 )
- cptr->volume = 64;
- break;
-
- case EFFECT_E_FINE_VOLSLIDE_DOWN:
- /*
- [14][11]: Fine volume slide down
- Where [14][11][x] means "decrement the volume of the current sample
- by x". Similar to [14][10] but lowers volume. You cannot slide
- beyond volume 0.
- */
-
- cptr->volume -= (effect & 0xF);
- if( cptr->volume > 200 )
- cptr->volume = 0;
- break;
-
- case EFFECT_E_PATTERN_LOOP:
- /*
- [14][6]: Loop pattern
- Where [14][6][x] means "set the start of a loop to this division if
- x is 0, otherwise after this division, jump back to the start of a
- loop and play it another x times before continuing". If the start
- of the loop was not set, it will default to the start of the
- current pattern. Hence 'loop pattern' cannot be performed across
- multiple patterns. Note that loops do not support nesting, and you
- may generate an infinite loop if you try to nest 'loop pattern's.
- */
-
- if( effect & 0xF )
- {
- if( cptr->patternloopcnt )
- {
- cptr->patternloopcnt--;
- if( cptr->patternloopcnt )
- {
- mod->patternpos = cptr->patternloopstartpoint;
- mod->jump_loop_effect = 1;
- }
- else
- {
- cptr->patternloopstartpoint = mod->patternpos ;
- }
- }
- else
- {
- cptr->patternloopcnt = (effect & 0xF);
- mod->patternpos = cptr->patternloopstartpoint;
- mod->jump_loop_effect = 1;
- }
- }
- else // Start point
- {
- cptr->patternloopstartpoint = mod->patternpos;
- }
-
- break;
-
- case EFFECT_E_PATTERN_DELAY:
- /*
- [14][14]: Delay pattern
- Where [14][14][x] means "after this division there will be a delay
- equivalent to the time taken to play x divisions after which the
- pattern will be resumed". The delay only relates to the
- interpreting of new divisions, and all effects and previous notes
- continue during delay.
- */
-
- mod->patterndelay = (effect & 0xF);
- break;
-
- case EFFECT_E_NOTE_CUT:
- /*
- [14][12]: Cut sample
- Where [14][12][x] means "after the current sample has been played
- for x ticks in this division, its volume will be set to 0". This
- implies that if x is 0, then you will not hear any of the sample.
- If you wish to insert "silence" in a pattern, it is better to use a
- "silence"-sample (see above) due to the lack of proper support for
- this effect.
- */
- cptr->effect = EFFECT_E_NOTE_CUT;
- cptr->cut_param = (effect & 0xF);
- if(!cptr->cut_param)
- cptr->volume = 0;
- break;
-
- default:
-
- break;
- }
- break;
-
- case 0xF:
- /*
- [15]: Set speed
- Where [15][x][y] means "set speed to x*16+y". Though it is nowhere
- near that simple. Let z = x*16+y. Depending on what values z takes,
- different units of speed are set, there being two: ticks/division
- and beats/minute (though this one is only a label and not strictly
- true). If z=0, then what should technically happen is that the
- module stops, but in practice it is treated as if z=1, because
- there is already a method for stopping the module (running out of
- patterns). If z<=32, then it means "set ticks/division to z"
- otherwise it means "set beats/minute to z" (convention says that
- this should read "If z<32.." but there are some composers out there
- that defy conventions). Default values are 6 ticks/division, and
- 125 beats/minute (4 divisions = 1 beat). The beats/minute tag is
- only meaningful for 6 ticks/division. To get a more accurate view
- of how things work, use the following formula:
- 24 * beats/minute
- divisions/minute = -----------------
- ticks/division
- Hence divisions/minute range from 24.75 to 6120, eg. to get a value
- of 2000 divisions/minute use 3 ticks/division and 250 beats/minute.
- If multiple "set speed" effects are performed in a single division,
- the ones on higher-numbered channels take precedence over the ones
- on lower-numbered channels. This effect has a large number of
- different implementations, but the one described here has the
- widest usage.
- */
-
- if( (effect&0xFF) < 0x21 )
- {
- if( effect&0xFF )
- {
- mod->song.speed = effect&0xFF;
- mod->patternticksaim = (long)mod->song.speed * ((mod->playrate * 5 ) / (((long)2 * (long)mod->bpm)));
- }
- }
-
- if( (effect&0xFF) >= 0x21 )
- {
- /// HZ = 2 * BPM / 5
- mod->bpm = effect&0xFF;
- mod->patternticksaim = (long)mod->song.speed * ((mod->playrate * 5 ) / (((long)2 * (long)mod->bpm)));
- }
-
- break;
-
- default:
- // Unsupported effect
- break;
-
- }
-
-}
-
-static void workeffect( note * nptr, channel * cptr )
-{
- switch(cptr->effect)
- {
- case EFFECT_ARPEGGIO:
-
- if( cptr->parameffect )
- {
- cptr->decalperiod = cptr->period - cptr->Arpperiods[cptr->ArpIndex];
-
- cptr->ArpIndex++;
- if( cptr->ArpIndex>2 )
- cptr->ArpIndex = 0;
- }
- break;
-
- case EFFECT_PORTAMENTO_UP:
-
- if(cptr->period)
- {
- cptr->period -= cptr->parameffect;
-
- if( cptr->period < 113 || cptr->period > 20000 )
- cptr->period = 113;
- }
-
- break;
-
- case EFFECT_PORTAMENTO_DOWN:
-
- if(cptr->period)
- {
- cptr->period += cptr->parameffect;
-
- if( cptr->period > 20000 )
- cptr->period = 20000;
- }
-
- break;
-
- case EFFECT_VOLSLIDE_TONEPORTA:
- case EFFECT_TONE_PORTAMENTO:
-
- if( cptr->period && ( cptr->period != cptr->portaperiod ) && cptr->portaperiod )
- {
- if( cptr->period > cptr->portaperiod )
- {
- if( cptr->period - cptr->portaperiod >= cptr->portaspeed )
- {
- cptr->period -= cptr->portaspeed;
- }
- else
- {
- cptr->period = cptr->portaperiod;
- }
- }
- else
- {
- if( cptr->portaperiod - cptr->period >= cptr->portaspeed )
- {
- cptr->period += cptr->portaspeed;
- }
- else
- {
- cptr->period = cptr->portaperiod;
- }
- }
-
- if( cptr->period == cptr->portaperiod )
- {
- // If the slide is over, don't let it to be retriggered.
- cptr->portaperiod = 0;
- }
- }
-
- if( cptr->effect == EFFECT_VOLSLIDE_TONEPORTA )
- {
- if( cptr->volumeslide > 0x0F )
- {
- cptr->volume = cptr->volume + (cptr->volumeslide>>4);
-
- if(cptr->volume>63)
- cptr->volume = 63;
- }
- else
- {
- cptr->volume = cptr->volume - (cptr->volumeslide);
-
- if(cptr->volume>63)
- cptr->volume=0;
- }
- }
- break;
-
- case EFFECT_VOLSLIDE_VIBRATO:
- case EFFECT_VIBRATO:
-
- cptr->vibraperiod = ( (cptr->vibraparam&0xF) * sintable[cptr->vibrapointeur&0x1F] )>>7;
-
- if( cptr->vibrapointeur > 31 )
- cptr->vibraperiod = -cptr->vibraperiod;
-
- cptr->vibrapointeur = (cptr->vibrapointeur+(((cptr->vibraparam>>4))&0xf)) & 0x3F;
-
- if( cptr->effect == EFFECT_VOLSLIDE_VIBRATO )
- {
- if( cptr->volumeslide > 0xF )
- {
- cptr->volume = cptr->volume+(cptr->volumeslide>>4);
-
- if( cptr->volume > 64 )
- cptr->volume = 64;
- }
- else
- {
- cptr->volume = cptr->volume - cptr->volumeslide;
-
- if( cptr->volume > 64 )
- cptr->volume = 0;
- }
- }
-
- break;
-
- case EFFECT_VOLUME_SLIDE:
-
- if( cptr->volumeslide > 0xF )
- {
- cptr->volume += (cptr->volumeslide>>4);
-
- if( cptr->volume > 64 )
- cptr->volume = 64;
- }
- else
- {
- cptr->volume -= (cptr->volumeslide&0xf);
-
- if( cptr->volume > 64 )
- cptr->volume = 0;
- }
- break;
-
- case EFFECT_E_NOTE_CUT:
- if(cptr->cut_param)
- cptr->cut_param--;
-
- if(!cptr->cut_param)
- cptr->volume = 0;
- break;
-
- default:
- break;
-
- }
-
-}
-
-///////////////////////////////////////////////////////////////////////////////////
-bool jar_mod_init(jar_mod_context_t * modctx)
-{
- muint i,j;
-
- if( modctx )
- {
- memclear(modctx, 0, sizeof(jar_mod_context_t));
- modctx->playrate = DEFAULT_SAMPLE_RATE;
- modctx->stereo = 1;
- modctx->stereo_separation = 1;
- modctx->bits = 16;
- modctx->filter = 1;
-
- for(i=0; i < PERIOD_TABLE_LENGTH - 1; i++)
- {
- for(j=0; j < 8; j++)
- {
- modctx->fullperiod[(i*8) + j] = periodtable[i] - ((( periodtable[i] - periodtable[i+1] ) / 8) * j);
- }
- }
-
- return 1;
- }
-
- return 0;
-}
-
-bool jar_mod_setcfg(jar_mod_context_t * modctx, int samplerate, int bits, int stereo, int stereo_separation, int filter)
-{
- if( modctx )
- {
- modctx->playrate = samplerate;
-
- if( stereo )
- modctx->stereo = 1;
- else
- modctx->stereo = 0;
-
- if(stereo_separation < 4)
- {
- modctx->stereo_separation = stereo_separation;
- }
-
- if( bits == 8 || bits == 16 )
- modctx->bits = bits;
- else
- modctx->bits = 16;
-
- if( filter )
- modctx->filter = 1;
- else
- modctx->filter = 0;
-
- return 1;
- }
-
- return 0;
-}
-
-// make certain that mod_data stays in memory while playing
-static bool jar_mod_load( jar_mod_context_t * modctx, void * mod_data, int mod_data_size )
-{
- muint i, max;
- unsigned short t;
- sample *sptr;
- unsigned char * modmemory,* endmodmemory;
-
- modmemory = (unsigned char *)mod_data;
- endmodmemory = modmemory + mod_data_size;
-
-
-
- if(modmemory)
- {
- if( modctx )
- {
- memcopy(&(modctx->song.title),modmemory,1084);
-
- i = 0;
- modctx->number_of_channels = 0;
- while(modlist[i].numberofchannels)
- {
- if(memcompare(modctx->song.signature,modlist[i].signature,4))
- {
- modctx->number_of_channels = modlist[i].numberofchannels;
- }
-
- i++;
- }
-
- if( !modctx->number_of_channels )
- {
- // 15 Samples modules support
- // Shift the whole datas to make it look likes a standard 4 channels mod.
- memcopy(&(modctx->song.signature), "M.K.", 4);
- memcopy(&(modctx->song.length), &(modctx->song.samples[15]), 130);
- memclear(&(modctx->song.samples[15]), 0, 480);
- modmemory += 600;
- modctx->number_of_channels = 4;
- }
- else
- {
- modmemory += 1084;
- }
-
- if( modmemory >= endmodmemory )
- return 0; // End passed ? - Probably a bad file !
-
- // Patterns loading
- for (i = max = 0; i < 128; i++)
- {
- while (max <= modctx->song.patterntable[i])
- {
- modctx->patterndata[max] = (note*)modmemory;
- modmemory += (256*modctx->number_of_channels);
- max++;
-
- if( modmemory >= endmodmemory )
- return 0; // End passed ? - Probably a bad file !
- }
- }
-
- for (i = 0; i < 31; i++)
- modctx->sampledata[i]=0;
-
- // Samples loading
- for (i = 0, sptr = modctx->song.samples; i <31; i++, sptr++)
- {
- t= (sptr->length &0xFF00)>>8 | (sptr->length &0xFF)<<8;
- sptr->length = t*2;
-
- t= (sptr->reppnt &0xFF00)>>8 | (sptr->reppnt &0xFF)<<8;
- sptr->reppnt = t*2;
-
- t= (sptr->replen &0xFF00)>>8 | (sptr->replen &0xFF)<<8;
- sptr->replen = t*2;
-
-
- if (sptr->length == 0) continue;
-
- modctx->sampledata[i] = (char*)modmemory;
- modmemory += sptr->length;
-
- if (sptr->replen + sptr->reppnt > sptr->length)
- sptr->replen = sptr->length - sptr->reppnt;
-
- if( modmemory > endmodmemory )
- return 0; // End passed ? - Probably a bad file !
- }
-
- // States init
-
- modctx->tablepos = 0;
- modctx->patternpos = 0;
- modctx->song.speed = 6;
- modctx->bpm = 125;
- modctx->samplenb = 0;
-
- modctx->patternticks = (((long)modctx->song.speed * modctx->playrate * 5)/ (2 * modctx->bpm)) + 1;
- modctx->patternticksaim = ((long)modctx->song.speed * modctx->playrate * 5) / (2 * modctx->bpm);
-
- modctx->sampleticksconst = 3546894UL / modctx->playrate; //8448*428/playrate;
-
- for(i=0; i < modctx->number_of_channels; i++)
- {
- modctx->channels[i].volume = 0;
- modctx->channels[i].period = 0;
- }
-
- modctx->mod_loaded = 1;
-
- return 1;
- }
- }
-
- return 0;
-}
-
-void jar_mod_fillbuffer( jar_mod_context_t * modctx, short * outbuffer, unsigned long nbsample, jar_mod_tracker_buffer_state * trkbuf )
-{
- unsigned long i, j;
- unsigned long k;
- unsigned char c;
- unsigned int state_remaining_steps;
- int l,r;
- int ll,lr;
- int tl,tr;
- short finalperiod;
- note *nptr;
- channel *cptr;
-
- if( modctx && outbuffer )
- {
- if(modctx->mod_loaded)
- {
- state_remaining_steps = 0;
-
- if( trkbuf )
- {
- trkbuf->cur_rd_index = 0;
-
- memcopy(trkbuf->name,modctx->song.title,sizeof(modctx->song.title));
-
- for(i=0;i<31;i++)
- {
- memcopy(trkbuf->instruments[i].name,modctx->song.samples[i].name,sizeof(trkbuf->instruments[i].name));
- }
- }
-
- ll = modctx->last_l_sample;
- lr = modctx->last_r_sample;
-
- for (i = 0; i < nbsample; i++)
- {
- //---------------------------------------
- if( modctx->patternticks++ > modctx->patternticksaim )
- {
- if( !modctx->patterndelay )
- {
- nptr = modctx->patterndata[modctx->song.patterntable[modctx->tablepos]];
- nptr = nptr + modctx->patternpos;
- cptr = modctx->channels;
-
- modctx->patternticks = 0;
- modctx->patterntickse = 0;
-
- for(c=0;cnumber_of_channels;c++)
- {
- worknote((note*)(nptr+c), (channel*)(cptr+c),(char)(c+1),modctx);
- }
-
- if( !modctx->jump_loop_effect )
- modctx->patternpos += modctx->number_of_channels;
- else
- modctx->jump_loop_effect = 0;
-
- if( modctx->patternpos == 64*modctx->number_of_channels )
- {
- modctx->tablepos++;
- modctx->patternpos = 0;
- if(modctx->tablepos >= modctx->song.length)
- {
- modctx->tablepos = 0;
- modctx->loopcount++; // count next loop
- }
- }
- }
- else
- {
- modctx->patterndelay--;
- modctx->patternticks = 0;
- modctx->patterntickse = 0;
- }
-
- }
-
- if( modctx->patterntickse++ > (modctx->patternticksaim/modctx->song.speed) )
- {
- nptr = modctx->patterndata[modctx->song.patterntable[modctx->tablepos]];
- nptr = nptr + modctx->patternpos;
- cptr = modctx->channels;
-
- for(c=0;cnumber_of_channels;c++)
- {
- workeffect(nptr+c, cptr+c);
- }
-
- modctx->patterntickse = 0;
- }
-
- //---------------------------------------
-
- if( trkbuf && !state_remaining_steps )
- {
- if( trkbuf->nb_of_state < trkbuf->nb_max_of_state )
- {
- memclear(&trkbuf->track_state_buf[trkbuf->nb_of_state], 0, sizeof(tracker_state));
- }
- }
-
- l=0;
- r=0;
-
- for(j =0, cptr = modctx->channels; j < modctx->number_of_channels ; j++, cptr++)
- {
- if( cptr->period != 0 )
- {
- finalperiod = cptr->period - cptr->decalperiod - cptr->vibraperiod;
- if( finalperiod )
- {
- cptr->samppos += ( (modctx->sampleticksconst<<10) / finalperiod );
- }
-
- cptr->ticks++;
-
- if( cptr->replen<=2 )
- {
- if( (cptr->samppos>>10) >= (cptr->length) )
- {
- cptr->length = 0;
- cptr->reppnt = 0;
-
- if( cptr->length )
- cptr->samppos = cptr->samppos % (((unsigned long)cptr->length)<<10);
- else
- cptr->samppos = 0;
- }
- }
- else
- {
- if( (cptr->samppos>>10) >= (unsigned long)(cptr->replen+cptr->reppnt) )
- {
- cptr->samppos = ((unsigned long)(cptr->reppnt)<<10) + (cptr->samppos % ((unsigned long)(cptr->replen+cptr->reppnt)<<10));
- }
- }
-
- k = cptr->samppos >> 10;
-
- if( cptr->sampdata!=0 && ( ((j&3)==1) || ((j&3)==2) ) )
- {
- r += ( cptr->sampdata[k] * cptr->volume );
- }
-
- if( cptr->sampdata!=0 && ( ((j&3)==0) || ((j&3)==3) ) )
- {
- l += ( cptr->sampdata[k] * cptr->volume );
- }
-
- if( trkbuf && !state_remaining_steps )
- {
- if( trkbuf->nb_of_state < trkbuf->nb_max_of_state )
- {
- trkbuf->track_state_buf[trkbuf->nb_of_state].number_of_tracks = modctx->number_of_channels;
- trkbuf->track_state_buf[trkbuf->nb_of_state].buf_index = i;
- trkbuf->track_state_buf[trkbuf->nb_of_state].cur_pattern = modctx->song.patterntable[modctx->tablepos];
- trkbuf->track_state_buf[trkbuf->nb_of_state].cur_pattern_pos = modctx->patternpos / modctx->number_of_channels;
- trkbuf->track_state_buf[trkbuf->nb_of_state].cur_pattern_table_pos = modctx->tablepos;
- trkbuf->track_state_buf[trkbuf->nb_of_state].bpm = modctx->bpm;
- trkbuf->track_state_buf[trkbuf->nb_of_state].speed = modctx->song.speed;
- trkbuf->track_state_buf[trkbuf->nb_of_state].tracks[j].cur_effect = cptr->effect_code;
- trkbuf->track_state_buf[trkbuf->nb_of_state].tracks[j].cur_parameffect = cptr->parameffect;
- trkbuf->track_state_buf[trkbuf->nb_of_state].tracks[j].cur_period = finalperiod;
- trkbuf->track_state_buf[trkbuf->nb_of_state].tracks[j].cur_volume = cptr->volume;
- trkbuf->track_state_buf[trkbuf->nb_of_state].tracks[j].instrument_number = (unsigned char)cptr->sampnum;
- }
- }
- }
- }
-
- if( trkbuf && !state_remaining_steps )
- {
- state_remaining_steps = trkbuf->sample_step;
-
- if(trkbuf->nb_of_state < trkbuf->nb_max_of_state)
- trkbuf->nb_of_state++;
- }
- else
- {
- state_remaining_steps--;
- }
-
- tl = (short)l;
- tr = (short)r;
-
- if ( modctx->filter )
- {
- // Filter
- l = (l+ll)>>1;
- r = (r+lr)>>1;
- }
-
- if ( modctx->stereo_separation == 1 )
- {
- // Left & Right Stereo panning
- l = (l+(r>>1));
- r = (r+(l>>1));
- }
-
- // Level limitation
- if( l > 32767 ) l = 32767;
- if( l < -32768 ) l = -32768;
- if( r > 32767 ) r = 32767;
- if( r < -32768 ) r = -32768;
-
- // Store the final sample.
- outbuffer[(i*2)] = l;
- outbuffer[(i*2)+1] = r;
-
- ll = tl;
- lr = tr;
-
- }
-
- modctx->last_l_sample = ll;
- modctx->last_r_sample = lr;
-
- modctx->samplenb = modctx->samplenb+nbsample;
- }
- else
- {
- for (i = 0; i < nbsample; i++)
- {
- // Mod not loaded. Return blank buffer.
- outbuffer[(i*2)] = 0;
- outbuffer[(i*2)+1] = 0;
- }
-
- if(trkbuf)
- {
- trkbuf->nb_of_state = 0;
- trkbuf->cur_rd_index = 0;
- trkbuf->name[0] = 0;
- memclear(trkbuf->track_state_buf, 0, sizeof(tracker_state) * trkbuf->nb_max_of_state);
- memclear(trkbuf->instruments, 0, sizeof(trkbuf->instruments));
- }
- }
- }
-}
-
-//resets internals for mod context
-static bool jar_mod_reset( jar_mod_context_t * modctx)
-{
- if(modctx)
- {
- memclear(&modctx->song, 0, sizeof(modctx->song));
- memclear(&modctx->sampledata, 0, sizeof(modctx->sampledata));
- memclear(&modctx->patterndata, 0, sizeof(modctx->patterndata));
- modctx->tablepos = 0;
- modctx->patternpos = 0;
- modctx->patterndelay = 0;
- modctx->jump_loop_effect = 0;
- modctx->bpm = 0;
- modctx->patternticks = 0;
- modctx->patterntickse = 0;
- modctx->patternticksaim = 0;
- modctx->sampleticksconst = 0;
- modctx->samplenb = 0;
- memclear(modctx->channels, 0, sizeof(modctx->channels));
- modctx->number_of_channels = 0;
- modctx->mod_loaded = 0;
- modctx->last_r_sample = 0;
- modctx->last_l_sample = 0;
-
- return jar_mod_init(modctx);
- }
- return 0;
-}
-
-void jar_mod_unload( jar_mod_context_t * modctx)
-{
- if(modctx)
- {
- if(modctx->modfile)
- {
- free(modctx->modfile);
- modctx->modfile = 0;
- modctx->modfilesize = 0;
- modctx->loopcount = 0;
- }
- jar_mod_reset(modctx);
- }
-}
-
-
-
-mulong jar_mod_load_file(jar_mod_context_t * modctx, const char* filename)
-{
- mulong fsize = 0;
- if(modctx->modfile)
- {
- free(modctx->modfile);
- modctx->modfile = 0;
- }
-
- FILE *f = NULL;
-#if defined(_MSC_VER) && _MSC_VER >= 1500
- fopen_s(&f, filename, "rb");
-#else
- f = fopen(filename, "rb");
-#endif
- if(f)
- {
- fseek(f,0,SEEK_END);
- fsize = ftell(f);
- fseek(f,0,SEEK_SET);
-
- if(fsize && fsize < 32*1024*1024)
- {
- modctx->modfile = malloc(fsize);
- modctx->modfilesize = fsize;
- memset(modctx->modfile, 0, fsize);
- fread(modctx->modfile, fsize, 1, f);
- fclose(f);
-
- if(!jar_mod_load(modctx, (void*)modctx->modfile, fsize)) fsize = 0;
- } else fsize = 0;
- }
- return fsize;
-}
-
-mulong jar_mod_current_samples(jar_mod_context_t * modctx)
-{
- if(modctx)
- return modctx->samplenb;
-
- return 0;
-}
-
-// Works, however it is very slow, this data should be cached to ensure it is run only once per file
-mulong jar_mod_max_samples(jar_mod_context_t * ctx)
-{
- mint buff[2];
- mulong len;
- mulong lastcount = ctx->loopcount;
-
- while(ctx->loopcount <= lastcount)
- jar_mod_fillbuffer(ctx, buff, 1, 0);
-
- len = ctx->samplenb;
- jar_mod_seek_start(ctx);
-
- return len;
-}
-
-// move seek_val to sample index, 0 -> jar_mod_max_samples is the range
-void jar_mod_seek_start(jar_mod_context_t * ctx)
-{
- if(ctx && ctx->modfile)
- {
- muchar* ftmp = ctx->modfile;
- mulong stmp = ctx->modfilesize;
- muint lcnt = ctx->loopcount;
-
- if(jar_mod_reset(ctx)){
- jar_mod_load(ctx, ftmp, stmp);
- ctx->modfile = ftmp;
- ctx->modfilesize = stmp;
- ctx->loopcount = lcnt;
- }
- }
-}
-
-#endif // end of JAR_MOD_IMPLEMENTATION
-//-------------------------------------------------------------------------------
-
-
-#endif //end of header file
\ No newline at end of file
diff --git a/client/miniaudio/jar_xm.h b/client/miniaudio/jar_xm.h
deleted file mode 100644
index 159b71ac..00000000
--- a/client/miniaudio/jar_xm.h
+++ /dev/null
@@ -1,2671 +0,0 @@
-// jar_xm.h - v0.01 - public domain - Joshua Reisenauer, MAR 2016
-//
-// HISTORY:
-//
-// v0.01 2016-02-22 Setup
-//
-//
-// USAGE:
-//
-// In ONE source file, put:
-//
-// #define JAR_XM_IMPLEMENTATION
-// #include "jar_xm.h"
-//
-// Other source files should just include jar_xm.h
-//
-// SAMPLE CODE:
-//
-// jar_xm_context_t *musicptr;
-// float musicBuffer[48000 / 60];
-// int intro_load(void)
-// {
-// jar_xm_create_context_from_file(&musicptr, 48000, "Song.XM");
-// return 1;
-// }
-// int intro_unload(void)
-// {
-// jar_xm_free_context(musicptr);
-// return 1;
-// }
-// int intro_tick(long counter)
-// {
-// jar_xm_generate_samples(musicptr, musicBuffer, (48000 / 60) / 2);
-// if(IsKeyDown(KEY_ENTER))
-// return 1;
-// return 0;
-// }
-//
-//
-// LISCENSE - FOR LIBXM:
-//
-// Author: Romain "Artefact2" Dalmaso
-// Contributor: Dan Spencer
-// Repackaged into jar_xm.h By: Joshua Adam Reisenauer
-// This program is free software. It comes without any warranty, to the
-// extent permitted by applicable law. You can redistribute it and/or
-// modify it under the terms of the Do What The Fuck You Want To Public
-// License, Version 2, as published by Sam Hocevar. See
-// http://sam.zoy.org/wtfpl/COPYING for more details.
-
-#ifndef INCLUDE_JAR_XM_H
-#define INCLUDE_JAR_XM_H
-
-#define JAR_XM_DEBUG 0
-#define JAR_XM_LINEAR_INTERPOLATION 1 // speed increase with decrease in quality
-#define JAR_XM_DEFENSIVE 1
-#define JAR_XM_RAMPING 1
-
-#include
-#include
-#include
-#include
-#include
-
-#ifndef true
- #include
-#endif
-
-
-
-//-------------------------------------------------------------------------------
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct jar_xm_context_s;
-typedef struct jar_xm_context_s jar_xm_context_t;
-
-/** Create a XM context.
- *
- * @param moddata the contents of the module
- * @param rate play rate in Hz, recommended value of 48000
- *
- * @returns 0 on success
- * @returns 1 if module data is not sane
- * @returns 2 if memory allocation failed
- * @returns 3 unable to open input file
- * @returns 4 fseek() failed
- * @returns 5 fread() failed
- * @returns 6 unkown error
- *
- * @deprecated This function is unsafe!
- * @see jar_xm_create_context_safe()
- */
-int jar_xm_create_context_from_file(jar_xm_context_t** ctx, uint32_t rate, const char* filename);
-
-/** Create a XM context.
- *
- * @param moddata the contents of the module
- * @param rate play rate in Hz, recommended value of 48000
- *
- * @returns 0 on success
- * @returns 1 if module data is not sane
- * @returns 2 if memory allocation failed
- *
- * @deprecated This function is unsafe!
- * @see jar_xm_create_context_safe()
- */
-int jar_xm_create_context(jar_xm_context_t**, const char* moddata, uint32_t rate);
-
-/** Create a XM context.
- *
- * @param moddata the contents of the module
- * @param moddata_length the length of the contents of the module, in bytes
- * @param rate play rate in Hz, recommended value of 48000
- *
- * @returns 0 on success
- * @returns 1 if module data is not sane
- * @returns 2 if memory allocation failed
- */
-int jar_xm_create_context_safe(jar_xm_context_t**, const char* moddata, size_t moddata_length, uint32_t rate);
-
-/** Free a XM context created by jar_xm_create_context(). */
-void jar_xm_free_context(jar_xm_context_t*);
-
-/** Play the module and put the sound samples in an output buffer.
- *
- * @param output buffer of 2*numsamples elements (A left and right value for each sample)
- * @param numsamples number of samples to generate
- */
-void jar_xm_generate_samples(jar_xm_context_t*, float* output, size_t numsamples);
-
-/** Play the module, resample from 32 bit to 16 bit, and put the sound samples in an output buffer.
- *
- * @param output buffer of 2*numsamples elements (A left and right value for each sample)
- * @param numsamples number of samples to generate
- */
-void jar_xm_generate_samples_16bit(jar_xm_context_t* ctx, short* output, size_t numsamples)
-{
- float* musicBuffer = malloc((2*numsamples)*sizeof(float));
- jar_xm_generate_samples(ctx, musicBuffer, numsamples);
-
- if(output){
- size_t x;
- for(x=0;x<2*numsamples;x++)
- output[x] = (short)(musicBuffer[x] * SHRT_MAX);
- }
-
- free(musicBuffer);
-}
-
-/** Play the module, resample from 32 bit to 8 bit, and put the sound samples in an output buffer.
- *
- * @param output buffer of 2*numsamples elements (A left and right value for each sample)
- * @param numsamples number of samples to generate
- */
-void jar_xm_generate_samples_8bit(jar_xm_context_t* ctx, char* output, size_t numsamples)
-{
- float* musicBuffer = malloc((2*numsamples)*sizeof(float));
- jar_xm_generate_samples(ctx, musicBuffer, numsamples);
-
- if(output){
- size_t x;
- for(x=0;x<2*numsamples;x++)
- output[x] = (char)(musicBuffer[x] * CHAR_MAX);
- }
-
- free(musicBuffer);
-}
-
-
-
-/** Set the maximum number of times a module can loop. After the
- * specified number of loops, calls to jar_xm_generate_samples will only
- * generate silence. You can control the current number of loops with
- * jar_xm_get_loop_count().
- *
- * @param loopcnt maximum number of loops. Use 0 to loop
- * indefinitely. */
-void jar_xm_set_max_loop_count(jar_xm_context_t*, uint8_t loopcnt);
-
-/** Get the loop count of the currently playing module. This value is
- * 0 when the module is still playing, 1 when the module has looped
- * once, etc. */
-uint8_t jar_xm_get_loop_count(jar_xm_context_t*);
-
-
-
-/** Mute or unmute a channel.
- *
- * @note Channel numbers go from 1 to jar_xm_get_number_of_channels(...).
- *
- * @return whether the channel was muted.
- */
-bool jar_xm_mute_channel(jar_xm_context_t*, uint16_t, bool);
-
-/** Mute or unmute an instrument.
- *
- * @note Instrument numbers go from 1 to
- * jar_xm_get_number_of_instruments(...).
- *
- * @return whether the instrument was muted.
- */
-bool jar_xm_mute_instrument(jar_xm_context_t*, uint16_t, bool);
-
-
-
-/** Get the module name as a NUL-terminated string. */
-const char* jar_xm_get_module_name(jar_xm_context_t*);
-
-/** Get the tracker name as a NUL-terminated string. */
-const char* jar_xm_get_tracker_name(jar_xm_context_t*);
-
-
-
-/** Get the number of channels. */
-uint16_t jar_xm_get_number_of_channels(jar_xm_context_t*);
-
-/** Get the module length (in patterns). */
-uint16_t jar_xm_get_module_length(jar_xm_context_t*);
-
-/** Get the number of patterns. */
-uint16_t jar_xm_get_number_of_patterns(jar_xm_context_t*);
-
-/** Get the number of rows of a pattern.
- *
- * @note Pattern numbers go from 0 to
- * jar_xm_get_number_of_patterns(...)-1.
- */
-uint16_t jar_xm_get_number_of_rows(jar_xm_context_t*, uint16_t);
-
-/** Get the number of instruments. */
-uint16_t jar_xm_get_number_of_instruments(jar_xm_context_t*);
-
-/** Get the number of samples of an instrument.
- *
- * @note Instrument numbers go from 1 to
- * jar_xm_get_number_of_instruments(...).
- */
-uint16_t jar_xm_get_number_of_samples(jar_xm_context_t*, uint16_t);
-
-
-
-/** Get the current module speed.
- *
- * @param bpm will receive the current BPM
- * @param tempo will receive the current tempo (ticks per line)
- */
-void jar_xm_get_playing_speed(jar_xm_context_t*, uint16_t* bpm, uint16_t* tempo);
-
-/** Get the current position in the module being played.
- *
- * @param pattern_index if not NULL, will receive the current pattern
- * index in the POT (pattern order table)
- *
- * @param pattern if not NULL, will receive the current pattern number
- *
- * @param row if not NULL, will receive the current row
- *
- * @param samples if not NULL, will receive the total number of
- * generated samples (divide by sample rate to get seconds of
- * generated audio)
- */
-void jar_xm_get_position(jar_xm_context_t*, uint8_t* pattern_index, uint8_t* pattern, uint8_t* row, uint64_t* samples);
-
-/** Get the latest time (in number of generated samples) when a
- * particular instrument was triggered in any channel.
- *
- * @note Instrument numbers go from 1 to
- * jar_xm_get_number_of_instruments(...).
- */
-uint64_t jar_xm_get_latest_trigger_of_instrument(jar_xm_context_t*, uint16_t);
-
-/** Get the latest time (in number of generated samples) when a
- * particular sample was triggered in any channel.
- *
- * @note Instrument numbers go from 1 to
- * jar_xm_get_number_of_instruments(...).
- *
- * @note Sample numbers go from 0 to
- * jar_xm_get_nubmer_of_samples(...,instr)-1.
- */
-uint64_t jar_xm_get_latest_trigger_of_sample(jar_xm_context_t*, uint16_t instr, uint16_t sample);
-
-/** Get the latest time (in number of generated samples) when any
- * instrument was triggered in a given channel.
- *
- * @note Channel numbers go from 1 to jar_xm_get_number_of_channels(...).
- */
-uint64_t jar_xm_get_latest_trigger_of_channel(jar_xm_context_t*, uint16_t);
-
-/** Get the number of remaining samples. Divide by 2 to get the number of individual LR data samples.
- *
- * @note This is the remaining number of samples before the loop starts module again, or halts if on last pass.
- * @note This function is very slow and should only be run once, if at all.
- */
-uint64_t jar_xm_get_remaining_samples(jar_xm_context_t*);
-
-#ifdef __cplusplus
-}
-#endif
-//-------------------------------------------------------------------------------
-#endif//end of INCLUDE_JAR_XM_H
-
-
-
-
-
-//Function Definitions-----------------------------------------------------------
-#ifdef JAR_XM_IMPLEMENTATION
-
-#include
-#include
-
-#if JAR_XM_DEBUG
-#include
-#define DEBUG(fmt, ...) do { \
- fprintf(stderr, "%s(): " fmt "\n", __func__, __VA_ARGS__); \
- fflush(stderr); \
- } while(0)
-#else
-#define DEBUG(...)
-#endif
-
-#if jar_xm_BIG_ENDIAN
-#error "Big endian platforms are not yet supported, sorry"
-/* Make sure the compiler stops, even if #error is ignored */
-extern int __fail[-1];
-#endif
-
-/* ----- XM constants ----- */
-
-#define SAMPLE_NAME_LENGTH 22
-#define INSTRUMENT_NAME_LENGTH 22
-#define MODULE_NAME_LENGTH 20
-#define TRACKER_NAME_LENGTH 20
-#define PATTERN_ORDER_TABLE_LENGTH 256
-#define NUM_NOTES 96
-#define NUM_ENVELOPE_POINTS 12
-#define MAX_NUM_ROWS 256
-
-#if JAR_XM_RAMPING
-#define jar_xm_SAMPLE_RAMPING_POINTS 0x20
-#endif
-
-/* ----- Data types ----- */
-
-enum jar_xm_waveform_type_e {
- jar_xm_SINE_WAVEFORM = 0,
- jar_xm_RAMP_DOWN_WAVEFORM = 1,
- jar_xm_SQUARE_WAVEFORM = 2,
- jar_xm_RANDOM_WAVEFORM = 3,
- jar_xm_RAMP_UP_WAVEFORM = 4,
-};
-typedef enum jar_xm_waveform_type_e jar_xm_waveform_type_t;
-
-enum jar_xm_loop_type_e {
- jar_xm_NO_LOOP,
- jar_xm_FORWARD_LOOP,
- jar_xm_PING_PONG_LOOP,
-};
-typedef enum jar_xm_loop_type_e jar_xm_loop_type_t;
-
-enum jar_xm_frequency_type_e {
- jar_xm_LINEAR_FREQUENCIES,
- jar_xm_AMIGA_FREQUENCIES,
-};
-typedef enum jar_xm_frequency_type_e jar_xm_frequency_type_t;
-
-struct jar_xm_envelope_point_s {
- uint16_t frame;
- uint16_t value;
-};
-typedef struct jar_xm_envelope_point_s jar_xm_envelope_point_t;
-
-struct jar_xm_envelope_s {
- jar_xm_envelope_point_t points[NUM_ENVELOPE_POINTS];
- uint8_t num_points;
- uint8_t sustain_point;
- uint8_t loop_start_point;
- uint8_t loop_end_point;
- bool enabled;
- bool sustain_enabled;
- bool loop_enabled;
-};
-typedef struct jar_xm_envelope_s jar_xm_envelope_t;
-
-struct jar_xm_sample_s {
- char name[SAMPLE_NAME_LENGTH + 1];
- int8_t bits; /* Either 8 or 16 */
-
- uint32_t length;
- uint32_t loop_start;
- uint32_t loop_length;
- uint32_t loop_end;
- float volume;
- int8_t finetune;
- jar_xm_loop_type_t loop_type;
- float panning;
- int8_t relative_note;
- uint64_t latest_trigger;
-
- float* data;
- };
- typedef struct jar_xm_sample_s jar_xm_sample_t;
-
- struct jar_xm_instrument_s {
- char name[INSTRUMENT_NAME_LENGTH + 1];
- uint16_t num_samples;
- uint8_t sample_of_notes[NUM_NOTES];
- jar_xm_envelope_t volume_envelope;
- jar_xm_envelope_t panning_envelope;
- jar_xm_waveform_type_t vibrato_type;
- uint8_t vibrato_sweep;
- uint8_t vibrato_depth;
- uint8_t vibrato_rate;
- uint16_t volume_fadeout;
- uint64_t latest_trigger;
- bool muted;
-
- jar_xm_sample_t* samples;
- };
- typedef struct jar_xm_instrument_s jar_xm_instrument_t;
-
- struct jar_xm_pattern_slot_s {
- uint8_t note; /* 1-96, 97 = Key Off note */
- uint8_t instrument; /* 1-128 */
- uint8_t volume_column;
- uint8_t effect_type;
- uint8_t effect_param;
- };
- typedef struct jar_xm_pattern_slot_s jar_xm_pattern_slot_t;
-
- struct jar_xm_pattern_s {
- uint16_t num_rows;
- jar_xm_pattern_slot_t* slots; /* Array of size num_rows * num_channels */
- };
- typedef struct jar_xm_pattern_s jar_xm_pattern_t;
-
- struct jar_xm_module_s {
- char name[MODULE_NAME_LENGTH + 1];
- char trackername[TRACKER_NAME_LENGTH + 1];
- uint16_t length;
- uint16_t restart_position;
- uint16_t num_channels;
- uint16_t num_patterns;
- uint16_t num_instruments;
- jar_xm_frequency_type_t frequency_type;
- uint8_t pattern_table[PATTERN_ORDER_TABLE_LENGTH];
-
- jar_xm_pattern_t* patterns;
- jar_xm_instrument_t* instruments; /* Instrument 1 has index 0,
- * instrument 2 has index 1, etc. */
- };
- typedef struct jar_xm_module_s jar_xm_module_t;
-
- struct jar_xm_channel_context_s {
- float note;
- float orig_note; /* The original note before effect modifications, as read in the pattern. */
- jar_xm_instrument_t* instrument; /* Could be NULL */
- jar_xm_sample_t* sample; /* Could be NULL */
- jar_xm_pattern_slot_t* current;
-
- float sample_position;
- float period;
- float frequency;
- float step;
- bool ping; /* For ping-pong samples: true is -->, false is <-- */
-
- float volume; /* Ideally between 0 (muted) and 1 (loudest) */
- float panning; /* Between 0 (left) and 1 (right); 0.5 is centered */
-
- uint16_t autovibrato_ticks;
-
- bool sustained;
- float fadeout_volume;
- float volume_envelope_volume;
- float panning_envelope_panning;
- uint16_t volume_envelope_frame_count;
- uint16_t panning_envelope_frame_count;
-
- float autovibrato_note_offset;
-
- bool arp_in_progress;
- uint8_t arp_note_offset;
- uint8_t volume_slide_param;
- uint8_t fine_volume_slide_param;
- uint8_t global_volume_slide_param;
- uint8_t panning_slide_param;
- uint8_t portamento_up_param;
- uint8_t portamento_down_param;
- uint8_t fine_portamento_up_param;
- uint8_t fine_portamento_down_param;
- uint8_t extra_fine_portamento_up_param;
- uint8_t extra_fine_portamento_down_param;
- uint8_t tone_portamento_param;
- float tone_portamento_target_period;
- uint8_t multi_retrig_param;
- uint8_t note_delay_param;
- uint8_t pattern_loop_origin; /* Where to restart a E6y loop */
- uint8_t pattern_loop_count; /* How many loop passes have been done */
- bool vibrato_in_progress;
- jar_xm_waveform_type_t vibrato_waveform;
- bool vibrato_waveform_retrigger; /* True if a new note retriggers the waveform */
- uint8_t vibrato_param;
- uint16_t vibrato_ticks; /* Position in the waveform */
- float vibrato_note_offset;
- jar_xm_waveform_type_t tremolo_waveform;
- bool tremolo_waveform_retrigger;
- uint8_t tremolo_param;
- uint8_t tremolo_ticks;
- float tremolo_volume;
- uint8_t tremor_param;
- bool tremor_on;
-
- uint64_t latest_trigger;
- bool muted;
-
-#if JAR_XM_RAMPING
- /* These values are updated at the end of each tick, to save
- * a couple of float operations on every generated sample. */
- float target_panning;
- float target_volume;
-
- unsigned long frame_count;
- float end_of_previous_sample[jar_xm_SAMPLE_RAMPING_POINTS];
-#endif
-
- float actual_panning;
- float actual_volume;
- };
- typedef struct jar_xm_channel_context_s jar_xm_channel_context_t;
-
- struct jar_xm_context_s {
- void* allocated_memory;
- jar_xm_module_t module;
- uint32_t rate;
-
- uint16_t tempo;
- uint16_t bpm;
- float global_volume;
- float amplification;
-
-#if JAR_XM_RAMPING
- /* How much is a channel final volume allowed to change per
- * sample; this is used to avoid abrubt volume changes which
- * manifest as "clicks" in the generated sound. */
- float volume_ramp;
- float panning_ramp; /* Same for panning. */
-#endif
-
- uint8_t current_table_index;
- uint8_t current_row;
- uint16_t current_tick; /* Can go below 255, with high tempo and a pattern delay */
- float remaining_samples_in_tick;
- uint64_t generated_samples;
-
- bool position_jump;
- bool pattern_break;
- uint8_t jump_dest;
- uint8_t jump_row;
-
- /* Extra ticks to be played before going to the next row -
- * Used for EEy effect */
- uint16_t extra_ticks;
-
- uint8_t* row_loop_count; /* Array of size MAX_NUM_ROWS * module_length */
- uint8_t loop_count;
- uint8_t max_loop_count;
-
- jar_xm_channel_context_t* channels;
-};
-
-/* ----- Internal API ----- */
-
-#if JAR_XM_DEFENSIVE
-
-/** Check the module data for errors/inconsistencies.
- *
- * @returns 0 if everything looks OK. Module should be safe to load.
- */
-int jar_xm_check_sanity_preload(const char*, size_t);
-
-/** Check a loaded module for errors/inconsistencies.
- *
- * @returns 0 if everything looks OK.
- */
-int jar_xm_check_sanity_postload(jar_xm_context_t*);
-
-#endif
-
-/** Get the number of bytes needed to store the module data in a
- * dynamically allocated blank context.
- *
- * Things that are dynamically allocated:
- * - sample data
- * - sample structures in instruments
- * - pattern data
- * - row loop count arrays
- * - pattern structures in module
- * - instrument structures in module
- * - channel contexts
- * - context structure itself
-
- * @returns 0 if everything looks OK.
- */
-size_t jar_xm_get_memory_needed_for_context(const char*, size_t);
-
-/** Populate the context from module data.
- *
- * @returns pointer to the memory pool
- */
-char* jar_xm_load_module(jar_xm_context_t*, const char*, size_t, char*);
-
-int jar_xm_create_context(jar_xm_context_t** ctxp, const char* moddata, uint32_t rate) {
- return jar_xm_create_context_safe(ctxp, moddata, SIZE_MAX, rate);
-}
-
-int jar_xm_create_context_safe(jar_xm_context_t** ctxp, const char* moddata, size_t moddata_length, uint32_t rate) {
-#if JAR_XM_DEFENSIVE
- int ret;
-#endif
- size_t bytes_needed;
- char* mempool;
- jar_xm_context_t* ctx;
-
-#if JAR_XM_DEFENSIVE
- if((ret = jar_xm_check_sanity_preload(moddata, moddata_length))) {
- DEBUG("jar_xm_check_sanity_preload() returned %i, module is not safe to load", ret);
- return 1;
- }
-#endif
-
- bytes_needed = jar_xm_get_memory_needed_for_context(moddata, moddata_length);
- mempool = malloc(bytes_needed);
- if(mempool == NULL && bytes_needed > 0) {
- /* malloc() failed, trouble ahead */
- DEBUG("call to malloc() failed, returned %p", (void*)mempool);
- return 2;
- }
-
- /* Initialize most of the fields to 0, 0.f, NULL or false depending on type */
- memset(mempool, 0, bytes_needed);
-
- ctx = (*ctxp = (jar_xm_context_t*)mempool);
- ctx->allocated_memory = mempool; /* Keep original pointer for free() */
- mempool += sizeof(jar_xm_context_t);
-
- ctx->rate = rate;
- mempool = jar_xm_load_module(ctx, moddata, moddata_length, mempool);
-
- ctx->channels = (jar_xm_channel_context_t*)mempool;
- mempool += ctx->module.num_channels * sizeof(jar_xm_channel_context_t);
-
- ctx->global_volume = 1.f;
- ctx->amplification = .25f; /* XXX: some bad modules may still clip. Find out something better. */
-
-#if JAR_XM_RAMPING
- ctx->volume_ramp = (1.f / 128.f);
- ctx->panning_ramp = (1.f / 128.f);
-#endif
-
- for(uint8_t i = 0; i < ctx->module.num_channels; ++i) {
- jar_xm_channel_context_t* ch = ctx->channels + i;
-
- ch->ping = true;
- ch->vibrato_waveform = jar_xm_SINE_WAVEFORM;
- ch->vibrato_waveform_retrigger = true;
- ch->tremolo_waveform = jar_xm_SINE_WAVEFORM;
- ch->tremolo_waveform_retrigger = true;
-
- ch->volume = ch->volume_envelope_volume = ch->fadeout_volume = 1.0f;
- ch->panning = ch->panning_envelope_panning = .5f;
- ch->actual_volume = .0f;
- ch->actual_panning = .5f;
- }
-
- ctx->row_loop_count = (uint8_t*)mempool;
- mempool += MAX_NUM_ROWS * sizeof(uint8_t);
-
-#if JAR_XM_DEFENSIVE
- if((ret = jar_xm_check_sanity_postload(ctx))) {
- DEBUG("jar_xm_check_sanity_postload() returned %i, module is not safe to play", ret);
- jar_xm_free_context(ctx);
- return 1;
- }
-#endif
-
- return 0;
-}
-
-void jar_xm_free_context(jar_xm_context_t* context) {
- free(context->allocated_memory);
-}
-
-void jar_xm_set_max_loop_count(jar_xm_context_t* context, uint8_t loopcnt) {
- context->max_loop_count = loopcnt;
-}
-
-uint8_t jar_xm_get_loop_count(jar_xm_context_t* context) {
- return context->loop_count;
-}
-
-
-
-bool jar_xm_mute_channel(jar_xm_context_t* ctx, uint16_t channel, bool mute) {
- bool old = ctx->channels[channel - 1].muted;
- ctx->channels[channel - 1].muted = mute;
- return old;
-}
-
-bool jar_xm_mute_instrument(jar_xm_context_t* ctx, uint16_t instr, bool mute) {
- bool old = ctx->module.instruments[instr - 1].muted;
- ctx->module.instruments[instr - 1].muted = mute;
- return old;
-}
-
-
-
-const char* jar_xm_get_module_name(jar_xm_context_t* ctx) {
- return ctx->module.name;
-}
-
-const char* jar_xm_get_tracker_name(jar_xm_context_t* ctx) {
- return ctx->module.trackername;
-}
-
-
-
-uint16_t jar_xm_get_number_of_channels(jar_xm_context_t* ctx) {
- return ctx->module.num_channels;
-}
-
-uint16_t jar_xm_get_module_length(jar_xm_context_t* ctx) {
- return ctx->module.length;
-}
-
-uint16_t jar_xm_get_number_of_patterns(jar_xm_context_t* ctx) {
- return ctx->module.num_patterns;
-}
-
-uint16_t jar_xm_get_number_of_rows(jar_xm_context_t* ctx, uint16_t pattern) {
- return ctx->module.patterns[pattern].num_rows;
-}
-
-uint16_t jar_xm_get_number_of_instruments(jar_xm_context_t* ctx) {
- return ctx->module.num_instruments;
-}
-
-uint16_t jar_xm_get_number_of_samples(jar_xm_context_t* ctx, uint16_t instrument) {
- return ctx->module.instruments[instrument - 1].num_samples;
-}
-
-
-
-void jar_xm_get_playing_speed(jar_xm_context_t* ctx, uint16_t* bpm, uint16_t* tempo) {
- if(bpm) *bpm = ctx->bpm;
- if(tempo) *tempo = ctx->tempo;
-}
-
-void jar_xm_get_position(jar_xm_context_t* ctx, uint8_t* pattern_index, uint8_t* pattern, uint8_t* row, uint64_t* samples) {
- if(pattern_index) *pattern_index = ctx->current_table_index;
- if(pattern) *pattern = ctx->module.pattern_table[ctx->current_table_index];
- if(row) *row = ctx->current_row;
- if(samples) *samples = ctx->generated_samples;
-}
-
-uint64_t jar_xm_get_latest_trigger_of_instrument(jar_xm_context_t* ctx, uint16_t instr) {
- return ctx->module.instruments[instr - 1].latest_trigger;
-}
-
-uint64_t jar_xm_get_latest_trigger_of_sample(jar_xm_context_t* ctx, uint16_t instr, uint16_t sample) {
- return ctx->module.instruments[instr - 1].samples[sample].latest_trigger;
-}
-
-uint64_t jar_xm_get_latest_trigger_of_channel(jar_xm_context_t* ctx, uint16_t chn) {
- return ctx->channels[chn - 1].latest_trigger;
-}
-
-/* .xm files are little-endian. (XXX: Are they really?) */
-
-/* Bounded reader macros.
- * If we attempt to read the buffer out-of-bounds, pretend that the buffer is
- * infinitely padded with zeroes.
- */
-#define READ_U8(offset) (((offset) < moddata_length) ? (*(uint8_t*)(moddata + (offset))) : 0)
-#define READ_U16(offset) ((uint16_t)READ_U8(offset) | ((uint16_t)READ_U8((offset) + 1) << 8))
-#define READ_U32(offset) ((uint32_t)READ_U16(offset) | ((uint32_t)READ_U16((offset) + 2) << 16))
-#define READ_MEMCPY(ptr, offset, length) memcpy_pad(ptr, length, moddata, moddata_length, offset)
-
-static inline void memcpy_pad(void* dst, size_t dst_len, const void* src, size_t src_len, size_t offset) {
- uint8_t* dst_c = dst;
- const uint8_t* src_c = src;
-
- /* how many bytes can be copied without overrunning `src` */
- size_t copy_bytes = (src_len >= offset) ? (src_len - offset) : 0;
- copy_bytes = copy_bytes > dst_len ? dst_len : copy_bytes;
-
- memcpy(dst_c, src_c + offset, copy_bytes);
- /* padded bytes */
- memset(dst_c + copy_bytes, 0, dst_len - copy_bytes);
-}
-
-#if JAR_XM_DEFENSIVE
-
-int jar_xm_check_sanity_preload(const char* module, size_t module_length) {
- if(module_length < 60) {
- return 4;
- }
-
- if(memcmp("Extended Module: ", module, 17) != 0) {
- return 1;
- }
-
- if(module[37] != 0x1A) {
- return 2;
- }
-
- if(module[59] != 0x01 || module[58] != 0x04) {
- /* Not XM 1.04 */
- return 3;
- }
-
- return 0;
-}
-
-int jar_xm_check_sanity_postload(jar_xm_context_t* ctx) {
- /* @todo: plenty of stuff to do here… */
-
- /* Check the POT */
- for(uint8_t i = 0; i < ctx->module.length; ++i) {
- if(ctx->module.pattern_table[i] >= ctx->module.num_patterns) {
- if(i+1 == ctx->module.length && ctx->module.length > 1) {
- /* Cheap fix */
- --ctx->module.length;
- DEBUG("trimming invalid POT at pos %X", i);
- } else {
- DEBUG("module has invalid POT, pos %X references nonexistent pattern %X",
- i,
- ctx->module.pattern_table[i]);
- return 1;
- }
- }
- }
-
- return 0;
-}
-
-#endif
-
-size_t jar_xm_get_memory_needed_for_context(const char* moddata, size_t moddata_length) {
- size_t memory_needed = 0;
- size_t offset = 60; /* Skip the first header */
- uint16_t num_channels;
- uint16_t num_patterns;
- uint16_t num_instruments;
-
- /* Read the module header */
-
- num_channels = READ_U16(offset + 8);
- num_channels = READ_U16(offset + 8);
-
- num_patterns = READ_U16(offset + 10);
- memory_needed += num_patterns * sizeof(jar_xm_pattern_t);
-
- num_instruments = READ_U16(offset + 12);
- memory_needed += num_instruments * sizeof(jar_xm_instrument_t);
-
- memory_needed += MAX_NUM_ROWS * READ_U16(offset + 4) * sizeof(uint8_t); /* Module length */
-
- /* Header size */
- offset += READ_U32(offset);
-
- /* Read pattern headers */
- for(uint16_t i = 0; i < num_patterns; ++i) {
- uint16_t num_rows;
-
- num_rows = READ_U16(offset + 5);
- memory_needed += num_rows * num_channels * sizeof(jar_xm_pattern_slot_t);
-
- /* Pattern header length + packed pattern data size */
- offset += READ_U32(offset) + READ_U16(offset + 7);
- }
-
- /* Read instrument headers */
- for(uint16_t i = 0; i < num_instruments; ++i) {
- uint16_t num_samples;
- uint32_t sample_header_size = 0;
- uint32_t sample_size_aggregate = 0;
-
- num_samples = READ_U16(offset + 27);
- memory_needed += num_samples * sizeof(jar_xm_sample_t);
-
- if(num_samples > 0) {
- sample_header_size = READ_U32(offset + 29);
- }
-
- /* Instrument header size */
- offset += READ_U32(offset);
-
- for(uint16_t j = 0; j < num_samples; ++j) {
- uint32_t sample_size;
- uint8_t flags;
-
- sample_size = READ_U32(offset);
- flags = READ_U8(offset + 14);
- sample_size_aggregate += sample_size;
-
- if(flags & (1 << 4)) {
- /* 16 bit sample */
- memory_needed += sample_size * (sizeof(float) >> 1);
- } else {
- /* 8 bit sample */
- memory_needed += sample_size * sizeof(float);
- }
-
- offset += sample_header_size;
- }
-
- offset += sample_size_aggregate;
- }
-
- memory_needed += num_channels * sizeof(jar_xm_channel_context_t);
- memory_needed += sizeof(jar_xm_context_t);
-
- return memory_needed;
-}
-
-char* jar_xm_load_module(jar_xm_context_t* ctx, const char* moddata, size_t moddata_length, char* mempool) {
- size_t offset = 0;
- jar_xm_module_t* mod = &(ctx->module);
-
- /* Read XM header */
- READ_MEMCPY(mod->name, offset + 17, MODULE_NAME_LENGTH);
- READ_MEMCPY(mod->trackername, offset + 38, TRACKER_NAME_LENGTH);
- offset += 60;
-
- /* Read module header */
- uint32_t header_size = READ_U32(offset);
-
- mod->length = READ_U16(offset + 4);
- mod->restart_position = READ_U16(offset + 6);
- mod->num_channels = READ_U16(offset + 8);
- mod->num_patterns = READ_U16(offset + 10);
- mod->num_instruments = READ_U16(offset + 12);
-
- mod->patterns = (jar_xm_pattern_t*)mempool;
- mempool += mod->num_patterns * sizeof(jar_xm_pattern_t);
-
- mod->instruments = (jar_xm_instrument_t*)mempool;
- mempool += mod->num_instruments * sizeof(jar_xm_instrument_t);
-
- uint16_t flags = READ_U32(offset + 14);
- mod->frequency_type = (flags & (1 << 0)) ? jar_xm_LINEAR_FREQUENCIES : jar_xm_AMIGA_FREQUENCIES;
-
- ctx->tempo = READ_U16(offset + 16);
- ctx->bpm = READ_U16(offset + 18);
-
- READ_MEMCPY(mod->pattern_table, offset + 20, PATTERN_ORDER_TABLE_LENGTH);
- offset += header_size;
-
- /* Read patterns */
- for(uint16_t i = 0; i < mod->num_patterns; ++i) {
- uint16_t packed_patterndata_size = READ_U16(offset + 7);
- jar_xm_pattern_t* pat = mod->patterns + i;
-
- pat->num_rows = READ_U16(offset + 5);
-
- pat->slots = (jar_xm_pattern_slot_t*)mempool;
- mempool += mod->num_channels * pat->num_rows * sizeof(jar_xm_pattern_slot_t);
-
- /* Pattern header length */
- offset += READ_U32(offset);
-
- if(packed_patterndata_size == 0) {
- /* No pattern data is present */
- memset(pat->slots, 0, sizeof(jar_xm_pattern_slot_t) * pat->num_rows * mod->num_channels);
- } else {
- /* This isn't your typical for loop */
- for(uint16_t j = 0, k = 0; j < packed_patterndata_size; ++k) {
- uint8_t note = READ_U8(offset + j);
- jar_xm_pattern_slot_t* slot = pat->slots + k;
-
- if(note & (1 << 7)) {
- /* MSB is set, this is a compressed packet */
- ++j;
-
- if(note & (1 << 0)) {
- /* Note follows */
- slot->note = READ_U8(offset + j);
- ++j;
- } else {
- slot->note = 0;
- }
-
- if(note & (1 << 1)) {
- /* Instrument follows */
- slot->instrument = READ_U8(offset + j);
- ++j;
- } else {
- slot->instrument = 0;
- }
-
- if(note & (1 << 2)) {
- /* Volume column follows */
- slot->volume_column = READ_U8(offset + j);
- ++j;
- } else {
- slot->volume_column = 0;
- }
-
- if(note & (1 << 3)) {
- /* Effect follows */
- slot->effect_type = READ_U8(offset + j);
- ++j;
- } else {
- slot->effect_type = 0;
- }
-
- if(note & (1 << 4)) {
- /* Effect parameter follows */
- slot->effect_param = READ_U8(offset + j);
- ++j;
- } else {
- slot->effect_param = 0;
- }
- } else {
- /* Uncompressed packet */
- slot->note = note;
- slot->instrument = READ_U8(offset + j + 1);
- slot->volume_column = READ_U8(offset + j + 2);
- slot->effect_type = READ_U8(offset + j + 3);
- slot->effect_param = READ_U8(offset + j + 4);
- j += 5;
- }
- }
- }
-
- offset += packed_patterndata_size;
- }
-
- /* Read instruments */
- for(uint16_t i = 0; i < ctx->module.num_instruments; ++i) {
- uint32_t sample_header_size = 0;
- jar_xm_instrument_t* instr = mod->instruments + i;
-
- READ_MEMCPY(instr->name, offset + 4, INSTRUMENT_NAME_LENGTH);
- instr->num_samples = READ_U16(offset + 27);
-
- if(instr->num_samples > 0) {
- /* Read extra header properties */
- sample_header_size = READ_U32(offset + 29);
- READ_MEMCPY(instr->sample_of_notes, offset + 33, NUM_NOTES);
-
- instr->volume_envelope.num_points = READ_U8(offset + 225);
- instr->panning_envelope.num_points = READ_U8(offset + 226);
-
- for(uint8_t j = 0; j < instr->volume_envelope.num_points; ++j) {
- instr->volume_envelope.points[j].frame = READ_U16(offset + 129 + 4 * j);
- instr->volume_envelope.points[j].value = READ_U16(offset + 129 + 4 * j + 2);
- }
-
- for(uint8_t j = 0; j < instr->panning_envelope.num_points; ++j) {
- instr->panning_envelope.points[j].frame = READ_U16(offset + 177 + 4 * j);
- instr->panning_envelope.points[j].value = READ_U16(offset + 177 + 4 * j + 2);
- }
-
- instr->volume_envelope.sustain_point = READ_U8(offset + 227);
- instr->volume_envelope.loop_start_point = READ_U8(offset + 228);
- instr->volume_envelope.loop_end_point = READ_U8(offset + 229);
-
- instr->panning_envelope.sustain_point = READ_U8(offset + 230);
- instr->panning_envelope.loop_start_point = READ_U8(offset + 231);
- instr->panning_envelope.loop_end_point = READ_U8(offset + 232);
-
- uint8_t flags = READ_U8(offset + 233);
- instr->volume_envelope.enabled = flags & (1 << 0);
- instr->volume_envelope.sustain_enabled = flags & (1 << 1);
- instr->volume_envelope.loop_enabled = flags & (1 << 2);
-
- flags = READ_U8(offset + 234);
- instr->panning_envelope.enabled = flags & (1 << 0);
- instr->panning_envelope.sustain_enabled = flags & (1 << 1);
- instr->panning_envelope.loop_enabled = flags & (1 << 2);
-
- instr->vibrato_type = READ_U8(offset + 235);
- if(instr->vibrato_type == 2) {
- instr->vibrato_type = 1;
- } else if(instr->vibrato_type == 1) {
- instr->vibrato_type = 2;
- }
- instr->vibrato_sweep = READ_U8(offset + 236);
- instr->vibrato_depth = READ_U8(offset + 237);
- instr->vibrato_rate = READ_U8(offset + 238);
- instr->volume_fadeout = READ_U16(offset + 239);
-
- instr->samples = (jar_xm_sample_t*)mempool;
- mempool += instr->num_samples * sizeof(jar_xm_sample_t);
- } else {
- instr->samples = NULL;
- }
-
- /* Instrument header size */
- offset += READ_U32(offset);
-
- for(uint16_t j = 0; j < instr->num_samples; ++j) {
- /* Read sample header */
- jar_xm_sample_t* sample = instr->samples + j;
-
- sample->length = READ_U32(offset);
- sample->loop_start = READ_U32(offset + 4);
- sample->loop_length = READ_U32(offset + 8);
- sample->loop_end = sample->loop_start + sample->loop_length;
- sample->volume = (float)READ_U8(offset + 12) / (float)0x40;
- sample->finetune = (int8_t)READ_U8(offset + 13);
-
- uint8_t flags = READ_U8(offset + 14);
- if((flags & 3) == 0) {
- sample->loop_type = jar_xm_NO_LOOP;
- } else if((flags & 3) == 1) {
- sample->loop_type = jar_xm_FORWARD_LOOP;
- } else {
- sample->loop_type = jar_xm_PING_PONG_LOOP;
- }
-
- sample->bits = (flags & (1 << 4)) ? 16 : 8;
-
- sample->panning = (float)READ_U8(offset + 15) / (float)0xFF;
- sample->relative_note = (int8_t)READ_U8(offset + 16);
- READ_MEMCPY(sample->name, 18, SAMPLE_NAME_LENGTH);
- sample->data = (float*)mempool;
-
- if(sample->bits == 16) {
- /* 16 bit sample */
- mempool += sample->length * (sizeof(float) >> 1);
- sample->loop_start >>= 1;
- sample->loop_length >>= 1;
- sample->loop_end >>= 1;
- sample->length >>= 1;
- } else {
- /* 8 bit sample */
- mempool += sample->length * sizeof(float);
- }
-
- offset += sample_header_size;
- }
-
- for(uint16_t j = 0; j < instr->num_samples; ++j) {
- /* Read sample data */
- jar_xm_sample_t* sample = instr->samples + j;
- uint32_t length = sample->length;
-
- if(sample->bits == 16) {
- int16_t v = 0;
- for(uint32_t k = 0; k < length; ++k) {
- v = v + (int16_t)READ_U16(offset + (k << 1));
- sample->data[k] = (float)v / (float)(1 << 15);
- }
- offset += sample->length << 1;
- } else {
- int8_t v = 0;
- for(uint32_t k = 0; k < length; ++k) {
- v = v + (int8_t)READ_U8(offset + k);
- sample->data[k] = (float)v / (float)(1 << 7);
- }
- offset += sample->length;
- }
- }
- }
-
- return mempool;
-}
-
-//-------------------------------------------------------------------------------
-//THE FOLLOWING IS FOR PLAYING
-//-------------------------------------------------------------------------------
-
-/* ----- Static functions ----- */
-
-static float jar_xm_waveform(jar_xm_waveform_type_t, uint8_t);
-static void jar_xm_autovibrato(jar_xm_context_t*, jar_xm_channel_context_t*);
-static void jar_xm_vibrato(jar_xm_context_t*, jar_xm_channel_context_t*, uint8_t, uint16_t);
-static void jar_xm_tremolo(jar_xm_context_t*, jar_xm_channel_context_t*, uint8_t, uint16_t);
-static void jar_xm_arpeggio(jar_xm_context_t*, jar_xm_channel_context_t*, uint8_t, uint16_t);
-static void jar_xm_tone_portamento(jar_xm_context_t*, jar_xm_channel_context_t*);
-static void jar_xm_pitch_slide(jar_xm_context_t*, jar_xm_channel_context_t*, float);
-static void jar_xm_panning_slide(jar_xm_channel_context_t*, uint8_t);
-static void jar_xm_volume_slide(jar_xm_channel_context_t*, uint8_t);
-
-static float jar_xm_envelope_lerp(jar_xm_envelope_point_t*, jar_xm_envelope_point_t*, uint16_t);
-static void jar_xm_envelope_tick(jar_xm_channel_context_t*, jar_xm_envelope_t*, uint16_t*, float*);
-static void jar_xm_envelopes(jar_xm_channel_context_t*);
-
-static float jar_xm_linear_period(float);
-static float jar_xm_linear_frequency(float);
-static float jar_xm_amiga_period(float);
-static float jar_xm_amiga_frequency(float);
-static float jar_xm_period(jar_xm_context_t*, float);
-static float jar_xm_frequency(jar_xm_context_t*, float, float);
-static void jar_xm_update_frequency(jar_xm_context_t*, jar_xm_channel_context_t*);
-
-static void jar_xm_handle_note_and_instrument(jar_xm_context_t*, jar_xm_channel_context_t*, jar_xm_pattern_slot_t*);
-static void jar_xm_trigger_note(jar_xm_context_t*, jar_xm_channel_context_t*, unsigned int flags);
-static void jar_xm_cut_note(jar_xm_channel_context_t*);
-static void jar_xm_key_off(jar_xm_channel_context_t*);
-
-static void jar_xm_post_pattern_change(jar_xm_context_t*);
-static void jar_xm_row(jar_xm_context_t*);
-static void jar_xm_tick(jar_xm_context_t*);
-
-static float jar_xm_next_of_sample(jar_xm_channel_context_t*);
-static void jar_xm_sample(jar_xm_context_t*, float*, float*);
-
-/* ----- Other oddities ----- */
-
-#define jar_xm_TRIGGER_KEEP_VOLUME (1 << 0)
-#define jar_xm_TRIGGER_KEEP_PERIOD (1 << 1)
-#define jar_xm_TRIGGER_KEEP_SAMPLE_POSITION (1 << 2)
-
-static const uint16_t amiga_frequencies[] = {
- 1712, 1616, 1525, 1440, /* C-2, C#2, D-2, D#2 */
- 1357, 1281, 1209, 1141, /* E-2, F-2, F#2, G-2 */
- 1077, 1017, 961, 907, /* G#2, A-2, A#2, B-2 */
- 856, /* C-3 */
-};
-
-static const float multi_retrig_add[] = {
- 0.f, -1.f, -2.f, -4.f, /* 0, 1, 2, 3 */
- -8.f, -16.f, 0.f, 0.f, /* 4, 5, 6, 7 */
- 0.f, 1.f, 2.f, 4.f, /* 8, 9, A, B */
- 8.f, 16.f, 0.f, 0.f /* C, D, E, F */
-};
-
-static const float multi_retrig_multiply[] = {
- 1.f, 1.f, 1.f, 1.f, /* 0, 1, 2, 3 */
- 1.f, 1.f, .6666667f, .5f, /* 4, 5, 6, 7 */
- 1.f, 1.f, 1.f, 1.f, /* 8, 9, A, B */
- 1.f, 1.f, 1.5f, 2.f /* C, D, E, F */
-};
-
-#define jar_xm_CLAMP_UP1F(vol, limit) do { \
- if((vol) > (limit)) (vol) = (limit); \
- } while(0)
-#define jar_xm_CLAMP_UP(vol) jar_xm_CLAMP_UP1F((vol), 1.f)
-
-#define jar_xm_CLAMP_DOWN1F(vol, limit) do { \
- if((vol) < (limit)) (vol) = (limit); \
- } while(0)
-#define jar_xm_CLAMP_DOWN(vol) jar_xm_CLAMP_DOWN1F((vol), .0f)
-
-#define jar_xm_CLAMP2F(vol, up, down) do { \
- if((vol) > (up)) (vol) = (up); \
- else if((vol) < (down)) (vol) = (down); \
- } while(0)
-#define jar_xm_CLAMP(vol) jar_xm_CLAMP2F((vol), 1.f, .0f)
-
-#define jar_xm_SLIDE_TOWARDS(val, goal, incr) do { \
- if((val) > (goal)) { \
- (val) -= (incr); \
- jar_xm_CLAMP_DOWN1F((val), (goal)); \
- } else if((val) < (goal)) { \
- (val) += (incr); \
- jar_xm_CLAMP_UP1F((val), (goal)); \
- } \
- } while(0)
-
-#define jar_xm_LERP(u, v, t) ((u) + (t) * ((v) - (u)))
-#define jar_xm_INVERSE_LERP(u, v, lerp) (((lerp) - (u)) / ((v) - (u)))
-
-#define HAS_TONE_PORTAMENTO(s) ((s)->effect_type == 3 \
- || (s)->effect_type == 5 \
- || ((s)->volume_column >> 4) == 0xF)
-#define HAS_ARPEGGIO(s) ((s)->effect_type == 0 \
- && (s)->effect_param != 0)
-#define HAS_VIBRATO(s) ((s)->effect_type == 4 \
- || (s)->effect_param == 6 \
- || ((s)->volume_column >> 4) == 0xB)
-#define NOTE_IS_VALID(n) ((n) > 0 && (n) < 97)
-
-/* ----- Function definitions ----- */
-
-static float jar_xm_waveform(jar_xm_waveform_type_t waveform, uint8_t step) {
- static unsigned int next_rand = 24492;
- step %= 0x40;
-
- switch(waveform) {
-
- case jar_xm_SINE_WAVEFORM:
- /* Why not use a table? For saving space, and because there's
- * very very little actual performance gain. */
- return -sinf(2.f * 3.141592f * (float)step / (float)0x40);
-
- case jar_xm_RAMP_DOWN_WAVEFORM:
- /* Ramp down: 1.0f when step = 0; -1.0f when step = 0x40 */
- return (float)(0x20 - step) / 0x20;
-
- case jar_xm_SQUARE_WAVEFORM:
- /* Square with a 50% duty */
- return (step >= 0x20) ? 1.f : -1.f;
-
- case jar_xm_RANDOM_WAVEFORM:
- /* Use the POSIX.1-2001 example, just to be deterministic
- * across different machines */
- next_rand = next_rand * 1103515245 + 12345;
- return (float)((next_rand >> 16) & 0x7FFF) / (float)0x4000 - 1.f;
-
- case jar_xm_RAMP_UP_WAVEFORM:
- /* Ramp up: -1.f when step = 0; 1.f when step = 0x40 */
- return (float)(step - 0x20) / 0x20;
-
- default:
- break;
-
- }
-
- return .0f;
-}
-
-static void jar_xm_autovibrato(jar_xm_context_t* ctx, jar_xm_channel_context_t* ch) {
- if(ch->instrument == NULL || ch->instrument->vibrato_depth == 0) return;
- jar_xm_instrument_t* instr = ch->instrument;
- float sweep = 1.f;
-
- if(ch->autovibrato_ticks < instr->vibrato_sweep) {
- /* No idea if this is correct, but it sounds close enough… */
- sweep = jar_xm_LERP(0.f, 1.f, (float)ch->autovibrato_ticks / (float)instr->vibrato_sweep);
- }
-
- unsigned int step = ((ch->autovibrato_ticks++) * instr->vibrato_rate) >> 2;
- ch->autovibrato_note_offset = .25f * jar_xm_waveform(instr->vibrato_type, step)
- * (float)instr->vibrato_depth / (float)0xF * sweep;
- jar_xm_update_frequency(ctx, ch);
-}
-
-static void jar_xm_vibrato(jar_xm_context_t* ctx, jar_xm_channel_context_t* ch, uint8_t param, uint16_t pos) {
- unsigned int step = pos * (param >> 4);
- ch->vibrato_note_offset =
- 2.f
- * jar_xm_waveform(ch->vibrato_waveform, step)
- * (float)(param & 0x0F) / (float)0xF;
- jar_xm_update_frequency(ctx, ch);
-}
-
-static void jar_xm_tremolo(jar_xm_context_t* ctx, jar_xm_channel_context_t* ch, uint8_t param, uint16_t pos) {
- unsigned int step = pos * (param >> 4);
- /* Not so sure about this, it sounds correct by ear compared with
- * MilkyTracker, but it could come from other bugs */
- ch->tremolo_volume = -1.f * jar_xm_waveform(ch->tremolo_waveform, step)
- * (float)(param & 0x0F) / (float)0xF;
-}
-
-static void jar_xm_arpeggio(jar_xm_context_t* ctx, jar_xm_channel_context_t* ch, uint8_t param, uint16_t tick) {
- switch(tick % 3) {
- case 0:
- ch->arp_in_progress = false;
- ch->arp_note_offset = 0;
- break;
- case 2:
- ch->arp_in_progress = true;
- ch->arp_note_offset = param >> 4;
- break;
- case 1:
- ch->arp_in_progress = true;
- ch->arp_note_offset = param & 0x0F;
- break;
- }
-
- jar_xm_update_frequency(ctx, ch);
-}
-
-static void jar_xm_tone_portamento(jar_xm_context_t* ctx, jar_xm_channel_context_t* ch) {
- /* 3xx called without a note, wait until we get an actual
- * target note. */
- if(ch->tone_portamento_target_period == 0.f) return;
-
- if(ch->period != ch->tone_portamento_target_period) {
- jar_xm_SLIDE_TOWARDS(ch->period,
- ch->tone_portamento_target_period,
- (ctx->module.frequency_type == jar_xm_LINEAR_FREQUENCIES ?
- 4.f : 1.f) * ch->tone_portamento_param
- );
- jar_xm_update_frequency(ctx, ch);
- }
-}
-
-static void jar_xm_pitch_slide(jar_xm_context_t* ctx, jar_xm_channel_context_t* ch, float period_offset) {
- /* Don't ask about the 4.f coefficient. I found mention of it
- * nowhere. Found by earâ„¢. */
- if(ctx->module.frequency_type == jar_xm_LINEAR_FREQUENCIES) {
- period_offset *= 4.f;
- }
-
- ch->period += period_offset;
- jar_xm_CLAMP_DOWN(ch->period);
- /* XXX: upper bound of period ? */
-
- jar_xm_update_frequency(ctx, ch);
-}
-
-static void jar_xm_panning_slide(jar_xm_channel_context_t* ch, uint8_t rawval) {
- float f;
-
- if((rawval & 0xF0) && (rawval & 0x0F)) {
- /* Illegal state */
- return;
- }
-
- if(rawval & 0xF0) {
- /* Slide right */
- f = (float)(rawval >> 4) / (float)0xFF;
- ch->panning += f;
- jar_xm_CLAMP_UP(ch->panning);
- } else {
- /* Slide left */
- f = (float)(rawval & 0x0F) / (float)0xFF;
- ch->panning -= f;
- jar_xm_CLAMP_DOWN(ch->panning);
- }
-}
-
-static void jar_xm_volume_slide(jar_xm_channel_context_t* ch, uint8_t rawval) {
- float f;
-
- if((rawval & 0xF0) && (rawval & 0x0F)) {
- /* Illegal state */
- return;
- }
-
- if(rawval & 0xF0) {
- /* Slide up */
- f = (float)(rawval >> 4) / (float)0x40;
- ch->volume += f;
- jar_xm_CLAMP_UP(ch->volume);
- } else {
- /* Slide down */
- f = (float)(rawval & 0x0F) / (float)0x40;
- ch->volume -= f;
- jar_xm_CLAMP_DOWN(ch->volume);
- }
-}
-
-static float jar_xm_envelope_lerp(jar_xm_envelope_point_t* a, jar_xm_envelope_point_t* b, uint16_t pos) {
- /* Linear interpolation between two envelope points */
- if(pos <= a->frame) return a->value;
- else if(pos >= b->frame) return b->value;
- else {
- float p = (float)(pos - a->frame) / (float)(b->frame - a->frame);
- return a->value * (1 - p) + b->value * p;
- }
-}
-
-static void jar_xm_post_pattern_change(jar_xm_context_t* ctx) {
- /* Loop if necessary */
- if(ctx->current_table_index >= ctx->module.length) {
- ctx->current_table_index = ctx->module.restart_position;
- }
-}
-
-static float jar_xm_linear_period(float note) {
- return 7680.f - note * 64.f;
-}
-
-static float jar_xm_linear_frequency(float period) {
- return 8363.f * powf(2.f, (4608.f - period) / 768.f);
-}
-
-static float jar_xm_amiga_period(float note) {
- unsigned int intnote = note;
- uint8_t a = intnote % 12;
- int8_t octave = note / 12.f - 2;
- uint16_t p1 = amiga_frequencies[a], p2 = amiga_frequencies[a + 1];
-
- if(octave > 0) {
- p1 >>= octave;
- p2 >>= octave;
- } else if(octave < 0) {
- p1 <<= (-octave);
- p2 <<= (-octave);
- }
-
- return jar_xm_LERP(p1, p2, note - intnote);
-}
-
-static float jar_xm_amiga_frequency(float period) {
- if(period == .0f) return .0f;
-
- /* This is the PAL value. No reason to choose this one over the
- * NTSC value. */
- return 7093789.2f / (period * 2.f);
-}
-
-static float jar_xm_period(jar_xm_context_t* ctx, float note) {
- switch(ctx->module.frequency_type) {
- case jar_xm_LINEAR_FREQUENCIES:
- return jar_xm_linear_period(note);
- case jar_xm_AMIGA_FREQUENCIES:
- return jar_xm_amiga_period(note);
- }
- return .0f;
-}
-
-static float jar_xm_frequency(jar_xm_context_t* ctx, float period, float note_offset) {
- uint8_t a;
- int8_t octave;
- float note;
- uint16_t p1, p2;
-
- switch(ctx->module.frequency_type) {
-
- case jar_xm_LINEAR_FREQUENCIES:
- return jar_xm_linear_frequency(period - 64.f * note_offset);
-
- case jar_xm_AMIGA_FREQUENCIES:
- if(note_offset == 0) {
- /* A chance to escape from insanity */
- return jar_xm_amiga_frequency(period);
- }
-
- /* FIXME: this is very crappy at best */
- a = octave = 0;
-
- /* Find the octave of the current period */
- if(period > amiga_frequencies[0]) {
- --octave;
- while(period > (amiga_frequencies[0] << (-octave))) --octave;
- } else if(period < amiga_frequencies[12]) {
- ++octave;
- while(period < (amiga_frequencies[12] >> octave)) ++octave;
- }
-
- /* Find the smallest note closest to the current period */
- for(uint8_t i = 0; i < 12; ++i) {
- p1 = amiga_frequencies[i], p2 = amiga_frequencies[i + 1];
-
- if(octave > 0) {
- p1 >>= octave;
- p2 >>= octave;
- } else if(octave < 0) {
- p1 <<= (-octave);
- p2 <<= (-octave);
- }
-
- if(p2 <= period && period <= p1) {
- a = i;
- break;
- }
- }
-
- if(JAR_XM_DEBUG && (p1 < period || p2 > period)) {
- DEBUG("%i <= %f <= %i should hold but doesn't, this is a bug", p2, period, p1);
- }
-
- note = 12.f * (octave + 2) + a + jar_xm_INVERSE_LERP(p1, p2, period);
-
- return jar_xm_amiga_frequency(jar_xm_amiga_period(note + note_offset));
-
- }
-
- return .0f;
-}
-
-static void jar_xm_update_frequency(jar_xm_context_t* ctx, jar_xm_channel_context_t* ch) {
- ch->frequency = jar_xm_frequency(
- ctx, ch->period,
- (ch->arp_note_offset > 0 ? ch->arp_note_offset : (
- ch->vibrato_note_offset + ch->autovibrato_note_offset
- ))
- );
- ch->step = ch->frequency / ctx->rate;
-}
-
-static void jar_xm_handle_note_and_instrument(jar_xm_context_t* ctx, jar_xm_channel_context_t* ch,
- jar_xm_pattern_slot_t* s) {
- if(s->instrument > 0) {
- if(HAS_TONE_PORTAMENTO(ch->current) && ch->instrument != NULL && ch->sample != NULL) {
- /* Tone portamento in effect, unclear stuff happens */
- jar_xm_trigger_note(ctx, ch, jar_xm_TRIGGER_KEEP_PERIOD | jar_xm_TRIGGER_KEEP_SAMPLE_POSITION);
- } else if(s->instrument > ctx->module.num_instruments) {
- /* Invalid instrument, Cut current note */
- jar_xm_cut_note(ch);
- ch->instrument = NULL;
- ch->sample = NULL;
- } else {
- ch->instrument = ctx->module.instruments + (s->instrument - 1);
- if(s->note == 0 && ch->sample != NULL) {
- /* Ghost instrument, trigger note */
- /* Sample position is kept, but envelopes are reset */
- jar_xm_trigger_note(ctx, ch, jar_xm_TRIGGER_KEEP_SAMPLE_POSITION);
- }
- }
- }
-
- if(NOTE_IS_VALID(s->note)) {
- /* Yes, the real note number is s->note -1. Try finding
- * THAT in any of the specs! :-) */
-
- jar_xm_instrument_t* instr = ch->instrument;
-
- if(HAS_TONE_PORTAMENTO(ch->current) && instr != NULL && ch->sample != NULL) {
- /* Tone portamento in effect */
- ch->note = s->note + ch->sample->relative_note + ch->sample->finetune / 128.f - 1.f;
- ch->tone_portamento_target_period = jar_xm_period(ctx, ch->note);
- } else if(instr == NULL || ch->instrument->num_samples == 0) {
- /* Bad instrument */
- jar_xm_cut_note(ch);
- } else {
- if(instr->sample_of_notes[s->note - 1] < instr->num_samples) {
-#if JAR_XM_RAMPING
- for(unsigned int z = 0; z < jar_xm_SAMPLE_RAMPING_POINTS; ++z) {
- ch->end_of_previous_sample[z] = jar_xm_next_of_sample(ch);
- }
- ch->frame_count = 0;
-#endif
- ch->sample = instr->samples + instr->sample_of_notes[s->note - 1];
- ch->orig_note = ch->note = s->note + ch->sample->relative_note
- + ch->sample->finetune / 128.f - 1.f;
- if(s->instrument > 0) {
- jar_xm_trigger_note(ctx, ch, 0);
- } else {
- /* Ghost note: keep old volume */
- jar_xm_trigger_note(ctx, ch, jar_xm_TRIGGER_KEEP_VOLUME);
- }
- } else {
- /* Bad sample */
- jar_xm_cut_note(ch);
- }
- }
- } else if(s->note == 97) {
- /* Key Off */
- jar_xm_key_off(ch);
- }
-
- switch(s->volume_column >> 4) {
-
- case 0x5:
- if(s->volume_column > 0x50) break;
- case 0x1:
- case 0x2:
- case 0x3:
- case 0x4:
- /* Set volume */
- ch->volume = (float)(s->volume_column - 0x10) / (float)0x40;
- break;
-
- case 0x8: /* Fine volume slide down */
- jar_xm_volume_slide(ch, s->volume_column & 0x0F);
- break;
-
- case 0x9: /* Fine volume slide up */
- jar_xm_volume_slide(ch, s->volume_column << 4);
- break;
-
- case 0xA: /* Set vibrato speed */
- ch->vibrato_param = (ch->vibrato_param & 0x0F) | ((s->volume_column & 0x0F) << 4);
- break;
-
- case 0xC: /* Set panning */
- ch->panning = (float)(
- ((s->volume_column & 0x0F) << 4) | (s->volume_column & 0x0F)
- ) / (float)0xFF;
- break;
-
- case 0xF: /* Tone portamento */
- if(s->volume_column & 0x0F) {
- ch->tone_portamento_param = ((s->volume_column & 0x0F) << 4)
- | (s->volume_column & 0x0F);
- }
- break;
-
- default:
- break;
-
- }
-
- switch(s->effect_type) {
-
- case 1: /* 1xx: Portamento up */
- if(s->effect_param > 0) {
- ch->portamento_up_param = s->effect_param;
- }
- break;
-
- case 2: /* 2xx: Portamento down */
- if(s->effect_param > 0) {
- ch->portamento_down_param = s->effect_param;
- }
- break;
-
- case 3: /* 3xx: Tone portamento */
- if(s->effect_param > 0) {
- ch->tone_portamento_param = s->effect_param;
- }
- break;
-
- case 4: /* 4xy: Vibrato */
- if(s->effect_param & 0x0F) {
- /* Set vibrato depth */
- ch->vibrato_param = (ch->vibrato_param & 0xF0) | (s->effect_param & 0x0F);
- }
- if(s->effect_param >> 4) {
- /* Set vibrato speed */
- ch->vibrato_param = (s->effect_param & 0xF0) | (ch->vibrato_param & 0x0F);
- }
- break;
-
- case 5: /* 5xy: Tone portamento + Volume slide */
- if(s->effect_param > 0) {
- ch->volume_slide_param = s->effect_param;
- }
- break;
-
- case 6: /* 6xy: Vibrato + Volume slide */
- if(s->effect_param > 0) {
- ch->volume_slide_param = s->effect_param;
- }
- break;
-
- case 7: /* 7xy: Tremolo */
- if(s->effect_param & 0x0F) {
- /* Set tremolo depth */
- ch->tremolo_param = (ch->tremolo_param & 0xF0) | (s->effect_param & 0x0F);
- }
- if(s->effect_param >> 4) {
- /* Set tremolo speed */
- ch->tremolo_param = (s->effect_param & 0xF0) | (ch->tremolo_param & 0x0F);
- }
- break;
-
- case 8: /* 8xx: Set panning */
- ch->panning = (float)s->effect_param / (float)0xFF;
- break;
-
- case 9: /* 9xx: Sample offset */
- if(ch->sample != NULL && NOTE_IS_VALID(s->note)) {
- uint32_t final_offset = s->effect_param << (ch->sample->bits == 16 ? 7 : 8);
- if(final_offset >= ch->sample->length) {
- /* Pretend the sample dosen't loop and is done playing */
- ch->sample_position = -1;
- break;
- }
- ch->sample_position = final_offset;
- }
- break;
-
- case 0xA: /* Axy: Volume slide */
- if(s->effect_param > 0) {
- ch->volume_slide_param = s->effect_param;
- }
- break;
-
- case 0xB: /* Bxx: Position jump */
- if(s->effect_param < ctx->module.length) {
- ctx->position_jump = true;
- ctx->jump_dest = s->effect_param;
- }
- break;
-
- case 0xC: /* Cxx: Set volume */
- ch->volume = (float)((s->effect_param > 0x40)
- ? 0x40 : s->effect_param) / (float)0x40;
- break;
-
- case 0xD: /* Dxx: Pattern break */
- /* Jump after playing this line */
- ctx->pattern_break = true;
- ctx->jump_row = (s->effect_param >> 4) * 10 + (s->effect_param & 0x0F);
- break;
-
- case 0xE: /* EXy: Extended command */
- switch(s->effect_param >> 4) {
-
- case 1: /* E1y: Fine portamento up */
- if(s->effect_param & 0x0F) {
- ch->fine_portamento_up_param = s->effect_param & 0x0F;
- }
- jar_xm_pitch_slide(ctx, ch, -ch->fine_portamento_up_param);
- break;
-
- case 2: /* E2y: Fine portamento down */
- if(s->effect_param & 0x0F) {
- ch->fine_portamento_down_param = s->effect_param & 0x0F;
- }
- jar_xm_pitch_slide(ctx, ch, ch->fine_portamento_down_param);
- break;
-
- case 4: /* E4y: Set vibrato control */
- ch->vibrato_waveform = s->effect_param & 3;
- ch->vibrato_waveform_retrigger = !((s->effect_param >> 2) & 1);
- break;
-
- case 5: /* E5y: Set finetune */
- if(NOTE_IS_VALID(ch->current->note) && ch->sample != NULL) {
- ch->note = ch->current->note + ch->sample->relative_note +
- (float)(((s->effect_param & 0x0F) - 8) << 4) / 128.f - 1.f;
- ch->period = jar_xm_period(ctx, ch->note);
- jar_xm_update_frequency(ctx, ch);
- }
- break;
-
- case 6: /* E6y: Pattern loop */
- if(s->effect_param & 0x0F) {
- if((s->effect_param & 0x0F) == ch->pattern_loop_count) {
- /* Loop is over */
- ch->pattern_loop_count = 0;
- break;
- }
-
- /* Jump to the beginning of the loop */
- ch->pattern_loop_count++;
- ctx->position_jump = true;
- ctx->jump_row = ch->pattern_loop_origin;
- ctx->jump_dest = ctx->current_table_index;
- } else {
- /* Set loop start point */
- ch->pattern_loop_origin = ctx->current_row;
- /* Replicate FT2 E60 bug */
- ctx->jump_row = ch->pattern_loop_origin;
- }
- break;
-
- case 7: /* E7y: Set tremolo control */
- ch->tremolo_waveform = s->effect_param & 3;
- ch->tremolo_waveform_retrigger = !((s->effect_param >> 2) & 1);
- break;
-
- case 0xA: /* EAy: Fine volume slide up */
- if(s->effect_param & 0x0F) {
- ch->fine_volume_slide_param = s->effect_param & 0x0F;
- }
- jar_xm_volume_slide(ch, ch->fine_volume_slide_param << 4);
- break;
-
- case 0xB: /* EBy: Fine volume slide down */
- if(s->effect_param & 0x0F) {
- ch->fine_volume_slide_param = s->effect_param & 0x0F;
- }
- jar_xm_volume_slide(ch, ch->fine_volume_slide_param);
- break;
-
- case 0xD: /* EDy: Note delay */
- /* XXX: figure this out better. EDx triggers
- * the note even when there no note and no
- * instrument. But ED0 acts like like a ghost
- * note, EDx (x ≠0) does not. */
- if(s->note == 0 && s->instrument == 0) {
- unsigned int flags = jar_xm_TRIGGER_KEEP_VOLUME;
-
- if(ch->current->effect_param & 0x0F) {
- ch->note = ch->orig_note;
- jar_xm_trigger_note(ctx, ch, flags);
- } else {
- jar_xm_trigger_note(
- ctx, ch,
- flags
- | jar_xm_TRIGGER_KEEP_PERIOD
- | jar_xm_TRIGGER_KEEP_SAMPLE_POSITION
- );
- }
- }
- break;
-
- case 0xE: /* EEy: Pattern delay */
- ctx->extra_ticks = (ch->current->effect_param & 0x0F) * ctx->tempo;
- break;
-
- default:
- break;
-
- }
- break;
-
- case 0xF: /* Fxx: Set tempo/BPM */
- if(s->effect_param > 0) {
- if(s->effect_param <= 0x1F) {
- ctx->tempo = s->effect_param;
- } else {
- ctx->bpm = s->effect_param;
- }
- }
- break;
-
- case 16: /* Gxx: Set global volume */
- ctx->global_volume = (float)((s->effect_param > 0x40)
- ? 0x40 : s->effect_param) / (float)0x40;
- break;
-
- case 17: /* Hxy: Global volume slide */
- if(s->effect_param > 0) {
- ch->global_volume_slide_param = s->effect_param;
- }
- break;
-
- case 21: /* Lxx: Set envelope position */
- ch->volume_envelope_frame_count = s->effect_param;
- ch->panning_envelope_frame_count = s->effect_param;
- break;
-
- case 25: /* Pxy: Panning slide */
- if(s->effect_param > 0) {
- ch->panning_slide_param = s->effect_param;
- }
- break;
-
- case 27: /* Rxy: Multi retrig note */
- if(s->effect_param > 0) {
- if((s->effect_param >> 4) == 0) {
- /* Keep previous x value */
- ch->multi_retrig_param = (ch->multi_retrig_param & 0xF0) | (s->effect_param & 0x0F);
- } else {
- ch->multi_retrig_param = s->effect_param;
- }
- }
- break;
-
- case 29: /* Txy: Tremor */
- if(s->effect_param > 0) {
- /* Tremor x and y params do not appear to be separately
- * kept in memory, unlike Rxy */
- ch->tremor_param = s->effect_param;
- }
- break;
-
- case 33: /* Xxy: Extra stuff */
- switch(s->effect_param >> 4) {
-
- case 1: /* X1y: Extra fine portamento up */
- if(s->effect_param & 0x0F) {
- ch->extra_fine_portamento_up_param = s->effect_param & 0x0F;
- }
- jar_xm_pitch_slide(ctx, ch, -1.0f * ch->extra_fine_portamento_up_param);
- break;
-
- case 2: /* X2y: Extra fine portamento down */
- if(s->effect_param & 0x0F) {
- ch->extra_fine_portamento_down_param = s->effect_param & 0x0F;
- }
- jar_xm_pitch_slide(ctx, ch, ch->extra_fine_portamento_down_param);
- break;
-
- default:
- break;
-
- }
- break;
-
- default:
- break;
-
- }
-}
-
-static void jar_xm_trigger_note(jar_xm_context_t* ctx, jar_xm_channel_context_t* ch, unsigned int flags) {
- if(!(flags & jar_xm_TRIGGER_KEEP_SAMPLE_POSITION)) {
- ch->sample_position = 0.f;
- ch->ping = true;
- }
-
- if(ch->sample != NULL) {
- if(!(flags & jar_xm_TRIGGER_KEEP_VOLUME)) {
- ch->volume = ch->sample->volume;
- }
-
- ch->panning = ch->sample->panning;
- }
-
- ch->sustained = true;
- ch->fadeout_volume = ch->volume_envelope_volume = 1.0f;
- ch->panning_envelope_panning = .5f;
- ch->volume_envelope_frame_count = ch->panning_envelope_frame_count = 0;
- ch->vibrato_note_offset = 0.f;
- ch->tremolo_volume = 0.f;
- ch->tremor_on = false;
-
- ch->autovibrato_ticks = 0;
-
- if(ch->vibrato_waveform_retrigger) {
- ch->vibrato_ticks = 0; /* XXX: should the waveform itself also
- * be reset to sine? */
- }
- if(ch->tremolo_waveform_retrigger) {
- ch->tremolo_ticks = 0;
- }
-
- if(!(flags & jar_xm_TRIGGER_KEEP_PERIOD)) {
- ch->period = jar_xm_period(ctx, ch->note);
- jar_xm_update_frequency(ctx, ch);
- }
-
- ch->latest_trigger = ctx->generated_samples;
- if(ch->instrument != NULL) {
- ch->instrument->latest_trigger = ctx->generated_samples;
- }
- if(ch->sample != NULL) {
- ch->sample->latest_trigger = ctx->generated_samples;
- }
-}
-
-static void jar_xm_cut_note(jar_xm_channel_context_t* ch) {
- /* NB: this is not the same as Key Off */
- ch->volume = .0f;
-}
-
-static void jar_xm_key_off(jar_xm_channel_context_t* ch) {
- /* Key Off */
- ch->sustained = false;
-
- /* If no volume envelope is used, also cut the note */
- if(ch->instrument == NULL || !ch->instrument->volume_envelope.enabled) {
- jar_xm_cut_note(ch);
- }
-}
-
-static void jar_xm_row(jar_xm_context_t* ctx) {
- if(ctx->position_jump) {
- ctx->current_table_index = ctx->jump_dest;
- ctx->current_row = ctx->jump_row;
- ctx->position_jump = false;
- ctx->pattern_break = false;
- ctx->jump_row = 0;
- jar_xm_post_pattern_change(ctx);
- } else if(ctx->pattern_break) {
- ctx->current_table_index++;
- ctx->current_row = ctx->jump_row;
- ctx->pattern_break = false;
- ctx->jump_row = 0;
- jar_xm_post_pattern_change(ctx);
- }
-
- jar_xm_pattern_t* cur = ctx->module.patterns + ctx->module.pattern_table[ctx->current_table_index];
- bool in_a_loop = false;
-
- /* Read notes… */
- for(uint8_t i = 0; i < ctx->module.num_channels; ++i) {
- jar_xm_pattern_slot_t* s = cur->slots + ctx->current_row * ctx->module.num_channels + i;
- jar_xm_channel_context_t* ch = ctx->channels + i;
-
- ch->current = s;
-
- if(s->effect_type != 0xE || s->effect_param >> 4 != 0xD) {
- jar_xm_handle_note_and_instrument(ctx, ch, s);
- } else {
- ch->note_delay_param = s->effect_param & 0x0F;
- }
-
- if(!in_a_loop && ch->pattern_loop_count > 0) {
- in_a_loop = true;
- }
- }
-
- if(!in_a_loop) {
- /* No E6y loop is in effect (or we are in the first pass) */
- ctx->loop_count = (ctx->row_loop_count[MAX_NUM_ROWS * ctx->current_table_index + ctx->current_row]++);
- }
-
- ctx->current_row++; /* Since this is an uint8, this line can
- * increment from 255 to 0, in which case it
- * is still necessary to go the next
- * pattern. */
- if(!ctx->position_jump && !ctx->pattern_break &&
- (ctx->current_row >= cur->num_rows || ctx->current_row == 0)) {
- ctx->current_table_index++;
- ctx->current_row = ctx->jump_row; /* This will be 0 most of
- * the time, except when E60
- * is used */
- ctx->jump_row = 0;
- jar_xm_post_pattern_change(ctx);
- }
-}
-
-static void jar_xm_envelope_tick(jar_xm_channel_context_t* ch,
- jar_xm_envelope_t* env,
- uint16_t* counter,
- float* outval) {
- if(env->num_points < 2) {
- /* Don't really know what to do… */
- if(env->num_points == 1) {
- /* XXX I am pulling this out of my ass */
- *outval = (float)env->points[0].value / (float)0x40;
- if(*outval > 1) {
- *outval = 1;
- }
- }
-
- return;
- } else {
- uint8_t j;
-
- if(env->loop_enabled) {
- uint16_t loop_start = env->points[env->loop_start_point].frame;
- uint16_t loop_end = env->points[env->loop_end_point].frame;
- uint16_t loop_length = loop_end - loop_start;
-
- if(*counter >= loop_end) {
- *counter -= loop_length;
- }
- }
-
- for(j = 0; j < (env->num_points - 2); ++j) {
- if(env->points[j].frame <= *counter &&
- env->points[j+1].frame >= *counter) {
- break;
- }
- }
-
- *outval = jar_xm_envelope_lerp(env->points + j, env->points + j + 1, *counter) / (float)0x40;
-
- /* Make sure it is safe to increment frame count */
- if(!ch->sustained || !env->sustain_enabled ||
- *counter != env->points[env->sustain_point].frame) {
- (*counter)++;
- }
- }
-}
-
-static void jar_xm_envelopes(jar_xm_channel_context_t* ch) {
- if(ch->instrument != NULL) {
- if(ch->instrument->volume_envelope.enabled) {
- if(!ch->sustained) {
- ch->fadeout_volume -= (float)ch->instrument->volume_fadeout / 65536.f;
- jar_xm_CLAMP_DOWN(ch->fadeout_volume);
- }
-
- jar_xm_envelope_tick(ch,
- &(ch->instrument->volume_envelope),
- &(ch->volume_envelope_frame_count),
- &(ch->volume_envelope_volume));
- }
-
- if(ch->instrument->panning_envelope.enabled) {
- jar_xm_envelope_tick(ch,
- &(ch->instrument->panning_envelope),
- &(ch->panning_envelope_frame_count),
- &(ch->panning_envelope_panning));
- }
- }
-}
-
-static void jar_xm_tick(jar_xm_context_t* ctx) {
- if(ctx->current_tick == 0) {
- jar_xm_row(ctx);
- }
-
- for(uint8_t i = 0; i < ctx->module.num_channels; ++i) {
- jar_xm_channel_context_t* ch = ctx->channels + i;
-
- jar_xm_envelopes(ch);
- jar_xm_autovibrato(ctx, ch);
-
- if(ch->arp_in_progress && !HAS_ARPEGGIO(ch->current)) {
- ch->arp_in_progress = false;
- ch->arp_note_offset = 0;
- jar_xm_update_frequency(ctx, ch);
- }
- if(ch->vibrato_in_progress && !HAS_VIBRATO(ch->current)) {
- ch->vibrato_in_progress = false;
- ch->vibrato_note_offset = 0.f;
- jar_xm_update_frequency(ctx, ch);
- }
-
- switch(ch->current->volume_column >> 4) {
-
- case 0x6: /* Volume slide down */
- if(ctx->current_tick == 0) break;
- jar_xm_volume_slide(ch, ch->current->volume_column & 0x0F);
- break;
-
- case 0x7: /* Volume slide up */
- if(ctx->current_tick == 0) break;
- jar_xm_volume_slide(ch, ch->current->volume_column << 4);
- break;
-
- case 0xB: /* Vibrato */
- if(ctx->current_tick == 0) break;
- ch->vibrato_in_progress = false;
- jar_xm_vibrato(ctx, ch, ch->vibrato_param, ch->vibrato_ticks++);
- break;
-
- case 0xD: /* Panning slide left */
- if(ctx->current_tick == 0) break;
- jar_xm_panning_slide(ch, ch->current->volume_column & 0x0F);
- break;
-
- case 0xE: /* Panning slide right */
- if(ctx->current_tick == 0) break;
- jar_xm_panning_slide(ch, ch->current->volume_column << 4);
- break;
-
- case 0xF: /* Tone portamento */
- if(ctx->current_tick == 0) break;
- jar_xm_tone_portamento(ctx, ch);
- break;
-
- default:
- break;
-
- }
-
- switch(ch->current->effect_type) {
-
- case 0: /* 0xy: Arpeggio */
- if(ch->current->effect_param > 0) {
- char arp_offset = ctx->tempo % 3;
- switch(arp_offset) {
- case 2: /* 0 -> x -> 0 -> y -> x -> … */
- if(ctx->current_tick == 1) {
- ch->arp_in_progress = true;
- ch->arp_note_offset = ch->current->effect_param >> 4;
- jar_xm_update_frequency(ctx, ch);
- break;
- }
- /* No break here, this is intended */
- case 1: /* 0 -> 0 -> y -> x -> … */
- if(ctx->current_tick == 0) {
- ch->arp_in_progress = false;
- ch->arp_note_offset = 0;
- jar_xm_update_frequency(ctx, ch);
- break;
- }
- /* No break here, this is intended */
- case 0: /* 0 -> y -> x -> … */
- jar_xm_arpeggio(ctx, ch, ch->current->effect_param, ctx->current_tick - arp_offset);
- default:
- break;
- }
- }
- break;
-
- case 1: /* 1xx: Portamento up */
- if(ctx->current_tick == 0) break;
- jar_xm_pitch_slide(ctx, ch, -ch->portamento_up_param);
- break;
-
- case 2: /* 2xx: Portamento down */
- if(ctx->current_tick == 0) break;
- jar_xm_pitch_slide(ctx, ch, ch->portamento_down_param);
- break;
-
- case 3: /* 3xx: Tone portamento */
- if(ctx->current_tick == 0) break;
- jar_xm_tone_portamento(ctx, ch);
- break;
-
- case 4: /* 4xy: Vibrato */
- if(ctx->current_tick == 0) break;
- ch->vibrato_in_progress = true;
- jar_xm_vibrato(ctx, ch, ch->vibrato_param, ch->vibrato_ticks++);
- break;
-
- case 5: /* 5xy: Tone portamento + Volume slide */
- if(ctx->current_tick == 0) break;
- jar_xm_tone_portamento(ctx, ch);
- jar_xm_volume_slide(ch, ch->volume_slide_param);
- break;
-
- case 6: /* 6xy: Vibrato + Volume slide */
- if(ctx->current_tick == 0) break;
- ch->vibrato_in_progress = true;
- jar_xm_vibrato(ctx, ch, ch->vibrato_param, ch->vibrato_ticks++);
- jar_xm_volume_slide(ch, ch->volume_slide_param);
- break;
-
- case 7: /* 7xy: Tremolo */
- if(ctx->current_tick == 0) break;
- jar_xm_tremolo(ctx, ch, ch->tremolo_param, ch->tremolo_ticks++);
- break;
-
- case 0xA: /* Axy: Volume slide */
- if(ctx->current_tick == 0) break;
- jar_xm_volume_slide(ch, ch->volume_slide_param);
- break;
-
- case 0xE: /* EXy: Extended command */
- switch(ch->current->effect_param >> 4) {
-
- case 0x9: /* E9y: Retrigger note */
- if(ctx->current_tick != 0 && ch->current->effect_param & 0x0F) {
- if(!(ctx->current_tick % (ch->current->effect_param & 0x0F))) {
- jar_xm_trigger_note(ctx, ch, 0);
- jar_xm_envelopes(ch);
- }
- }
- break;
-
- case 0xC: /* ECy: Note cut */
- if((ch->current->effect_param & 0x0F) == ctx->current_tick) {
- jar_xm_cut_note(ch);
- }
- break;
-
- case 0xD: /* EDy: Note delay */
- if(ch->note_delay_param == ctx->current_tick) {
- jar_xm_handle_note_and_instrument(ctx, ch, ch->current);
- jar_xm_envelopes(ch);
- }
- break;
-
- default:
- break;
-
- }
- break;
-
- case 17: /* Hxy: Global volume slide */
- if(ctx->current_tick == 0) break;
- if((ch->global_volume_slide_param & 0xF0) &&
- (ch->global_volume_slide_param & 0x0F)) {
- /* Illegal state */
- break;
- }
- if(ch->global_volume_slide_param & 0xF0) {
- /* Global slide up */
- float f = (float)(ch->global_volume_slide_param >> 4) / (float)0x40;
- ctx->global_volume += f;
- jar_xm_CLAMP_UP(ctx->global_volume);
- } else {
- /* Global slide down */
- float f = (float)(ch->global_volume_slide_param & 0x0F) / (float)0x40;
- ctx->global_volume -= f;
- jar_xm_CLAMP_DOWN(ctx->global_volume);
- }
- break;
-
- case 20: /* Kxx: Key off */
- /* Most documentations will tell you the parameter has no
- * use. Don't be fooled. */
- if(ctx->current_tick == ch->current->effect_param) {
- jar_xm_key_off(ch);
- }
- break;
-
- case 25: /* Pxy: Panning slide */
- if(ctx->current_tick == 0) break;
- jar_xm_panning_slide(ch, ch->panning_slide_param);
- break;
-
- case 27: /* Rxy: Multi retrig note */
- if(ctx->current_tick == 0) break;
- if(((ch->multi_retrig_param) & 0x0F) == 0) break;
- if((ctx->current_tick % (ch->multi_retrig_param & 0x0F)) == 0) {
- float v = ch->volume * multi_retrig_multiply[ch->multi_retrig_param >> 4]
- + multi_retrig_add[ch->multi_retrig_param >> 4];
- jar_xm_CLAMP(v);
- jar_xm_trigger_note(ctx, ch, 0);
- ch->volume = v;
- }
- break;
-
- case 29: /* Txy: Tremor */
- if(ctx->current_tick == 0) break;
- ch->tremor_on = (
- (ctx->current_tick - 1) % ((ch->tremor_param >> 4) + (ch->tremor_param & 0x0F) + 2)
- >
- (ch->tremor_param >> 4)
- );
- break;
-
- default:
- break;
-
- }
-
- float panning, volume;
-
- panning = ch->panning +
- (ch->panning_envelope_panning - .5f) * (.5f - fabsf(ch->panning - .5f)) * 2.0f;
-
- if(ch->tremor_on) {
- volume = .0f;
- } else {
- volume = ch->volume + ch->tremolo_volume;
- jar_xm_CLAMP(volume);
- volume *= ch->fadeout_volume * ch->volume_envelope_volume;
- }
-
-#if JAR_XM_RAMPING
- ch->target_panning = panning;
- ch->target_volume = volume;
-#else
- ch->actual_panning = panning;
- ch->actual_volume = volume;
-#endif
- }
-
- ctx->current_tick++;
- if(ctx->current_tick >= ctx->tempo + ctx->extra_ticks) {
- ctx->current_tick = 0;
- ctx->extra_ticks = 0;
- }
-
- /* FT2 manual says number of ticks / second = BPM * 0.4 */
- ctx->remaining_samples_in_tick += (float)ctx->rate / ((float)ctx->bpm * 0.4f);
-}
-
-static float jar_xm_next_of_sample(jar_xm_channel_context_t* ch) {
- if(ch->instrument == NULL || ch->sample == NULL || ch->sample_position < 0) {
-#if JAR_XM_RAMPING
- if(ch->frame_count < jar_xm_SAMPLE_RAMPING_POINTS) {
- return jar_xm_LERP(ch->end_of_previous_sample[ch->frame_count], .0f,
- (float)ch->frame_count / (float)jar_xm_SAMPLE_RAMPING_POINTS);
- }
-#endif
- return .0f;
- }
- if(ch->sample->length == 0) {
- return .0f;
- }
-
- float u, v, t;
- uint32_t a, b;
- a = (uint32_t)ch->sample_position; /* This cast is fine,
- * sample_position will not
- * go above integer
- * ranges */
- if(JAR_XM_LINEAR_INTERPOLATION) {
- b = a + 1;
- t = ch->sample_position - a; /* Cheaper than fmodf(., 1.f) */
- }
- u = ch->sample->data[a];
-
- switch(ch->sample->loop_type) {
-
- case jar_xm_NO_LOOP:
- if(JAR_XM_LINEAR_INTERPOLATION) {
- v = (b < ch->sample->length) ? ch->sample->data[b] : .0f;
- }
- ch->sample_position += ch->step;
- if(ch->sample_position >= ch->sample->length) {
- ch->sample_position = -1;
- }
- break;
-
- case jar_xm_FORWARD_LOOP:
- if(JAR_XM_LINEAR_INTERPOLATION) {
- v = ch->sample->data[
- (b == ch->sample->loop_end) ? ch->sample->loop_start : b
- ];
- }
- ch->sample_position += ch->step;
- while(ch->sample_position >= ch->sample->loop_end) {
- ch->sample_position -= ch->sample->loop_length;
- }
- break;
-
- case jar_xm_PING_PONG_LOOP:
- if(ch->ping) {
- ch->sample_position += ch->step;
- } else {
- ch->sample_position -= ch->step;
- }
- /* XXX: this may not work for very tight ping-pong loops
- * (ie switches direction more than once per sample */
- if(ch->ping) {
- if(JAR_XM_LINEAR_INTERPOLATION) {
- v = (b >= ch->sample->loop_end) ? ch->sample->data[a] : ch->sample->data[b];
- }
- if(ch->sample_position >= ch->sample->loop_end) {
- ch->ping = false;
- ch->sample_position = (ch->sample->loop_end << 1) - ch->sample_position;
- }
- /* sanity checking */
- if(ch->sample_position >= ch->sample->length) {
- ch->ping = false;
- ch->sample_position -= ch->sample->length - 1;
- }
- } else {
- if(JAR_XM_LINEAR_INTERPOLATION) {
- v = u;
- u = (b == 1 || b - 2 <= ch->sample->loop_start) ? ch->sample->data[a] : ch->sample->data[b - 2];
- }
- if(ch->sample_position <= ch->sample->loop_start) {
- ch->ping = true;
- ch->sample_position = (ch->sample->loop_start << 1) - ch->sample_position;
- }
- /* sanity checking */
- if(ch->sample_position <= .0f) {
- ch->ping = true;
- ch->sample_position = .0f;
- }
- }
- break;
-
- default:
- v = .0f;
- break;
- }
-
- float endval = JAR_XM_LINEAR_INTERPOLATION ? jar_xm_LERP(u, v, t) : u;
-
-#if JAR_XM_RAMPING
- if(ch->frame_count < jar_xm_SAMPLE_RAMPING_POINTS) {
- /* Smoothly transition between old and new sample. */
- return jar_xm_LERP(ch->end_of_previous_sample[ch->frame_count], endval,
- (float)ch->frame_count / (float)jar_xm_SAMPLE_RAMPING_POINTS);
- }
-#endif
-
- return endval;
-}
-
-static void jar_xm_sample(jar_xm_context_t* ctx, float* left, float* right) {
- if(ctx->remaining_samples_in_tick <= 0) {
- jar_xm_tick(ctx);
- }
- ctx->remaining_samples_in_tick--;
-
- *left = 0.f;
- *right = 0.f;
-
- if(ctx->max_loop_count > 0 && ctx->loop_count >= ctx->max_loop_count) {
- return;
- }
-
- for(uint8_t i = 0; i < ctx->module.num_channels; ++i) {
- jar_xm_channel_context_t* ch = ctx->channels + i;
-
- if(ch->instrument == NULL || ch->sample == NULL || ch->sample_position < 0) {
- continue;
- }
-
- const float fval = jar_xm_next_of_sample(ch);
-
- if(!ch->muted && !ch->instrument->muted) {
- *left += fval * ch->actual_volume * (1.f - ch->actual_panning);
- *right += fval * ch->actual_volume * ch->actual_panning;
- }
-
-#if JAR_XM_RAMPING
- ch->frame_count++;
- jar_xm_SLIDE_TOWARDS(ch->actual_volume, ch->target_volume, ctx->volume_ramp);
- jar_xm_SLIDE_TOWARDS(ch->actual_panning, ch->target_panning, ctx->panning_ramp);
-#endif
- }
-
- const float fgvol = ctx->global_volume * ctx->amplification;
- *left *= fgvol;
- *right *= fgvol;
-
-#if JAR_XM_DEBUG
- if(fabs(*left) > 1 || fabs(*right) > 1) {
- DEBUG("clipping frame: %f %f, this is a bad module or a libxm bug", *left, *right);
- }
-#endif
-}
-
-void jar_xm_generate_samples(jar_xm_context_t* ctx, float* output, size_t numsamples) {
- if(ctx && output) {
- ctx->generated_samples += numsamples;
- for(size_t i = 0; i < numsamples; i++) {
- jar_xm_sample(ctx, output + (2 * i), output + (2 * i + 1));
- }
- }
-}
-
-uint64_t jar_xm_get_remaining_samples(jar_xm_context_t* ctx)
-{
- uint64_t total = 0;
- uint8_t currentLoopCount = jar_xm_get_loop_count(ctx);
- jar_xm_set_max_loop_count(ctx, 0);
-
- while(jar_xm_get_loop_count(ctx) == currentLoopCount)
- {
- total += ctx->remaining_samples_in_tick;
- ctx->remaining_samples_in_tick = 0;
- jar_xm_tick(ctx);
- }
-
- ctx->loop_count = currentLoopCount;
- return total;
-}
-
-
-
-
-
-//--------------------------------------------
-//FILE LOADER - TODO - NEEDS TO BE CLEANED UP
-//--------------------------------------------
-
-
-
-#undef DEBUG
-#define DEBUG(...) do { \
- fprintf(stderr, __VA_ARGS__); \
- fflush(stderr); \
- } while(0)
-
-#define DEBUG_ERR(...) do { \
- fprintf(stderr, __VA_ARGS__); \
- fflush(stderr); \
- } while(0)
-
-#define FATAL(...) do { \
- fprintf(stderr, __VA_ARGS__); \
- fflush(stderr); \
- exit(1); \
- } while(0)
-
-#define FATAL_ERR(...) do { \
- fprintf(stderr, __VA_ARGS__); \
- fflush(stderr); \
- exit(1); \
- } while(0)
-
-
-int jar_xm_create_context_from_file(jar_xm_context_t** ctx, uint32_t rate, const char* filename) {
- FILE* xmf;
- int size;
-
-#if defined(_MSC_VER) && _MSC_VER >= 1500
- xmf = NULL;
- fopen_s(&xmf, filename, "rb");
-#else
- xmf = fopen(filename, "rb");
-#endif
- if(xmf == NULL) {
- DEBUG_ERR("Could not open input file");
- *ctx = NULL;
- return 3;
- }
-
- fseek(xmf, 0, SEEK_END);
- size = ftell(xmf);
- rewind(xmf);
- if(size == -1) {
- fclose(xmf);
- DEBUG_ERR("fseek() failed");
- *ctx = NULL;
- return 4;
- }
-
- char* data = malloc(size + 1);
- if(fread(data, 1, size, xmf) < size) {
- fclose(xmf);
- DEBUG_ERR("fread() failed");
- *ctx = NULL;
- return 5;
- }
-
- fclose(xmf);
-
- switch(jar_xm_create_context_safe(ctx, data, size, rate)) {
- case 0:
- break;
-
- case 1:
- DEBUG("could not create context: module is not sane\n");
- *ctx = NULL;
- return 1;
- break;
-
- case 2:
- FATAL("could not create context: malloc failed\n");
- return 2;
- break;
-
- default:
- FATAL("could not create context: unknown error\n");
- return 6;
- break;
-
- }
-
- return 0;
-}
-
-
-
-
-#endif//end of JAR_XM_IMPLEMENTATION
-//-------------------------------------------------------------------------------
-
diff --git a/client/miniaudio/stb_vorbis.c b/client/miniaudio/stb_vorbis.c
index a3021341..bc6fbe25 100644
--- a/client/miniaudio/stb_vorbis.c
+++ b/client/miniaudio/stb_vorbis.c
@@ -1,11 +1,11 @@
-// Ogg Vorbis audio decoder - v1.11 - public domain
+// Ogg Vorbis audio decoder - v1.19 - public domain
// http://nothings.org/stb_vorbis/
//
// Original version written by Sean Barrett in 2007.
//
-// Originally sponsored by RAD Game Tools. Seeking sponsored
-// by Phillip Bennefall, Marc Andersen, Aaron Baker, Elias Software,
-// Aras Pranckevicius, and Sean Barrett.
+// Originally sponsored by RAD Game Tools. Seeking implementation
+// sponsored by Phillip Bennefall, Marc Andersen, Aaron Baker,
+// Elias Software, Aras Pranckevicius, and Sean Barrett.
//
// LICENSE
//
@@ -26,26 +26,36 @@
// Terje Mathisen Niklas Frykholm Andy Hill
// Casey Muratori John Bolton Gargaj
// Laurent Gomila Marc LeBlanc Ronny Chevalier
-// Bernhard Wodo Evan Balster alxprd@github
+// Bernhard Wodo Evan Balster github:alxprd
// Tom Beaumont Ingo Leitgeb Nicolas Guillemot
// Phillip Bennefall Rohit Thiago Goulart
-// manxorist@github saga musix github:infatum
+// github:manxorist saga musix github:infatum
+// Timur Gagiev Maxwell Koo Peter Waller
+// github:audinowho Dougall Johnson
//
// Partial history:
-// 1.11 - 2017/07/23 - fix MinGW compilation
-// 1.10 - 2017/03/03 - more robust seeking; fix negative ilog(); clear error in open_memory
-// 1.09 - 2016/04/04 - back out 'truncation of last frame' fix from previous version
-// 1.08 - 2016/04/02 - warnings; setup memory leaks; truncation of last frame
-// 1.07 - 2015/01/16 - fixes for crashes on invalid files; warning fixes; const
-// 1.06 - 2015/08/31 - full, correct support for seeking API (Dougall Johnson)
+// 1.19 - 2020-02-05 - warnings
+// 1.18 - 2020-02-02 - fix seek bugs; parse header comments; misc warnings etc.
+// 1.17 - 2019-07-08 - fix CVE-2019-13217..CVE-2019-13223 (by ForAllSecure)
+// 1.16 - 2019-03-04 - fix warnings
+// 1.15 - 2019-02-07 - explicit failure if Ogg Skeleton data is found
+// 1.14 - 2018-02-11 - delete bogus dealloca usage
+// 1.13 - 2018-01-29 - fix truncation of last frame (hopefully)
+// 1.12 - 2017-11-21 - limit residue begin/end to blocksize/2 to avoid large temp allocs in bad/corrupt files
+// 1.11 - 2017-07-23 - fix MinGW compilation
+// 1.10 - 2017-03-03 - more robust seeking; fix negative ilog(); clear error in open_memory
+// 1.09 - 2016-04-04 - back out 'truncation of last frame' fix from previous version
+// 1.08 - 2016-04-02 - warnings; setup memory leaks; truncation of last frame
+// 1.07 - 2015-01-16 - fixes for crashes on invalid files; warning fixes; const
+// 1.06 - 2015-08-31 - full, correct support for seeking API (Dougall Johnson)
// some crash fixes when out of memory or with corrupt files
// fix some inappropriately signed shifts
-// 1.05 - 2015/04/19 - don't define __forceinline if it's redundant
-// 1.04 - 2014/08/27 - fix missing const-correct case in API
-// 1.03 - 2014/08/07 - warning fixes
-// 1.02 - 2014/07/09 - declare qsort comparison as explicitly _cdecl in Windows
-// 1.01 - 2014/06/18 - fix stb_vorbis_get_samples_float (interleaved was correct)
-// 1.0 - 2014/05/26 - fix memory leaks; fix warnings; fix bugs in >2-channel;
+// 1.05 - 2015-04-19 - don't define __forceinline if it's redundant
+// 1.04 - 2014-08-27 - fix missing const-correct case in API
+// 1.03 - 2014-08-07 - warning fixes
+// 1.02 - 2014-07-09 - declare qsort comparison as explicitly _cdecl in Windows
+// 1.01 - 2014-06-18 - fix stb_vorbis_get_samples_float (interleaved was correct)
+// 1.0 - 2014-05-26 - fix memory leaks; fix warnings; fix bugs in >2-channel;
// (API change) report sample rate for decode-full-file funcs
//
// See end of file for full version history.
@@ -123,9 +133,20 @@ typedef struct
int max_frame_size;
} stb_vorbis_info;
+typedef struct
+{
+ char *vendor;
+
+ int comment_list_length;
+ char **comment_list;
+} stb_vorbis_comment;
+
// get general information about the file
extern stb_vorbis_info stb_vorbis_get_info(stb_vorbis *f);
+// get ogg comments
+extern stb_vorbis_comment stb_vorbis_get_comment(stb_vorbis *f);
+
// get the last error detected (clears it, too)
extern int stb_vorbis_get_error(stb_vorbis *f);
@@ -249,7 +270,7 @@ extern stb_vorbis * stb_vorbis_open_file(FILE *f, int close_handle_on_close,
// create an ogg vorbis decoder from an open FILE *, looking for a stream at
// the _current_ seek point (ftell). on failure, returns NULL and sets *error.
// note that stb_vorbis must "own" this stream; if you seek it in between
-// calls to stb_vorbis, it will become confused. Morever, if you attempt to
+// calls to stb_vorbis, it will become confused. Moreover, if you attempt to
// perform stb_vorbis_seek_*() operations on this file, it will assume it
// owns the _entire_ rest of the file after the start point. Use the next
// function, stb_vorbis_open_file_section(), to limit it.
@@ -370,7 +391,8 @@ enum STBVorbisError
VORBIS_invalid_first_page,
VORBIS_bad_packet_type,
VORBIS_cant_find_last_page,
- VORBIS_seek_failed
+ VORBIS_seek_failed,
+ VORBIS_ogg_skeleton_not_supported
};
@@ -577,7 +599,9 @@ enum STBVorbisError
#undef __forceinline
#endif
#define __forceinline
+ #ifndef alloca
#define alloca __builtin_alloca
+ #endif
#elif !defined(_MSC_VER)
#if __GNUC__
#define __forceinline inline
@@ -751,6 +775,10 @@ struct stb_vorbis
unsigned int temp_memory_required;
unsigned int setup_temp_memory_required;
+ char *vendor;
+ int comment_list_length;
+ char **comment_list;
+
// input config
#ifndef STB_VORBIS_NO_STDIO
FILE *f;
@@ -766,8 +794,11 @@ struct stb_vorbis
uint8 push_mode;
+ // the page to seek to when seeking to start, may be zero
uint32 first_audio_page_offset;
+ // p_first is the page on which the first audio packet ends
+ // (but not necessarily the page on which it starts)
ProbedPage p_first, p_last;
// memory management
@@ -816,7 +847,7 @@ struct stb_vorbis
int current_loc_valid;
// per-blocksize precomputed data
-
+
// twiddle factors
float *A[2],*B[2],*C[2];
float *window[2];
@@ -880,11 +911,7 @@ static int error(vorb *f, enum STBVorbisError e)
#define array_size_required(count,size) (count*(sizeof(void *)+(size)))
#define temp_alloc(f,size) (f->alloc.alloc_buffer ? setup_temp_malloc(f,size) : alloca(size))
-#ifdef dealloca
-#define temp_free(f,p) (f->alloc.alloc_buffer ? 0 : dealloca(size))
-#else
-#define temp_free(f,p) 0
-#endif
+#define temp_free(f,p) (void)0
#define temp_alloc_save(f) ((f)->temp_offset)
#define temp_alloc_restore(f,p) ((f)->temp_offset = (p))
@@ -905,7 +932,7 @@ static void *make_block_array(void *mem, int count, int size)
static void *setup_malloc(vorb *f, int sz)
{
- sz = (sz+3) & ~3;
+ sz = (sz+7) & ~7; // round up to nearest 8 for alignment of future allocs.
f->setup_memory_required += sz;
if (f->alloc.alloc_buffer) {
void *p = (char *) f->alloc.alloc_buffer + f->setup_offset;
@@ -924,7 +951,7 @@ static void setup_free(vorb *f, void *p)
static void *setup_temp_malloc(vorb *f, int sz)
{
- sz = (sz+3) & ~3;
+ sz = (sz+7) & ~7; // round up to nearest 8 for alignment of future allocs.
if (f->alloc.alloc_buffer) {
if (f->temp_offset - sz < f->setup_offset) return NULL;
f->temp_offset -= sz;
@@ -1073,7 +1100,7 @@ static int compute_codewords(Codebook *c, uint8 *len, int n, uint32 *values)
assert(z >= 0 && z < 32);
available[z] = 0;
add_entry(c, bit_reverse(res), i, m++, len[i], values);
- // propogate availability up the tree
+ // propagate availability up the tree
if (z != len[i]) {
assert(len[i] >= 0 && len[i] < 32);
for (y=len[i]; y > z; --y) {
@@ -1142,7 +1169,7 @@ static void compute_sorted_huffman(Codebook *c, uint8 *lengths, uint32 *values)
if (!c->sparse) {
int k = 0;
for (i=0; i < c->entries; ++i)
- if (include_in_sort(c, lengths[i]))
+ if (include_in_sort(c, lengths[i]))
c->sorted_codewords[k++] = bit_reverse(c->codewords[i]);
assert(k == c->sorted_entries);
} else {
@@ -1199,8 +1226,10 @@ static int lookup1_values(int entries, int dim)
int r = (int) floor(exp((float) log((float) entries) / dim));
if ((int) floor(pow((float) r+1, dim)) <= entries) // (int) cast for MinGW warning;
++r; // floor() to avoid _ftol() when non-CRT
- assert(pow((float) r+1, dim) > entries);
- assert((int) floor(pow((float) r, dim)) <= entries); // (int),floor() as above
+ if (pow((float) r+1, dim) <= entries)
+ return -1;
+ if ((int) floor(pow((float) r, dim)) > entries)
+ return -1;
return r;
}
@@ -1323,7 +1352,7 @@ static int getn(vorb *z, uint8 *data, int n)
return 1;
}
- #ifndef STB_VORBIS_NO_STDIO
+ #ifndef STB_VORBIS_NO_STDIO
if (fread(data, n, 1, z->f) == 1)
return 1;
else {
@@ -1398,12 +1427,15 @@ static int capture_pattern(vorb *f)
static int start_page_no_capturepattern(vorb *f)
{
uint32 loc0,loc1,n;
+ if (f->first_decode && !IS_PUSH_MODE(f)) {
+ f->p_first.page_start = stb_vorbis_get_file_offset(f) - 4;
+ }
// stream structure version
if (0 != get8(f)) return error(f, VORBIS_invalid_stream_structure_version);
// header flag
f->page_flag = get8(f);
// absolute granule position
- loc0 = get32(f);
+ loc0 = get32(f);
loc1 = get32(f);
// @TODO: validate loc0,loc1 as valid positions?
// stream serial number -- vorbis doesn't interleave, so discard
@@ -1434,15 +1466,12 @@ static int start_page_no_capturepattern(vorb *f)
}
if (f->first_decode) {
int i,len;
- ProbedPage p;
len = 0;
for (i=0; i < f->segment_count; ++i)
len += f->segments[i];
len += 27 + f->segment_count;
- p.page_start = f->first_audio_page_offset;
- p.page_end = p.page_start + len;
- p.last_decoded_sample = loc0;
- f->p_first = p;
+ f->p_first.page_end = f->p_first.page_start + len;
+ f->p_first.last_decoded_sample = loc0;
}
f->next_seg = 0;
return TRUE;
@@ -1533,6 +1562,16 @@ static int get8_packet(vorb *f)
return x;
}
+static int get32_packet(vorb *f)
+{
+ uint32 x;
+ x = get8_packet(f);
+ x += get8_packet(f) << 8;
+ x += get8_packet(f) << 16;
+ x += (uint32) get8_packet(f) << 24;
+ return x;
+}
+
static void flush_packet(vorb *f)
{
while (get8_packet_raw(f) != EOP);
@@ -1888,69 +1927,69 @@ static int predict_point(int x, int x0, int x1, int y0, int y1)
// the following table is block-copied from the specification
static float inverse_db_table[256] =
{
- 1.0649863e-07f, 1.1341951e-07f, 1.2079015e-07f, 1.2863978e-07f,
- 1.3699951e-07f, 1.4590251e-07f, 1.5538408e-07f, 1.6548181e-07f,
- 1.7623575e-07f, 1.8768855e-07f, 1.9988561e-07f, 2.1287530e-07f,
- 2.2670913e-07f, 2.4144197e-07f, 2.5713223e-07f, 2.7384213e-07f,
- 2.9163793e-07f, 3.1059021e-07f, 3.3077411e-07f, 3.5226968e-07f,
- 3.7516214e-07f, 3.9954229e-07f, 4.2550680e-07f, 4.5315863e-07f,
- 4.8260743e-07f, 5.1396998e-07f, 5.4737065e-07f, 5.8294187e-07f,
- 6.2082472e-07f, 6.6116941e-07f, 7.0413592e-07f, 7.4989464e-07f,
- 7.9862701e-07f, 8.5052630e-07f, 9.0579828e-07f, 9.6466216e-07f,
- 1.0273513e-06f, 1.0941144e-06f, 1.1652161e-06f, 1.2409384e-06f,
- 1.3215816e-06f, 1.4074654e-06f, 1.4989305e-06f, 1.5963394e-06f,
- 1.7000785e-06f, 1.8105592e-06f, 1.9282195e-06f, 2.0535261e-06f,
- 2.1869758e-06f, 2.3290978e-06f, 2.4804557e-06f, 2.6416497e-06f,
- 2.8133190e-06f, 2.9961443e-06f, 3.1908506e-06f, 3.3982101e-06f,
- 3.6190449e-06f, 3.8542308e-06f, 4.1047004e-06f, 4.3714470e-06f,
- 4.6555282e-06f, 4.9580707e-06f, 5.2802740e-06f, 5.6234160e-06f,
- 5.9888572e-06f, 6.3780469e-06f, 6.7925283e-06f, 7.2339451e-06f,
- 7.7040476e-06f, 8.2047000e-06f, 8.7378876e-06f, 9.3057248e-06f,
- 9.9104632e-06f, 1.0554501e-05f, 1.1240392e-05f, 1.1970856e-05f,
- 1.2748789e-05f, 1.3577278e-05f, 1.4459606e-05f, 1.5399272e-05f,
- 1.6400004e-05f, 1.7465768e-05f, 1.8600792e-05f, 1.9809576e-05f,
- 2.1096914e-05f, 2.2467911e-05f, 2.3928002e-05f, 2.5482978e-05f,
- 2.7139006e-05f, 2.8902651e-05f, 3.0780908e-05f, 3.2781225e-05f,
- 3.4911534e-05f, 3.7180282e-05f, 3.9596466e-05f, 4.2169667e-05f,
- 4.4910090e-05f, 4.7828601e-05f, 5.0936773e-05f, 5.4246931e-05f,
- 5.7772202e-05f, 6.1526565e-05f, 6.5524908e-05f, 6.9783085e-05f,
- 7.4317983e-05f, 7.9147585e-05f, 8.4291040e-05f, 8.9768747e-05f,
- 9.5602426e-05f, 0.00010181521f, 0.00010843174f, 0.00011547824f,
- 0.00012298267f, 0.00013097477f, 0.00013948625f, 0.00014855085f,
- 0.00015820453f, 0.00016848555f, 0.00017943469f, 0.00019109536f,
- 0.00020351382f, 0.00021673929f, 0.00023082423f, 0.00024582449f,
- 0.00026179955f, 0.00027881276f, 0.00029693158f, 0.00031622787f,
- 0.00033677814f, 0.00035866388f, 0.00038197188f, 0.00040679456f,
- 0.00043323036f, 0.00046138411f, 0.00049136745f, 0.00052329927f,
- 0.00055730621f, 0.00059352311f, 0.00063209358f, 0.00067317058f,
- 0.00071691700f, 0.00076350630f, 0.00081312324f, 0.00086596457f,
- 0.00092223983f, 0.00098217216f, 0.0010459992f, 0.0011139742f,
- 0.0011863665f, 0.0012634633f, 0.0013455702f, 0.0014330129f,
- 0.0015261382f, 0.0016253153f, 0.0017309374f, 0.0018434235f,
- 0.0019632195f, 0.0020908006f, 0.0022266726f, 0.0023713743f,
- 0.0025254795f, 0.0026895994f, 0.0028643847f, 0.0030505286f,
- 0.0032487691f, 0.0034598925f, 0.0036847358f, 0.0039241906f,
- 0.0041792066f, 0.0044507950f, 0.0047400328f, 0.0050480668f,
- 0.0053761186f, 0.0057254891f, 0.0060975636f, 0.0064938176f,
- 0.0069158225f, 0.0073652516f, 0.0078438871f, 0.0083536271f,
- 0.0088964928f, 0.009474637f, 0.010090352f, 0.010746080f,
- 0.011444421f, 0.012188144f, 0.012980198f, 0.013823725f,
- 0.014722068f, 0.015678791f, 0.016697687f, 0.017782797f,
- 0.018938423f, 0.020169149f, 0.021479854f, 0.022875735f,
- 0.024362330f, 0.025945531f, 0.027631618f, 0.029427276f,
- 0.031339626f, 0.033376252f, 0.035545228f, 0.037855157f,
- 0.040315199f, 0.042935108f, 0.045725273f, 0.048696758f,
- 0.051861348f, 0.055231591f, 0.058820850f, 0.062643361f,
- 0.066714279f, 0.071049749f, 0.075666962f, 0.080584227f,
- 0.085821044f, 0.091398179f, 0.097337747f, 0.10366330f,
- 0.11039993f, 0.11757434f, 0.12521498f, 0.13335215f,
- 0.14201813f, 0.15124727f, 0.16107617f, 0.17154380f,
- 0.18269168f, 0.19456402f, 0.20720788f, 0.22067342f,
- 0.23501402f, 0.25028656f, 0.26655159f, 0.28387361f,
- 0.30232132f, 0.32196786f, 0.34289114f, 0.36517414f,
- 0.38890521f, 0.41417847f, 0.44109412f, 0.46975890f,
- 0.50028648f, 0.53279791f, 0.56742212f, 0.60429640f,
- 0.64356699f, 0.68538959f, 0.72993007f, 0.77736504f,
+ 1.0649863e-07f, 1.1341951e-07f, 1.2079015e-07f, 1.2863978e-07f,
+ 1.3699951e-07f, 1.4590251e-07f, 1.5538408e-07f, 1.6548181e-07f,
+ 1.7623575e-07f, 1.8768855e-07f, 1.9988561e-07f, 2.1287530e-07f,
+ 2.2670913e-07f, 2.4144197e-07f, 2.5713223e-07f, 2.7384213e-07f,
+ 2.9163793e-07f, 3.1059021e-07f, 3.3077411e-07f, 3.5226968e-07f,
+ 3.7516214e-07f, 3.9954229e-07f, 4.2550680e-07f, 4.5315863e-07f,
+ 4.8260743e-07f, 5.1396998e-07f, 5.4737065e-07f, 5.8294187e-07f,
+ 6.2082472e-07f, 6.6116941e-07f, 7.0413592e-07f, 7.4989464e-07f,
+ 7.9862701e-07f, 8.5052630e-07f, 9.0579828e-07f, 9.6466216e-07f,
+ 1.0273513e-06f, 1.0941144e-06f, 1.1652161e-06f, 1.2409384e-06f,
+ 1.3215816e-06f, 1.4074654e-06f, 1.4989305e-06f, 1.5963394e-06f,
+ 1.7000785e-06f, 1.8105592e-06f, 1.9282195e-06f, 2.0535261e-06f,
+ 2.1869758e-06f, 2.3290978e-06f, 2.4804557e-06f, 2.6416497e-06f,
+ 2.8133190e-06f, 2.9961443e-06f, 3.1908506e-06f, 3.3982101e-06f,
+ 3.6190449e-06f, 3.8542308e-06f, 4.1047004e-06f, 4.3714470e-06f,
+ 4.6555282e-06f, 4.9580707e-06f, 5.2802740e-06f, 5.6234160e-06f,
+ 5.9888572e-06f, 6.3780469e-06f, 6.7925283e-06f, 7.2339451e-06f,
+ 7.7040476e-06f, 8.2047000e-06f, 8.7378876e-06f, 9.3057248e-06f,
+ 9.9104632e-06f, 1.0554501e-05f, 1.1240392e-05f, 1.1970856e-05f,
+ 1.2748789e-05f, 1.3577278e-05f, 1.4459606e-05f, 1.5399272e-05f,
+ 1.6400004e-05f, 1.7465768e-05f, 1.8600792e-05f, 1.9809576e-05f,
+ 2.1096914e-05f, 2.2467911e-05f, 2.3928002e-05f, 2.5482978e-05f,
+ 2.7139006e-05f, 2.8902651e-05f, 3.0780908e-05f, 3.2781225e-05f,
+ 3.4911534e-05f, 3.7180282e-05f, 3.9596466e-05f, 4.2169667e-05f,
+ 4.4910090e-05f, 4.7828601e-05f, 5.0936773e-05f, 5.4246931e-05f,
+ 5.7772202e-05f, 6.1526565e-05f, 6.5524908e-05f, 6.9783085e-05f,
+ 7.4317983e-05f, 7.9147585e-05f, 8.4291040e-05f, 8.9768747e-05f,
+ 9.5602426e-05f, 0.00010181521f, 0.00010843174f, 0.00011547824f,
+ 0.00012298267f, 0.00013097477f, 0.00013948625f, 0.00014855085f,
+ 0.00015820453f, 0.00016848555f, 0.00017943469f, 0.00019109536f,
+ 0.00020351382f, 0.00021673929f, 0.00023082423f, 0.00024582449f,
+ 0.00026179955f, 0.00027881276f, 0.00029693158f, 0.00031622787f,
+ 0.00033677814f, 0.00035866388f, 0.00038197188f, 0.00040679456f,
+ 0.00043323036f, 0.00046138411f, 0.00049136745f, 0.00052329927f,
+ 0.00055730621f, 0.00059352311f, 0.00063209358f, 0.00067317058f,
+ 0.00071691700f, 0.00076350630f, 0.00081312324f, 0.00086596457f,
+ 0.00092223983f, 0.00098217216f, 0.0010459992f, 0.0011139742f,
+ 0.0011863665f, 0.0012634633f, 0.0013455702f, 0.0014330129f,
+ 0.0015261382f, 0.0016253153f, 0.0017309374f, 0.0018434235f,
+ 0.0019632195f, 0.0020908006f, 0.0022266726f, 0.0023713743f,
+ 0.0025254795f, 0.0026895994f, 0.0028643847f, 0.0030505286f,
+ 0.0032487691f, 0.0034598925f, 0.0036847358f, 0.0039241906f,
+ 0.0041792066f, 0.0044507950f, 0.0047400328f, 0.0050480668f,
+ 0.0053761186f, 0.0057254891f, 0.0060975636f, 0.0064938176f,
+ 0.0069158225f, 0.0073652516f, 0.0078438871f, 0.0083536271f,
+ 0.0088964928f, 0.009474637f, 0.010090352f, 0.010746080f,
+ 0.011444421f, 0.012188144f, 0.012980198f, 0.013823725f,
+ 0.014722068f, 0.015678791f, 0.016697687f, 0.017782797f,
+ 0.018938423f, 0.020169149f, 0.021479854f, 0.022875735f,
+ 0.024362330f, 0.025945531f, 0.027631618f, 0.029427276f,
+ 0.031339626f, 0.033376252f, 0.035545228f, 0.037855157f,
+ 0.040315199f, 0.042935108f, 0.045725273f, 0.048696758f,
+ 0.051861348f, 0.055231591f, 0.058820850f, 0.062643361f,
+ 0.066714279f, 0.071049749f, 0.075666962f, 0.080584227f,
+ 0.085821044f, 0.091398179f, 0.097337747f, 0.10366330f,
+ 0.11039993f, 0.11757434f, 0.12521498f, 0.13335215f,
+ 0.14201813f, 0.15124727f, 0.16107617f, 0.17154380f,
+ 0.18269168f, 0.19456402f, 0.20720788f, 0.22067342f,
+ 0.23501402f, 0.25028656f, 0.26655159f, 0.28387361f,
+ 0.30232132f, 0.32196786f, 0.34289114f, 0.36517414f,
+ 0.38890521f, 0.41417847f, 0.44109412f, 0.46975890f,
+ 0.50028648f, 0.53279791f, 0.56742212f, 0.60429640f,
+ 0.64356699f, 0.68538959f, 0.72993007f, 0.77736504f,
0.82788260f, 0.88168307f, 0.9389798f, 1.0f
};
@@ -2010,7 +2049,7 @@ static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y
ady -= abs(base) * adx;
if (x1 > n) x1 = n;
if (x < x1) {
- LINE_OP(output[x], inverse_db_table[y]);
+ LINE_OP(output[x], inverse_db_table[y&255]);
for (++x; x < x1; ++x) {
err += ady;
if (err >= adx) {
@@ -2018,7 +2057,7 @@ static __forceinline void draw_line(float *output, int x0, int y0, int x1, int y
y += sy;
} else
y += base;
- LINE_OP(output[x], inverse_db_table[y]);
+ LINE_OP(output[x], inverse_db_table[y&255]);
}
}
}
@@ -2042,6 +2081,8 @@ static int residue_decode(vorb *f, Codebook *book, float *target, int offset, in
return TRUE;
}
+// n is 1/2 of the blocksize --
+// specification: "Correct per-vector decode length is [n]/2"
static void decode_residue(vorb *f, float *residue_buffers[], int ch, int n, int rn, uint8 *do_not_decode)
{
int i,j,pass;
@@ -2049,7 +2090,10 @@ static void decode_residue(vorb *f, float *residue_buffers[], int ch, int n, int
int rtype = f->residue_types[rn];
int c = r->classbook;
int classwords = f->codebooks[c].dimensions;
- int n_read = r->end - r->begin;
+ unsigned int actual_size = rtype == 2 ? n*2 : n;
+ unsigned int limit_r_begin = (r->begin < actual_size ? r->begin : actual_size);
+ unsigned int limit_r_end = (r->end < actual_size ? r->end : actual_size);
+ int n_read = limit_r_end - limit_r_begin;
int part_read = n_read / r->part_size;
int temp_alloc_point = temp_alloc_save(f);
#ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
@@ -2119,47 +2163,7 @@ static void decode_residue(vorb *f, float *residue_buffers[], int ch, int n, int
++class_set;
#endif
}
- } else if (ch == 1) {
- while (pcount < part_read) {
- int z = r->begin + pcount*r->part_size;
- int c_inter = 0, p_inter = z;
- if (pass == 0) {
- Codebook *c = f->codebooks+r->classbook;
- int q;
- DECODE(q,f,c);
- if (q == EOP) goto done;
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- part_classdata[0][class_set] = r->classdata[q];
- #else
- for (i=classwords-1; i >= 0; --i) {
- classifications[0][i+pcount] = q % r->classifications;
- q /= r->classifications;
- }
- #endif
- }
- for (i=0; i < classwords && pcount < part_read; ++i, ++pcount) {
- int z = r->begin + pcount*r->part_size;
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- int c = part_classdata[0][class_set][i];
- #else
- int c = classifications[0][pcount];
- #endif
- int b = r->residue_books[c][pass];
- if (b >= 0) {
- Codebook *book = f->codebooks + b;
- if (!codebook_decode_deinterleave_repeat(f, book, residue_buffers, ch, &c_inter, &p_inter, n, r->part_size))
- goto done;
- } else {
- z += r->part_size;
- c_inter = 0;
- p_inter = z;
- }
- }
- #ifndef STB_VORBIS_DIVIDES_IN_RESIDUE
- ++class_set;
- #endif
- }
- } else {
+ } else if (ch > 2) {
while (pcount < part_read) {
int z = r->begin + pcount*r->part_size;
int c_inter = z % ch, p_inter = z/ch;
@@ -2346,11 +2350,11 @@ void inverse_mdct_slow(float *buffer, int n, vorb *f, int blocktype)
#if LIBVORBIS_MDCT
// directly call the vorbis MDCT using an interface documented
// by Jeff Roberts... useful for performance comparison
-typedef struct
+typedef struct
{
int n;
int log2n;
-
+
float *trig;
int *bitrev;
@@ -2369,7 +2373,7 @@ void inverse_mdct(float *buffer, int n, vorb *f, int blocktype)
if (M1.n == n) M = &M1;
else if (M2.n == n) M = &M2;
else if (M1.n == 0) { mdct_init(&M1, n); M = &M1; }
- else {
+ else {
if (M2.n) __asm int 3;
mdct_init(&M2, n);
M = &M2;
@@ -2632,7 +2636,7 @@ static void inverse_mdct(float *buffer, int n, vorb *f, int blocktype)
// once I combined the passes.
// so there's a missing 'times 2' here (for adding X to itself).
- // this propogates through linearly to the end, where the numbers
+ // this propagates through linearly to the end, where the numbers
// are 1/2 too small, and need to be compensated for.
{
@@ -2782,7 +2786,7 @@ static void inverse_mdct(float *buffer, int n, vorb *f, int blocktype)
d1[0] = u[k4+1];
d0[1] = u[k4+2];
d0[0] = u[k4+3];
-
+
d0 -= 4;
d1 -= 4;
bitrev += 2;
@@ -2863,7 +2867,7 @@ static void inverse_mdct(float *buffer, int n, vorb *f, int blocktype)
float p0,p1,p2,p3;
p3 = e[6]*B[7] - e[7]*B[6];
- p2 = -e[6]*B[6] - e[7]*B[7];
+ p2 = -e[6]*B[6] - e[7]*B[7];
d0[0] = p3;
d1[3] = - p3;
@@ -2871,7 +2875,7 @@ static void inverse_mdct(float *buffer, int n, vorb *f, int blocktype)
d3[3] = p2;
p1 = e[4]*B[5] - e[5]*B[4];
- p0 = -e[4]*B[4] - e[5]*B[5];
+ p0 = -e[4]*B[4] - e[5]*B[5];
d0[1] = p1;
d1[2] = - p1;
@@ -2879,7 +2883,7 @@ static void inverse_mdct(float *buffer, int n, vorb *f, int blocktype)
d3[2] = p0;
p3 = e[2]*B[3] - e[3]*B[2];
- p2 = -e[2]*B[2] - e[3]*B[3];
+ p2 = -e[2]*B[2] - e[3]*B[3];
d0[2] = p3;
d1[1] = - p3;
@@ -2887,7 +2891,7 @@ static void inverse_mdct(float *buffer, int n, vorb *f, int blocktype)
d3[1] = p2;
p1 = e[0]*B[1] - e[1]*B[0];
- p0 = -e[0]*B[0] - e[1]*B[1];
+ p0 = -e[0]*B[0] - e[1]*B[1];
d0[3] = p1;
d1[0] = - p1;
@@ -3040,7 +3044,6 @@ static float *get_window(vorb *f, int len)
len <<= 1;
if (len == f->blocksize_0) return f->window[0];
if (len == f->blocksize_1) return f->window[1];
- assert(0);
return NULL;
}
@@ -3391,7 +3394,7 @@ static int vorbis_decode_packet_rest(vorb *f, int *len, Mode *m, int left_start,
if (f->last_seg_which == f->end_seg_with_known_loc) {
// if we have a valid current loc, and this is final:
if (f->current_loc_valid && (f->page_flag & PAGEFLAG_last_page)) {
- uint32 current_end = f->known_loc_for_packet - (n-right_end);
+ uint32 current_end = f->known_loc_for_packet;
// then let's infer the size of the (probably) short final frame
if (current_end < f->current_loc + (right_end-left_start)) {
if (current_end < f->current_loc) {
@@ -3400,7 +3403,7 @@ static int vorbis_decode_packet_rest(vorb *f, int *len, Mode *m, int left_start,
} else {
*len = current_end - f->current_loc;
}
- *len += left_start;
+ *len += left_start; // this doesn't seem right, but has no ill effect on my test files
if (*len > right_end) *len = right_end; // this should never happen
f->current_loc += *len;
return TRUE;
@@ -3446,6 +3449,7 @@ static int vorbis_finish_frame(stb_vorbis *f, int len, int left, int right)
if (f->previous_length) {
int i,j, n = f->previous_length;
float *w = get_window(f, n);
+ if (w == NULL) return 0;
for (i=0; i < f->channels; ++i) {
for (j=0; j < n; ++j)
f->channel_buffers[i][left+j] =
@@ -3493,7 +3497,7 @@ static int vorbis_pump_first_frame(stb_vorbis *f)
}
#ifndef STB_VORBIS_NO_PUSHDATA_API
-static int is_whole_packet_present(stb_vorbis *f, int end_page)
+static int is_whole_packet_present(stb_vorbis *f)
{
// make sure that we have the packet available before continuing...
// this requires a full ogg parse, but we know we can fetch from f->stream
@@ -3513,15 +3517,13 @@ static int is_whole_packet_present(stb_vorbis *f, int end_page)
break;
}
// either this continues, or it ends it...
- if (end_page)
- if (s < f->segment_count-1) return error(f, VORBIS_invalid_stream);
if (s == f->segment_count)
s = -1; // set 'crosses page' flag
if (p > f->stream_end) return error(f, VORBIS_need_more_data);
first = FALSE;
}
for (; s == -1;) {
- uint8 *q;
+ uint8 *q;
int n;
// check that we have the page header ready
@@ -3547,8 +3549,6 @@ static int is_whole_packet_present(stb_vorbis *f, int end_page)
if (q[s] < 255)
break;
}
- if (end_page)
- if (s < n-1) return error(f, VORBIS_invalid_stream);
if (s == n)
s = -1; // set 'crosses page' flag
if (p > f->stream_end) return error(f, VORBIS_need_more_data);
@@ -3565,6 +3565,7 @@ static int start_decoder(vorb *f)
int longest_floorlist=0;
// first page, first packet
+ f->first_decode = TRUE;
if (!start_page(f)) return FALSE;
// validate page flag
@@ -3573,7 +3574,22 @@ static int start_decoder(vorb *f)
if (f->page_flag & PAGEFLAG_continued_packet) return error(f, VORBIS_invalid_first_page);
// check for expected packet length
if (f->segment_count != 1) return error(f, VORBIS_invalid_first_page);
- if (f->segments[0] != 30) return error(f, VORBIS_invalid_first_page);
+ if (f->segments[0] != 30) {
+ // check for the Ogg skeleton fishead identifying header to refine our error
+ if (f->segments[0] == 64 &&
+ getn(f, header, 6) &&
+ header[0] == 'f' &&
+ header[1] == 'i' &&
+ header[2] == 's' &&
+ header[3] == 'h' &&
+ header[4] == 'e' &&
+ header[5] == 'a' &&
+ get8(f) == 'd' &&
+ get8(f) == '\0') return error(f, VORBIS_ogg_skeleton_not_supported);
+ else
+ return error(f, VORBIS_invalid_first_page);
+ }
+
// read packet
// check packet header
if (get8(f) != VORBIS_packet_id) return error(f, VORBIS_invalid_first_page);
@@ -3607,6 +3623,41 @@ static int start_decoder(vorb *f)
if (!start_page(f)) return FALSE;
if (!start_packet(f)) return FALSE;
+
+ if (!next_segment(f)) return FALSE;
+
+ if (get8_packet(f) != VORBIS_packet_comment) return error(f, VORBIS_invalid_setup);
+ for (i=0; i < 6; ++i) header[i] = get8_packet(f);
+ if (!vorbis_validate(header)) return error(f, VORBIS_invalid_setup);
+ //file vendor
+ len = get32_packet(f);
+ f->vendor = (char*)setup_malloc(f, sizeof(char) * (len+1));
+ for(i=0; i < len; ++i) {
+ f->vendor[i] = get8_packet(f);
+ }
+ f->vendor[len] = (char)'\0';
+ //user comments
+ f->comment_list_length = get32_packet(f);
+ f->comment_list = (char**)setup_malloc(f, sizeof(char*) * (f->comment_list_length));
+
+ for(i=0; i < f->comment_list_length; ++i) {
+ len = get32_packet(f);
+ f->comment_list[i] = (char*)setup_malloc(f, sizeof(char) * (len+1));
+
+ for(j=0; j < len; ++j) {
+ f->comment_list[i][j] = get8_packet(f);
+ }
+ f->comment_list[i][len] = (char)'\0';
+ }
+
+ // framing_flag
+ x = get8_packet(f);
+ if (!(x & 1)) return error(f, VORBIS_invalid_setup);
+
+
+ skip(f, f->bytes_in_seg);
+ f->bytes_in_seg = 0;
+
do {
len = next_segment(f);
skip(f, len);
@@ -3618,7 +3669,7 @@ static int start_decoder(vorb *f)
#ifndef STB_VORBIS_NO_PUSHDATA_API
if (IS_PUSH_MODE(f)) {
- if (!is_whole_packet_present(f, TRUE)) {
+ if (!is_whole_packet_present(f)) {
// convert error in ogg header to write type
if (f->error == VORBIS_invalid_stream)
f->error = VORBIS_invalid_setup;
@@ -3672,6 +3723,7 @@ static int start_decoder(vorb *f)
while (current_entry < c->entries) {
int limit = c->entries - current_entry;
int n = get_bits(f, ilog(limit));
+ if (current_length >= 32) return error(f, VORBIS_invalid_setup);
if (current_entry + n > (int) c->entries) { return error(f, VORBIS_invalid_setup); }
memset(lengths + current_entry, current_length, n);
current_entry += n;
@@ -3775,7 +3827,9 @@ static int start_decoder(vorb *f)
c->value_bits = get_bits(f, 4)+1;
c->sequence_p = get_bits(f,1);
if (c->lookup_type == 1) {
- c->lookup_values = lookup1_values(c->entries, c->dimensions);
+ int values = lookup1_values(c->entries, c->dimensions);
+ if (values < 0) return error(f, VORBIS_invalid_setup);
+ c->lookup_values = (uint32) values;
} else {
c->lookup_values = c->entries * c->dimensions;
}
@@ -3874,7 +3928,7 @@ static int start_decoder(vorb *f)
} else {
stbv__floor_ordering p[31*8+2];
Floor1 *g = &f->floor_config[i].floor1;
- int max_class = -1;
+ int max_class = -1;
g->partitions = get_bits(f, 5);
for (j=0; j < g->partitions; ++j) {
g->partition_class_list[j] = get_bits(f, 4);
@@ -3911,11 +3965,14 @@ static int start_decoder(vorb *f)
p[j].id = j;
}
qsort(p, g->values, sizeof(p[0]), point_compare);
+ for (j=0; j < g->values-1; ++j)
+ if (p[j].x == p[j+1].x)
+ return error(f, VORBIS_invalid_setup);
for (j=0; j < g->values; ++j)
g->sorted_order[j] = (uint8) p[j].id;
// precompute the neighbors
for (j=2; j < g->values; ++j) {
- int low,hi;
+ int low = 0,hi = 0;
neighbors(g->Xlist, j, &low,&hi);
g->neighbors[j][0] = low;
g->neighbors[j][1] = hi;
@@ -3984,7 +4041,7 @@ static int start_decoder(vorb *f)
if (f->mapping == NULL) return error(f, VORBIS_outofmem);
memset(f->mapping, 0, f->mapping_count * sizeof(*f->mapping));
for (i=0; i < f->mapping_count; ++i) {
- Mapping *m = f->mapping + i;
+ Mapping *m = f->mapping + i;
int mapping_type = get_bits(f,16);
if (mapping_type != 0) return error(f, VORBIS_invalid_setup);
m->chan = (MappingChannel *) setup_malloc(f, f->channels * sizeof(*m->chan));
@@ -3997,6 +4054,7 @@ static int start_decoder(vorb *f)
max_submaps = m->submaps;
if (get_bits(f,1)) {
m->coupling_steps = get_bits(f,8)+1;
+ if (m->coupling_steps > f->channels) return error(f, VORBIS_invalid_setup);
for (k=0; k < m->coupling_steps; ++k) {
m->chan[k].magnitude = get_bits(f, ilog(f->channels-1));
m->chan[k].angle = get_bits(f, ilog(f->channels-1));
@@ -4050,6 +4108,7 @@ static int start_decoder(vorb *f)
f->previous_window[i] = (float *) setup_malloc(f, sizeof(float) * f->blocksize_1/2);
f->finalY[i] = (int16 *) setup_malloc(f, sizeof(int16) * longest_floorlist);
if (f->channel_buffers[i] == NULL || f->previous_window[i] == NULL || f->finalY[i] == NULL) return error(f, VORBIS_outofmem);
+ memset(f->channel_buffers[i], 0, sizeof(float) * f->blocksize_1);
#ifdef STB_VORBIS_NO_DEFER_FLOOR
f->floor_buffers[i] = (float *) setup_malloc(f, sizeof(float) * f->blocksize_1/2);
if (f->floor_buffers[i] == NULL) return error(f, VORBIS_outofmem);
@@ -4077,7 +4136,10 @@ static int start_decoder(vorb *f)
int i,max_part_read=0;
for (i=0; i < f->residue_count; ++i) {
Residue *r = f->residue_config + i;
- int n_read = r->end - r->begin;
+ unsigned int actual_size = f->blocksize_1 / 2;
+ unsigned int limit_r_begin = r->begin < actual_size ? r->begin : actual_size;
+ unsigned int limit_r_end = r->end < actual_size ? r->end : actual_size;
+ int n_read = limit_r_end - limit_r_begin;
int part_read = n_read / r->part_size;
if (part_read > max_part_read)
max_part_read = part_read;
@@ -4088,12 +4150,13 @@ static int start_decoder(vorb *f)
classify_mem = f->channels * (sizeof(void*) + max_part_read * sizeof(int *));
#endif
+ // maximum reasonable partition size is f->blocksize_1
+
f->temp_memory_required = classify_mem;
if (imdct_mem > f->temp_memory_required)
f->temp_memory_required = imdct_mem;
}
- f->first_decode = TRUE;
if (f->alloc.alloc_buffer) {
assert(f->temp_offset == f->alloc.alloc_buffer_length_in_bytes);
@@ -4102,7 +4165,17 @@ static int start_decoder(vorb *f)
return error(f, VORBIS_outofmem);
}
- f->first_audio_page_offset = stb_vorbis_get_file_offset(f);
+ // @TODO: stb_vorbis_seek_start expects first_audio_page_offset to point to a page
+ // without PAGEFLAG_continued_packet, so this either points to the first page, or
+ // the page after the end of the headers. It might be cleaner to point to a page
+ // in the middle of the headers, when that's the page where the first audio packet
+ // starts, but we'd have to also correctly skip the end of any continued packet in
+ // stb_vorbis_seek_start.
+ if (f->next_seg == -1) {
+ f->first_audio_page_offset = stb_vorbis_get_file_offset(f);
+ } else {
+ f->first_audio_page_offset = 0;
+ }
return TRUE;
}
@@ -4110,6 +4183,13 @@ static int start_decoder(vorb *f)
static void vorbis_deinit(stb_vorbis *p)
{
int i,j;
+
+ setup_free(p, p->vendor);
+ for (i=0; i < p->comment_list_length; ++i) {
+ setup_free(p, p->comment_list[i]);
+ }
+ setup_free(p, p->comment_list);
+
if (p->residue_config) {
for (i=0; i < p->residue_count; ++i) {
Residue *r = p->residue_config+i;
@@ -4209,6 +4289,15 @@ stb_vorbis_info stb_vorbis_get_info(stb_vorbis *f)
return d;
}
+stb_vorbis_comment stb_vorbis_get_comment(stb_vorbis *f)
+{
+ stb_vorbis_comment d;
+ d.vendor = f->vendor;
+ d.comment_list_length = f->comment_list_length;
+ d.comment_list = f->comment_list;
+ return d;
+}
+
int stb_vorbis_get_error(stb_vorbis *f)
{
int e = f->error;
@@ -4350,7 +4439,7 @@ int stb_vorbis_decode_frame_pushdata(
f->error = VORBIS__no_error;
// check that we have the entire packet in memory
- if (!is_whole_packet_present(f, FALSE)) {
+ if (!is_whole_packet_present(f)) {
*samples = 0;
return 0;
}
@@ -4555,7 +4644,7 @@ static int get_seek_page_info(stb_vorbis *f, ProbedPage *z)
return 1;
}
-// rarely used function to seek back to the preceeding page while finding the
+// rarely used function to seek back to the preceding page while finding the
// start of a packet
static int go_to_page_before(stb_vorbis *f, unsigned int limit_offset)
{
@@ -4586,8 +4675,8 @@ static int seek_to_sample_coarse(stb_vorbis *f, uint32 sample_number)
{
ProbedPage left, right, mid;
int i, start_seg_with_known_loc, end_pos, page_start;
- uint32 delta, stream_length, padding;
- double offset = 0, bytes_per_sample = 0;
+ uint32 delta, stream_length, padding, last_sample_limit;
+ double offset = 0.0, bytes_per_sample = 0.0;
int probe = 0;
// find the last page and validate the target sample
@@ -4600,9 +4689,9 @@ static int seek_to_sample_coarse(stb_vorbis *f, uint32 sample_number)
// indicates should be the granule position (give or take one)).
padding = ((f->blocksize_1 - f->blocksize_0) >> 2);
if (sample_number < padding)
- sample_number = 0;
+ last_sample_limit = 0;
else
- sample_number -= padding;
+ last_sample_limit = sample_number - padding;
left = f->p_first;
while (left.last_decoded_sample == ~0U) {
@@ -4615,9 +4704,12 @@ static int seek_to_sample_coarse(stb_vorbis *f, uint32 sample_number)
assert(right.last_decoded_sample != ~0U);
// starting from the start is handled differently
- if (sample_number <= left.last_decoded_sample) {
- if (stb_vorbis_seek_start(f))
+ if (last_sample_limit <= left.last_decoded_sample) {
+ if (stb_vorbis_seek_start(f)) {
+ if (f->current_loc > sample_number)
+ return error(f, VORBIS_seek_failed);
return 1;
+ }
return 0;
}
@@ -4634,10 +4726,10 @@ static int seek_to_sample_coarse(stb_vorbis *f, uint32 sample_number)
// first probe (interpolate)
double data_bytes = right.page_end - left.page_start;
bytes_per_sample = data_bytes / right.last_decoded_sample;
- offset = left.page_start + bytes_per_sample * (sample_number - left.last_decoded_sample);
+ offset = left.page_start + bytes_per_sample * (last_sample_limit - left.last_decoded_sample);
} else {
// second probe (try to bound the other side)
- double error = ((double) sample_number - mid.last_decoded_sample) * bytes_per_sample;
+ double error = ((double) last_sample_limit - mid.last_decoded_sample) * bytes_per_sample;
if (error >= 0 && error < 8000) error = 8000;
if (error < 0 && error > -8000) error = -8000;
offset += error * 2;
@@ -4668,14 +4760,16 @@ static int seek_to_sample_coarse(stb_vorbis *f, uint32 sample_number)
}
// if we've just found the last page again then we're in a tricky file,
- // and we're close enough.
- if (mid.page_start == right.page_start)
- break;
-
- if (sample_number < mid.last_decoded_sample)
- right = mid;
- else
- left = mid;
+ // and we're close enough (if it wasn't an interpolation probe).
+ if (mid.page_start == right.page_start) {
+ if (probe >= 2 || delta <= 65536)
+ break;
+ } else {
+ if (last_sample_limit < mid.last_decoded_sample)
+ right = mid;
+ else
+ left = mid;
+ }
++probe;
}
@@ -4791,8 +4885,8 @@ int stb_vorbis_seek_frame(stb_vorbis *f, unsigned int sample_number)
flush_packet(f);
}
}
- // the next frame will start with the sample
- assert(f->current_loc == sample_number);
+ // the next frame should start with the sample
+ if (f->current_loc != sample_number) return error(f, VORBIS_seek_failed);
return 1;
}
@@ -4962,8 +5056,14 @@ stb_vorbis * stb_vorbis_open_file(FILE *file, int close_on_free, int *error, con
stb_vorbis * stb_vorbis_open_filename(const char *filename, int *error, const stb_vorbis_alloc *alloc)
{
- FILE *f = fopen(filename, "rb");
- if (f)
+ FILE *f;
+#if defined(_WIN32) && defined(__STDC_WANT_SECURE_LIB__)
+ if (0 != fopen_s(&f, filename, "rb"))
+ f = NULL;
+#else
+ f = fopen(filename, "rb");
+#endif
+ if (f)
return stb_vorbis_open_file(f, TRUE, error, alloc);
if (error) *error = VORBIS_file_open_failure;
return NULL;
@@ -5026,7 +5126,7 @@ static int8 channel_position[7][6] =
#define MAGIC(SHIFT) (1.5f * (1 << (23-SHIFT)) + 0.5f/(1 << SHIFT))
#define ADDEND(SHIFT) (((150-SHIFT) << 23) + (1 << 22))
#define FAST_SCALED_FLOAT_TO_INT(temp,x,s) (temp.f = (x) + MAGIC(s), temp.i - ADDEND(s))
- #define check_endianness()
+ #define check_endianness()
#else
#define FAST_SCALED_FLOAT_TO_INT(temp,x,s) ((int) ((x) * (1 << (s))))
#define check_endianness()
@@ -5128,7 +5228,7 @@ static void convert_samples_short(int buf_c, short **buffer, int b_offset, int d
int stb_vorbis_get_frame_short(stb_vorbis *f, int num_c, short **buffer, int num_samples)
{
- float **output;
+ float **output = NULL;
int len = stb_vorbis_get_frame_float(f, NULL, &output);
if (len > num_samples) len = num_samples;
if (len)
@@ -5351,20 +5451,28 @@ int stb_vorbis_get_samples_float(stb_vorbis *f, int channels, float **buffer, in
#endif // STB_VORBIS_NO_PULLDATA_API
/* Version history
- 1.10 - 2017/03/03 - more robust seeking; fix negative ilog(); clear error in open_memory
- 1.09 - 2016/04/04 - back out 'avoid discarding last frame' fix from previous version
- 1.08 - 2016/04/02 - fixed multiple warnings; fix setup memory leaks;
+ 1.17 - 2019-07-08 - fix CVE-2019-13217, -13218, -13219, -13220, -13221, -13222, -13223
+ found with Mayhem by ForAllSecure
+ 1.16 - 2019-03-04 - fix warnings
+ 1.15 - 2019-02-07 - explicit failure if Ogg Skeleton data is found
+ 1.14 - 2018-02-11 - delete bogus dealloca usage
+ 1.13 - 2018-01-29 - fix truncation of last frame (hopefully)
+ 1.12 - 2017-11-21 - limit residue begin/end to blocksize/2 to avoid large temp allocs in bad/corrupt files
+ 1.11 - 2017-07-23 - fix MinGW compilation
+ 1.10 - 2017-03-03 - more robust seeking; fix negative ilog(); clear error in open_memory
+ 1.09 - 2016-04-04 - back out 'avoid discarding last frame' fix from previous version
+ 1.08 - 2016-04-02 - fixed multiple warnings; fix setup memory leaks;
avoid discarding last frame of audio data
- 1.07 - 2015/01/16 - fixed some warnings, fix mingw, const-correct API
- some more crash fixes when out of memory or with corrupt files
- 1.06 - 2015/08/31 - full, correct support for seeking API (Dougall Johnson)
+ 1.07 - 2015-01-16 - fixed some warnings, fix mingw, const-correct API
+ some more crash fixes when out of memory or with corrupt files
+ 1.06 - 2015-08-31 - full, correct support for seeking API (Dougall Johnson)
some crash fixes when out of memory or with corrupt files
- 1.05 - 2015/04/19 - don't define __forceinline if it's redundant
- 1.04 - 2014/08/27 - fix missing const-correct case in API
- 1.03 - 2014/08/07 - Warning fixes
- 1.02 - 2014/07/09 - Declare qsort compare function _cdecl on windows
- 1.01 - 2014/06/18 - fix stb_vorbis_get_samples_float
- 1.0 - 2014/05/26 - fix memory leaks; fix warnings; fix bugs in multichannel
+ 1.05 - 2015-04-19 - don't define __forceinline if it's redundant
+ 1.04 - 2014-08-27 - fix missing const-correct case in API
+ 1.03 - 2014-08-07 - Warning fixes
+ 1.02 - 2014-07-09 - Declare qsort compare function _cdecl on windows
+ 1.01 - 2014-06-18 - fix stb_vorbis_get_samples_float
+ 1.0 - 2014-05-26 - fix memory leaks; fix warnings; fix bugs in multichannel
(API change) report sample rate for decode-full-file funcs
0.99996 - bracket #include for macintosh compilation by Laurent Gomila
0.99995 - use union instead of pointer-cast for fast-float-to-int to avoid alias-optimization problem
@@ -5412,38 +5520,38 @@ This software is available under 2 licenses -- choose whichever you prefer.
------------------------------------------------------------------------------
ALTERNATIVE A - MIT License
Copyright (c) 2017 Sean Barrett
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
+The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
ALTERNATIVE B - Public Domain (www.unlicense.org)
This is free and unencumbered software released into the public domain.
-Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
-software, either in source code form or as a compiled binary, for any purpose,
+Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
+software, either in source code form or as a compiled binary, for any purpose,
commercial or non-commercial, and by any means.
-In jurisdictions that recognize copyright laws, the author or authors of this
-software dedicate any and all copyright interest in the software to the public
-domain. We make this dedication for the benefit of the public at large and to
-the detriment of our heirs and successors. We intend this dedication to be an
-overt act of relinquishment in perpetuity of all present and future rights to
+In jurisdictions that recognize copyright laws, the author or authors of this
+software dedicate any and all copyright interest in the software to the public
+domain. We make this dedication for the benefit of the public at large and to
+the detriment of our heirs and successors. We intend this dedication to be an
+overt act of relinquishment in perpetuity of all present and future rights to
this software under copyright law.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------
*/
diff --git a/client/snd_miniaudio.c b/client/snd_miniaudio.c
index 77179f43..ce581f9d 100644
--- a/client/snd_miniaudio.c
+++ b/client/snd_miniaudio.c
@@ -52,12 +52,12 @@ static qboolean paused = false;
static qboolean playLooping = false;
static qboolean trackFinished = false;
-static cvar_t *cd_nocd;
+static cvar_t *cd_volume;
static cvar_t *cd_loopcount;
static cvar_t *cd_looptrack;
#ifdef __APPLE__
-static ma_uint32 bufferSizeInFrames;
+static ma_uint32 periodSizeInFrames;
static UInt32 GetBytesPerSampleFrame()
{
@@ -194,14 +194,14 @@ static void Miniaudio_f(void)
void Miniaudio_Init(void)
{
- cd_nocd = Cvar_Get("cd_nocd", "0", CVAR_ARCHIVE);
+ cd_volume = Cvar_Get("cd_volume", "1", CVAR_ARCHIVE);
cd_loopcount = Cvar_Get("cd_loopcount", "4", 0);
cd_looptrack = Cvar_Get("cd_looptrack", "11", 0);
enabled = true;
paused = false;
#ifdef __APPLE__
- bufferSizeInFrames = Cvar_VariableValue("s_chunksize") * sizeof(float) / GetBytesPerSampleFrame();
+ periodSizeInFrames = Cvar_VariableValue("s_chunksize") * sizeof(float) / GetBytesPerSampleFrame();
#endif
Cmd_AddCommand("miniaudio", Miniaudio_f);
}
@@ -257,7 +257,7 @@ void Miniaudio_Play(int track, qboolean looping)
deviceConfig.dataCallback = data_callback;
deviceConfig.pUserData = &decoder;
#ifdef __APPLE__
- deviceConfig.bufferSizeInFrames = bufferSizeInFrames;
+ deviceConfig.periodSizeInFrames = periodSizeInFrames;
deviceConfig.periods = 1;
#endif
@@ -280,8 +280,10 @@ void Miniaudio_Play(int track, qboolean looping)
paused = false;
trackFinished = false;
- if (Cvar_VariableValue("cd_nocd"))
+ if ( Cvar_VariableValue("cd_volume") == 0 )
Miniaudio_Pause();
+
+ ma_device_set_master_volume(&device, cd_volume->value);
}
void Miniaudio_Stop(void)
@@ -298,9 +300,21 @@ void Miniaudio_Stop(void)
void Miniaudio_Update(void)
{
- if (cd_nocd->value != !enabled)
+ if (cd_volume->modified)
+ {
+ cd_volume->modified = false;
+
+ if (cd_volume->value < 0.f)
+ Cvar_SetValue("cd_volume", 0.f);
+ if (cd_volume->value > 1.f)
+ Cvar_SetValue("cd_volume", 1.f);
+
+ ma_device_set_master_volume(&device, cd_volume->value);
+ }
+
+ if ((cd_volume->value == 0) != !enabled)
{
- if (cd_nocd->value)
+ if (cd_volume->value == 0)
{
Miniaudio_Pause();
enabled = false;
diff --git a/linux/cd_linux.c b/linux/cd_linux.c
index 04154dac..e1bea173 100644
--- a/linux/cd_linux.c
+++ b/linux/cd_linux.c
@@ -65,7 +65,6 @@ static int cdfile = -1;
//static char cd_dev[64] = "/dev/cdrom";
cvar_t *cd_volume;
-cvar_t *cd_nocd;
cvar_t *cd_dev;
void CDAudio_Pause(void);
@@ -445,11 +444,9 @@ int CDAudio_Init(void)
if (cv->value)
return -1;
- cd_nocd = Cvar_Get ("cd_nocd", "0", CVAR_ARCHIVE );
- if ( cd_nocd->value)
- return -1;
-
cd_volume = Cvar_Get ("cd_volume", "1", CVAR_ARCHIVE);
+ if ( cd_volume->value == 0 )
+ return -1;
cd_dev = Cvar_Get("cd_dev", "/dev/cdrom", CVAR_ARCHIVE);
diff --git a/macos/quake2.xcodeproj/project.pbxproj b/macos/quake2.xcodeproj/project.pbxproj
index 045cc853..01b3ab29 100644
--- a/macos/quake2.xcodeproj/project.pbxproj
+++ b/macos/quake2.xcodeproj/project.pbxproj
@@ -274,7 +274,7 @@
E29552F521F620FB00336DD3 /* Project object */ = {
isa = PBXProject;
attributes = {
- LastUpgradeCheck = 1010;
+ LastUpgradeCheck = 1130;
ORGANIZATIONNAME = Krzysztof;
TargetAttributes = {
E29552FC21F620FB00336DD3 = {
@@ -288,6 +288,7 @@
hasScannedForEncodings = 0;
knownRegions = (
en,
+ Base,
);
mainGroup = E29552F421F620FB00336DD3;
productRefGroup = E29552FE21F620FB00336DD3 /* Products */;
@@ -491,6 +492,7 @@
E295530521F620FB00336DD3 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
+ CODE_SIGN_IDENTITY = "-";
CODE_SIGN_STYLE = Automatic;
"LD_RUNPATH_SEARCH_PATHS[arch=*]" = $VULKAN_SDK/macOS/lib;
"OTHER_CFLAGS[arch=*]" = (
@@ -509,6 +511,7 @@
E295530621F620FB00336DD3 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
+ CODE_SIGN_IDENTITY = "-";
CODE_SIGN_STYLE = Automatic;
"LD_RUNPATH_SEARCH_PATHS[arch=*]" = $VULKAN_SDK/macOS/lib;
"OTHER_CFLAGS[arch=*]" = (
diff --git a/macos/vkQuake2.xcworkspace/xcshareddata/xcschemes/vkQuake2.xcscheme b/macos/vkQuake2.xcworkspace/xcshareddata/xcschemes/vkQuake2.xcscheme
index 1fa5e439..3aec580a 100644
--- a/macos/vkQuake2.xcworkspace/xcshareddata/xcschemes/vkQuake2.xcscheme
+++ b/macos/vkQuake2.xcworkspace/xcshareddata/xcschemes/vkQuake2.xcscheme
@@ -1,6 +1,6 @@
value != !enabled )
+ if ( (cd_volume->value == 0) != !enabled )
{
- if ( cd_nocd->value )
+ if ( cd_volume->value == 0 )
{
CDAudio_Stop ();
enabled = false;
@@ -441,10 +441,10 @@ int CDAudio_Init(void)
MCI_SET_PARMS mciSetParms;
int n;
- cd_nocd = Cvar_Get ("cd_nocd", "0", CVAR_ARCHIVE );
+ cd_volume = Cvar_Get ("cd_volume", "1", CVAR_ARCHIVE );
cd_loopcount = Cvar_Get ("cd_loopcount", "4", 0);
cd_looptrack = Cvar_Get ("cd_looptrack", "11", 0);
- if ( cd_nocd->value)
+ if ( cd_volume->value == 0 )
return -1;
mciOpenParms.lpstrDeviceType = "cdaudio";