Commit 98d6b80b authored by Alexandre Lision

Revert "coreaudio: modern C++"

This reverts commit f3d677fb.

Change-Id: Iebfbb5554c1d910c7ea0d919b13669e760d512ca
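For context, the substantive change is how the capture-side AudioBufferList is owned: the reverted commit (f3d677fb) backed the list and all channel buffers with a single std::unique_ptr<Byte[]>, while the code restored here malloc()s the list plus each channel buffer and free()s them in the destructor. The sketch below illustrates the RAII variant only in broad strokes; the wrapper class, stand-in structs, and names are invented for the example and are not code from this repository.

// Illustrative sketch only -- not code from this repository. Stand-in
// structs mirror the shape of CoreAudio's AudioBuffer/AudioBufferList so
// the example compiles anywhere.
#include <cstddef>
#include <cstdint>
#include <memory>

using Byte = std::uint8_t;

struct ABuffer {                  // stand-in for ::AudioBuffer
    std::uint32_t mNumberChannels;
    std::uint32_t mDataByteSize;
    void* mData;
};

struct ABufferList {              // stand-in for ::AudioBufferList
    std::uint32_t mNumberBuffers;
    ABuffer mBuffers[1];          // variable-length array in the real API
};

// One owning allocation covering the list header plus per-channel storage.
class CaptureBuffers {
public:
    CaptureBuffers(std::uint32_t channels, std::uint32_t bytesPerBuffer)
    {
        const std::size_t header =
            offsetof(ABufferList, mBuffers) + sizeof(ABuffer) * channels;
        raw_.reset(new Byte[header + std::size_t{bytesPerBuffer} * channels]);
        list_ = reinterpret_cast<ABufferList*>(raw_.get());
        list_->mNumberBuffers = channels;
        for (std::uint32_t i = 0; i < channels; ++i) {
            list_->mBuffers[i].mNumberChannels = 1;
            list_->mBuffers[i].mDataByteSize = bytesPerBuffer;
            list_->mBuffers[i].mData =
                raw_.get() + header + std::size_t{bytesPerBuffer} * i;
        }
    }
    ABufferList* get() const { return list_; }
    // No destructor needed: the unique_ptr releases the whole block, so
    // there is no manual free() loop.
private:
    std::unique_ptr<Byte[]> raw_;
    ABufferList* list_ = nullptr;
};

Constructing something like CaptureBuffers bufs{channels, bufferSizeBytes} (hypothetical names) would stand in for both the malloc() loop in initAudioLayerIO() and the free() loop that the restored destructor reintroduces below.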
parent e4e67249
@@ -27,7 +27,8 @@
 #include "audiodevice.h"
 #include <cmath>
-#include <vector>
+#include <thread>
+#include <atomic>
 
 namespace ring {
 
@@ -38,52 +39,55 @@ CoreLayer::CoreLayer(const AudioPreference &pref)
     , indexOut_(pref.getAlsaCardout())
     , indexRing_(pref.getAlsaCardring())
     , playbackBuff_(0, audioFormat_)
+    , captureBuff_(0)
     , mainRingBuffer_(Manager::instance().getRingBufferPool().getRingBuffer(RingBufferPool::DEFAULT_ID))
 {}
 
 CoreLayer::~CoreLayer()
-{}
+{
+    if (captureBuff_) {
+        for (UInt32 i = 0; i < captureBuff_->mNumberBuffers; ++i)
+            free(captureBuff_->mBuffers[i].mData);
+        free(captureBuff_);
+        captureBuff_ = 0;
+    }
+}
 
-std::vector<std::string>
-CoreLayer::getCaptureDeviceList() const
+std::vector<std::string> CoreLayer::getCaptureDeviceList() const
 {
     std::vector<std::string> ret;
 #if !TARGET_OS_IPHONE
-    for (const auto& x : getDeviceList(true))
+    for (auto x : getDeviceList(true))
         ret.push_back(x.name_);
 #endif
     return ret;
 }
 
-std::vector<std::string>
-CoreLayer::getPlaybackDeviceList() const
+std::vector<std::string> CoreLayer::getPlaybackDeviceList() const
 {
     std::vector<std::string> ret;
 #if !TARGET_OS_IPHONE
-    for (const auto& x : getDeviceList(false))
+    for (auto x : getDeviceList(false))
        ret.push_back(x.name_);
 #endif
     return ret;
 }
 
-int
-CoreLayer::getAudioDeviceIndex(const std::string& name, DeviceType type) const
+int CoreLayer::getAudioDeviceIndex(const std::string& name, DeviceType type) const
 {
     return 0;
 }
 
-std::string
-CoreLayer::getAudioDeviceName(int index, DeviceType type) const
+std::string CoreLayer::getAudioDeviceName(int index, DeviceType type) const
 {
     return "";
 }
 
-void
-CoreLayer::initAudioLayerIO()
+void CoreLayer::initAudioLayerIO()
 {
     // OS X uses Audio Units for output. Steps:
     // 1) Create a description.
@@ -103,8 +107,8 @@ CoreLayer::initAudioLayerIO()
     desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
     desc.componentManufacturer = kAudioUnitManufacturer_Apple;
 
-    auto comp = AudioComponentFindNext(nullptr, &desc);
-    if (comp == nullptr) {
+    AudioComponent comp = AudioComponentFindNext(NULL, &desc);
+    if (comp == NULL) {
         RING_ERR("Can't find default output audio component.");
         return;
     }
@@ -121,8 +125,7 @@ CoreLayer::initAudioLayerIO()
                                   &info,
                                   &size));
 
-    audioFormat_ = {static_cast<unsigned int>(info.mSampleRate),
-                    static_cast<unsigned int>(info.mChannelsPerFrame)};
+    audioFormat_ = {(unsigned int)info.mSampleRate, (unsigned int)info.mChannelsPerFrame};
 
     checkErr(AudioUnitGetProperty(ioUnit_,
                                   kAudioUnitProperty_StreamFormat,
@@ -151,8 +154,7 @@ CoreLayer::initAudioLayerIO()
                                   &info,
                                   &size));
 
-    audioInputFormat_ = {static_cast<unsigned int>(info.mSampleRate),
-                         static_cast<unsigned int>(info.mChannelsPerFrame)};
+    audioInputFormat_ = {(unsigned int)info.mSampleRate, (unsigned int)info.mChannelsPerFrame};
     hardwareInputFormatAvailable(audioInputFormat_);
 
     // Set format on output *SCOPE* in input *BUS*.
@@ -194,15 +196,15 @@ CoreLayer::initAudioLayerIO()
 #endif
 
     UInt32 bufferSizeBytes = bufferSizeFrames * sizeof(Float32);
-    size = offsetof(AudioBufferList, mBuffers) + (sizeof(AudioBuffer) * info.mChannelsPerFrame);
-    rawBuff_.reset(new Byte[size + bufferSizeBytes * info.mChannelsPerFrame]);
-    captureBuff_ = reinterpret_cast<::AudioBufferList*>(rawBuff_.get());
+    size = offsetof(AudioBufferList, mBuffers[0]) +
+        (sizeof(AudioBuffer) * info.mChannelsPerFrame);
+    captureBuff_ = (AudioBufferList *)malloc(size);
     captureBuff_->mNumberBuffers = info.mChannelsPerFrame;
 
     for (UInt32 i = 0; i < captureBuff_->mNumberBuffers; ++i) {
         captureBuff_->mBuffers[i].mNumberChannels = 1;
         captureBuff_->mBuffers[i].mDataByteSize = bufferSizeBytes;
-        captureBuff_->mBuffers[i].mData = rawBuff_.get() + bufferSizeBytes * i;
+        captureBuff_->mBuffers[i].mData = malloc(bufferSizeBytes);
     }
 
     // Input callback setup.
@@ -230,8 +232,7 @@ CoreLayer::initAudioLayerIO()
                                   sizeof(AURenderCallbackStruct)));
 }
 
-void
-CoreLayer::startStream()
+void CoreLayer::startStream()
 {
     RING_DBG("START STREAM");
 
@@ -251,16 +252,14 @@ CoreLayer::startStream()
     checkErr(AudioOutputUnitStart(ioUnit_));
 }
 
-void
-CoreLayer::destroyAudioLayer()
+void CoreLayer::destroyAudioLayer()
 {
     AudioOutputUnitStop(ioUnit_);
     AudioUnitUninitialize(ioUnit_);
     AudioComponentInstanceDispose(ioUnit_);
 }
 
-void
-CoreLayer::stopStream()
+void CoreLayer::stopStream()
 {
     RING_DBG("STOP STREAM");
 
@@ -280,8 +279,7 @@ CoreLayer::stopStream()
 //// PRIVATE /////
 
-OSStatus
-CoreLayer::outputCallback(void* inRefCon,
+OSStatus CoreLayer::outputCallback(void* inRefCon,
                           AudioUnitRenderActionFlags* ioActionFlags,
                           const AudioTimeStamp* inTimeStamp,
                           UInt32 inBusNumber,
@@ -292,8 +290,7 @@ CoreLayer::outputCallback(void* inRefCon,
     return kAudioServicesNoError;
 }
 
-void
-CoreLayer::write(AudioUnitRenderActionFlags* ioActionFlags,
+void CoreLayer::write(AudioUnitRenderActionFlags* ioActionFlags,
                  const AudioTimeStamp* inTimeStamp,
                  UInt32 inBusNumber,
                  UInt32 inNumberFrames,
@@ -306,29 +303,26 @@ CoreLayer::write(AudioUnitRenderActionFlags* ioActionFlags,
     if (toPlay.frames() == 0) {
         for (int i = 0; i < audioFormat_.nb_channels; ++i)
-            std::fill_n(reinterpret_cast<Float32*>(ioData->mBuffers[i].mData),
-                        ioData->mBuffers[i].mDataByteSize, 0);
-    } else {
+            memset((Float32*)ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
+    }
+    else {
         for (int i = 0; i < audioFormat_.nb_channels; ++i)
-            toPlay.channelToFloat(reinterpret_cast<Float32*>(ioData->mBuffers[i].mData), i);
+            toPlay.channelToFloat((Float32*)ioData->mBuffers[i].mData, i);
     }
 }
 
-OSStatus
-CoreLayer::inputCallback(void* inRefCon,
+OSStatus CoreLayer::inputCallback(void* inRefCon,
                          AudioUnitRenderActionFlags* ioActionFlags,
                          const AudioTimeStamp* inTimeStamp,
                          UInt32 inBusNumber,
                          UInt32 inNumberFrames,
                          AudioBufferList* ioData)
 {
-    static_cast<CoreLayer*>(inRefCon)->read(ioActionFlags, inTimeStamp, inBusNumber,
-                                            inNumberFrames, ioData);
+    static_cast<CoreLayer*>(inRefCon)->read(ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, ioData);
     return kAudioServicesNoError;
 }
 
-void
-CoreLayer::read(AudioUnitRenderActionFlags* ioActionFlags,
+void CoreLayer::read(AudioUnitRenderActionFlags* ioActionFlags,
                 const AudioTimeStamp* inTimeStamp,
                 UInt32 inBusNumber,
                 UInt32 inNumberFrames,
@@ -361,12 +355,12 @@ CoreLayer::read(AudioUnitRenderActionFlags* ioActionFlags,
     bool resample = info.mSampleRate != mainBufferFormat.sample_rate;
 
     // FIXME: Performance! There *must* be a better way. This is testing only.
-    auto inBuff = AudioBuffer {inNumberFrames, audioInputFormat_};
+    AudioBuffer inBuff(inNumberFrames, audioInputFormat_);
 
     for (int i = 0; i < info.mChannelsPerFrame; ++i) {
-        auto data = reinterpret_cast<Float32*>(captureBuff_->mBuffers[i].mData);
+        Float32* data = (Float32*)captureBuff_->mBuffers[i].mData;
         for (int j = 0; j < inNumberFrames; ++j) {
-            (*inBuff.getChannel(i))[j] = static_cast<AudioSample>(data[j] / .000030517578125f);
+            (*inBuff.getChannel(i))[j] = (AudioSample)((data)[j] / .000030517578125f);
         }
     }
 
@@ -375,8 +369,8 @@ CoreLayer::read(AudioUnitRenderActionFlags* ioActionFlags,
         //FIXME: May be a multiplication, check alsa vs pulse implementation.
-        UInt32 outSamples = inNumberFrames / (static_cast<double>(audioInputFormat_.sample_rate) / mainBufferFormat.sample_rate);
-        auto out = AudioBuffer {outSamples, mainBufferFormat};
+        int outSamples = inNumberFrames / (static_cast<double>(audioInputFormat_.sample_rate) / mainBufferFormat.sample_rate);
+        AudioBuffer out(outSamples, mainBufferFormat);
         inputResampler_->resample(inBuff, out);
         dcblocker_.process(out);
         mainRingBuffer_->put(out);
@@ -406,8 +400,7 @@ void CoreLayer::updatePreference(AudioPreference &preference, int index, DeviceT
     }
 }
 
-std::vector<AudioDevice>
-CoreLayer::getDeviceList(bool getCapture) const
+std::vector<AudioDevice> CoreLayer::getDeviceList(bool getCapture) const
 {
     std::vector<AudioDevice> ret;
 #if !TARGET_OS_IPHONE
@@ -416,31 +409,31 @@ CoreLayer::getDeviceList(bool getCapture) const
     AudioObjectPropertyAddress theAddress = {
         kAudioHardwarePropertyDevices,
         kAudioObjectPropertyScopeGlobal,
-        kAudioObjectPropertyElementMaster
-    };
+        kAudioObjectPropertyElementMaster };
 
     verify_noerr(AudioObjectGetPropertyDataSize(kAudioObjectSystemObject,
                                                 &theAddress,
                                                 0,
-                                                nullptr,
+                                                NULL,
                                                 &propsize));
 
-    std::size_t nDevices = propsize / sizeof(AudioDeviceID);
-    auto devids = std::vector<AudioDeviceID>(nDevices);
+    size_t nDevices = propsize / sizeof(AudioDeviceID);
+    AudioDeviceID *devids = new AudioDeviceID[nDevices];
 
    verify_noerr(AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                            &theAddress,
                                            0,
-                                           nullptr,
+                                           NULL,
                                            &propsize,
-                                           devids.data()));
+                                           devids));
 
     for (int i = 0; i < nDevices; ++i) {
-        auto dev = AudioDevice {devids[i], getCapture};
+        AudioDevice dev(devids[i], getCapture);
         if (dev.channels_ > 0) { // Channels < 0 if inactive.
-            ret.push_back(std::move(dev));
+            ret.push_back(dev);
        }
    }
+    delete[] devids;
 #endif
     return ret;
 }
...
@@ -162,8 +162,7 @@ class CoreLayer : public AudioLayer {
        /** Non-interleaved audio buffers */
        AudioBuffer playbackBuff_;
-       ::AudioBufferList* captureBuff_ {nullptr}; // CoreAudio buffer (pointer is casted rawBuff_)
-       std::unique_ptr<Byte[]> rawBuff_; // raw allocation of captureBuff_
+       ::AudioBufferList* captureBuff_; // CoreAudio buffer.
        /** Interleaved buffer */
        std::vector<AudioSample> playbackIBuff_;
...
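One detail worth noting in CoreLayer::read() above: the constant .000030517578125f is exactly 2^-15 = 1/32768, so dividing a normalized Float32 capture sample by it simply scales [-1.0, 1.0] to the 16-bit sample range. A minimal standalone sketch of that arithmetic (assuming AudioSample is a 16-bit integer, which this diff does not state):

// Standalone sketch of the Float32 -> 16-bit scaling used in read().
// Dividing by 2^-15 (0.000030517578125) equals multiplying by 32768.
#include <cstdint>
#include <iostream>

using AudioSample = std::int16_t; // assumption for the example

static AudioSample floatToSample(float f)
{
    return static_cast<AudioSample>(f / .000030517578125f); // == f * 32768
}

int main()
{
    std::cout << floatToSample(0.5f) << '\n';   // prints 16384
    std::cout << floatToSample(-0.25f) << '\n'; // prints -8192
}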