Commit 7fa8fc41 authored by Andreas Traczyk

coreaudio: implement audiolayer for iOS

- Separates the coreaudio implementations for iOS and OS X.
- Manages input/output port overrides when the layer is reset via calls
  to Dring::setAudioOutputDevice in response to an audio route override
  event listened for by the iOS client.
- TODO: use the AVAudioSession API with graphs in Objective-C++ or,
  optimally, in Swift in the iOS/OS X clients.

Change-Id: I6ba34a8bdce7bedd3b9e9ca9bbcd2cc8caccf3f4
parent 3b233f11
......@@ -62,6 +62,7 @@ void AudioLayer::hardwareFormatAvailable(AudioFormat playback)
 void AudioLayer::hardwareInputFormatAvailable(AudioFormat capture)
 {
     RING_DBG("Hardware input audio format available : %s", capture.toString().c_str());
+    inputResampler_->setFormat(capture);
 }
......
......@@ -2,11 +2,12 @@ include $(top_srcdir)/globals.mk
+if HAVE_OSX
 noinst_LTLIBRARIES = libcoreaudiolayer.la
+libcoreaudiolayer_la_SOURCES = osx/corelayer.cpp osx/corelayer.h osx/audiodevice.cpp osx/audiodevice.h
+endif
+if HAVE_IOS
+noinst_LTLIBRARIES = libcoreaudiolayer.la
+libcoreaudiolayer_la_SOURCES = ios/corelayer.cpp ios/corelayer.h
+endif
-libcoreaudiolayer_la_SOURCES = corelayer.cpp corelayer.h audiodevice.cpp audiodevice.h
 libcoreaudiolayer_la_CXXFLAGS = -I$(top_srcdir)/src
/*
* Copyright (C) 2004-2018 Savoir-faire Linux Inc.
*
* Author: Philippe Groarke <philippe.groarke@savoirfairelinux.com>
* Author: Andreas Traczyk <andreas.traczyk@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "corelayer.h"
#include "manager.h"
#include "noncopyable.h"
#include "audio/resampler.h"
#include "audio/ringbufferpool.h"
#include "audio/ringbuffer.h"
#include <cmath>
#include <vector>
namespace ring {

// AudioLayer implementation.
CoreLayer::CoreLayer(const AudioPreference &pref)
    : AudioLayer(pref)
    , indexIn_(pref.getAlsaCardin())
    , indexOut_(pref.getAlsaCardout())
    , indexRing_(pref.getAlsaCardring())
    , playbackBuff_(0, audioFormat_)
    , mainRingBuffer_(Manager::instance().getRingBufferPool().getRingBuffer(RingBufferPool::DEFAULT_ID))
{}
CoreLayer::~CoreLayer()
{
    stopStream();
}

std::vector<std::string>
CoreLayer::getCaptureDeviceList() const
{
    std::vector<std::string> ret;
    return ret;
}

std::vector<std::string>
CoreLayer::getPlaybackDeviceList() const
{
    std::vector<std::string> ret;
    // No need to enumerate devices for iOS.
    // The notion of input devices can be ignored, and output devices can
    // describe input/output pairs.
    // Unavailable options like the receiver on iPad can be ignored by the client.
    ret.assign({"built_in_spk", "bluetooth", "headphones", "receiver", "dummy"});
    return ret;
}

int
CoreLayer::getAudioDeviceIndex(const std::string& name, DeviceType type) const
{
    return 0;
}

std::string
CoreLayer::getAudioDeviceName(int index, DeviceType type) const
{
    return "";
}
void
CoreLayer::initAudioLayerIO()
{
    RING_DBG("iOS CoreLayer - initializing audio session");

    AudioComponentDescription outputUnitDescription;
    outputUnitDescription.componentType         = kAudioUnitType_Output;
    outputUnitDescription.componentSubType      = kAudioUnitSubType_VoiceProcessingIO;
    outputUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    outputUnitDescription.componentFlags        = 0;
    outputUnitDescription.componentFlagsMask    = 0;

    auto comp = AudioComponentFindNext(nullptr, &outputUnitDescription);
    if (comp == nullptr) {
        RING_ERR("Can't find default output audio component.");
        return;
    }
    checkErr(AudioComponentInstanceNew(comp, &ioUnit_));

    UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                            sizeof(audioCategory),
                            &audioCategory);

    auto playBackDeviceList = getPlaybackDeviceList();
    RING_DBG("Setting playback device: %s", playBackDeviceList[indexOut_].c_str());
    switch (indexOut_) {
    case 0: { // "built_in_spk": force the route to the speaker
        UInt32 setSpeaker = 1;
        checkErr(AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
                                         sizeof(setSpeaker),
                                         &setSpeaker));
        break;
    }
    case 1: // "bluetooth"
    case 2: // "headphones"
        break;
    case 3: { // "receiver": clear any route override
        UInt32 audioRouteOverrideNone = kAudioSessionOverrideAudioRoute_None;
        checkErr(AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute,
                                         sizeof(audioRouteOverrideNone),
                                         &audioRouteOverrideNone));
        break;
    }
    case 4: // "dummy"
        break;
    }

    setupOutputBus();
    setupInputBus();
    bindCallbacks();
}
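
// --- Editor's sketch (not part of this commit) -------------------------------
// The commit message says the iOS client listens for audio route override
// events and resets the layer through Dring::setAudioOutputDevice. With the
// same (deprecated) AudioSession C API used above, observing route changes
// could look like this; the callback body and the index it would pass are
// assumptions, not the client's actual code.
static void onAudioRouteChange(void* /*clientData*/,
                               AudioSessionPropertyID propertyId,
                               UInt32 /*dataSize*/,
                               const void* /*data*/)
{
    if (propertyId != kAudioSessionProperty_AudioRouteChange)
        return;
    // Here the client would map the new route to an index into
    // getPlaybackDeviceList() (0 = "built_in_spk", ...) and re-apply it via
    // Dring::setAudioOutputDevice(index), which resets this layer.
}

static void installRouteChangeListener()
{
    checkErr(AudioSessionAddPropertyListener(kAudioSessionProperty_AudioRouteChange,
                                             onAudioRouteChange,
                                             nullptr));
}
// ------------------------------------------------------------------------------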
void
CoreLayer::setupOutputBus()
{
    RING_DBG("iOS CoreLayer - initializing output bus");

    AudioUnitScope outputBus = 0;
    UInt32 size;

    Float64 outSampleRate;
    size = sizeof(outSampleRate);
    AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate,
                            &size,
                            &outSampleRate);

    // Query the unit's current output-side stream format; the ASBD must be
    // filled by this call before its channel count can be read.
    AudioStreamBasicDescription outputASBD;
    size = sizeof(outputASBD);
    checkErr(AudioUnitGetProperty(ioUnit_,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  outputBus,
                                  &outputASBD,
                                  &size));

    audioFormat_ = {static_cast<unsigned int>(outSampleRate),
                    static_cast<unsigned int>(outputASBD.mChannelsPerFrame)};

    // Only change the sample rate.
    outputASBD.mSampleRate = audioFormat_.sample_rate;

    // Set the output stream format.
    checkErr(AudioUnitSetProperty(ioUnit_,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  outputBus,
                                  &outputASBD,
                                  size));

    hardwareFormatAvailable(audioFormat_);
}
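
// Editor's note, assuming standard RemoteIO/VoiceProcessingIO semantics: the
// unit's buses and scopes form a small matrix, which is why setupOutputBus()
// writes the *input* scope of bus 0 while setupInputBus() below works on bus 1:
//
//   bus 1 (input)  : input scope  = microphone side (hardware, fixed)
//                    output scope = what the app pulls via AudioUnitRender()
//   bus 0 (output) : input scope  = what the app supplies in its render callback
//                    output scope = speaker side (hardware, fixed)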
void
CoreLayer::setupInputBus()
{
    RING_DBG("Initializing input bus");

    AudioUnitScope inputBus = 1;
    UInt32 size;

    // Enable input on the voice-processing unit.
    UInt32 flag = 1;
    checkErr(AudioUnitSetProperty(ioUnit_,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  inputBus,
                                  &flag,
                                  sizeof(flag)));

    // Setup audio formats.
    AudioStreamBasicDescription inputASBD;
    size = sizeof(inputASBD);
    checkErr(AudioUnitGetProperty(ioUnit_,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  inputBus,
                                  &inputASBD,
                                  &size));

    Float64 inSampleRate;
    size = sizeof(inSampleRate);
    AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate,
                            &size,
                            &inSampleRate);
    inputASBD.mSampleRate = inSampleRate;
    inputASBD.mFormatID = kAudioFormatLinearPCM;
    inputASBD.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;

    audioInputFormat_ = {static_cast<unsigned int>(inputASBD.mSampleRate),
                         static_cast<unsigned int>(inputASBD.mChannelsPerFrame)};
    hardwareInputFormatAvailable(audioInputFormat_);

    // Keep some values around so the read callback does not have to query
    // them every time it fires.
    inSampleRate_ = inputASBD.mSampleRate;
    inChannelsPerFrame_ = inputASBD.mChannelsPerFrame;

    // Set the format on the output *SCOPE* of the input *BUS*.
    checkErr(AudioUnitGetProperty(ioUnit_,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  inputBus,
                                  &inputASBD,
                                  &size));

    // Keep everything else and change only the sample rate (or else SPLOSION!!!).
    inputASBD.mSampleRate = audioInputFormat_.sample_rate;
    size = sizeof(inputASBD);
    checkErr(AudioUnitSetProperty(ioUnit_,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  inputBus,
                                  &inputASBD,
                                  size));

    // Input buffer setup. Note that ioData is empty in the input callback and
    // we have to store the rendered data in our own buffer.
    flag = 0;
    AudioUnitSetProperty(ioUnit_,
                         kAudioUnitProperty_ShouldAllocateBuffer,
                         kAudioUnitScope_Output,
                         inputBus,
                         &flag,
                         sizeof(flag));

    Float32 bufferDuration;
    size = sizeof(bufferDuration);
    AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration,
                            &size,
                            &bufferDuration);
    UInt32 bufferSizeFrames = audioInputFormat_.sample_rate * bufferDuration;
    UInt32 bufferSizeBytes = bufferSizeFrames * sizeof(Float32);

    // One raw allocation holds the AudioBufferList header followed by the
    // per-channel (non-interleaved) sample storage.
    size = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * inputASBD.mChannelsPerFrame);
    rawBuff_.reset(new Byte[size + bufferSizeBytes * inputASBD.mChannelsPerFrame]);
    captureBuff_ = reinterpret_cast<::AudioBufferList*>(rawBuff_.get());
    captureBuff_->mNumberBuffers = inputASBD.mChannelsPerFrame;

    auto bufferBasePtr = rawBuff_.get() + size;
    for (UInt32 i = 0; i < captureBuff_->mNumberBuffers; ++i) {
        captureBuff_->mBuffers[i].mNumberChannels = 1;
        captureBuff_->mBuffers[i].mDataByteSize = bufferSizeBytes;
        captureBuff_->mBuffers[i].mData = bufferBasePtr + bufferSizeBytes * i;
    }
}
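
// --- Editor's sketch (not part of this commit) -------------------------------
// The allocation above packs the variable-length AudioBufferList header and
// the per-channel sample storage into a single raw block. The same layout as
// a hypothetical standalone helper:
static AudioBufferList* makeNonInterleavedList(std::unique_ptr<Byte[]>& storage,
                                               UInt32 channels,
                                               UInt32 bytesPerChannel)
{
    // Header size: the struct is declared with one AudioBuffer; real lists
    // carry mNumberBuffers of them back to back.
    const size_t headerBytes = offsetof(AudioBufferList, mBuffers[0])
                               + sizeof(AudioBuffer) * channels;
    storage.reset(new Byte[headerBytes + bytesPerChannel * channels]);
    auto* list = reinterpret_cast<AudioBufferList*>(storage.get());
    list->mNumberBuffers = channels;
    auto* samples = storage.get() + headerBytes;
    for (UInt32 i = 0; i < channels; ++i) {
        list->mBuffers[i].mNumberChannels = 1;            // non-interleaved
        list->mBuffers[i].mDataByteSize = bytesPerChannel;
        list->mBuffers[i].mData = samples + bytesPerChannel * i;
    }
    return list;
}
// ------------------------------------------------------------------------------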
void
CoreLayer::bindCallbacks()
{
    AURenderCallbackStruct callback;
    AudioUnitScope outputBus = 0;
    AudioUnitScope inputBus = 1;

    // Output callback setup.
    callback.inputProc = outputCallback;
    callback.inputProcRefCon = this;
    checkErr(AudioUnitSetProperty(ioUnit_,
                                  kAudioUnitProperty_SetRenderCallback,
                                  kAudioUnitScope_Global,
                                  outputBus,
                                  &callback,
                                  sizeof(AURenderCallbackStruct)));

    // Input callback setup.
    callback.inputProc = inputCallback;
    callback.inputProcRefCon = this;
    checkErr(AudioUnitSetProperty(ioUnit_,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Global,
                                  inputBus,
                                  &callback,
                                  sizeof(AURenderCallbackStruct)));
}
void
CoreLayer::startStream()
{
    RING_DBG("iOS CoreLayer - Start Stream");

    {
        std::lock_guard<std::mutex> lock(mutex_);
        if (status_ != Status::Idle)
            return;
        status_ = Status::Started;
    }

    dcblocker_.reset();
    initAudioLayerIO();

    // Run.
    checkErr(AudioUnitInitialize(ioUnit_));
    checkErr(AudioOutputUnitStart(ioUnit_));
}

void
CoreLayer::destroyAudioLayer()
{
    AudioOutputUnitStop(ioUnit_);
    AudioUnitUninitialize(ioUnit_);
    AudioComponentInstanceDispose(ioUnit_);
}

void
CoreLayer::stopStream()
{
    RING_DBG("iOS CoreLayer - Stop Stream");

    {
        std::lock_guard<std::mutex> lock(mutex_);
        if (status_ != Status::Started)
            return;
        status_ = Status::Idle;
    }

    destroyAudioLayer();

    /* Flush the ring buffers */
    flushUrgent();
    flushMain();
}
OSStatus
CoreLayer::outputCallback(void* inRefCon,
                          AudioUnitRenderActionFlags* ioActionFlags,
                          const AudioTimeStamp* inTimeStamp,
                          UInt32 inBusNumber,
                          UInt32 inNumberFrames,
                          AudioBufferList* ioData)
{
    static_cast<CoreLayer*>(inRefCon)->write(ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, ioData);
    return kAudioServicesNoError;
}

void
CoreLayer::write(AudioUnitRenderActionFlags* ioActionFlags,
                 const AudioTimeStamp* inTimeStamp,
                 UInt32 inBusNumber,
                 UInt32 inNumberFrames,
                 AudioBufferList* ioData)
{
    // Urgent (ringtone) data has priority over the regular playback buffer.
    auto& ringBuff = getToRing(audioFormat_, inNumberFrames);
    auto& playBuff = getToPlay(audioFormat_, inNumberFrames);
    auto& toPlay = ringBuff.frames() > 0 ? ringBuff : playBuff;

    if (toPlay.frames() == 0) {
        // Nothing to play: output silence.
        for (unsigned i = 0; i < audioFormat_.nb_channels; ++i) {
            std::fill_n(reinterpret_cast<Float32*>(ioData->mBuffers[i].mData),
                        ioData->mBuffers[i].mDataByteSize / sizeof(Float32), 0);
        }
    } else {
        for (unsigned i = 0; i < audioFormat_.nb_channels; ++i) {
            toPlay.channelToFloat(reinterpret_cast<Float32*>(ioData->mBuffers[i].mData), i);
        }
    }
}
OSStatus
CoreLayer::inputCallback(void* inRefCon,
                         AudioUnitRenderActionFlags* ioActionFlags,
                         const AudioTimeStamp* inTimeStamp,
                         UInt32 inBusNumber,
                         UInt32 inNumberFrames,
                         AudioBufferList* ioData)
{
    static_cast<CoreLayer*>(inRefCon)->read(ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, ioData);
    return kAudioServicesNoError;
}

void
CoreLayer::read(AudioUnitRenderActionFlags* ioActionFlags,
                const AudioTimeStamp* inTimeStamp,
                UInt32 inBusNumber,
                UInt32 inNumberFrames,
                AudioBufferList* ioData)
{
    if (inNumberFrames == 0) {
        RING_WARN("No frames for input.");
        return;
    }

    // Write the mic samples into our own buffer (ioData is empty; see
    // kAudioUnitProperty_ShouldAllocateBuffer in setupInputBus()).
    checkErr(AudioUnitRender(ioUnit_,
                             ioActionFlags,
                             inTimeStamp,
                             inBusNumber,
                             inNumberFrames,
                             captureBuff_));

    // Add them to the daemon's ring buffer.
    const AudioFormat mainBufferFormat = Manager::instance().getRingBufferPool().getInternalAudioFormat();
    bool resample = inSampleRate_ != mainBufferFormat.sample_rate;

    // FIXME: Performance! There *must* be a better way. This is testing only.
    // Convert the non-interleaved Float32 capture data to AudioSample (s16).
    auto inBuff = AudioBuffer {inNumberFrames, audioInputFormat_};
    for (unsigned i = 0; i < inChannelsPerFrame_; ++i) {
        auto data = reinterpret_cast<Float32*>(captureBuff_->mBuffers[i].mData);
        for (unsigned j = 0; j < inNumberFrames; ++j) {
            (*inBuff.getChannel(i))[j] = static_cast<AudioSample>(data[j] * 32768);
        }
    }

    if (resample) {
        // The output frame count scales by the ratio of the sample rates,
        // matching the alsa/pulse implementations.
        UInt32 outSamples = inNumberFrames * (mainBufferFormat.sample_rate / static_cast<double>(audioInputFormat_.sample_rate));
        auto out = AudioBuffer {outSamples, mainBufferFormat};
        inputResampler_->resample(inBuff, out);
        dcblocker_.process(out);
        mainRingBuffer_->put(out);
    } else {
        dcblocker_.process(inBuff);
        mainRingBuffer_->put(inBuff);
    }
}
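
// --- Editor's sketch (not part of this commit) -------------------------------
// One possible answer to the FIXME above: let Accelerate's vDSP do the
// Float32 -> s16 conversion per channel instead of a scalar loop. This
// assumes an extra #include <Accelerate/Accelerate.h>; vDSP_vsmul() and
// vDSP_vfix16() are real vDSP calls, the helper name is hypothetical.
static void floatToSamples(const Float32* src, AudioSample* dst, size_t frames)
{
    std::vector<Float32> scaled(frames);
    Float32 scale = 32767.f; // 32767 (not 32768) avoids overflow at +1.0
    vDSP_vsmul(src, 1, &scale, scaled.data(), 1, frames);  // scale to s16 range
    vDSP_vfix16(scaled.data(), 1, dst, 1, frames);         // truncate to int16
}
// ------------------------------------------------------------------------------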
void CoreLayer::updatePreference(AudioPreference &preference, int index, DeviceType type)
{
    switch (type) {
        case DeviceType::PLAYBACK:
            preference.setAlsaCardout(index);
            break;
        case DeviceType::CAPTURE:
            preference.setAlsaCardin(index);
            break;
        case DeviceType::RINGTONE:
            preference.setAlsaCardring(index);
            break;
        default:
            break;
    }
}
} // namespace ring
/*
* Copyright (C) 2004-2018 Savoir-faire Linux Inc.
*
* Author: Philippe Groarke <philippe.groarke@savoirfairelinux.com>
* Author: Andreas Traczyk <andreas.traczyk@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef CORE_LAYER_H_
#define CORE_LAYER_H_
#include "audio/audiolayer.h"
#include "noncopyable.h"
#include <CoreFoundation/CoreFoundation.h>
#include <AudioToolbox/AudioToolbox.h>
#define checkErr(err) \
    if (err) { \
        RING_ERR("CoreAudio Error: %ld", static_cast<long>(err)); \
    }
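
// Editor's sketch (not part of this commit): checkErr() logs and continues.
// A variant that also reports the call site and decodes four-char OSStatus
// codes could look like this (CFSwapInt32HostToBig comes from CoreFoundation,
// included above; the macro name is hypothetical):
#define checkErrVerbose(err) do { \
        OSStatus e_ = (err); \
        if (e_) { \
            UInt32 be_ = CFSwapInt32HostToBig(static_cast<UInt32>(e_)); \
            const char* p_ = reinterpret_cast<const char*>(&be_); \
            char cc_[5] = { p_[0], p_[1], p_[2], p_[3], 0 }; /* printable only for four-char codes */ \
            RING_ERR("CoreAudio error %ld ('%s') at %s:%d", \
                     static_cast<long>(e_), cc_, __FILE__, __LINE__); \
        } \
    } while (0)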
/**
 * @file corelayer.h
 * @brief Main iOS sound class. Manages the data transfers between the application and the hardware.
 */
namespace ring {

class RingBuffer;

class CoreLayer : public AudioLayer {
    public:
        CoreLayer(const AudioPreference &pref);
        ~CoreLayer();

        /**
         * Scan the sound cards available on the system
         * @return std::vector<std::string> A vector of strings describing the cards
         */
        virtual std::vector<std::string> getCaptureDeviceList() const;
        virtual std::vector<std::string> getPlaybackDeviceList() const;

        virtual int getAudioDeviceIndex(const std::string& name, DeviceType type) const;
        virtual std::string getAudioDeviceName(int index, DeviceType type) const;

        /**
         * Get the index of the audio card for capture
         * @return int The index of the card used for capture
         */
        virtual int getIndexCapture() const {
            return indexIn_;
        }

        /**
         * Get the index of the audio card for playback
         * @return int The index of the card used for playback
         */
        virtual int getIndexPlayback() const {
            return indexOut_;
        }

        /**
         * Get the index of the audio card for ringtone (could be different from playback)
         * @return int The index of the card used for ringtone
         */
        virtual int getIndexRingtone() const {
            return indexRing_;
        }

        /**
         * Configure the AudioUnit
         */
        void initAudioLayerIO();
        void setupOutputBus();
        void setupInputBus();
        void bindCallbacks();
        int initAudioStreams(AudioUnit *audioUnit);

        /**
         * Start the capture stream and prepare the playback stream.
         * The playback starts according to its threshold.
         * CoreAudio Library API
         */
        virtual void startStream();

        void destroyAudioLayer();

        /**
         * Stop the playback and capture streams.
         * Drops the pending frames and puts the capture and playback handles into the PREPARED state.
         * CoreAudio Library API
         */
        virtual void stopStream();

    private:
        NON_COPYABLE(CoreLayer);

        void initAudioFormat();

        static OSStatus outputCallback(void* inRefCon,
                                       AudioUnitRenderActionFlags* ioActionFlags,
                                       const AudioTimeStamp* inTimeStamp,
                                       UInt32 inBusNumber,
                                       UInt32 inNumberFrames,
                                       AudioBufferList* ioData);

        void write(AudioUnitRenderActionFlags* ioActionFlags,
                   const AudioTimeStamp* inTimeStamp,
                   UInt32 inBusNumber,
                   UInt32 inNumberFrames,
                   AudioBufferList* ioData);

        static OSStatus inputCallback(void* inRefCon,
                                      AudioUnitRenderActionFlags* ioActionFlags,
                                      const AudioTimeStamp* inTimeStamp,
                                      UInt32 inBusNumber,
                                      UInt32 inNumberFrames,
                                      AudioBufferList* ioData);

        void read(AudioUnitRenderActionFlags* ioActionFlags,
                  const AudioTimeStamp* inTimeStamp,
                  UInt32 inBusNumber,
                  UInt32 inNumberFrames,
                  AudioBufferList* ioData);

        virtual void updatePreference(AudioPreference &pref, int index, DeviceType type);

        /**
         * Index of the audio card on which the capture stream is opened
         */
        int indexIn_;

        /**
         * Index of the audio card on which the playback stream is opened
         */
        int indexOut_;

        /**
         * Index of the audio card on which the ringtone stream is opened
         */
        int indexRing_;

        /** Non-interleaved audio buffers */
        AudioBuffer playbackBuff_;
        ::AudioBufferList* captureBuff_ {nullptr}; // CoreAudio buffer (pointer cast from rawBuff_)
        std::unique_ptr<Byte[]> rawBuff_;          // raw allocation backing captureBuff_

        /** Interleaved buffers */
        std::vector<AudioSample> playbackIBuff_;
        std::vector<AudioSample> captureIBuff_;

        AudioUnit ioUnit_;

        Float64 inSampleRate_;
        UInt32 inChannelsPerFrame_;

        std::shared_ptr<RingBuffer> mainRingBuffer_;
};

} // namespace ring

#endif // CORE_LAYER_H_
......@@ -49,10 +49,8 @@ CoreLayer::getCaptureDeviceList() const
 {
     std::vector<std::string> ret;
-#if !TARGET_OS_IPHONE
     for (const auto& x : getDeviceList(true))
         ret.push_back(x.name_);
-#endif
     return ret;
 }
......@@ -62,10 +60,8 @@ CoreLayer::getPlaybackDeviceList() const
 {
     std::vector<std::string> ret;
-#if !TARGET_OS_IPHONE
     for (const auto& x : getDeviceList(false))
         ret.push_back(x.name_);
-#endif
     return ret;
 }
......@@ -179,7 +175,6 @@ CoreLayer::initAudioLayerIO()
     // Input buffer setup. Note that ioData is empty and we have to store data
     // in another buffer.
-#if !TARGET_OS_IPHONE
     UInt32 bufferSizeFrames = 0;
     size = sizeof(UInt32);
     checkErr(AudioUnitGetProperty(ioUnit_,
......@@ -188,14 +183,6 @@ CoreLayer::initAudioLayerIO()
                                   outputBus,
                                   &bufferSizeFrames,
                                   &size));
-#else
-    Float32 bufferDuration;
-    UInt32 propSize = sizeof(Float32);
-    AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration,
-                            &propSize,
-                            &bufferDuration);
-    UInt32 bufferSizeFrames = audioInputFormat_.sample_rate * bufferDuration;
-#endif
     UInt32 bufferSizeBytes = bufferSizeFrames * sizeof(Float32);
     size = offsetof(AudioBufferList, mBuffers) + (sizeof(AudioBuffer) * info.mChannelsPerFrame);
......@@ -406,7 +393,6 @@ std::vector<AudioDevice>
CoreLayer::getDeviceList(bool getCapture) const
 {
     std::vector<AudioDevice> ret;
-#if !TARGET_OS_IPHONE
     UInt32 propsize;
     AudioObjectPropertyAddress theAddress = {
......@@ -437,7 +423,6 @@ CoreLayer::getDeviceList(bool getCapture) const
             ret.push_back(std::move(dev));
         }
     }
-#endif
     return ret;
 }
......
......@@ -25,9 +25,7 @@
#include "noncopyable.h"
#include <CoreFoundation/CoreFoundation.h>
#include <AudioToolbox/AudioToolbox.h>
#if !TARGET_OS_IPHONE
#include <CoreAudio/AudioHardware.h>
#endif
#define checkErr( err) \
if(err) {\
......
......@@ -38,8 +38,15 @@
#include "audio/pulseaudio/pulselayer.h"
#endif
#if HAVE_COREAUDIO
#include "audio/coreaudio/corelayer.h"
#ifdef __APPLE__
#include <TargetConditionals.h>
#endif
#if TARGET_OS_IOS
#include "audio/coreaudio/ios/corelayer.h"
#else
#include "audio/coreaudio/osx/corelayer.h"
#endif /* TARGET_OS_IOS */
#endif /* HAVE_COREAUDIO */
#if HAVE_PORTAUDIO
#include "audio/portaudio/portaudiolayer.h"
#endif
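// --- Editor's sketch (not part of this commit) -------------------------------
// Effect of the include change above: the daemon keeps constructing a single
// CoreLayer type, and the preprocessor decides whether that name binds to the
// iOS or the OS X implementation. Assuming a factory along these lines
// (names hypothetical):
//
//   std::unique_ptr<AudioLayer> makeAudioLayer(const AudioPreference& pref)
//   {
//   #if HAVE_COREAUDIO
//       return std::unique_ptr<AudioLayer>(new CoreLayer(pref)); // iOS or OS X class
//   #else
//       /* other backends elided */
//   #endif
//   }
// ------------------------------------------------------------------------------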
......