Commit c14b731d authored by Andreas Traczyk

ios audio: handle resampling in output callback

- Handles downsampling in the event of a core layer sample rate
  reduction (see the sketch below).
- Handles urgent frames when a call comes in during an existing call.

Change-Id: I0bd3b97ef96595576bc5daef7bea24c3bb32cf26
parent fe5e13c3
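The frame-count arithmetic behind the change can be illustrated with a short, self-contained sketch. The helper name framesToRead and its parameters are purely illustrative and are not part of the commit; the idea, following the logic added to CoreLayer::write(), is that when the pool rate and the hardware output rate differ, the callback must pull ceil(inNumberFrames * poolRate / outRate) frames from the ring buffer pool so that resampling yields exactly the frame count CoreAudio requested, clamped to what the pool can actually deliver.

#include <algorithm>
#include <cmath>
#include <cstddef>

// Illustrative helper (not part of the commit): how many frames to read
// from the ring buffer pool for one output render callback.
//   outFrames  - frames requested by CoreAudio (inNumberFrames)
//   poolRate   - sample rate of the daemon's internal ring buffers
//   outRate    - hardware output sample rate (outSampleRate_)
//   available  - frames currently readable from the pool
static std::size_t framesToRead(std::size_t outFrames, unsigned poolRate,
                                unsigned outRate, std::size_t available)
{
    std::size_t wanted = outFrames;
    if (poolRate != outRate) {
        // Read proportionally more (or fewer) source frames so that
        // resampling produces exactly outFrames frames of output.
        const double factor = poolRate / static_cast<double>(outRate);
        wanted = static_cast<std::size_t>(std::ceil(outFrames * factor));
    }
    // Never request more than the pool currently holds.
    return available > 0 ? std::min(wanted, available) : wanted;
}

For instance, a 44100 Hz pool feeding a 16000 Hz output route turns a 256-frame render request into ceil(256 * 44100 / 16000) = 706 source frames. Urgent frames (e.g. the ringtone of a second incoming call) are served from urgentRingBuffer_ first, and the same number of frames is discarded from the normal pool so the two streams stay aligned.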
@@ -29,6 +29,9 @@
 #include <cmath>
 #include <vector>
 
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
 namespace ring {
 
 // AudioLayer implementation.
@@ -61,7 +64,7 @@ CoreLayer::getPlaybackDeviceList() const
     // The notion of input devices can be ignored, and output devices can describe
     // input/output pairs.
     // Unavailable options like the receiver on iPad can be ignored by the client.
-    ret.assign({"built_in_spk", "bluetooth", "headphones", "receiver", "dummy"});
+    ret.assign({"built_in_spk", "bluetooth", "headphones", "receiver"});
     return ret;
 }
@@ -69,12 +72,17 @@ CoreLayer::getPlaybackDeviceList() const
 int
 CoreLayer::getAudioDeviceIndex(const std::string& name, DeviceType type) const
 {
+    (void) name;
+    (void) index;
+    (void) type;
     return 0;
 }
 
 std::string
 CoreLayer::getAudioDeviceName(int index, DeviceType type) const
 {
+    (void) index;
+    (void) type;
     return "";
 }
...@@ -122,7 +130,8 @@ CoreLayer::initAudioLayerIO() ...@@ -122,7 +130,8 @@ CoreLayer::initAudioLayerIO()
checkErr(AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute, checkErr(AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute,
sizeof(audioRouteOverrideNone), sizeof(audioRouteOverrideNone),
&audioRouteOverrideNone)); &audioRouteOverrideNone));
case 4: break;
default:
break; break;
} }
@@ -147,12 +156,13 @@ CoreLayer::setupOutputBus() {
                             &size,
                             &outSampleRate);
     outputASBD.mSampleRate = outSampleRate;
-    outputASBD.mFormatID = kAudioFormatLinearPCM;
-    outputASBD.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
     audioFormat_ = {static_cast<unsigned int>(outputASBD.mSampleRate),
                     static_cast<unsigned int>(outputASBD.mChannelsPerFrame)};
+    outSampleRate_ = outputASBD.mSampleRate;
+    outChannelsPerFrame_ = outputASBD.mChannelsPerFrame;
     size = sizeof(outputASBD);
     checkErr(AudioUnitGetProperty(ioUnit_,
                                   kAudioUnitProperty_StreamFormat,
@@ -163,6 +173,8 @@ CoreLayer::setupOutputBus() {
     // Only change sample rate.
     outputASBD.mSampleRate = audioFormat_.sample_rate;
+    outputASBD.mFormatID = kAudioFormatLinearPCM;
+    outputASBD.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
 
     // Set output steam format
     checkErr(AudioUnitSetProperty(ioUnit_,
@@ -364,17 +376,75 @@ CoreLayer::write(AudioUnitRenderActionFlags* ioActionFlags,
                  UInt32 inNumberFrames,
                  AudioBufferList* ioData)
 {
-    auto& ringBuff = getToRing(audioFormat_, inNumberFrames);
-    auto& playBuff = getToPlay(audioFormat_, inNumberFrames);
+    (void) ioActionFlags;
+    (void) inTimeStamp;
+    (void) inBusNumber;
+
+    auto& manager = Manager::instance();
+    auto& bufferPool = manager.getRingBufferPool();
+    auto mainBufferFormat = bufferPool.getInternalAudioFormat();
+    const AudioFormat currentOutFormat = { static_cast<unsigned int>(outSampleRate_),
+                                           static_cast<unsigned int>(outChannelsPerFrame_)};
+    auto resample = currentOutFormat.sample_rate != mainBufferFormat.sample_rate;
+    auto normalFramesToGet = bufferPool.availableForGet(RingBufferPool::DEFAULT_ID);
+    auto urgentFramesToGet = urgentRingBuffer_.availableForGet(RingBufferPool::DEFAULT_ID);
+
+    double resampleFactor;
+    decltype(normalFramesToGet) readableSamples;
+    decltype(urgentFramesToGet) readableUrgentSamples;
+    if (resample) {
+        resampleFactor = mainBufferFormat.sample_rate / static_cast<double>(currentOutFormat.sample_rate);
+        readableSamples = std::ceil(inNumberFrames * resampleFactor);
+    } else {
+        readableSamples = inNumberFrames;
+    }
+
+    // incoming call during call
+    if (urgentFramesToGet > 0) {
+        readableUrgentSamples = std::min(readableSamples, urgentFramesToGet);
+        playbackBuff_.setFormat(currentOutFormat);
+        playbackBuff_.resize(readableUrgentSamples);
+        urgentRingBuffer_.get(playbackBuff_, RingBufferPool::DEFAULT_ID);
+        playbackBuff_.applyGain(isPlaybackMuted_ ? 0.0 : playbackGain_);
+        for (unsigned i = 0; i < currentOutFormat.nb_channels; ++i) {
+            playbackBuff_.channelToFloat(reinterpret_cast<Float32*>(ioData->mBuffers[i].mData), i);
+        }
+        manager.getRingBufferPool().discard(readableUrgentSamples, RingBufferPool::DEFAULT_ID);
+    }
+
+    if (normalFramesToGet > 0) {
+        readableSamples = std::min(readableSamples, normalFramesToGet);
+    }
+    auto& ringBuff = getToRing(audioFormat_, readableSamples);
+    auto& playBuff = getToPlay(audioFormat_, readableSamples);
+
     auto& toPlay = ringBuff.frames() > 0 ? ringBuff : playBuff;
+
     if (toPlay.frames() == 0) {
-        for (unsigned i = 0; i < audioFormat_.nb_channels; ++i) {
+        // clear buffer
+        for (unsigned i = 0; i < currentOutFormat.nb_channels; ++i) {
             std::fill_n(reinterpret_cast<Float32*>(ioData->mBuffers[i].mData),
                         ioData->mBuffers[i].mDataByteSize / sizeof(Float32), 0);
         }
+    } else if (resample) {
+        // resample
+        playbackBuff_.setFormat(currentOutFormat);
+        playbackBuff_.resize(readableSamples);
+        resampler_->resample(toPlay, playbackBuff_);
+        playbackBuff_.applyGain(isPlaybackMuted_ ? 0.0 : playbackGain_);
+        for (unsigned i = 0; i < audioFormat_.nb_channels; ++i) {
+            playbackBuff_.channelToFloat(reinterpret_cast<Float32*>(ioData->mBuffers[i].mData), i);
+        }
     } else {
+        // normal play
+        const_cast<AudioBuffer&>(toPlay).applyGain(isPlaybackMuted_ ? 0.0 : playbackGain_);
         for (unsigned i = 0; i < audioFormat_.nb_channels; ++i) {
             toPlay.channelToFloat(reinterpret_cast<Float32*>(ioData->mBuffers[i].mData), i);
         }
...@@ -400,6 +470,8 @@ CoreLayer::read(AudioUnitRenderActionFlags* ioActionFlags, ...@@ -400,6 +470,8 @@ CoreLayer::read(AudioUnitRenderActionFlags* ioActionFlags,
UInt32 inNumberFrames, UInt32 inNumberFrames,
AudioBufferList* ioData) AudioBufferList* ioData)
{ {
(void) ioData;
if (inNumberFrames <= 0) { if (inNumberFrames <= 0) {
RING_WARN("No frames for input."); RING_WARN("No frames for input.");
return; return;
@@ -461,3 +533,5 @@ void CoreLayer::updatePreference(AudioPreference &preference, int index, DeviceT
 }
 
 } // namespace ring
+
+#pragma GCC diagnostic pop
@@ -160,14 +160,12 @@ class CoreLayer : public AudioLayer {
     ::AudioBufferList* captureBuff_ {nullptr}; // CoreAudio buffer (pointer is casted rawBuff_)
     std::unique_ptr<Byte[]> rawBuff_; // raw allocation of captureBuff_
 
-    /** Interleaved buffer */
-    std::vector<AudioSample> playbackIBuff_;
-    std::vector<AudioSample> captureIBuff_;
-
     AudioUnit ioUnit_;
 
     Float64 inSampleRate_;
     UInt32 inChannelsPerFrame_;
+    Float64 outSampleRate_;
+    UInt32 outChannelsPerFrame_;
 
     std::shared_ptr<RingBuffer> mainRingBuffer_;
 };