From a87f1aaef40619a938b0fa73d5b2219b728b4f8d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adrien=20B=C3=A9raud?= <adrien.beraud@savoirfairelinux.com>
Date: Mon, 25 Sep 2023 13:25:41 -0400
Subject: [PATCH] coreaudio: support floating point audio

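Map the hardware AudioStreamBasicDescription to the matching AVSampleFormat
(signed 16/32-bit integer or 32-bit float, interleaved or planar) with a new
getFormatFromStreamDescription() helper, and include that sample format,
along with the sample rate and channel count, when reporting the hardware
input and output formats on iOS and macOS. Descriptions that cannot be
mapped fall back to AV_SAMPLE_FMT_NONE and are logged. A small
audioFormatFromDescription() convenience wrapper is added as well.
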
Change-Id: Ie445116f321e91e13239009b32318cd24cf3c6a7
---
 src/media/audio/coreaudio/ios/corelayer.mm | 35 ++++++++++++++++++++--
 src/media/audio/coreaudio/osx/corelayer.mm | 35 ++++++++++++++++++++--
 2 files changed, 66 insertions(+), 4 deletions(-)

diff --git a/src/media/audio/coreaudio/ios/corelayer.mm b/src/media/audio/coreaudio/ios/corelayer.mm
index ef651a87c6..df8729f800 100644
--- a/src/media/audio/coreaudio/ios/corelayer.mm
+++ b/src/media/audio/coreaudio/ios/corelayer.mm
@@ -36,6 +36,35 @@ dispatch_queue_t audioConfigurationQueueIOS() {
     return queue;
 }
 
+enum AVSampleFormat
+getFormatFromStreamDescription(const AudioStreamBasicDescription& descr) {
+    if (descr.mFormatID == kAudioFormatLinearPCM) {
+        bool isPlanar = descr.mFormatFlags & kAudioFormatFlagIsNonInterleaved;
+        if (descr.mBitsPerChannel == 16) {
+            if (descr.mFormatFlags & kAudioFormatFlagIsSignedInteger) {
+                return isPlanar ? AV_SAMPLE_FMT_S16P : AV_SAMPLE_FMT_S16;
+            }
+        }
+        else if (descr.mBitsPerChannel == 32) {
+            if (descr.mFormatFlags & kAudioFormatFlagIsFloat) {
+                return isPlanar ? AV_SAMPLE_FMT_FLTP : AV_SAMPLE_FMT_FLT;
+            }
+            else if (descr.mFormatFlags & kAudioFormatFlagIsSignedInteger) {
+                return isPlanar ? AV_SAMPLE_FMT_S32P : AV_SAMPLE_FMT_S32;
+            }
+        }
+    }
+    NSLog(@"Unsupported Core Audio format");
+    return AV_SAMPLE_FMT_NONE;
+}
+
+AudioFormat
+audioFormatFromDescription(const AudioStreamBasicDescription& descr) {
+    return AudioFormat {static_cast<unsigned int>(descr.mSampleRate),
+                        static_cast<unsigned int>(descr.mChannelsPerFrame),
+                        getFormatFromStreamDescription(descr)};
+}
+
 // AudioLayer implementation.
 CoreLayer::CoreLayer(const AudioPreference &pref)
     : AudioLayer(pref)
@@ -195,7 +224,8 @@ CoreLayer::setupOutputBus() {
                                   size));
 
     hardwareFormatAvailable({static_cast<unsigned int>(outputASBD.mSampleRate),
-                            static_cast<unsigned int>(outputASBD.mChannelsPerFrame)});
+                             static_cast<unsigned int>(outputASBD.mChannelsPerFrame),
+                             getFormatFromStreamDescription(outputASBD)});
 }
 
 void
@@ -244,7 +274,8 @@ CoreLayer::setupInputBus() {
                                   &size));
     inputASBD.mSampleRate = inSampleRate;
     audioInputFormat_ = {static_cast<unsigned int>(inputASBD.mSampleRate),
-                         static_cast<unsigned int>(inputASBD.mChannelsPerFrame)};
+                         static_cast<unsigned int>(inputASBD.mChannelsPerFrame),
+                         getFormatFromStreamDescription(inputASBD)};
     hardwareInputFormatAvailable(audioInputFormat_);
 
     // Keep some values to not ask them every time the read callback is fired up
diff --git a/src/media/audio/coreaudio/osx/corelayer.mm b/src/media/audio/coreaudio/osx/corelayer.mm
index 53254295f7..9e73928dac 100644
--- a/src/media/audio/coreaudio/osx/corelayer.mm
+++ b/src/media/audio/coreaudio/osx/corelayer.mm
@@ -33,6 +33,35 @@ dispatch_queue_t audioConfigurationQueueMacOS() {
     return queue;
 }
 
+enum AVSampleFormat
+getFormatFromStreamDescription(const AudioStreamBasicDescription& descr) {
+    if (descr.mFormatID == kAudioFormatLinearPCM) {
+        bool isPlanar = descr.mFormatFlags & kAudioFormatFlagIsNonInterleaved;
+        if (descr.mBitsPerChannel == 16) {
+            if (descr.mFormatFlags & kAudioFormatFlagIsSignedInteger) {
+                return isPlanar ? AV_SAMPLE_FMT_S16P : AV_SAMPLE_FMT_S16;
+            }
+        }
+        else if (descr.mBitsPerChannel == 32) {
+            if (descr.mFormatFlags & kAudioFormatFlagIsFloat) {
+                return isPlanar ? AV_SAMPLE_FMT_FLTP : AV_SAMPLE_FMT_FLT;
+            }
+            else if (descr.mFormatFlags & kAudioFormatFlagIsSignedInteger) {
+                return isPlanar ? AV_SAMPLE_FMT_S32P : AV_SAMPLE_FMT_S32;
+            }
+        }
+    }
+    NSLog(@"Unsupported Core Audio format");
+    return AV_SAMPLE_FMT_NONE;
+}
+
+AudioFormat
+audioFormatFromDescription(const AudioStreamBasicDescription& descr) {
+    return AudioFormat {static_cast<unsigned int>(descr.mSampleRate),
+                        static_cast<unsigned int>(descr.mChannelsPerFrame),
+                        getFormatFromStreamDescription(descr)};
+}
+
 // AudioLayer implementation.
 CoreLayer::CoreLayer(const AudioPreference& pref)
     : AudioLayer(pref)
@@ -226,7 +255,8 @@ CoreLayer::initAudioLayerIO(AudioDeviceType stream)
                                   &size));
 
     audioFormat_ = {static_cast<unsigned int>(outSampleRate_),
-                    static_cast<unsigned int>(info.mChannelsPerFrame)};
+                    static_cast<unsigned int>(info.mChannelsPerFrame),
+                    getFormatFromStreamDescription(info)};
 
     outChannelsPerFrame_ = info.mChannelsPerFrame;
 
@@ -261,7 +291,8 @@ CoreLayer::initAudioLayerIO(AudioDeviceType stream)
                                   &size));
 
     audioInputFormat_ = {static_cast<unsigned int>(inSampleRate_),
-                         static_cast<unsigned int>(info.mChannelsPerFrame)};
+                         static_cast<unsigned int>(info.mChannelsPerFrame),
+                         getFormatFromStreamDescription(info)};
     hardwareInputFormatAvailable(audioInputFormat_);
     // Keep everything else and change only sample rate (or else SPLOSION!!!)
     info.mSampleRate = audioInputFormat_.sample_rate;
-- 
GitLab
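
For reference, below is a standalone sketch (not part of the patch) that
mirrors the AudioStreamBasicDescription -> AVSampleFormat mapping introduced
above. The CoreAudio types/constants and the libavutil call are real APIs;
everything else (file name, the formatFromASBD() helper, the sample
descriptions, the build command) is an illustrative assumption.

// format_probe.mm -- hypothetical example, not part of the patch.
// Re-implements the same decision tree as getFormatFromStreamDescription()
// so the mapping can be exercised without the rest of the audio layer.
// Build (assumed): clang++ -std=c++17 format_probe.mm -framework Foundation \
//                  $(pkg-config --cflags --libs libavutil) -o format_probe
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
extern "C" {
#include <libavutil/samplefmt.h>
}

// Same logic as the patch: only linear PCM in 16-bit signed integer,
// 32-bit signed integer, or 32-bit float layouts is recognized.
static enum AVSampleFormat
formatFromASBD(const AudioStreamBasicDescription& d)
{
    if (d.mFormatID != kAudioFormatLinearPCM)
        return AV_SAMPLE_FMT_NONE;
    bool planar = d.mFormatFlags & kAudioFormatFlagIsNonInterleaved;
    if (d.mBitsPerChannel == 16 && (d.mFormatFlags & kAudioFormatFlagIsSignedInteger))
        return planar ? AV_SAMPLE_FMT_S16P : AV_SAMPLE_FMT_S16;
    if (d.mBitsPerChannel == 32) {
        if (d.mFormatFlags & kAudioFormatFlagIsFloat)
            return planar ? AV_SAMPLE_FMT_FLTP : AV_SAMPLE_FMT_FLT;
        if (d.mFormatFlags & kAudioFormatFlagIsSignedInteger)
            return planar ? AV_SAMPLE_FMT_S32P : AV_SAMPLE_FMT_S32;
    }
    return AV_SAMPLE_FMT_NONE;
}

int main()
{
    // A common hardware layout on Apple devices: 32-bit float, planar.
    AudioStreamBasicDescription f32 {};
    f32.mFormatID = kAudioFormatLinearPCM;
    f32.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked
                     | kAudioFormatFlagIsNonInterleaved;
    f32.mBitsPerChannel = 32;
    f32.mSampleRate = 48000;
    f32.mChannelsPerFrame = 2;

    // 16-bit signed integer, interleaved.
    AudioStreamBasicDescription s16 {};
    s16.mFormatID = kAudioFormatLinearPCM;
    s16.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    s16.mBitsPerChannel = 16;
    s16.mSampleRate = 44100;
    s16.mChannelsPerFrame = 2;

    NSLog(@"float device -> %s", av_get_sample_fmt_name(formatFromASBD(f32))); // fltp
    NSLog(@"int16 device -> %s", av_get_sample_fmt_name(formatFromASBD(s16))); // s16
    return 0;
}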