Commit fa57bac5 authored by Emmanuel Milou

use sampling frequency and frame size from the user config

Sampling rate values are no longer hardcoded. The sampling rate of the audio layer
and the frame size can be set in the user config file. The clock rate of the codec used
in the RTP session is set to its actual value, but can be changed by selecting a different
codec in the user config file (only G711 for now).
parent 9c76e463
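The change replaces the hardcoded 8 kHz constants (RTP_20S_8KHZ_MAX, RTP_20S_48KHZ_MAX) with values derived from the configured sample rate and frame size. The arithmetic used throughout the diff is the same everywhere: samples per frame = rate in Hz * frame length in ms / 1000. A minimal standalone sketch of that bookkeeping (not part of the commit), assuming the 44100 Hz / 20 ms defaults mentioned in the new doc comments and G711's 8000 Hz clock rate:

#include <cstdio>

int main() {
    // Values the commit reads from the user config (assumed defaults).
    const int layerSampleRate = 44100;  // audio layer rate, Hz
    const int frameSizeMs     = 20;     // frame length, ms
    const int codecClockRate  = 8000;   // G711 clock rate, Hz

    // Samples the audio layer produces/consumes per frame
    // (what initBuffers() uses to size _dataAudioLayer, _floatBufferUp, ...).
    int layerSamplesPerFrame = layerSampleRate * frameSizeMs / 1000;   // 882

    // Samples per frame at the codec rate, i.e. what goes on the wire and
    // the RTP timestamp increment per frame ("step" in AudioRtpRTX::run()).
    int codecSamplesPerFrame = codecClockRate * frameSizeMs / 1000;    // 160

    printf("layer: %d samples/frame, codec: %d samples/frame\n",
           layerSamplesPerFrame, codecSamplesPerFrame);
    return 0;
}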
@@ -2,10 +2,11 @@
  * Copyright (C) 2005 Savoir-Faire Linux inc.
  * Author: Yan Morin <yan.morin@savoirfairelinux.com>
  * Author: Jerome Oufella <jerome.oufella@savoirfairelinux.com>
+ * Author: Emmanuel Milou <emmanuel.milou@savoirfairelinux.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
+ * the Free Software Foundation; either version 3 of the License, or
  * (at your option) any later version.
  *
  * This program is distributed in the hope that it will be useful,
@@ -108,13 +109,15 @@ AudioLayer::hasStream(void) {
 void
-AudioLayer::openDevice (int indexIn, int indexOut, int sampleRate)
+AudioLayer::openDevice (int indexIn, int indexOut, int sampleRate, int frameSize)
 {
   closeStream();
   _sampleRate = sampleRate;
+  _frameSize = frameSize;
   int portaudioFramePerBuffer = FRAME_PER_BUFFER; //=FRAME_PER_BUFFER; //= paFramesPerBufferUnspecified;
+  //int portaudioFramePerBuffer = (int) (8000 * frameSize / 1000);
+  //= paFramesPerBufferUnspecified;
   int nbDevice = getDeviceCount();
   if (nbDevice == 0) {
...
@@ -2,10 +2,11 @@
  * Copyright (C) 2004-2005 Savoir-Faire Linux inc.
  * Author: Yan Morin <yan.morin@savoirfairelinux.com>
  * Author: Jerome Oufella <jerome.oufella@savoirfairelinux.com>
+ * Author: Emmanuel Milou <emmanuel.milou@savoirfairelinux.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
+ * the Free Software Foundation; either version 3 of the License, or
  * (at your option) any later version.
  *
  * This program is distributed in the hope that it will be useful,
@@ -35,7 +36,7 @@ class RingBuffer;
 class ManagerImpl;
 class AudioLayer {
   public:
     AudioLayer(ManagerImpl* manager);
     ~AudioLayer(void);
@@ -43,8 +44,9 @@ public:
    * @param indexIn
    * @param indexOut
    * @param sampleRate
+   * @param frameSize
    */
-  void openDevice(int, int, int);
+  void openDevice(int, int, int, int);
   void startStream(void);
   void stopStream(void);
   void sleep(int);
@@ -72,7 +74,7 @@ public:
    * accessor only
    */
   unsigned int getSampleRate() { return _sampleRate; }
+  unsigned int getFrameSize() { return _frameSize; }
   int getDeviceCount();
   AudioDevice* getAudioDeviceInfo(int index, int ioDeviceMask);
@@ -83,7 +85,7 @@ public:
    */
   void toggleEchoTesting();
 private:
   void closeStream (void);
   RingBuffer _urgentRingBuffer;
   RingBuffer _mainSndRingBuffer;
@@ -94,11 +96,17 @@ private:
   portaudio::MemFunCallbackStream<AudioLayer> *_stream;
   /**
-   * Sample Rate of SFLphone : should be 8000 for 8khz
-   * Added because we could change it in the futur
+   * Sample Rate SFLphone should send sound data to the sound card
+   * The value can be set in the user config file - now: 44100HZ
    */
   unsigned int _sampleRate;
+  /**
+   * Length of the sound frame we capture or read in ms
+   * The value can be set in the user config file - now: 20ms
+   */
+  unsigned int _frameSize;
   /**
    * Input channel (mic) should be 1 mono
    */
...
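The audiortp.cpp changes below route both directions through libsamplerate: downSampleData() converts mic data from the layer rate to the codec clock rate before encoding, and upSampleData() goes the other way after decoding. The following standalone sketch mirrors the same up-sampling call sequence using only the libsamplerate API already present in the diff (src_short_to_float_array, src_process, src_float_to_short_array); the function name, buffer handling, and capacity parameter here are illustrative, not the ones from the file:

#include <cmath>
#include <vector>
#include <samplerate.h>   // libsamplerate

// Up-sample 16-bit samples from codecRate to layerRate, in the spirit of
// AudioRtpRTX::upSampleData(). Returns the number of output samples written.
static int upSample(SRC_STATE* state, const short* in, int nbIn,
                    int codecRate, int layerRate, short* out, int outCapacity)
{
    double ratio = (double) layerRate / codecRate;
    if (ratio == 1.0) return nbIn;            // nothing to do at equal rates

    std::vector<float> fin(nbIn), fout(outCapacity);
    src_short_to_float_array(in, fin.data(), nbIn);     // int16 -> float

    SRC_DATA data;
    data.data_in       = fin.data();
    data.data_out      = fout.data();
    data.input_frames  = nbIn;
    data.output_frames = (int) std::floor(ratio * nbIn);
    data.src_ratio     = ratio;
    data.end_of_input  = 0;                   // more data will come
    src_process(state, &data);                // do the conversion

    int nbOut = (data.output_frames_gen > outCapacity)
                    ? outCapacity : (int) data.output_frames_gen;
    src_float_to_short_array(fout.data(), out, nbOut);   // float -> int16
    return nbOut;
}

The SRC_STATE would come from src_new(SRC_SINC_BEST_QUALITY, 1, &err), exactly as the constructor in the diff already does.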
 /*
  * Copyright (C) 2004-2007 Savoir-Faire Linux inc.
+ * Author: Emmanuel Milou <emmanuel.milou@savoirfairelinux.com>
  * Author: Alexandre Bourget <alexandre.bourget@savoirfairelinux.com>
  * Author: Yan Morin <yan.morin@savoirfairelinux.com>
  * Author: Laurielle Lea <laurielle.lea@savoirfairelinux.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
+ * the Free Software Foundation; either version 3 of the License, or
  * (at your option) any later version.
  *
  * This program is distributed in the hope that it will be useful,
@@ -97,21 +98,14 @@ AudioRtp::closeRtpSession () {
 ////////////////////////////////////////////////////////////////////////////////
 AudioRtpRTX::AudioRtpRTX (SIPCall *sipcall, bool sym)
   // : _fstream("/tmp/audio.dat", std::ofstream::binary)
 {
   setCancel(cancelDeferred);
   time = new ost::Time();
   _ca = sipcall;
   _sym = sym;
   // AudioRtpRTX should be close if we change sample rate
-  _receiveDataDecoded = new int16[RTP_20S_48KHZ_MAX];
-  _sendDataEncoded = new unsigned char[RTP_20S_8KHZ_MAX];
-  // we estimate that the number of format after a conversion 8000->48000 is expanded to 6 times
-  _dataAudioLayer = new SFLDataFormat[RTP_20S_48KHZ_MAX];
-  _floatBuffer8000 = new float32[RTP_20S_8KHZ_MAX];
-  _floatBuffer48000 = new float32[RTP_20S_48KHZ_MAX];
-  _intBuffer8000 = new int16[RTP_20S_8KHZ_MAX];
+  _codecSampleRate = _ca->getAudioCodec()->getClockRate();
   // TODO: Change bind address according to user settings.
   // TODO: this should be the local ip not the external (router) IP
@@ -129,6 +123,8 @@ AudioRtpRTX::AudioRtpRTX (SIPCall *sipcall, bool sym)
   }
   // libsamplerate-related
+  // Set the converter type for the upsampling and the downsampling
+  // interpolator SRC_SINC_BEST_QUALITY
   _src_state_mic = src_new(SRC_SINC_BEST_QUALITY, 1, &_src_err);
   _src_state_spkr = src_new(SRC_SINC_BEST_QUALITY, 1, &_src_err);
@@ -153,9 +149,9 @@ AudioRtpRTX::~AudioRtpRTX () {
     delete _session; _session = NULL;
   }
-  delete [] _intBuffer8000; _intBuffer8000 = NULL;
-  delete [] _floatBuffer48000; _floatBuffer48000 = NULL;
-  delete [] _floatBuffer8000; _floatBuffer8000 = NULL;
+  delete [] _intBufferDown; _intBufferDown = NULL;
+  delete [] _floatBufferUp; _floatBufferUp = NULL;
+  delete [] _floatBufferDown; _floatBufferDown = NULL;
   delete [] _dataAudioLayer; _dataAudioLayer = NULL;
   delete [] _sendDataEncoded; _sendDataEncoded = NULL;
@@ -168,7 +164,19 @@ AudioRtpRTX::~AudioRtpRTX () {
   _src_state_spkr = src_delete(_src_state_spkr);
 }
 void
+AudioRtpRTX::initBuffers()
+{
+  int nbSamplesMax = (int) (_layerSampleRate * _layerFrameSize /1000);
+  _dataAudioLayer = new SFLDataFormat[nbSamplesMax];
+  _receiveDataDecoded = new int16[nbSamplesMax];
+  _floatBufferDown = new float32[nbSamplesMax];
+  _floatBufferUp = new float32[nbSamplesMax];
+  _sendDataEncoded = new unsigned char[nbSamplesMax];
+  _intBufferDown = new int16[nbSamplesMax];
+}
+
+void
 AudioRtpRTX::initAudioRtpSession (void)
 {
   try {
@@ -247,6 +255,8 @@ AudioRtpRTX::sendSessionFromMic(int timestamp)
   // 3. encode it
   // 4. send it
   try {
+    int16* toSIP = NULL;
     timestamp += time->getSecond();
     if (_ca==0) { _debug(" !ARTP: No call associated (mic)\n"); return; } // no call, so we do nothing
     AudioLayer* audiolayer = Manager::instance().getAudioDriver();
@@ -256,67 +266,28 @@ AudioRtpRTX::sendSessionFromMic(int timestamp)
     if (!audiocodec) { _debug(" !ARTP: No audiocodec available for mic\n"); return; }
     // we have to get 20ms of data from the mic *20/1000 = /50
-    // rate/50 shall be lower than RTP_20S_48KHZ_MAX
-    int maxBytesToGet = audiolayer->getSampleRate()/50*sizeof(SFLDataFormat);
+    int maxBytesToGet = _layerSampleRate * _layerFrameSize * sizeof(SFLDataFormat) / 1000;
     // available bytes inside ringbuffer
     int availBytesFromMic = audiolayer->canGetMic();
     // take the lowest
     int bytesAvail = (availBytesFromMic < maxBytesToGet) ? availBytesFromMic : maxBytesToGet;
-    //_debug("available = %d, maxBytesToGet = %d\n", availBytesFromMic, maxBytesToGet);
     // Get bytes from micRingBuffer to data_from_mic
     int nbSample = audiolayer->getMic(_dataAudioLayer, bytesAvail) / sizeof(SFLDataFormat);
-    int16* toSIP = NULL;
-    if (audiolayer->getSampleRate() != audiocodec->getClockRate() && nbSample) {
-      SRC_DATA src_data;
-#ifdef DATAFORMAT_IS_FLOAT
-      src_data.data_in = _dataAudioLayer;
-#else
-      src_short_to_float_array(_dataAudioLayer, _floatBuffer48000, nbSample);
-      src_data.data_in = _floatBuffer48000;
-#endif
-      double factord = (double) audiocodec->getClockRate() / audiolayer->getSampleRate();
-      src_data.src_ratio = factord;
-      src_data.input_frames = nbSample;
-      src_data.output_frames = (int) floor(factord * nbSample);
-      src_data.data_out = _floatBuffer8000;
-      src_data.end_of_input = 0; /* More data to come */
-      src_process(_src_state_mic, &src_data);
-      nbSample = src_data.output_frames_gen;
-      /* WRITE IN A FILE FOR DEBUG */
-      //_fstream.write((char *) _floatBuffer48000, src_data.output_frames_gen * sizeof(float));
-      //_fstream.flush();
-      //if (nbSample > RTP_20S_8KHZ_MAX) { _debug("Alert from mic, nbSample %d is bigger than expected %d\n", nbSample, RTP_20S_8KHZ_MAX); }
-      src_float_to_short_array (_floatBuffer8000, _intBuffer8000, nbSample);
-      toSIP = _intBuffer8000;
-    } else {
-#ifdef DATAFORMAT_IS_FLOAT
-      // convert _receiveDataDecoded to float inside _receiveData
-      src_float_to_short_array(_dataAudioLayer, _intBuffer8000, nbSample);
-      toSIP = _intBuffer8000;
-      //if (nbSample > RTP_20S_8KHZ_MAX) { _debug("Alert from mic, nbSample %d is bigger than expected %d\n", nbSample, RTP_20S_8KHZ_MAX); }
-#else
-      toSIP = _dataAudioLayer; // int to int
-#endif
-    }
-    if ( nbSample < (RTP_20S_8KHZ_MAX - 10) ) { // if only 10 is missing, it's ok
+    int nb_sample_up = nbSample;
+    int nbSamplesMax = _layerFrameSize * audiocodec->getClockRate() / 1000;
+    nbSample = reSampleData(audiocodec->getClockRate(), nb_sample_up, DOWN_SAMPLING);
+    toSIP = _intBufferDown;
+    if ( nbSample < nbSamplesMax - 10 ) { // if only 10 is missing, it's ok
       // fill end with 0...
       //_debug("begin: %p, nbSample: %d\n", toSIP, nbSample);
-      //_debug("has to fill: %d chars at %p\n", (RTP_20S_8KHZ_MAX-nbSample)*sizeof(int16), toSIP + nbSample);
-      memset(toSIP + nbSample, 0, (RTP_20S_8KHZ_MAX-nbSample)*sizeof(int16));
-      nbSample = RTP_20S_8KHZ_MAX;
+      memset(toSIP + nbSample, 0, (nbSamplesMax-nbSample)*sizeof(int16));
+      nbSample = nbSamplesMax;
     }
     //_debug("AR: Nb sample: %d int, [0]=%d [1]=%d [2]=%d\n", nbSample, toSIP[0], toSIP[1], toSIP[2]);
@@ -326,7 +297,7 @@ AudioRtpRTX::sendSessionFromMic(int timestamp)
     // encode divise by two
     // Send encoded audio sample over the network
-    if (compSize > RTP_20S_8KHZ_MAX) { _debug("! ARTP: %d should be %d\n", compSize, RTP_20S_8KHZ_MAX);}
+    if (compSize > nbSamplesMax) { _debug("! ARTP: %d should be %d\n", compSize, nbSamplesMax);}
     if (!_sym) {
       _sessionSend->putData(timestamp, _sendDataEncoded, compSize);
     } else {
@@ -340,7 +311,9 @@ AudioRtpRTX::sendSessionFromMic(int timestamp)
 }
 void
 AudioRtpRTX::receiveSessionForSpkr (int& countTime)
 {
   if (_ca == 0) { return; }
@@ -364,78 +337,43 @@ AudioRtpRTX::receiveSessionForSpkr (int& countTime)
     unsigned char* data = (unsigned char*)adu->getData(); // data in char
     unsigned int size = adu->getSize(); // size in char
-    if ( size > RTP_20S_8KHZ_MAX ) {
-      _debug("We have received from RTP a packet larger than expected: %s VS %s\n", size, RTP_20S_8KHZ_MAX);
+    // Decode data with relevant codec
+    AudioCodec* audiocodec = _ca->getCodecMap().getCodec((CodecType)payload);
+    _codecSampleRate = audiocodec->getClockRate();
+    int max = (int)(_codecSampleRate * _layerFrameSize);
+    if ( size > max ) {
+      _debug("We have received from RTP a packet larger than expected: %s VS %s\n", size, max);
       _debug("The packet size has been cropped\n");
-      size=RTP_20S_8KHZ_MAX;
+      size=max;
     }
-    // NOTE: The audio rendered here (in data/size) is perfect.
-    // Decode data with relevant codec
-    AudioCodec* audiocodec = _ca->getCodecMap().getCodec((CodecType)payload);
     if (audiocodec != NULL) {
-      // codecDecode(int16 *dest, char* src, size in bytes of the src)
-      // decode multiply by two, so the number of byte should be double
-      // size shall be RTP_FRAME2SEND or lower
       int expandedSize = audiocodec->codecDecode(_receiveDataDecoded, data, size);
-      //buffer _receiveDataDecoded ----> short int or int16, coded on 2 bytes
      int nbInt16 = expandedSize / sizeof(int16);
-      if (nbInt16 > RTP_20S_8KHZ_MAX) {
-        _debug("We have decoded an RTP packet larger than expected: %s VS %s. Cropping.\n", nbInt16, RTP_20S_8KHZ_MAX);
-        nbInt16=RTP_20S_8KHZ_MAX;
+      //nbInt16 represents the number of samples we just decoded
+      if (nbInt16 > max) {
+        _debug("We have decoded an RTP packet larger than expected: %s VS %s. Cropping.\n", nbInt16, max);
+        nbInt16=max;
       }
-      // NOTE: The audio arriving here (in _receiveDataDecoded/expandedSize) is perfect.
      SFLDataFormat* toAudioLayer;
      int nbSample = nbInt16;
-      // 48000 / 8000 = 6, the number of samples for the maximum rate conversion.
-      int nbSampleMaxRate = nbInt16 * 6; // TODO: change it
-      // We assume over here that we pass from a lower rate to a higher one. Bad bad.
-      if ( audiolayer->getSampleRate() != audiocodec->getClockRate() && nbSample) {
       // Do sample rate conversion
+      int nb_sample_down = nbSample;
+      nbSample = reSampleData(_codecSampleRate , nb_sample_down, UP_SAMPLING);
+#ifdef DATAFORMAT_IS_FLOAT
+      toAudioLayer = _floatBufferUp;
+#else
-      double factord = (double) audiolayer->getSampleRate() / audiocodec->getClockRate();
-      // SRC_DATA from samplerate.h
-      SRC_DATA src_data;
-      src_data.data_in = _floatBuffer8000;
-      src_data.data_out = _floatBuffer48000;
-      src_data.input_frames = nbSample;
-      src_data.output_frames = (int) floor(factord * nbSample);
-      src_data.src_ratio = factord;
-      src_data.end_of_input = 0; /* More data will come */
-      src_short_to_float_array(_receiveDataDecoded, _floatBuffer8000, nbSample);
-      // NOTE: The audio arriving here (in _floatBuffer8000/nbSample*sizeof(float)) is perfect.
-      src_process(_src_state_spkr, &src_data);
-      // Truncate number of samples if too high (ouch!)
-      nbSample = ( src_data.output_frames_gen > RTP_20S_48KHZ_MAX) ? RTP_20S_48KHZ_MAX : src_data.output_frames_gen;
-#ifdef DATAFORMAT_IS_FLOAT
-      toAudioLayer = _floatBuffer48000;
-#else
-      src_float_to_short_array(_floatBuffer48000, _dataAudioLayer, nbSample);
       toAudioLayer = _dataAudioLayer;
 #endif
-      } else {
-      nbSample = nbInt16;
-#ifdef DATAFORMAT_IS_FLOAT
-      // convert _receiveDataDecoded to float inside _receiveData
-      src_short_to_float_array(_receiveDataDecoded, _floatBuffer8000, nbSample);
-      toAudioLayer = _floatBuffer8000;
-#else
-      toAudioLayer = _receiveDataDecoded; // int to int
-#endif
-      }
      audiolayer->putMain(toAudioLayer, nbSample * sizeof(SFLDataFormat));
-      //_debug("ARTP: %d\n", nbSample * sizeof(SFLDataFormat));
      // Notify (with a beep) an incoming call when there is already a call
      countTime += time->getSecond();
@@ -457,12 +395,80 @@ AudioRtpRTX::receiveSessionForSpkr (int& countTime)
   }
 }
+int
+AudioRtpRTX::reSampleData(int sampleRate_codec, int nbSamples, int status)
+{
+  if(status==UP_SAMPLING)
+    return upSampleData(sampleRate_codec, nbSamples);
+  else if(status==DOWN_SAMPLING)
+    return downSampleData(sampleRate_codec, nbSamples);
+  else
+    return 0;
+}
+
+////////////////////////////////////////////////////////////////////
+//////////// RESAMPLING FUNCTIONS /////////////////////////////////
+//////////////////////////////////////////////////////////////////
+
+int
+AudioRtpRTX::upSampleData(int sampleRate_codec, int nbSamples)
+{
+  double upsampleFactor = (double) _layerSampleRate / sampleRate_codec;
+  int nbSamplesMax = (int) (_layerSampleRate * _layerFrameSize /1000);
+  if( upsampleFactor != 1 )
+  {
+    SRC_DATA src_data;
+    src_data.data_in = _floatBufferDown;
+    src_data.data_out = _floatBufferUp;
+    src_data.input_frames = nbSamples;
+    src_data.output_frames = (int) floor(upsampleFactor * nbSamples);
+    src_data.src_ratio = upsampleFactor;
+    src_data.end_of_input = 0; // More data will come
+    src_short_to_float_array(_receiveDataDecoded, _floatBufferDown, nbSamples);
+    src_process(_src_state_spkr, &src_data);
+    nbSamples = ( src_data.output_frames_gen > nbSamplesMax) ? nbSamplesMax : src_data.output_frames_gen;
+    src_float_to_short_array(_floatBufferUp, _dataAudioLayer, nbSamples);
+  }
+  return nbSamples;
+}
+
+int
+AudioRtpRTX::downSampleData(int sampleRate_codec, int nbSamples)
+{
+  double downsampleFactor = (double) sampleRate_codec / _layerSampleRate;
+  int nbSamplesMax = (int) (sampleRate_codec * _layerFrameSize / 1000);
+  if ( downsampleFactor != 1)
+  {
+    SRC_DATA src_data;
+    src_data.data_in = _floatBufferUp;
+    src_data.data_out = _floatBufferDown;
+    src_data.input_frames = nbSamples;
+    src_data.output_frames = (int) floor(downsampleFactor * nbSamples);
+    src_data.src_ratio = downsampleFactor;
+    src_data.end_of_input = 0; // More data will come
+    src_short_to_float_array(_dataAudioLayer, _floatBufferUp, nbSamples);
+    src_process(_src_state_mic, &src_data);
+    nbSamples = ( src_data.output_frames_gen > nbSamplesMax) ? nbSamplesMax : src_data.output_frames_gen;
+    src_float_to_short_array(_floatBufferDown, _intBufferDown, nbSamples);
+  }
+  return nbSamples;
+}
+
+//////////////////////// END RESAMPLING //////////////////////////////////////////////////////
+
 void
 AudioRtpRTX::run () {
   //mic, we receive from soundcard in stereo, and we send encoded
   //encoding before sending
   AudioLayer *audiolayer = Manager::instance().getAudioDriver();
+  _layerFrameSize = audiolayer->getFrameSize(); // in ms
+  _layerSampleRate = audiolayer->getSampleRate();
+  initBuffers();
+  int step = (int)(_layerFrameSize * _codecSampleRate / 1000);
   try {
     // Init the session
     initAudioRtpSession();
@@ -479,9 +485,7 @@ AudioRtpRTX::run () {
     int timestamp = 0; // for mic
     int countTime = 0; // for receive
-    // TODO: get frameSize from user config
-    int frameSize = 20; // 20ms frames
-    TimerPort::setTimer(frameSize);
+    TimerPort::setTimer(_layerFrameSize);
     audiolayer->flushMic();
     audiolayer->startStream();
@@ -492,8 +496,7 @@ AudioRtpRTX::run () {
     // Send session
     ////////////////////////////
     sendSessionFromMic(timestamp);
-    timestamp += RTP_20S_8KHZ_MAX;
+    timestamp += step;
     ////////////////////////////
     // Recv session
     ////////////////////////////
@@ -501,7 +504,7 @@