Skip to content
Snippets Groups Projects
Commit 09d4d466 authored by Alexandre Savard's avatar Alexandre Savard
Browse files

[#3946] Fade in audio data in rtp sessions

parent b1f75abf
No related branches found
No related tags found
No related merge requests found
...@@ -81,6 +81,7 @@ typedef struct DtmfEvent { ...@@ -81,6 +81,7 @@ typedef struct DtmfEvent {
typedef list<DtmfEvent *> EventQueue; typedef list<DtmfEvent *> EventQueue;
template <typename D> template <typename D>
class AudioRtpSession : public ost::Thread, public ost::TimerPort class AudioRtpSession : public ost::Thread, public ost::TimerPort
{ {
...@@ -123,18 +124,51 @@ class AudioRtpSession : public ost::Thread, public ost::TimerPort ...@@ -123,18 +124,51 @@ class AudioRtpSession : public ost::Thread, public ost::TimerPort
private: private:
/**
* Allocate memory for RTP buffers and fill them with zeros
*/
void initBuffers (void); void initBuffers (void);
/**
* Set RTP Sockets send/receive timeouts
*/
void setSessionTimeouts (void); void setSessionTimeouts (void);
/**
* Set the audio codec for this RTP session
*/
void setSessionMedia (AudioCodec*); void setSessionMedia (AudioCodec*);
/**
 * Retrieve destination address for this session. Stored in CALL
*/
void setDestinationIpAddress (void); void setDestinationIpAddress (void);
/**
* Encode audio data from mainbuffer
*/
int processDataEncode (void); int processDataEncode (void);
/**
* Decode audio data received from peer
*/
void processDataDecode (unsigned char * spkrData, unsigned int size); void processDataDecode (unsigned char * spkrData, unsigned int size);
/**
* Send encoded data to peer
*/
void sendMicData(); void sendMicData();
/**
* Receive data from peer
*/
void receiveSpeakerData (); void receiveSpeakerData ();
/**
* Ramp In audio data to avoid audio click from peer
*/
bool fadeIn (SFLDataFormat *audio, int size, SFLDataFormat *factor);
ost::Time * _time; ost::Time * _time;
// This semaphore is not used // This semaphore is not used
...@@ -254,9 +288,31 @@ class AudioRtpSession : public ost::Thread, public ost::TimerPort ...@@ -254,9 +288,31 @@ class AudioRtpSession : public ost::Thread, public ost::TimerPort
*/ */
int _currentTime; int _currentTime;
/**
* Preprocess internal data
*/
SpeexPreprocessState *_noiseState; SpeexPreprocessState *_noiseState;
// ofstream *captureFile; /**
* State of mic fade in
*/
bool _micFadeInComplete;
/**
* State of spkr fade in
*/
bool _spkrFadeInComplete;
/**
 * Amplitude factor to fade in mic data
*/
SFLDataFormat _micAmplFactor;
/**
* Amplitude factor to fade in spkr data
*/
SFLDataFormat _spkrAmplFactor;
protected: protected:
...@@ -288,6 +344,10 @@ AudioRtpSession<D>::AudioRtpSession (ManagerImpl * manager, SIPCall * sipcall) : ...@@ -288,6 +344,10 @@ AudioRtpSession<D>::AudioRtpSession (ManagerImpl * manager, SIPCall * sipcall) :
_countNotificationTime (0), _countNotificationTime (0),
_jbuffer (NULL), _jbuffer (NULL),
_noiseState (NULL), _noiseState (NULL),
_micFadeInComplete (false),
_spkrFadeInComplete (false),
_micAmplFactor (32000),
_spkrAmplFactor (32000),
_ca (sipcall) _ca (sipcall)
{ {
setCancel (cancelDefault); setCancel (cancelDefault);
...@@ -316,7 +376,6 @@ AudioRtpSession<D>::AudioRtpSession (ManagerImpl * manager, SIPCall * sipcall) : ...@@ -316,7 +376,6 @@ AudioRtpSession<D>::AudioRtpSession (ManagerImpl * manager, SIPCall * sipcall) :
_packetLength = 20; _packetLength = 20;
_currentTime = 0; _currentTime = 0;
// captureFile = new ofstream ("probeCaptureFile", ofstream::binary);
} }
template <typename D> template <typename D>
...@@ -374,11 +433,6 @@ AudioRtpSession<D>::~AudioRtpSession() ...@@ -374,11 +433,6 @@ AudioRtpSession<D>::~AudioRtpSession()
speex_preprocess_state_destroy (_noiseState); speex_preprocess_state_destroy (_noiseState);
} }
// captureFile->close();
// delete captureFile;
} }
template <typename D> template <typename D>
...@@ -624,6 +678,12 @@ int AudioRtpSession<D>::processDataEncode (void) ...@@ -624,6 +678,12 @@ int AudioRtpSession<D>::processDataEncode (void)
// Get bytes from micRingBuffer to data_from_mic // Get bytes from micRingBuffer to data_from_mic
int nbSample = _manager->getAudioDriver()->getMainBuffer()->getData (_micData , bytesAvail, 100, _ca->getCallId()) / sizeof (SFLDataFormat); int nbSample = _manager->getAudioDriver()->getMainBuffer()->getData (_micData , bytesAvail, 100, _ca->getCallId()) / sizeof (SFLDataFormat);
if (!_micFadeInComplete)
_micFadeInComplete = fadeIn (_micData, nbSample, &_micAmplFactor);
if (nbSample == 0)
return nbSample;
// nb bytes to be sent over RTP // nb bytes to be sent over RTP
int compSize = 0; int compSize = 0;
...@@ -635,14 +695,15 @@ int AudioRtpSession<D>::processDataEncode (void) ...@@ -635,14 +695,15 @@ int AudioRtpSession<D>::processDataEncode (void)
nbSample = _converter->downsampleData (_micData , _micDataConverted , _audiocodec->getClockRate(), _mainBufferSampleRate, nb_sample_up); nbSample = _converter->downsampleData (_micData , _micDataConverted , _audiocodec->getClockRate(), _mainBufferSampleRate, nb_sample_up);
compSize = _audiocodec->codecEncode (_micDataEncoded, _micDataConverted, nbSample*sizeof (int16)); compSize = _audiocodec->codecEncode (_micDataEncoded, _micDataConverted, nbSample*sizeof (SFLDataFormat));
} else { } else {
_nSamplesMic = nbSample; _nSamplesMic = nbSample;
// no resampling required // no resampling required
compSize = _audiocodec->codecEncode (_micDataEncoded, _micData, nbSample*sizeof (int16)); compSize = _audiocodec->codecEncode (_micDataEncoded, _micData, nbSample*sizeof (SFLDataFormat));
} }
return compSize; return compSize;
...@@ -651,7 +712,6 @@ int AudioRtpSession<D>::processDataEncode (void) ...@@ -651,7 +712,6 @@ int AudioRtpSession<D>::processDataEncode (void)
template <typename D> template <typename D>
void AudioRtpSession<D>::processDataDecode (unsigned char * spkrData, unsigned int size) void AudioRtpSession<D>::processDataDecode (unsigned char * spkrData, unsigned int size)
{ {
if (_audiocodec != NULL) { if (_audiocodec != NULL) {
...@@ -660,11 +720,12 @@ void AudioRtpSession<D>::processDataDecode (unsigned char * spkrData, unsigned i ...@@ -660,11 +720,12 @@ void AudioRtpSession<D>::processDataDecode (unsigned char * spkrData, unsigned i
// Return the size of data in bytes // Return the size of data in bytes
int expandedSize = _audiocodec->codecDecode (_spkrDataDecoded , spkrData , size); int expandedSize = _audiocodec->codecDecode (_spkrDataDecoded , spkrData , size);
// captureFile->write ((const char *)_spkrDataDecoded, expandedSize);
// buffer _receiveDataDecoded ----> short int or int16, coded on 2 bytes // buffer _receiveDataDecoded ----> short int or int16, coded on 2 bytes
int nbSample = expandedSize / sizeof (SFLDataFormat); int nbSample = expandedSize / sizeof (SFLDataFormat);
if (!_spkrFadeInComplete)
_spkrFadeInComplete = fadeIn (_spkrDataDecoded, nbSample, &_spkrAmplFactor);
// test if resampling is required // test if resampling is required
if (_audiocodec->getClockRate() != _mainBufferSampleRate) { if (_audiocodec->getClockRate() != _mainBufferSampleRate) {
...@@ -764,36 +825,42 @@ void AudioRtpSession<D>::receiveSpeakerData () ...@@ -764,36 +825,42 @@ void AudioRtpSession<D>::receiveSpeakerData ()
unsigned char* spkrDataIn = NULL; unsigned char* spkrDataIn = NULL;
unsigned int size = 0; unsigned int size = 0;
int result;
jb_frame frame;
_jbuffer->info.conf.resync_threshold = 0;
if (adu) { if (adu) {
spkrDataIn = (unsigned char*) adu->getData(); // data in char spkrDataIn = (unsigned char*) adu->getData(); // data in char
size = adu->getSize(); // size in char size = adu->getSize(); // size in char
result = jb_put (_jbuffer, spkrDataIn, JB_TYPE_VOICE, _packetLength, _ts+=20, _currentTime);
} else { } else {
_debug ("No RTP packet available !!!!!!!!!!!!!!!!!!!!!!!\n"); _debug ("No RTP packet available !!!!!!!!!!!!!!!!!!!!!!!\n");
} }
result = jb_get (_jbuffer, &frame, _currentTime+=20, _packetLength);
// DTMF over RTP, size must be over 4 in order to process it as voice data // DTMF over RTP, size must be over 4 in order to process it as voice data
if (size > 4) { if (size > 4) {
processDataDecode (spkrDataIn, size); processDataDecode (spkrDataIn, size);
//if(result == JB_OK) {
// processDataDecode((unsigned char *)(frame.data), 160);
//}
} }
delete adu; delete adu;
} }
template <typename D>
bool AudioRtpSession<D>::fadeIn (SFLDataFormat *audio, int size, SFLDataFormat *factor)
{
    // Attenuate one frame of audio by the current amplitude factor, then
    // halve the factor so each successive frame is louder (a geometric
    // ramp-in that suppresses the initial click from the peer).
    //
    // @param audio   buffer of samples to attenuate in place
    // @param size    number of samples in the buffer
    // @param factor  in/out divisor applied to every sample; halved on each call
    // @return true once the fade-in is complete (factor has reached zero),
    //         false while the ramp is still in progress

    // Guard: a zero factor would be a division by zero below. Treat it as
    // "fade already complete" so samples pass through unmodified.
    if (*factor == 0)
        return true;

    // Apply the amplitude factor to every sample in place.
    while (size) {
        size--;
        audio[size] /= *factor;
    }

    // Halve the attenuation for the next frame.
    *factor /= 2;

    // The fade is complete once the factor decays to zero.
    return (*factor == 0);
}
template <typename D> template <typename D>
int AudioRtpSession<D>::startRtpThread (AudioCodec* audiocodec) int AudioRtpSession<D>::startRtpThread (AudioCodec* audiocodec)
{ {
...@@ -835,6 +902,8 @@ void AudioRtpSession<D>::run () ...@@ -835,6 +902,8 @@ void AudioRtpSession<D>::run ()
_debug ("RTP: Entering mainloop for call %s",_ca->getCallId().c_str()); _debug ("RTP: Entering mainloop for call %s",_ca->getCallId().c_str());
_manager->getAudioDriver()->getMainBuffer()->getInternalSamplingRate();
while (!testCancel()) { while (!testCancel()) {
// Reset timestamp to make sure the timing information are up to date // Reset timestamp to make sure the timing information are up to date
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment