diff --git a/src/audio/audiortp.cpp b/src/audio/audiortp.cpp
index 9a58c7ec0a3d65f6b17a3f475400a7855cc1413f..319ba0d92626666004fe4da8aa5509e599129746 100644
--- a/src/audio/audiortp.cpp
+++ b/src/audio/audiortp.cpp
@@ -48,12 +48,12 @@ AudioRtp::AudioRtp() :_RTXThread(0), _symmetric(), _threadMutex()
 }
 
 AudioRtp::~AudioRtp (void) {
-  delete _RTXThread; _RTXThread = 0;
+    delete _RTXThread; _RTXThread = 0;
 }
 
 int 
 AudioRtp::createNewSession (SIPCall *ca) {
-   
+
     ost::MutexLock m(_threadMutex);
 
     // something should stop the thread before...
@@ -64,46 +64,46 @@ AudioRtp::createNewSession (SIPCall *ca) {
         delete _RTXThread; _RTXThread = 0;
     }
 
-  // Start RTP Send/Receive threads
-  _symmetric = Manager::instance().getConfigInt(SIGNALISATION,SYMMETRIC) ? true : false;
-  _RTXThread = new AudioRtpRTX (ca, _symmetric);
-  try {
-    if (_RTXThread->start() != 0) {
-     _debug("! ARTP Failure: unable to start RTX Thread\n");
-      return -1;
+    // Start RTP Send/Receive threads
+    _symmetric = Manager::instance().getConfigInt(SIGNALISATION,SYMMETRIC) ? true : false;
+    _RTXThread = new AudioRtpRTX (ca, _symmetric);
+    try {
+        if (_RTXThread->start() != 0) {
+            _debug("! ARTP Failure: unable to start RTX Thread\n");
+            return -1;
+        }
+    } catch(...) {
+        _debugException("! ARTP Failure: when trying to start a thread");
+        throw;
     }
-  } catch(...) {
-    _debugException("! ARTP Failure: when trying to start a thread");
-    throw;
-  }
 
-  return 0;
+    return 0;
 }
 
 
 void
 AudioRtp::closeRtpSession () {
 
-  ost::MutexLock m(_threadMutex);
-  // This will make RTP threads finish.
-  _debug("Stopping AudioRTP\n");
-  try {
-    
-    delete _RTXThread; _RTXThread = 0;
-  } catch(...) {
-    _debugException("! ARTP Exception: when stopping audiortp\n");
-    throw;
-  }
-  AudioLayer* audiolayer = Manager::instance().getAudioDriver();
-  audiolayer->stopStream();
+    ost::MutexLock m(_threadMutex);
+    // This will make RTP threads finish.
+    _debug("Stopping AudioRTP\n");
+    try {
+
+        delete _RTXThread; _RTXThread = 0;
+    } catch(...) {
+        _debugException("! ARTP Exception: when stopping audiortp\n");
+        throw;
+    }
+    AudioLayer* audiolayer = Manager::instance().getAudioDriver();
+    audiolayer->stopStream();
 }
 
 
 void
 AudioRtp::setRecording() {
-  
-  _debug("AudioRtp::setRecording\n");
-  _RTXThread->_ca->setRecording();
+
+    _debug("AudioRtp::setRecording\n");
+    _RTXThread->_ca->setRecording();
 
 }
 
@@ -113,152 +113,152 @@ AudioRtp::setRecording() {
 // AudioRtpRTX Class                                                          //
 ////////////////////////////////////////////////////////////////////////////////
 AudioRtpRTX::AudioRtpRTX (SIPCall *sipcall, bool sym) : time(new ost::Time()), _ca(sipcall), _sessionSend(NULL), _sessionRecv(NULL), _session(NULL), _start(), 
-		               _sym(sym), micData(NULL), micDataConverted(NULL), micDataEncoded(NULL), spkrDataDecoded(NULL), spkrDataConverted(NULL), 
-		               converter(NULL), _layerSampleRate(),_codecSampleRate(), _layerFrameSize(), _audiocodec(NULL)
+    _sym(sym), micData(NULL), micDataConverted(NULL), micDataEncoded(NULL), spkrDataDecoded(NULL), spkrDataConverted(NULL), 
+    converter(NULL), _layerSampleRate(),_codecSampleRate(), _layerFrameSize(), _audiocodec(NULL)
 {
-  setCancel(cancelDefault);
-  // AudioRtpRTX should be close if we change sample rate
-  // TODO: Change bind address according to user settings.
-  // TODO: this should be the local ip not the external (router) IP
-  std::string localipConfig = _ca->getLocalIp(); // _ca->getLocalIp();
-  ost::InetHostAddress local_ip(localipConfig.c_str());
-  if (!_sym) {
-    _sessionRecv = new ost::RTPSession(local_ip, _ca->getLocalAudioPort());
-    _sessionSend = new ost::RTPSession(local_ip, _ca->getLocalAudioPort());
-    _session = NULL;
-  } else {
-    _session = new ost::SymmetricRTPSession (local_ip, _ca->getLocalAudioPort());
-    _sessionRecv = NULL;
-    _sessionSend = NULL;
-  }
+    setCancel(cancelDefault);
+    // AudioRtpRTX should be closed if we change sample rate
+    // TODO: Change bind address according to user settings.
+    // TODO: this should be the local ip not the external (router) IP
+    std::string localipConfig = _ca->getLocalIp(); // _ca->getLocalIp();
+    ost::InetHostAddress local_ip(localipConfig.c_str());
+    if (!_sym) {
+        _sessionRecv = new ost::RTPSession(local_ip, _ca->getLocalAudioPort());
+        _sessionSend = new ost::RTPSession(local_ip, _ca->getLocalAudioPort());
+        _session = NULL;
+    } else {
+        _session = new ost::SymmetricRTPSession (local_ip, _ca->getLocalAudioPort());
+        _sessionRecv = NULL;
+        _sessionSend = NULL;
+    }
 }
 
 AudioRtpRTX::~AudioRtpRTX () {
-  _start.wait();
-
-  try {
-    this->terminate();
-  } catch(...) {
-    _debugException("! ARTP: Thread destructor didn't terminate correctly");
-    throw;
-  }
-  _ca = 0;
-  if (!_sym) {
-    delete _sessionRecv; _sessionRecv = NULL;
-    delete _sessionSend; _sessionSend = NULL;
-  } else {
-    delete _session;     _session = NULL;
-  }
- 
-  delete [] micData;  micData = NULL;
-  delete [] micDataConverted;  micDataConverted = NULL;
-  delete [] micDataEncoded;  micDataEncoded = NULL;
-
-  delete [] spkrDataDecoded; spkrDataDecoded = NULL;
-  delete [] spkrDataConverted; spkrDataConverted = NULL;
-
-  delete time; time = NULL;
-
-  delete converter; converter = NULL;
-  
-}
-
-  void
-AudioRtpRTX::initBuffers()
-{
-  converter = new SamplerateConverter( _layerSampleRate , _layerFrameSize );
+    _start.wait();
 
-  int nbSamplesMax = (int) (_layerSampleRate * _layerFrameSize /1000);
+    try {
+        this->terminate();
+    } catch(...) {
+        _debugException("! ARTP: Thread destructor didn't terminate correctly");
+        throw;
+    }
+    _ca = 0;
+    if (!_sym) {
+        delete _sessionRecv; _sessionRecv = NULL;
+        delete _sessionSend; _sessionSend = NULL;
+    } else {
+        delete _session;     _session = NULL;
+    }
 
-  micData = new SFLDataFormat[nbSamplesMax];
-  micDataConverted = new SFLDataFormat[nbSamplesMax];
-  micDataEncoded = new unsigned char[nbSamplesMax];
+    delete [] micData;  micData = NULL;
+    delete [] micDataConverted;  micDataConverted = NULL;
+    delete [] micDataEncoded;  micDataEncoded = NULL;
 
-  spkrDataConverted = new SFLDataFormat[nbSamplesMax];
-  spkrDataDecoded = new SFLDataFormat[nbSamplesMax];
-}
+    delete [] spkrDataDecoded; spkrDataDecoded = NULL;
+    delete [] spkrDataConverted; spkrDataConverted = NULL;
 
-  void
-AudioRtpRTX::initAudioRtpSession (void) 
-{
-  try {
-    if (_ca == 0) { return; }
-    _audiocodec = Manager::instance().getCodecDescriptorMap().getCodec( _ca->getAudioCodec() );
-    _codecSampleRate = _audiocodec->getClockRate();	
-
-    _debug("Init audio RTP session\n");
-    ost::InetHostAddress remote_ip(_ca->getRemoteIp().c_str());
-    if (!remote_ip) {
-      _debug("! ARTP Thread Error: Target IP address [%s] is not correct!\n", _ca->getRemoteIp().data());
-      return;
-    }
+    delete time; time = NULL;
 
+    delete converter; converter = NULL;
 
-    if (!_sym) {
-      _sessionRecv->setSchedulingTimeout (10000);
-      _sessionRecv->setExpireTimeout(1000000);
+}
 
-      _sessionSend->setSchedulingTimeout(10000);
-      _sessionSend->setExpireTimeout(1000000);
-    } else {
-      _session->setSchedulingTimeout(10000);
-      _session->setExpireTimeout(1000000);
-    }
+    void
+AudioRtpRTX::initBuffers()
+{
+    converter = new SamplerateConverter( _layerSampleRate , _layerFrameSize );
 
-    if (!_sym) {
-      if ( !_sessionRecv->addDestination(remote_ip, (unsigned short) _ca->getRemoteAudioPort()) ) {
-	_debug("AudioRTP Thread Error: could not connect to port %d\n",  _ca->getRemoteAudioPort());
-	return;
-      }
-      if (!_sessionSend->addDestination (remote_ip, (unsigned short) _ca->getRemoteAudioPort())) {
-	_debug("! ARTP Thread Error: could not connect to port %d\n",  _ca->getRemoteAudioPort());
-	return;
-      }
-
-      bool payloadIsSet = false;
-      if (_audiocodec) {
-	if (_audiocodec->hasDynamicPayload()) {
-	  payloadIsSet = _sessionRecv->setPayloadFormat(ost::DynamicPayloadFormat((ost::PayloadType) _audiocodec->getPayload(), _audiocodec->getClockRate()));
-	} else {
-	  payloadIsSet= _sessionRecv->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _audiocodec->getPayload()));
-	  payloadIsSet = _sessionSend->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _audiocodec->getPayload()));
-	}
-      }
-      _sessionSend->setMark(true);
-    } else {
+    int nbSamplesMax = (int) (_layerSampleRate * _layerFrameSize /1000);
 
-      //_debug("AudioRTP Thread: Added session destination %s\n", remote_ip.getHostname() );
+    micData = new SFLDataFormat[nbSamplesMax];
+    micDataConverted = new SFLDataFormat[nbSamplesMax];
+    micDataEncoded = new unsigned char[nbSamplesMax];
 
-      if (!_session->addDestination (remote_ip, (unsigned short) _ca->getRemoteAudioPort())) {
-	return;
-      }
+    spkrDataConverted = new SFLDataFormat[nbSamplesMax];
+    spkrDataDecoded = new SFLDataFormat[nbSamplesMax];
+}
 
-      bool payloadIsSet = false;
-      if (_audiocodec) {
-	if (_audiocodec->hasDynamicPayload()) {
-	  payloadIsSet = _session->setPayloadFormat(ost::DynamicPayloadFormat((ost::PayloadType) _audiocodec->getPayload(), _audiocodec->getClockRate()));
-	} else {
-	  payloadIsSet = _session->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _audiocodec->getPayload()));
-	}
-      }
+    void
+AudioRtpRTX::initAudioRtpSession (void) 
+{
+    try {
+        if (_ca == 0) { return; }
+        _audiocodec = Manager::instance().getCodecDescriptorMap().getCodec( _ca->getAudioCodec() );
+        _codecSampleRate = _audiocodec->getClockRate();	
+
+        _debug("Init audio RTP session\n");
+        ost::InetHostAddress remote_ip(_ca->getRemoteIp().c_str());
+        if (!remote_ip) {
+            _debug("! ARTP Thread Error: Target IP address [%s] is not correct!\n", _ca->getRemoteIp().data());
+            return;
+        }
+
+
+        if (!_sym) {
+            _sessionRecv->setSchedulingTimeout (10000);
+            _sessionRecv->setExpireTimeout(1000000);
+
+            _sessionSend->setSchedulingTimeout(10000);
+            _sessionSend->setExpireTimeout(1000000);
+        } else {
+            _session->setSchedulingTimeout(10000);
+            _session->setExpireTimeout(1000000);
+        }
+
+        if (!_sym) {
+            if ( !_sessionRecv->addDestination(remote_ip, (unsigned short) _ca->getRemoteAudioPort()) ) {
+                _debug("AudioRTP Thread Error: could not connect to port %d\n",  _ca->getRemoteAudioPort());
+                return;
+            }
+            if (!_sessionSend->addDestination (remote_ip, (unsigned short) _ca->getRemoteAudioPort())) {
+                _debug("! ARTP Thread Error: could not connect to port %d\n",  _ca->getRemoteAudioPort());
+                return;
+            }
+
+            bool payloadIsSet = false;
+            if (_audiocodec) {
+                if (_audiocodec->hasDynamicPayload()) {
+                    payloadIsSet = _sessionRecv->setPayloadFormat(ost::DynamicPayloadFormat((ost::PayloadType) _audiocodec->getPayload(), _audiocodec->getClockRate()));
+                } else {
+                    payloadIsSet= _sessionRecv->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _audiocodec->getPayload()));
+                    payloadIsSet = _sessionSend->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _audiocodec->getPayload()));
+                }
+            }
+            _sessionSend->setMark(true);
+        } else {
+
+            //_debug("AudioRTP Thread: Added session destination %s\n", remote_ip.getHostname() );
+
+            if (!_session->addDestination (remote_ip, (unsigned short) _ca->getRemoteAudioPort())) {
+                return;
+            }
+
+            bool payloadIsSet = false;
+            if (_audiocodec) {
+                if (_audiocodec->hasDynamicPayload()) {
+                    payloadIsSet = _session->setPayloadFormat(ost::DynamicPayloadFormat((ost::PayloadType) _audiocodec->getPayload(), _audiocodec->getClockRate()));
+                } else {
+                    payloadIsSet = _session->setPayloadFormat(ost::StaticPayloadFormat((ost::StaticPayloadType) _audiocodec->getPayload()));
+                }
+            }
+        }
+
+
+    } catch(...) {
+        _debugException("! ARTP Failure: initialisation failed");
+        throw;
     }
-
-
-  } catch(...) {
-    _debugException("! ARTP Failure: initialisation failed");
-    throw;
-  }
 }
 
-  void
+    void
 AudioRtpRTX::sendSessionFromMic(int timestamp)
 {
-  // STEP:
-  //   1. get data from mic
-  //   2. convert it to int16 - good sample, good rate
-  //   3. encode it
-  //   4. send it
-  //try {
+    // STEP:
+    //   1. get data from mic
+    //   2. convert it to int16 - good sample, good rate
+    //   3. encode it
+    //   4. send it
+    //try {
 
     timestamp += time->getSecond();
     if (_ca==0) { _debug(" !ARTP: No call associated (mic)\n"); return; } // no call, so we do nothing
@@ -278,7 +278,7 @@ AudioRtpRTX::sendSessionFromMic(int timestamp)
     //_debug("get data from mic\n");
     int nbSample = audiolayer->getMic( micData , bytesAvail ) / sizeof(SFLDataFormat);
     int nb_sample_up = nbSample;
-    
+
     // Store the length of the mic buffer in samples for recording
     _nSamplesMic = nbSample;
 
@@ -288,32 +288,32 @@ AudioRtpRTX::sendSessionFromMic(int timestamp)
     nbSample = reSampleData(_audiocodec->getClockRate(), nb_sample_up, DOWN_SAMPLING);	
 
     if ( nbSample < nbSamplesMax - 10 ) { // if only 10 is missing, it's ok
-      // fill end with 0...
-      memset( micDataConverted + nbSample, 0, (nbSamplesMax-nbSample)*sizeof(int16));
-      nbSample = nbSamplesMax;
+        // fill end with 0...
+        memset( micDataConverted + nbSample, 0, (nbSamplesMax-nbSample)*sizeof(int16));
+        nbSample = nbSamplesMax;
     }
     int compSize = _audiocodec->codecEncode( micDataEncoded , micDataConverted , nbSample*sizeof(int16));
     // encode divise by two
     // Send encoded audio sample over the network
     if (compSize > nbSamplesMax) { _debug("! ARTP: %d should be %d\n", compSize, nbSamplesMax);}
     if (!_sym) {
-      _sessionSend->putData(timestamp, micDataEncoded, compSize);
+        _sessionSend->putData(timestamp, micDataEncoded, compSize);
     } else {
-      _session->putData(timestamp, micDataEncoded, compSize);
+        _session->putData(timestamp, micDataEncoded, compSize);
     }
-  /*} catch(...) {
-    _debugException("! ARTP: sending failed");
-    throw;
-  }*/
+    /*} catch(...) {
+      _debugException("! ARTP: sending failed");
+      throw;
+      }*/
 }
 
-  void
+    void
 AudioRtpRTX::receiveSessionForSpkr (int& countTime)
 {
 
 
-  if (_ca == 0) { return; }
-  //try {
+    if (_ca == 0) { return; }
+    //try {
     AudioLayer* audiolayer = Manager::instance().getAudioDriver();
     if (!audiolayer) { return; }
 
@@ -321,13 +321,13 @@ AudioRtpRTX::receiveSessionForSpkr (int& countTime)
     // Get audio data stream
 
     if (!_sym) {
-      adu = _sessionRecv->getData(_sessionRecv->getFirstTimestamp());
+        adu = _sessionRecv->getData(_sessionRecv->getFirstTimestamp());
     } else {
-      adu = _session->getData(_session->getFirstTimestamp());
+        adu = _session->getData(_session->getFirstTimestamp());
     }
     if (adu == NULL) {
-      //_debug("No RTP audio stream\n");
-      return;
+        //_debug("No RTP audio stream\n");
+        return;
     }
 
     //int payload = adu->getType(); // codec type
@@ -338,93 +338,93 @@ AudioRtpRTX::receiveSessionForSpkr (int& countTime)
     unsigned int max = (unsigned int)(_codecSampleRate * _layerFrameSize / 1000);
 
     if ( size > max ) {
-      _debug("We have received from RTP a packet larger than expected: %d VS %d\n", size, max);
-      _debug("The packet size has been cropped\n");
-      size=max;
+        _debug("We have received from RTP a packet larger than expected: %d VS %d\n", size, max);
+        _debug("The packet size has been cropped\n");
+        size=max;
     }
 
     if (_audiocodec != NULL) {
-  
-
-      int expandedSize = _audiocodec->codecDecode( spkrDataDecoded , spkrData , size );
-      //buffer _receiveDataDecoded ----> short int or int16, coded on 2 bytes
-      int nbInt16 = expandedSize / sizeof(int16);
-      //nbInt16 represents the number of samples we just decoded
-      if ((unsigned int)nbInt16 > max) {
-	_debug("We have decoded an RTP packet larger than expected: %d VS %d. Cropping.\n", nbInt16, max);
-	nbInt16=max;
-      }
-      int nbSample = nbInt16;
-
-      // Do sample rate conversion
-      int nb_sample_down = nbSample;
-      nbSample = reSampleData(_codecSampleRate , nb_sample_down, UP_SAMPLING);
+
+
+        int expandedSize = _audiocodec->codecDecode( spkrDataDecoded , spkrData , size );
+        //buffer _receiveDataDecoded ----> short int or int16, coded on 2 bytes
+        int nbInt16 = expandedSize / sizeof(int16);
+        //nbInt16 represents the number of samples we just decoded
+        if ((unsigned int)nbInt16 > max) {
+            _debug("We have decoded an RTP packet larger than expected: %d VS %d. Cropping.\n", nbInt16, max);
+            nbInt16=max;
+        }
+        int nbSample = nbInt16;
+
+        // Do sample rate conversion
+        int nb_sample_down = nbSample;
+        nbSample = reSampleData(_codecSampleRate , nb_sample_down, UP_SAMPLING);
 #ifdef DATAFORMAT_IS_FLOAT
 #else
 #endif
-    
-      // Stor the number of samples for recording
-      _nSamplesSpkr = nbSample;
-        
-    //audiolayer->playSamples( spkrDataConverted, nbSample * sizeof(SFLDataFormat), true);
-    audiolayer->putMain (spkrDataConverted, nbSample * sizeof(SFLDataFormat));
-      
-
-      // Notify (with a beep) an incoming call when there is already a call 
-      countTime += time->getSecond();
-      if (Manager::instance().incomingCallWaiting() > 0) {
-	countTime = countTime % 500; // more often...
-	if (countTime == 0) {
-	  Manager::instance().notificationIncomingCall();
-	}
-      }
+
+        // Store the number of samples for recording
+        _nSamplesSpkr = nbSample;
+
+        //audiolayer->playSamples( spkrDataConverted, nbSample * sizeof(SFLDataFormat), true);
+        audiolayer->putMain (spkrDataConverted, nbSample * sizeof(SFLDataFormat));
+
+
+        // Notify (with a beep) an incoming call when there is already a call 
+        countTime += time->getSecond();
+        if (Manager::instance().incomingCallWaiting() > 0) {
+            countTime = countTime % 500; // more often...
+            if (countTime == 0) {
+                Manager::instance().notificationIncomingCall();
+            }
+        }
 
     } else {
-      countTime += time->getSecond();
+        countTime += time->getSecond();
     }
     delete adu; adu = NULL;
-  //} catch(...) {
+    //} catch(...) {
     //_debugException("! ARTP: receiving failed");
     //throw;
-  //}
+    //}
 
 }
 
-  int 
+    int 
 AudioRtpRTX::reSampleData(int sampleRate_codec, int nbSamples, int status)
 {
-  if(status==UP_SAMPLING){
-    return converter->upsampleData( spkrDataDecoded , spkrDataConverted , sampleRate_codec , _layerSampleRate , nbSamples );
-  }
-  else if(status==DOWN_SAMPLING){
-    return converter->downsampleData( micData , micDataConverted , sampleRate_codec , _layerSampleRate , nbSamples );
-  }
-  else
-    return 0;
+    if(status==UP_SAMPLING){
+        return converter->upsampleData( spkrDataDecoded , spkrDataConverted , sampleRate_codec , _layerSampleRate , nbSamples );
+    }
+    else if(status==DOWN_SAMPLING){
+        return converter->downsampleData( micData , micDataConverted , sampleRate_codec , _layerSampleRate , nbSamples );
+    }
+    else
+        return 0;
 }
 
 void
 AudioRtpRTX::run () {
-  //mic, we receive from soundcard in stereo, and we send encoded
-  //encoding before sending
-  AudioLayer *audiolayer = Manager::instance().getAudioDriver();
-  _layerFrameSize = audiolayer->getFrameSize(); // en ms
-  _layerSampleRate = audiolayer->getSampleRate();	
-  initBuffers();
-  int step; 
-
-  //try {
+    //mic, we receive from soundcard in stereo, and we send encoded
+    //encoding before sending
+    AudioLayer *audiolayer = Manager::instance().getAudioDriver();
+    _layerFrameSize = audiolayer->getFrameSize(); // en ms
+    _layerSampleRate = audiolayer->getSampleRate();	
+    initBuffers();
+    int step; 
+
+    //try {
     // Init the session
     initAudioRtpSession();
     step = (int) (_layerFrameSize * _codecSampleRate / 1000);
     // start running the packet queue scheduler.
     //_debug("AudioRTP Thread started\n");
     if (!_sym) {
-      _sessionRecv->startRunning();
-      _sessionSend->startRunning();
+        _sessionRecv->startRunning();
+        _sessionSend->startRunning();
     } else {
-      _session->startRunning();
-      //_debug("Session is now: %d active\n", _session->isActive());
+        _session->startRunning();
+        //_debug("Session is now: %d active\n", _session->isActive());
     }
 
     int timestamp = 0; // for mic
@@ -435,36 +435,25 @@ AudioRtpRTX::run () {
     _start.post();
     _debug("- ARTP Action: Start\n");
     while (!testCancel()) {
-      ////////////////////////////
-      // Send session
-      ////////////////////////////
-      sendSessionFromMic(timestamp);
-      timestamp += step;
-      ////////////////////////////
-      // Recv session
-      ////////////////////////////
-      receiveSessionForSpkr(countTime);
-      // Let's wait for the next transmit cycle
-
-      _ca->recAudio.recData(spkrDataConverted,micData,_nSamplesSpkr,_nSamplesMic);
-
-      Thread::sleep(TimerPort::getTimer()); 
-      TimerPort::incTimer(_layerFrameSize); // 'frameSize' ms
+        ////////////////////////////
+        // Send session
+        ////////////////////////////
+        sendSessionFromMic(timestamp);
+        timestamp += step;
+        ////////////////////////////
+        // Recv session
+        ////////////////////////////
+        receiveSessionForSpkr(countTime);
+        // Let's wait for the next transmit cycle
+
+        _ca->recAudio.recData(spkrDataConverted,micData,_nSamplesSpkr,_nSamplesMic);
+
+        Thread::sleep(TimerPort::getTimer()); 
+        TimerPort::incTimer(_layerFrameSize); // 'frameSize' ms
     }
 
-    // _debug("stop stream for audiortp loop\n");
     audiolayer->stopStream();
     _debug("- ARTP Action: Stop\n");
-  //} catch(std::exception &e) {
-    //_start.post();
-    //_debug("! ARTP: Stop %s\n", e.what());
-    //throw;
-  //} catch(...) {
-    //_start.post();
-    //_debugException("* ARTP Action: Stop");
-    //throw;
-  //}
-    
 }
 
 
diff --git a/src/audio/pulselayer.cpp b/src/audio/pulselayer.cpp
index a3566314fb0d4ad4a05f1b6ec961f0eac334d672..a8ae5937fa270ec0e251cc8babcf0838545cc8e9 100644
--- a/src/audio/pulselayer.cpp
+++ b/src/audio/pulselayer.cpp
@@ -25,20 +25,20 @@ int PulseLayer::streamState;
 
 static  void audioCallback ( pa_stream* s, size_t bytes, void* userdata )
 { 
-  assert( s && bytes );
-  assert( bytes > 0 );
-  static_cast<PulseLayer*>(userdata)->processData();
+    assert( s && bytes );
+    assert( bytes > 0 );
+    static_cast<PulseLayer*>(userdata)->processData();
 }
-  
+
     PulseLayer::PulseLayer(ManagerImpl* manager)
-  : AudioLayer( manager , PULSEAUDIO ) 
-  , context(NULL)
-  , m(NULL)
-  , playback()
-  , record()
+    : AudioLayer( manager , PULSEAUDIO ) 
+    , context(NULL)
+    , m(NULL)
+    , playback()
+      , record()
 {
-  PulseLayer::streamState = 0;
-  _debug("Pulse audio constructor: Create context\n");
+    PulseLayer::streamState = 0;
+    _debug("Pulse audio constructor: Create context\n");
 }
 
 // Destructor
@@ -50,324 +50,324 @@ PulseLayer::~PulseLayer (void)
     pa_context_unref( context );
 }
 
-  void
+    void
 PulseLayer::closeLayer( void )
 { 
-  _debug(" Destroy pulselayer\n");
-  
-  playback->disconnect(); 
-  record->disconnect();
- 
-  while(PulseLayer::streamState != 2)
-    ;
-  PulseLayer::streamState = 0; 
+    _debug(" Destroy pulselayer\n");
+
+    playback->disconnect(); 
+    record->disconnect();
+
+    while(PulseLayer::streamState != 2)
+        ;
+    PulseLayer::streamState = 0; 
 
-  //TODO  Remove this ugly hack
-  sleep(2);
+    //TODO  Remove this ugly hack
+    sleep(2);
 }
 
-  void
+    void
 PulseLayer::connectPulseAudioServer( void )
 {
-  pa_context_flags_t flag = PA_CONTEXT_NOAUTOSPAWN ;  
+    pa_context_flags_t flag = PA_CONTEXT_NOAUTOSPAWN ;  
 
-  pa_threaded_mainloop_lock( m );
+    pa_threaded_mainloop_lock( m );
 
-  _debug("Connect the context to the server\n");
-  pa_context_connect( context, NULL , flag , NULL ); 
+    _debug("Connect the context to the server\n");
+    pa_context_connect( context, NULL , flag , NULL ); 
 
-  pa_context_set_state_callback(context, context_state_callback, this);
-  pa_threaded_mainloop_wait( m );
+    pa_context_set_state_callback(context, context_state_callback, this);
+    pa_threaded_mainloop_wait( m );
 
-  // Run the main loop
-  if( pa_context_get_state( context ) != PA_CONTEXT_READY ){
-    _debug("Error connecting to pulse audio server\n");
-    pa_threaded_mainloop_unlock( m );
-  }
+    // Run the main loop
+    if( pa_context_get_state( context ) != PA_CONTEXT_READY ){
+        _debug("Error connecting to pulse audio server\n");
+        pa_threaded_mainloop_unlock( m );
+    }
 
-  pa_threaded_mainloop_unlock( m );
-  //serverinfo();
-  //muteAudioApps(99);
-  _debug("Context creation done\n");
+    pa_threaded_mainloop_unlock( m );
+    //serverinfo();
+    //muteAudioApps(99);
+    _debug("Context creation done\n");
 
 }
 
 void PulseLayer::context_state_callback( pa_context* c, void* user_data )
 {
-  _debug("The state of the context changed\n");
-  PulseLayer* pulse = (PulseLayer*)user_data;
-  assert(c && pulse->m);
-  switch(pa_context_get_state(c)){
-    case PA_CONTEXT_CONNECTING:
-    case PA_CONTEXT_AUTHORIZING:
-    case PA_CONTEXT_SETTING_NAME:
-      _debug("Waiting....\n");
-      break;
-    case PA_CONTEXT_READY:
-      pulse->createStreams( c );
-      _debug("Connection to PulseAudio server established\n");	
-      break;
-    case PA_CONTEXT_TERMINATED:
-      _debug("Context terminated\n");
-      break;
-    case PA_CONTEXT_FAILED:
-    default:
-      _debug(" Error : %s\n" , pa_strerror(pa_context_errno(c)));
-      pulse->disconnectPulseAudioServer();
-      exit(0);
-      break;
-  }
+    _debug("The state of the context changed\n");
+    PulseLayer* pulse = (PulseLayer*)user_data;
+    assert(c && pulse->m);
+    switch(pa_context_get_state(c)){
+        case PA_CONTEXT_CONNECTING:
+        case PA_CONTEXT_AUTHORIZING:
+        case PA_CONTEXT_SETTING_NAME:
+            _debug("Waiting....\n");
+            break;
+        case PA_CONTEXT_READY:
+            pulse->createStreams( c );
+            _debug("Connection to PulseAudio server established\n");	
+            break;
+        case PA_CONTEXT_TERMINATED:
+            _debug("Context terminated\n");
+            break;
+        case PA_CONTEXT_FAILED:
+        default:
+            _debug(" Error : %s\n" , pa_strerror(pa_context_errno(c)));
+            pulse->disconnectPulseAudioServer();
+            exit(0);
+            break;
+    }
 }
 
 void PulseLayer::disconnectPulseAudioServer( void )
 {
-  if( playback )
-    delete playback; playback=NULL;
+    if( playback )
+        delete playback; playback=NULL;
 
-  if( record )
-    delete record; record=NULL;
+    if( record )
+        delete record; record=NULL;
 }
 
-  void
+    void
 PulseLayer::createStreams( pa_context* c )
 {
-  playback = new AudioStream(c, PLAYBACK_STREAM, PLAYBACK_STREAM_NAME, _manager->getSpkrVolume());
-  pa_stream_set_write_callback( playback->pulseStream() , audioCallback, this);
-  //pa_stream_set_overflow_callback( playback->pulseStream() , overflow , this);
-  record = new AudioStream(c, CAPTURE_STREAM, CAPTURE_STREAM_NAME , _manager->getMicVolume());
-  pa_stream_set_read_callback( record->pulseStream() , audioCallback, this);
-  //pa_stream_set_underflow_callback( record->pulseStream() , underflow , this);
+    playback = new AudioStream(c, PLAYBACK_STREAM, PLAYBACK_STREAM_NAME, _manager->getSpkrVolume());
+    pa_stream_set_write_callback( playback->pulseStream() , audioCallback, this);
+    //pa_stream_set_overflow_callback( playback->pulseStream() , overflow , this);
+    record = new AudioStream(c, CAPTURE_STREAM, CAPTURE_STREAM_NAME , _manager->getMicVolume());
+    pa_stream_set_read_callback( record->pulseStream() , audioCallback, this);
+    //pa_stream_set_underflow_callback( record->pulseStream() , underflow , this);
 
-  pa_threaded_mainloop_signal(m , 0);
+    pa_threaded_mainloop_signal(m , 0);
 }
 
-  bool 
+    bool 
 PulseLayer::openDevice(int indexIn UNUSED, int indexOut UNUSED, int sampleRate, int frameSize , int stream UNUSED, std::string plugin UNUSED) 
 {
-  _sampleRate = sampleRate;
-  _frameSize = frameSize;	
+    _sampleRate = sampleRate;
+    _frameSize = frameSize;	
 
-  m = pa_threaded_mainloop_new();
-  assert(m);
+    m = pa_threaded_mainloop_new();
+    assert(m);
 
-  if( pa_threaded_mainloop_start( m ) < 0  ){
-    _debug("Failed starting the mainloop\n");
-  }
+    if( pa_threaded_mainloop_start( m ) < 0  ){
+        _debug("Failed starting the mainloop\n");
+    }
 
-  // Instanciate a context
-  if( !(context = pa_context_new( pa_threaded_mainloop_get_api( m ) , "SFLphone" )))
-    _debug("Error while creating the context\n");
+    // Instantiate a context
+    if( !(context = pa_context_new( pa_threaded_mainloop_get_api( m ) , "SFLphone" )))
+        _debug("Error while creating the context\n");
 
-  assert(context);
+    assert(context);
 
-  connectPulseAudioServer();
+    connectPulseAudioServer();
 
-  _debug("Connection Done!! \n");
-  return true;
+    _debug("Connection Done!! \n");
+    return true;
 }
 
-  void 
+    void 
 PulseLayer::closeCaptureStream( void )
 {
 }
 
-  void 
+    void 
 PulseLayer::closePlaybackStream( void )
 {
 }
 
-  int
+    int
 PulseLayer::canGetMic()
 {
-  if( record )
-    return  _micRingBuffer.AvailForGet();
-  else
-    return 0;
+    if( record )
+        return  _micRingBuffer.AvailForGet();
+    else
+        return 0;
 }
 
-  int 
+    int 
 PulseLayer::getMic(void *buffer, int toCopy)
 {
-  if( record ){
-    return _micRingBuffer.Get(buffer, toCopy, 100);
-  }
-  else
-    return 0;
+    if( record ){
+        return _micRingBuffer.Get(buffer, toCopy, 100);
+    }
+    else
+        return 0;
 }
 
-  void 
+    void 
 PulseLayer::startStream (void) 
 {
-  _micRingBuffer.flush();
-  _debug("Start stream\n");
-  pa_threaded_mainloop_lock(m);
-  pa_stream_cork( record->pulseStream(), NULL, NULL, NULL);
-  pa_threaded_mainloop_unlock(m);
+    flushMic();
+    _debug("Start stream\n");
+    pa_threaded_mainloop_lock(m);
+    pa_stream_cork( record->pulseStream(), NULL, NULL, NULL);
+    pa_threaded_mainloop_unlock(m);
 
 }
 
-  void 
+    void 
 PulseLayer::stopStream (void) 
 {
-  _debug("Stop stream\n");
-  pa_stream_flush( playback->pulseStream(), NULL, NULL );
-  pa_stream_flush( record->pulseStream(), NULL, NULL );
-  flushMic();
-  flushMain();
-  flushUrgent();
+    _debug("Stop stream\n");
+    pa_stream_flush( playback->pulseStream(), NULL, NULL );
+    pa_stream_flush( record->pulseStream(), NULL, NULL );
+    flushMic();
+    flushMain();
+    flushUrgent();
 }
 
 
 
-  void 
+    void 
 PulseLayer::underflow ( pa_stream* s UNUSED,  void* userdata UNUSED )
 { 
-  _debug("Buffer Underflow\n");
+    _debug("Buffer Underflow\n");
 }
 
 
-  void 
+    void 
 PulseLayer::overflow ( pa_stream* s, void* userdata UNUSED )
 { 
-  //PulseLayer* pulse = (PulseLayer*) userdata;
-  pa_stream_drop( s );
-  pa_stream_trigger( s, NULL, NULL);
+    //PulseLayer* pulse = (PulseLayer*) userdata;
+    pa_stream_drop( s );
+    pa_stream_trigger( s, NULL, NULL);
 }
 
-  void
+    void
 PulseLayer::processData( void )
 {
-  // Handle the mic
-  // We check if the stream is ready
-  if( (record->pulseStream()) && pa_stream_get_state( record->pulseStream()) == PA_STREAM_READY) 
-    readFromMic();
+    // Handle the mic
+    // We check if the stream is ready
+    if( (record->pulseStream()) && pa_stream_get_state( record->pulseStream()) == PA_STREAM_READY) 
+        readFromMic();
 
-  // Handle the data for the speakers
-  if( (playback->pulseStream()) && pa_stream_get_state( playback->pulseStream()) == PA_STREAM_READY){
+    // Handle the data for the speakers
+    if( (playback->pulseStream()) && pa_stream_get_state( playback->pulseStream()) == PA_STREAM_READY){
 
-    // If the playback buffer is full, we don't overflow it; wait for it to have free space
-    if( pa_stream_writable_size(playback->pulseStream()) == 0 )
-      return;
+        // If the playback buffer is full, we don't overflow it; wait for it to have free space
+        if( pa_stream_writable_size(playback->pulseStream()) == 0 )
+            return;
 
-    writeToSpeaker();
-  }
+        writeToSpeaker();
+    }
 }
 
-void
+    void
 PulseLayer::writeToSpeaker( void )
 {   
-  /** Bytes available in the urgent ringbuffer ( reserved for DTMF ) */
-  int urgentAvail; 
-  /** Bytes available in the regular ringbuffer ( reserved for voice ) */
-  int normalAvail; 
-  int toGet;
-  int toPlay;
-
-  SFLDataFormat* out;// = (SFLDataFormat*)pa_xmalloc(framesPerBuffer);
-  urgentAvail = _urgentRingBuffer.AvailForGet();
-
-  if (urgentAvail > 0) {
-    // Urgent data (dtmf, incoming call signal) come first.		
-    //_debug("Play urgent!: %i\e" , urgentAvail);
-    toGet = (urgentAvail < (int)(framesPerBuffer * sizeof(SFLDataFormat))) ? urgentAvail : framesPerBuffer * sizeof(SFLDataFormat);
-    out =  (SFLDataFormat*)pa_xmalloc(toGet * sizeof(SFLDataFormat) );
-    _urgentRingBuffer.Get(out, toGet, 100);
-    pa_stream_write( playback->pulseStream() , out , toGet  , pa_xfree, 0 , PA_SEEK_RELATIVE);
-    // Consume the regular one as well (same amount of bytes)
-    _voiceRingBuffer.Discard(toGet);
-  }
-  else
-  {
-    AudioLoop* tone = _manager->getTelephoneTone();
-    if ( tone != 0) {
-      toGet = framesPerBuffer;
-      out =  (SFLDataFormat*)pa_xmalloc(toGet * sizeof(SFLDataFormat) );
-      tone->getNext(out, toGet , 100);
-      pa_stream_write( playback->pulseStream() , out , toGet  * sizeof(SFLDataFormat)   , pa_xfree, 0 , PA_SEEK_RELATIVE);
-    } 
-    if ( (tone=_manager->getTelephoneFile()) != 0 ) {
-      toGet = framesPerBuffer;
-      toPlay = ( (int)(toGet * sizeof(SFLDataFormat)) > framesPerBuffer )? framesPerBuffer : toGet * sizeof(SFLDataFormat) ;
-      out =  (SFLDataFormat*)pa_xmalloc(toPlay);
-      tone->getNext(out, toPlay/2 , 100);
-      pa_stream_write( playback->pulseStream() , out , toPlay   , pa_xfree, 0 , PA_SEEK_RELATIVE) ; 
-    } 
-    else {
-      out =  (SFLDataFormat*)pa_xmalloc(framesPerBuffer * sizeof(SFLDataFormat));
-      normalAvail = _voiceRingBuffer.AvailForGet();
-      toGet = (normalAvail < (int)(framesPerBuffer * sizeof(SFLDataFormat))) ? normalAvail : framesPerBuffer * sizeof(SFLDataFormat);
-      if (toGet) {
-	    _voiceRingBuffer.Get(out, toGet, 100);
-	    _voiceRingBuffer.Discard(toGet);
-      } 
-      else {
-	    bzero(out, framesPerBuffer * sizeof(SFLDataFormat));
-      }
-      pa_stream_write( playback->pulseStream() , out , toGet  , NULL, 0 , PA_SEEK_RELATIVE);
-      pa_xfree(out);
+    /** Bytes available in the urgent ringbuffer ( reserved for DTMF ) */
+    int urgentAvail; 
+    /** Bytes available in the regular ringbuffer ( reserved for voice ) */
+    int normalAvail; 
+    int toGet;
+    int toPlay;
+
+    SFLDataFormat* out;// = (SFLDataFormat*)pa_xmalloc(framesPerBuffer);
+    urgentAvail = _urgentRingBuffer.AvailForGet();
+
+    if (urgentAvail > 0) {
+        // Urgent data (dtmf, incoming call signal) come first.		
+        //_debug("Play urgent!: %i\e" , urgentAvail);
+        toGet = (urgentAvail < (int)(framesPerBuffer * sizeof(SFLDataFormat))) ? urgentAvail : framesPerBuffer * sizeof(SFLDataFormat);
+        out =  (SFLDataFormat*)pa_xmalloc(toGet * sizeof(SFLDataFormat) );
+        _urgentRingBuffer.Get(out, toGet, 100);
+        pa_stream_write( playback->pulseStream() , out , toGet  , pa_xfree, 0 , PA_SEEK_RELATIVE);
+        // Consume the regular one as well (same amount of bytes)
+        _voiceRingBuffer.Discard(toGet);
+    }
+    else
+    {
+        AudioLoop* tone = _manager->getTelephoneTone();
+        if ( tone != 0) {
+            toGet = framesPerBuffer;
+            out =  (SFLDataFormat*)pa_xmalloc(toGet * sizeof(SFLDataFormat) );
+            tone->getNext(out, toGet , 100);
+            pa_stream_write( playback->pulseStream() , out , toGet  * sizeof(SFLDataFormat)   , pa_xfree, 0 , PA_SEEK_RELATIVE);
+        } 
+        if ( (tone=_manager->getTelephoneFile()) != 0 ) {
+            toGet = framesPerBuffer;
+            toPlay = ( (int)(toGet * sizeof(SFLDataFormat)) > framesPerBuffer )? framesPerBuffer : toGet * sizeof(SFLDataFormat) ;
+            out =  (SFLDataFormat*)pa_xmalloc(toPlay);
+            tone->getNext(out, toPlay/2 , 100);
+            pa_stream_write( playback->pulseStream() , out , toPlay   , pa_xfree, 0 , PA_SEEK_RELATIVE) ; 
+        } 
+        else {
+            out =  (SFLDataFormat*)pa_xmalloc(framesPerBuffer * sizeof(SFLDataFormat));
+            normalAvail = _voiceRingBuffer.AvailForGet();
+            toGet = (normalAvail < (int)(framesPerBuffer * sizeof(SFLDataFormat))) ? normalAvail : framesPerBuffer * sizeof(SFLDataFormat);
+            if (toGet) {
+                _voiceRingBuffer.Get(out, toGet, 100);
+                _voiceRingBuffer.Discard(toGet);
+            } 
+            else {
+                bzero(out, framesPerBuffer * sizeof(SFLDataFormat));
+            }
+            pa_stream_write( playback->pulseStream() , out , toGet  , NULL, 0 , PA_SEEK_RELATIVE);
+            pa_xfree(out);
+        }
     }
-  }
 
 }
- 
+
 void PulseLayer::readFromMic( void )
 {
-  const char* data;
-  size_t r;
+    const char* data;
+    size_t r;
 
-  if( pa_stream_peek( record->pulseStream() , (const void**)&data , &r ) < 0 || !data ){
-    //_debug("pa_stream_peek() failed: %s\n" , pa_strerror( pa_context_errno( context) ));
-  }
+    if( pa_stream_peek( record->pulseStream() , (const void**)&data , &r ) < 0 || !data ){
+        //_debug("pa_stream_peek() failed: %s\n" , pa_strerror( pa_context_errno( context) ));
+    }
 
-  if( data != 0 ){
-    _micRingBuffer.Put( (void*)data ,r, 100);
-  }
+    if( data != 0 ){
+        _micRingBuffer.Put( (void*)data ,r, 100);
+    }
 
-  if( pa_stream_drop( record->pulseStream() ) < 0 ) {
-    //_debug("pa_stream_drop() failed: %s\n" , pa_strerror( pa_context_errno( context) ));
-  }
+    if( pa_stream_drop( record->pulseStream() ) < 0 ) {
+        //_debug("pa_stream_drop() failed: %s\n" , pa_strerror( pa_context_errno( context) ));
+    }
 }
 
 static void retrieve_server_info(pa_context *c UNUSED, const pa_server_info *i, void *userdata UNUSED)
 {
-  _debug("Server Info: Process owner : %s\n" , i->user_name);  
-  _debug("\t\tServer name : %s - Server version = %s\n" , i->server_name, i->server_version);  
-  _debug("\t\tDefault sink name : %s\n" , i->default_sink_name);  
-  _debug("\t\tDefault source name : %s\n" , i->default_source_name);  
+    _debug("Server Info: Process owner : %s\n" , i->user_name);  
+    _debug("\t\tServer name : %s - Server version = %s\n" , i->server_name, i->server_version);  
+    _debug("\t\tDefault sink name : %s\n" , i->default_sink_name);  
+    _debug("\t\tDefault source name : %s\n" , i->default_source_name);  
 }
 
 static void reduce_sink_list_cb(pa_context *c UNUSED, const pa_sink_input_info *i, int eol, void *userdata)
 {
-  PulseLayer* pulse = (PulseLayer*) userdata;
-  if( !eol ){
-    //_debug("Sink Info: index : %i\n" , i->index);  
-    //_debug("\t\tClient : %i\n" , i->client); 
-    //_debug("\t\tVolume : %i\n" , i->volume.values[0]); 
-    //_debug("\t\tChannels : %i\n" , i->volume.channels); 
-    if( strcmp( i->name , PLAYBACK_STREAM_NAME ) != 0)
-      pulse->setSinkVolume( i->index , i->volume.channels, 10 );
-  }  
+    PulseLayer* pulse = (PulseLayer*) userdata;
+    if( !eol ){
+        //_debug("Sink Info: index : %i\n" , i->index);  
+        //_debug("\t\tClient : %i\n" , i->client); 
+        //_debug("\t\tVolume : %i\n" , i->volume.values[0]); 
+        //_debug("\t\tChannels : %i\n" , i->volume.channels); 
+        if( strcmp( i->name , PLAYBACK_STREAM_NAME ) != 0)
+            pulse->setSinkVolume( i->index , i->volume.channels, 10 );
+    }  
 }
 
 static void restore_sink_list_cb(pa_context *c UNUSED, const pa_sink_input_info *i, int eol, void *userdata)
 {
-  PulseLayer* pulse = (PulseLayer*) userdata;
-  if( !eol ){
-    //_debug("Sink Info: index : %i\n" , i->index);  
-    //_debug("\t\tSink name : -%s-\n" , i->name);  
-    //_debug("\t\tClient : %i\n" , i->client); 
-    //_debug("\t\tVolume : %i\n" , i->volume.values[0]); 
-    //_debug("\t\tChannels : %i\n" , i->volume.channels); 
-    if( strcmp( i->name , PLAYBACK_STREAM_NAME ) != 0)
-      pulse->setSinkVolume( i->index , i->volume.channels, 100);
-  }  
+    PulseLayer* pulse = (PulseLayer*) userdata;
+    if( !eol ){
+        //_debug("Sink Info: index : %i\n" , i->index);  
+        //_debug("\t\tSink name : -%s-\n" , i->name);  
+        //_debug("\t\tClient : %i\n" , i->client); 
+        //_debug("\t\tVolume : %i\n" , i->volume.values[0]); 
+        //_debug("\t\tChannels : %i\n" , i->volume.channels); 
+        if( strcmp( i->name , PLAYBACK_STREAM_NAME ) != 0)
+            pulse->setSinkVolume( i->index , i->volume.channels, 100);
+    }  
 }
 
 static void set_playback_volume_cb(pa_context *c UNUSED, const pa_sink_input_info *i, int eol, void *userdata)
 {
     PulseLayer* pulse;
     int volume;
-  
+
     pulse = (PulseLayer*) userdata;
     volume = pulse->getSpkrVolume();
 
@@ -381,7 +381,7 @@ static void set_capture_volume_cb(pa_context *c UNUSED, const pa_source_output_i
 {
     PulseLayer* pulse;
     int volume;
-  
+
     pulse = (PulseLayer*) userdata;
     volume = pulse->getMicVolume();
 
@@ -391,45 +391,45 @@ static void set_capture_volume_cb(pa_context *c UNUSED, const pa_source_output_i
     }  
 }
 
-  void
+    void
 PulseLayer::reducePulseAppsVolume( void )
 {
-  pa_context_get_sink_input_info_list( context , reduce_sink_list_cb , this );
+    pa_context_get_sink_input_info_list( context , reduce_sink_list_cb , this );
 }
 
-  void
+    void
 PulseLayer::restorePulseAppsVolume( void )
 {
-  pa_context_get_sink_input_info_list( context , restore_sink_list_cb , this );
+    pa_context_get_sink_input_info_list( context , restore_sink_list_cb , this );
 }
 
-  void
+    void
 PulseLayer::serverinfo( void )
 {
-  pa_context_get_server_info( context , retrieve_server_info , NULL );
+    pa_context_get_server_info( context , retrieve_server_info , NULL );
 }
 
 
 void PulseLayer::setSinkVolume( int index, int channels, int volume )
 {
-    
+
     pa_cvolume cvolume;
     pa_volume_t vol = PA_VOLUME_NORM * ((double)volume / 100) ;
 
     pa_cvolume_set( &cvolume , channels , vol);
-   _debug("Set sink volume of index %i\n" , index);
-   pa_context_set_sink_input_volume( context, index, &cvolume, NULL, NULL) ;
+    _debug("Set sink volume of index %i\n" , index);
+    pa_context_set_sink_input_volume( context, index, &cvolume, NULL, NULL) ;
 
 }
 
 void PulseLayer::setSourceVolume( int index, int channels, int volume )
 {
-    
+
     pa_cvolume cvolume;
     pa_volume_t vol = PA_VOLUME_NORM * ((double)volume / 100) ;
 
     pa_cvolume_set( &cvolume , channels , vol);
-   _debug("Set source volume of index %i\n" , index);
+    _debug("Set source volume of index %i\n" , index);
     pa_context_set_source_volume_by_index(context, index, &cvolume, NULL, NULL);
 
 }
diff --git a/src/iaxvoiplink.cpp b/src/iaxvoiplink.cpp
index e7f8ec57685ce46cadd8436c1067c48a3b81a73d..f5c65eea29615baf338a9346a120ab5c87d06fd1 100644
--- a/src/iaxvoiplink.cpp
+++ b/src/iaxvoiplink.cpp
@@ -429,7 +429,6 @@ IAXVoIPLink::answer(const CallID& id)
     call->setConnectionState(Call::Connected);
     // Start audio
     audiolayer->startStream();
-    audiolayer->flushMic();
 
     return true;
 }
@@ -701,7 +700,6 @@ IAXVoIPLink::iaxHandleCallEvent(iax_event* event, IAXCall* call)
                 }
 
                 Manager::instance().peerAnsweredCall(id);
-                audiolayer->flushMic();
                 // start audio here?
             } else {
                 // deja connecté ?
@@ -805,7 +803,7 @@ IAXVoIPLink::iaxHandleVoiceEvent(iax_event* event, IAXCall* call)
         // resample
         nbInt16 = converter->upsampleData( spkrDataDecoded , spkrDataConverted , ac->getClockRate() , audiolayer->getSampleRate() , nbSample_);
 
-        //audiolayer->playSamples( spkrDataConverted , nbInt16 * sizeof(SFLDataFormat), true);
+        /* Write the decoded data to the main (speaker) ring buffer */
         audiolayer->putMain (spkrDataConverted , nbInt16 * sizeof(SFLDataFormat));
 
     } else {