Commit 222ce2c0 authored by Philippe Gorley's avatar Philippe Gorley

audiobuffer: use raii when dealing with AVFrame

Returns AudioFrame instead of a raw AVFrame pointer when calling
AudioBuffer.toAVFrame, and takes AudioFrame as a parameter when appending
an AVFrame to an AudioBuffer.

No longer need to free AVFrame during encoding/conversions.

Change-Id: I28aa992a5483f84f6cb1a5157718c11c3a69518c
parent 34e72c5a
...@@ -95,7 +95,8 @@ AudioInput::process() ...@@ -95,7 +95,8 @@ AudioInput::process()
resampled = micData_; resampled = micData_;
} }
AVFrame* frame = resampled.toAVFrame(); auto audioFrame = resampled.toAVFrame();
auto frame = audioFrame->pointer();
auto ms = MediaStream("a:local", format_); auto ms = MediaStream("a:local", format_);
frame->pts = getNextTimestamp(sent_samples, ms.sampleRate, static_cast<rational<int64_t>>(ms.timeBase)); frame->pts = getNextTimestamp(sent_samples, ms.sampleRate, static_cast<rational<int64_t>>(ms.timeBase));
sent_samples += frame->nb_samples; sent_samples += frame->nb_samples;
......
...@@ -209,7 +209,8 @@ AudioSender::process() ...@@ -209,7 +209,8 @@ AudioSender::process()
if (muteState_) // audio is muted, set samples to 0 if (muteState_) // audio is muted, set samples to 0
buffer.reset(); buffer.reset();
AVFrame* frame = buffer.toAVFrame(); auto audioFrame = buffer.toAVFrame();
auto frame = audioFrame->pointer();
auto ms = MediaStream("a:local", buffer.getFormat()); auto ms = MediaStream("a:local", buffer.getFormat());
frame->pts = getNextTimestamp(sent_samples, ms.sampleRate, static_cast<rational<int64_t>>(ms.timeBase)); frame->pts = getNextTimestamp(sent_samples, ms.sampleRate, static_cast<rational<int64_t>>(ms.timeBase));
ms.firstTimestamp = frame->pts; ms.firstTimestamp = frame->pts;
...@@ -221,7 +222,7 @@ AudioSender::process() ...@@ -221,7 +222,7 @@ AudioSender::process()
rec->recordData(frame, ms); rec->recordData(frame, ms);
} }
if (audioEncoder_->encodeAudio(frame) < 0) if (audioEncoder_->encodeAudio(*audioFrame) < 0)
RING_ERR("encoding failed"); RING_ERR("encoding failed");
} }
......
...@@ -290,15 +290,11 @@ size_t AudioBuffer::copy(AudioSample* in, size_t sample_num, size_t pos_out /* = ...@@ -290,15 +290,11 @@ size_t AudioBuffer::copy(AudioSample* in, size_t sample_num, size_t pos_out /* =
return sample_num; return sample_num;
} }
AVFrame* std::unique_ptr<AudioFrame>
AudioBuffer::toAVFrame() const AudioBuffer::toAVFrame() const
{ {
AVFrame* frame = av_frame_alloc(); auto audioFrame = std::make_unique<AudioFrame>();
if (!frame) { auto frame = audioFrame->pointer();
RING_ERR() << "Failed to allocate audio frame";
return nullptr;
}
frame->format = AV_SAMPLE_FMT_S16; frame->format = AV_SAMPLE_FMT_S16;
frame->nb_samples = frames(); frame->nb_samples = frames();
frame->channel_layout = av_get_default_channel_layout(channels()); frame->channel_layout = av_get_default_channel_layout(channels());
...@@ -313,12 +309,13 @@ AudioBuffer::toAVFrame() const ...@@ -313,12 +309,13 @@ AudioBuffer::toAVFrame() const
interleave(reinterpret_cast<AudioSample*>(frame->data[0])); interleave(reinterpret_cast<AudioSample*>(frame->data[0]));
return frame; return audioFrame;
} }
int int
AudioBuffer::append(AVFrame* frame) AudioBuffer::append(const AudioFrame& audioFrame)
{ {
auto frame = audioFrame.pointer();
// FIXME we assume frame is s16 interleaved // FIXME we assume frame is s16 interleaved
if (channels() != static_cast<unsigned>(frame->channels) if (channels() != static_cast<unsigned>(frame->channels)
|| getSampleRate() != frame->sample_rate) { || getSampleRate() != frame->sample_rate) {
......
...@@ -35,6 +35,7 @@ extern "C" { ...@@ -35,6 +35,7 @@ extern "C" {
} }
#include "ring_types.h" #include "ring_types.h"
#include "media_buffer.h"
#include <ciso646> // fix windows compiler bug #include <ciso646> // fix windows compiler bug
...@@ -357,9 +358,9 @@ class AudioBuffer { ...@@ -357,9 +358,9 @@ class AudioBuffer {
*/ */
size_t copy(AudioSample* in, size_t sample_num, size_t pos_out = 0); size_t copy(AudioSample* in, size_t sample_num, size_t pos_out = 0);
AVFrame* toAVFrame() const; std::unique_ptr<AudioFrame> toAVFrame() const;
int append(AVFrame* frame); int append(const AudioFrame& frame);
private: private:
int sampleRate_; int sampleRate_;
......
...@@ -78,22 +78,20 @@ Resampler::resample(const AVFrame* input, AVFrame* output) ...@@ -78,22 +78,20 @@ Resampler::resample(const AVFrame* input, AVFrame* output)
void void
Resampler::resample(const AudioBuffer& dataIn, AudioBuffer& dataOut) Resampler::resample(const AudioBuffer& dataIn, AudioBuffer& dataOut)
{ {
auto input = dataIn.toAVFrame(); auto inputFrame = dataIn.toAVFrame();
auto input = inputFrame->pointer();
AudioFrame resampled; AudioFrame resampled;
auto output = resampled.pointer(); auto output = resampled.pointer();
output->sample_rate = dataOut.getSampleRate(); output->sample_rate = dataOut.getSampleRate();
output->channel_layout = av_get_default_channel_layout(dataOut.channels()); output->channel_layout = av_get_default_channel_layout(dataOut.channels());
output->format = AV_SAMPLE_FMT_S16; output->format = AV_SAMPLE_FMT_S16;
if (resample(input, output) < 0) { if (resample(input, output) < 0)
av_frame_free(&input);
return; return;
}
dataOut.resize(output->nb_samples); dataOut.resize(output->nb_samples);
dataOut.deinterleave(reinterpret_cast<const AudioSample*>(output->extended_data[0]), dataOut.deinterleave(reinterpret_cast<const AudioSample*>(output->extended_data[0]),
output->nb_samples, output->channels); output->nb_samples, output->channels);
av_frame_free(&input);
} }
} // namespace ring } // namespace ring
...@@ -83,7 +83,7 @@ AudioFile::AudioFile(const std::string &fileName, unsigned int sampleRate) : ...@@ -83,7 +83,7 @@ AudioFile::AudioFile(const std::string &fileName, unsigned int sampleRate) :
resampled->format = AV_SAMPLE_FMT_S16; resampled->format = AV_SAMPLE_FMT_S16;
if (resampler->resample(input.pointer(), resampled) < 0) if (resampler->resample(input.pointer(), resampled) < 0)
throw AudioFileException("Frame could not be resampled"); throw AudioFileException("Frame could not be resampled");
if (buf->append(resampled) < 0) if (buf->append(output) < 0)
throw AudioFileException("Error while decoding: " + fileName); throw AudioFileException("Error while decoding: " + fileName);
break; break;
case MediaDecoder::Status::DecodeError: case MediaDecoder::Status::DecodeError:
......
...@@ -380,13 +380,12 @@ MediaEncoder::encode(VideoFrame& input, bool is_keyframe, ...@@ -380,13 +380,12 @@ MediaEncoder::encode(VideoFrame& input, bool is_keyframe,
} }
#endif // RING_VIDEO #endif // RING_VIDEO
int MediaEncoder::encodeAudio(AVFrame* frame) int MediaEncoder::encodeAudio(AudioFrame& frame)
{ {
auto enc = encoders_[currentStreamIdx_]; auto enc = encoders_[currentStreamIdx_];
frame->pts = getNextTimestamp(sent_samples, enc->sample_rate, enc->time_base); frame.pointer()->pts = getNextTimestamp(sent_samples, enc->sample_rate, enc->time_base);
sent_samples += frame->nb_samples; sent_samples += frame.pointer()->nb_samples;
encode(frame, currentStreamIdx_); encode(frame.pointer(), currentStreamIdx_);
av_frame_free(&frame);
return 0; return 0;
} }
......
...@@ -75,7 +75,7 @@ public: ...@@ -75,7 +75,7 @@ public:
int encode(VideoFrame &input, bool is_keyframe, int64_t frame_number); int encode(VideoFrame &input, bool is_keyframe, int64_t frame_number);
#endif // RING_VIDEO #endif // RING_VIDEO
int encodeAudio(AVFrame* frame); int encodeAudio(AudioFrame& frame);
// frame should be ready to be sent to the encoder at this point // frame should be ready to be sent to the encoder at this point
int encode(AVFrame* frame, int streamIdx); int encode(AVFrame* frame, int streamIdx);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment