Commit 222ce2c0 authored by Philippe Gorley

audiobuffer: use raii when dealing with AVFrame

Returns an AudioFrame instead of a raw AVFrame pointer when calling
AudioBuffer::toAVFrame, and takes an AudioFrame as parameter when appending
an AVFrame to an AudioBuffer.

The AVFrame no longer needs to be freed manually during encoding/conversions.

Change-Id: I28aa992a5483f84f6cb1a5157718c11c3a69518c
parent 34e72c5a
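
For context, AudioFrame (pulled in through media_buffer.h below) is the RAII wrapper that owns the underlying AVFrame, which is why the explicit av_frame_free() calls in the encoder and resampler paths can be dropped. Its definition is not part of this diff; a minimal sketch of such a wrapper, assuming it simply pairs av_frame_alloc() in the constructor with av_frame_free() in the destructor and exposes the raw frame through pointer(), might look like this:

// Illustrative sketch only; the real ring::AudioFrame lives in media_buffer.h
// and may differ. It shows the RAII idea the commit relies on.
extern "C" {
#include <libavutil/frame.h>
}
#include <new> // std::bad_alloc

class AudioFrame {
public:
    AudioFrame() : frame_(av_frame_alloc()) {
        if (!frame_)
            throw std::bad_alloc(); // allocation failure surfaces as an exception, not a nullptr return
    }
    ~AudioFrame() { av_frame_free(&frame_); } // frees the frame and any buffers it references
    AudioFrame(const AudioFrame&) = delete;            // owning type: no copies
    AudioFrame& operator=(const AudioFrame&) = delete;
    AVFrame* pointer() const { return frame_; } // borrow the raw frame; ownership stays with AudioFrame
private:
    AVFrame* frame_;
};

With a wrapper along these lines, the std::make_unique<AudioFrame>() in AudioBuffer::toAVFrame() ties the frame's lifetime to the returned unique_ptr, which is what makes the manual av_frame_free() calls removed below unnecessary.
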
@@ -95,7 +95,8 @@ AudioInput::process()
resampled = micData_;
}
- AVFrame* frame = resampled.toAVFrame();
+ auto audioFrame = resampled.toAVFrame();
+ auto frame = audioFrame->pointer();
auto ms = MediaStream("a:local", format_);
frame->pts = getNextTimestamp(sent_samples, ms.sampleRate, static_cast<rational<int64_t>>(ms.timeBase));
sent_samples += frame->nb_samples;
......
@@ -209,7 +209,8 @@ AudioSender::process()
if (muteState_) // audio is muted, set samples to 0
buffer.reset();
- AVFrame* frame = buffer.toAVFrame();
+ auto audioFrame = buffer.toAVFrame();
+ auto frame = audioFrame->pointer();
auto ms = MediaStream("a:local", buffer.getFormat());
frame->pts = getNextTimestamp(sent_samples, ms.sampleRate, static_cast<rational<int64_t>>(ms.timeBase));
ms.firstTimestamp = frame->pts;
@@ -221,7 +222,7 @@ AudioSender::process()
rec->recordData(frame, ms);
}
- if (audioEncoder_->encodeAudio(frame) < 0)
+ if (audioEncoder_->encodeAudio(*audioFrame) < 0)
RING_ERR("encoding failed");
}
......
@@ -290,15 +290,11 @@ size_t AudioBuffer::copy(AudioSample* in, size_t sample_num, size_t pos_out /* =
return sample_num;
}
- AVFrame*
+ std::unique_ptr<AudioFrame>
AudioBuffer::toAVFrame() const
{
- AVFrame* frame = av_frame_alloc();
- if (!frame) {
-     RING_ERR() << "Failed to allocate audio frame";
-     return nullptr;
- }
+ auto audioFrame = std::make_unique<AudioFrame>();
+ auto frame = audioFrame->pointer();
frame->format = AV_SAMPLE_FMT_S16;
frame->nb_samples = frames();
frame->channel_layout = av_get_default_channel_layout(channels());
@@ -313,12 +309,13 @@ AudioBuffer::toAVFrame() const
interleave(reinterpret_cast<AudioSample*>(frame->data[0]));
- return frame;
+ return audioFrame;
}
int
- AudioBuffer::append(AVFrame* frame)
+ AudioBuffer::append(const AudioFrame& audioFrame)
{
+ auto frame = audioFrame.pointer();
// FIXME we assume frame is s16 interleaved
if (channels() != static_cast<unsigned>(frame->channels)
|| getSampleRate() != frame->sample_rate) {
......
@@ -35,6 +35,7 @@ extern "C" {
}
#include "ring_types.h"
#include "media_buffer.h"
+ #include <ciso646> // fix windows compiler bug
@@ -357,9 +358,9 @@ class AudioBuffer {
*/
size_t copy(AudioSample* in, size_t sample_num, size_t pos_out = 0);
- AVFrame* toAVFrame() const;
+ std::unique_ptr<AudioFrame> toAVFrame() const;
- int append(AVFrame* frame);
+ int append(const AudioFrame& frame);
private:
int sampleRate_;
......
@@ -78,22 +78,20 @@ Resampler::resample(const AVFrame* input, AVFrame* output)
void
Resampler::resample(const AudioBuffer& dataIn, AudioBuffer& dataOut)
{
- auto input = dataIn.toAVFrame();
+ auto inputFrame = dataIn.toAVFrame();
+ auto input = inputFrame->pointer();
AudioFrame resampled;
auto output = resampled.pointer();
output->sample_rate = dataOut.getSampleRate();
output->channel_layout = av_get_default_channel_layout(dataOut.channels());
output->format = AV_SAMPLE_FMT_S16;
- if (resample(input, output) < 0) {
-     av_frame_free(&input);
+ if (resample(input, output) < 0)
      return;
- }
dataOut.resize(output->nb_samples);
dataOut.deinterleave(reinterpret_cast<const AudioSample*>(output->extended_data[0]),
output->nb_samples, output->channels);
- av_frame_free(&input);
}
} // namespace ring
@@ -83,7 +83,7 @@ AudioFile::AudioFile(const std::string &fileName, unsigned int sampleRate) :
resampled->format = AV_SAMPLE_FMT_S16;
if (resampler->resample(input.pointer(), resampled) < 0)
throw AudioFileException("Frame could not be resampled");
- if (buf->append(resampled) < 0)
+ if (buf->append(output) < 0)
throw AudioFileException("Error while decoding: " + fileName);
break;
case MediaDecoder::Status::DecodeError:
......
@@ -380,13 +380,12 @@ MediaEncoder::encode(VideoFrame& input, bool is_keyframe,
}
#endif // RING_VIDEO
- int MediaEncoder::encodeAudio(AVFrame* frame)
+ int MediaEncoder::encodeAudio(AudioFrame& frame)
{
auto enc = encoders_[currentStreamIdx_];
- frame->pts = getNextTimestamp(sent_samples, enc->sample_rate, enc->time_base);
- sent_samples += frame->nb_samples;
- encode(frame, currentStreamIdx_);
- av_frame_free(&frame);
+ frame.pointer()->pts = getNextTimestamp(sent_samples, enc->sample_rate, enc->time_base);
+ sent_samples += frame.pointer()->nb_samples;
+ encode(frame.pointer(), currentStreamIdx_);
return 0;
}
......
@@ -75,7 +75,7 @@ public:
int encode(VideoFrame &input, bool is_keyframe, int64_t frame_number);
#endif // RING_VIDEO
- int encodeAudio(AVFrame* frame);
+ int encodeAudio(AudioFrame& frame);
// frame should be ready to be sent to the encoder at this point
int encode(AVFrame* frame, int streamIdx);
......