Commit 9d36c29e authored by Hugo Lefeuvre, committed by Philippe Gorley

media_filter: remove simple filters


Change-Id: Ie45773677290111f92b81230071835ac42e7197e
Reviewed-by: Philippe Gorley <philippe.gorley@savoirfairelinux.com>
parent eda3c1a2
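With the single-input shortcuts gone, callers name the graph input and use the named-input API everywhere. A minimal sketch of the migrated call pattern, modelled on the updated unit test in this commit (the input name "in1", the scale filter and the stream parameters are illustrative, not part of the change):

    // Before: filter.initialize("scale=200:100", params); frame = filter.apply(frame);
    // After: the input is named in the graph description and when feeding frames.
    MediaFilter filter;
    rational<int> one = rational<int>(1);
    MediaStream params("in1", AV_PIX_FMT_YUV420P, one, 320, 240, one, one);
    if (filter.initialize("[in1] scale=200:100", std::vector<MediaStream>{params}) < 0)
        return; // initialization failed
    if (filter.feedInput(frame, "in1") < 0) // caller keeps ownership of frame
        return;
    AVFrame* out = filter.readOutput(); // nullptr if no frame is ready (EAGAIN, EOF or error)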
@@ -49,15 +49,6 @@ MediaFilter::getFilterDesc() const
     return desc_;
 }
 
-int
-MediaFilter::initialize(const std::string& filterDesc, MediaStream msp)
-{
-    std::vector<MediaStream> msps;
-    msps.push_back(msp);
-    desc_ = filterDesc;
-    return initialize(desc_, msps);
-}
-
 int
 MediaFilter::initialize(const std::string& filterDesc, std::vector<MediaStream> msps)
 {
@@ -94,25 +85,20 @@ MediaFilter::initialize(const std::string& filterDesc, std::vector<MediaStream> msps)
         return fail("Size mismatch between number of inputs in filter graph and input parameter array",
                     AVERROR(EINVAL));
 
-    if (count > 1) {
-        /* Complex filter */
-        for (AVFilterInOut* current = inputs.get(); current; current = current->next) {
-            if (!current->name)
-                return fail("Complex filters' inputs require names", AVERROR(EINVAL));
-            std::string name = current->name;
-            const auto& it = std::find_if(msps.begin(), msps.end(), [name](const MediaStream& msp)
-                { return msp.name == name; });
-            if (it != msps.end()) {
-                if ((ret = initInputFilter(current, *it, false)) < 0) {
-                    std::string msg = "Failed to find matching parameters for: " + name;
-                    return fail(msg, ret);
-                }
-            }
-        }
-    } else {
-        /* Simple filter */
-        if ((ret = initInputFilter(inputs.get(), msps[0], true)) < 0) {
-            return fail("Failed to create input for filter graph", ret);
+    for (AVFilterInOut* current = inputs.get(); current; current = current->next) {
+        if (!current->name)
+            return fail("Filters require non empty names", AVERROR(EINVAL));
+        std::string name = current->name;
+        const auto& it = std::find_if(msps.begin(), msps.end(), [name](const MediaStream& msp)
+            { return msp.name == name; });
+        if (it != msps.end()) {
+            if ((ret = initInputFilter(current, *it)) < 0) {
+                std::string msg = "Failed to initialize input: " + name;
+                return fail(msg, ret);
+            }
+        } else {
+            std::string msg = "Failed to find matching parameters for: " + name;
+            return fail(msg, ret);
         }
     }
@@ -124,12 +110,6 @@ MediaFilter::initialize(const std::string& filterDesc, std::vector<MediaStream> msps)
     return 0;
 }
 
-MediaStream
-MediaFilter::getInputParams() const
-{
-    return getInputParams("default");
-}
-
 MediaStream
 MediaFilter::getInputParams(const std::string& inputName) const
 {
@@ -174,12 +154,6 @@ MediaFilter::getOutputParams() const
     return output;
 }
 
-int
-MediaFilter::feedInput(AVFrame* frame)
-{
-    return feedInput(frame, "default");
-}
-
 int
 MediaFilter::feedInput(AVFrame* frame, std::string inputName)
 {
@@ -218,7 +192,7 @@ MediaFilter::readOutput()
     if (ret >= 0) {
         return frame;
     } else if (ret == AVERROR(EAGAIN)) {
-        // return nullptr
+        // no data available right now, try again
     } else if (ret == AVERROR_EOF) {
         RING_WARN() << "Filters have reached EOF, no more frames will be output";
     } else {
@@ -228,19 +202,6 @@ MediaFilter::readOutput()
     return nullptr;
 }
 
-AVFrame*
-MediaFilter::apply(AVFrame* frame)
-{
-    if (inputs_.size() != 1) {
-        RING_ERR() << "Cannot use apply(AVFrame*) shortcut with a complex filter";
-        return nullptr;
-    }
-
-    if (feedInput(frame) < 0)
-        return nullptr;
-    return readOutput();
-}
-
 int
 MediaFilter::initOutputFilter(AVFilterInOut* out)
 {
@@ -270,7 +231,7 @@ MediaFilter::initOutputFilter(AVFilterInOut* out)
 }
 
 int
-MediaFilter::initInputFilter(AVFilterInOut* in, MediaStream msp, bool simple)
+MediaFilter::initInputFilter(AVFilterInOut* in, MediaStream msp)
 {
     int ret = 0;
     AVBufferSrcParameters* params = av_buffersrc_parameters_alloc();
@@ -296,9 +257,6 @@ MediaFilter::initInputFilter(AVFilterInOut* in, MediaStream msp, bool simple)
     AVFilterContext* buffersrcCtx = nullptr;
     if (buffersrc) {
        char name[128];
-        if (simple)
-            snprintf(name, sizeof(name), "buffersrc");
-        else
-            snprintf(name, sizeof(name), "buffersrc_%s_%d", in->name, in->pad_idx);
+        snprintf(name, sizeof(name), "buffersrc_%s_%d", in->name, in->pad_idx);
         buffersrcCtx = avfilter_graph_alloc_filter(graph_, buffersrc, name);
     }
@@ -318,9 +276,6 @@ MediaFilter::initInputFilter(AVFilterInOut* in, MediaStream msp, bool simple)
         return fail("Failed to link buffer source to graph", ret);
 
     inputs_.push_back(buffersrcCtx);
-    if (simple)
-        msp.name = "default";
-    else
-        msp.name = in->name;
+    msp.name = in->name;
     inputParams_.push_back(msp);
     return ret;
...
@@ -37,21 +37,22 @@ namespace ring {
 /**
  * Provides access to libavfilter.
  *
- * Can be used for simple filters (1 input, 1 output), or complex filters (multiple inputs, 1 output).
+ * Can be used for filters with unlimited number of inputs.
  * Multiple outputs are not supported. They add complexity for little gain.
  *
  * For information on how to write a filter graph description, see:
  * https://ffmpeg.org/ffmpeg-filters.html
  * http://trac.ffmpeg.org/wiki/FilteringGuide
  *
- * For complex filters, it is required to name each filter graph input. These names are used to feed the correct input.
- * It is the same name that will be passed as second argument to feedInput(AVFrame*, std::string). This is not required
- * for simple filters, as there is only one input.
+ * It is required to name each filter graph input. These names are used to feed the correct input.
+ * It is the same name that will be passed as second argument to feedInput(AVFrame*, std::string).
 *
- * Simple filter: "scale=320:240"
- * Scales the input to 320x240. No need to specify input names.
+ * Examples:
 *
- * Complex filter: "[in1] scale=iw/4:ih/4 [mid]; [in2] [mid] overlay=main_w-overlay_w-10:main_h-overlay_h-10"
+ * - "[in1] scale=320:240"
+ *   Scales the input to 320x240.
+ *
+ * - "[in1] scale=iw/4:ih/4 [mid]; [in2] [mid] overlay=main_w-overlay_w-10:main_h-overlay_h-10"
  *   in1 will be scaled to 1/16th its size and placed over in2 in the bottom right corner. When feeding frames to
  *   the filter, you need to specify whether the frame is destined for in1 or in2.
  */
@@ -65,26 +66,11 @@ class MediaFilter {
      */
     std::string getFilterDesc() const;
 
-    /**
-     * Initializes the filter graph with 1 input.
-     *
-     * NOTE This method will fail if @filterDesc has more than 1 input.
-     * NOTE Wraps @msp in a vector and calls initialize.
-     */
-    int initialize(const std::string& filterDesc, MediaStream msp);
-
     /**
      * Initializes the filter graph with one or more inputs and one output. Returns a negative code on error.
     */
     int initialize(const std::string& filterDesc, std::vector<MediaStream> msps);
 
-    /**
-     * Returns a MediaStream object describing the input.
-     *
-     * NOTE This is a shortcut for simple filters and will fail when called on a complex filter.
-     */
-    MediaStream getInputParams() const;
-
     /**
     * Returns a MediaStream object describing the input specified by @inputName.
     */
@@ -97,14 +83,6 @@ class MediaFilter {
      */
     MediaStream getOutputParams() const;
 
-    /**
-     * Give the filter graph an input frame. Caller is responsible for freeing the frame.
-     *
-     * NOTE This is a wrapper for feedInput(AVFrame*, std::string)
-     * NOTE This is for filters with 1 input.
-     */
-    int feedInput(AVFrame* frame);
-
     /**
     * Give the specified source filter an input frame. Caller is responsible for freeing the frame.
     *
@@ -121,16 +99,6 @@ class MediaFilter {
      */
     AVFrame* readOutput();
 
-    /**
-     * Passes a frame through a simple filter (1 input, 1 output).
-     *
-     * This is a shortcut for feedInput(AVFrame*)+readOutput().
-     *
-     * NOTE Returns nullptr if the filter graph has multiple inputs/outputs.
-     * NOTE Caller is responsible for freeing the input and output frames.
-     */
-    AVFrame* apply(AVFrame* frame);
-
 private:
     NON_COPYABLE(MediaFilter);
@@ -142,7 +110,7 @@ class MediaFilter {
     /**
      * Initializes an input of filter graph.
     */
-    int initInputFilter(AVFilterInOut* in, MediaStream msp, bool simple);
+    int initInputFilter(AVFilterInOut* in, MediaStream msp);
 
     /**
     * Convenience method that prints @msg and returns err.
...
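As the updated header comment says, every graph input must be labelled and the same label passed to feedInput(). A hedged sketch of driving the documented two-input overlay (the MediaStream variables and the consume() helper are placeholders, not part of this commit):

    // Two inputs named in the graph description; in1 is scaled down and
    // overlaid on in2 in the bottom-right corner.
    MediaFilter filter;
    std::vector<MediaStream> inputs = {in1Params, in2Params}; // streams named "in1" and "in2"
    if (filter.initialize("[in1] scale=iw/4:ih/4 [mid]; "
                          "[in2] [mid] overlay=main_w-overlay_w-10:main_h-overlay_h-10",
                          inputs) < 0)
        return;
    filter.feedInput(frame1, "in1"); // caller is responsible for freeing both input frames
    filter.feedInput(frame2, "in2");
    if (AVFrame* out = filter.readOutput())
        consume(out); // placeholder for whatever uses (and frees) the output frame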
@@ -339,37 +339,31 @@ MediaRecorder::setupVideoOutput()
     MediaStream encoderStream;
     const MediaStream& peer = streams_[true][true];
     const MediaStream& local = streams_[true][false];
+    int ret = -1;
 
     // vp8 supports only yuv420p
     videoFilter_.reset(new MediaFilter);
     switch (nbReceivedVideoStreams_) {
     case 1:
-        encoderStream = (peer.width > 0 && peer.height > 0 ? peer : local);
-        if (videoFilter_->initialize("format=pix_fmts=yuv420p", encoderStream) < 0) {
-            RING_ERR() << "Failed to initialize video filter";
-            encoderStream.format = -1; // invalidate stream
-        } else {
-            encoderStream = videoFilter_->getOutputParams();
-        }
+        ret = videoFilter_->initialize("[v:main] format=pix_fmts=yuv420p",
+            std::vector<MediaStream>{peer.isValid() ? peer : local});
         break;
     case 2: // overlay local video over peer video
-        if (videoFilter_->initialize(buildVideoFilter(),
-                std::vector<MediaStream>{peer, local}) < 0) {
-            RING_ERR() << "Failed to initialize video filter";
-            encoderStream.format = -1; // invalidate stream
-        } else {
-            encoderStream = videoFilter_->getOutputParams();
-        }
+        ret = videoFilter_->initialize(buildVideoFilter(),
+            std::vector<MediaStream>{peer, local});
         break;
     default:
         RING_ERR() << "Recording more than 2 video streams is not supported";
         break;
     }
 
-    if (encoderStream.format < 0)
-        return encoderStream;
-
-    RING_DBG() << "Recorder output: " << encoderStream;
+    if (ret >= 0) {
+        encoderStream = videoFilter_->getOutputParams();
+        RING_DBG() << "Recorder output: " << encoderStream;
+    } else {
+        RING_ERR() << "Failed to initialize video filter";
+    }
 
     return encoderStream;
 }
@@ -406,39 +400,32 @@ MediaRecorder::setupAudioOutput()
     MediaStream encoderStream;
     const MediaStream& peer = streams_[false][true];
     const MediaStream& local = streams_[false][false];
+    std::string filter = "aresample=osr=48000:ocl=stereo:osf=s16";
+    int ret = -1;
 
     // resample to common audio format, so any player can play the file
     audioFilter_.reset(new MediaFilter);
     switch (nbReceivedAudioStreams_) {
     case 1:
-        encoderStream = (peer.sampleRate > 0 && peer.nbChannels > 0 ? peer : local);
-        if (audioFilter_->initialize("aresample=osr=48000:ocl=stereo:osf=s16",
-                encoderStream) < 0) {
-            RING_ERR() << "Failed to initialize audio filter";
-            encoderStream.format = -1; // invalidate stream
-        } else {
-            encoderStream = audioFilter_->getOutputParams();
-        }
+        filter.insert(0, "[a:1] ");
+        ret = audioFilter_->initialize(filter, std::vector<MediaStream>{peer.isValid() ? peer : local});
         break;
     case 2: // mix both audio streams
-        if (audioFilter_->initialize("[a:1][a:2] amix,aresample=osr=48000:ocl=stereo:osf=s16",
-                std::vector<MediaStream>{peer, local}) < 0) {
-            RING_ERR() << "Failed to initialize audio filter";
-            encoderStream.format = -1; // invalidate stream
-        } else {
-            encoderStream = audioFilter_->getOutputParams();
-        }
+        filter.insert(0, "[a:1][a:2] amix,");
+        ret = audioFilter_->initialize(filter, std::vector<MediaStream>{peer, local});
         break;
     default:
         RING_ERR() << "Recording more than 2 audio streams is not supported";
-        encoderStream.format = -1; // invalidate stream
         break;
     }
 
-    if (encoderStream.format < 0)
-        return encoderStream;
-
-    RING_DBG() << "Recorder output: " << encoderStream;
+    if (ret >= 0) {
+        encoderStream = audioFilter_->getOutputParams();
+        RING_DBG() << "Recorder output: " << encoderStream;
+    } else {
+        RING_ERR() << "Failed to initialize audio filter";
+    }
 
     return encoderStream;
 }
...
@@ -38,18 +38,14 @@ public:
     void tearDown();
 
 private:
-    void testSimpleVideoFilter();
-    void testSimpleAudioFilter();
-    void testComplexVideoFilter();
-    void testSimpleFilterParams();
-    void testComplexFilterParams();
+    void testAudioFilter();
+    void testVideoFilter();
+    void testFilterParams();
 
     CPPUNIT_TEST_SUITE(MediaFilterTest);
-    CPPUNIT_TEST(testSimpleVideoFilter);
-    CPPUNIT_TEST(testSimpleAudioFilter);
-    CPPUNIT_TEST(testComplexVideoFilter);
-    CPPUNIT_TEST(testSimpleFilterParams);
-    CPPUNIT_TEST(testComplexFilterParams);
+    CPPUNIT_TEST(testAudioFilter);
+    CPPUNIT_TEST(testVideoFilter);
+    CPPUNIT_TEST(testFilterParams);
     CPPUNIT_TEST_SUITE_END();
 
     std::unique_ptr<MediaFilter> filter_;
@@ -113,44 +109,9 @@ fill_samples(uint16_t* samples, int sampleRate, int nbSamples, int nbChannels, f
 }
 
 void
-MediaFilterTest::testSimpleVideoFilter()
+MediaFilterTest::testAudioFilter()
 {
-    std::string filterSpec = "scale=200x100";
-
-    // constants
-    const constexpr int width = 320;
-    const constexpr int height = 240;
-    const constexpr AVPixelFormat format = AV_PIX_FMT_YUV420P;
-
-    // prepare video frame
-    frame_ = av_frame_alloc();
-    frame_->format = format;
-    frame_->width = width;
-    frame_->height = height;
-
-    // construct the filter parameters
-    rational<int> one = rational<int>(1);
-    auto params = MediaStream("vf", format, one, width, height, one, one);
-
-    // allocate and fill frame buffers
-    CPPUNIT_ASSERT(av_frame_get_buffer(frame_, 32) >= 0);
-    fill_yuv_image(frame_->data, frame_->linesize, frame_->width, frame_->height, 0);
-
-    // prepare filter
-    CPPUNIT_ASSERT(filter_->initialize(filterSpec, params) >= 0);
-
-    // apply filter
-    frame_ = filter_->apply(frame_);
-    CPPUNIT_ASSERT(frame_);
-
-    // check if the filter worked
-    CPPUNIT_ASSERT(frame_->width == 200 && frame_->height == 100);
-}
-
-void
-MediaFilterTest::testSimpleAudioFilter()
-{
-    std::string filterSpec = "aformat=sample_fmts=u8";
+    std::string filterSpec = "[in1] aformat=sample_fmts=u8";
 
     // constants
     const constexpr int nbSamples = 100;
@@ -158,6 +119,7 @@ MediaFilterTest::testSimpleAudioFilter()
     const constexpr int sampleRate = 44100;
     const constexpr enum AVSampleFormat format = AV_SAMPLE_FMT_S16;
+
     // prepare audio frame
     frame_ = av_frame_alloc();
     frame_->format = format;
@@ -167,17 +129,21 @@ MediaFilterTest::testSimpleAudioFilter()
     frame_->channels = av_get_channel_layout_nb_channels(channelLayout);
 
     // construct the filter parameters
-    auto params = MediaStream("af", format, rational<int>(1, 1), sampleRate, frame_->channels);
+    auto params = MediaStream("in1", format, rational<int>(1, sampleRate), sampleRate, frame_->channels);
 
     // allocate and fill frame buffers
     CPPUNIT_ASSERT(av_frame_get_buffer(frame_, 0) >= 0);
     fill_samples(reinterpret_cast<uint16_t*>(frame_->data[0]), sampleRate, nbSamples, frame_->channels, 440.0);
 
     // prepare filter
-    CPPUNIT_ASSERT(filter_->initialize(filterSpec, params) >= 0);
+    std::vector<MediaStream> vec;
+    vec.push_back(params);
+    CPPUNIT_ASSERT(filter_->initialize(filterSpec, vec) >= 0);
 
     // apply filter
-    frame_ = filter_->apply(frame_);
+    CPPUNIT_ASSERT(filter_->feedInput(frame_, "in1") >= 0);
+    av_frame_free(&frame_);
+    frame_ = filter_->readOutput();
     CPPUNIT_ASSERT(frame_);
 
     // check if the filter worked
@@ -185,7 +151,7 @@ MediaFilterTest::testSimpleAudioFilter()
 }
 
 void
-MediaFilterTest::testComplexVideoFilter()
+MediaFilterTest::testVideoFilter()
 {
     std::string filterSpec = "[main] [top] overlay=main_w-overlay_w-10:main_h-overlay_h-10";
     std::string main = "main";
@@ -238,36 +204,7 @@ MediaFilterTest::testComplexVideoFilter()
 }
 
 void
-MediaFilterTest::testSimpleFilterParams()
-{
-    std::string filterSpec = "scale=200x100";
-
-    // constants
-    const constexpr int width = 320;
-    const constexpr int height = 240;
-    const constexpr AVPixelFormat format = AV_PIX_FMT_YUV420P;
-
-    // construct the filter parameters
-    rational<int> one = rational<int>(1);
-    auto params = MediaStream("vf", format, one, width, height, one, one);
-
-    // returned params should be invalid
-    CPPUNIT_ASSERT(filter_->getOutputParams().format < 0);
-
-    // prepare filter
-    CPPUNIT_ASSERT(filter_->initialize(filterSpec, params) >= 0);
-
-    // check input params
-    auto msin = filter_->getInputParams();
-    CPPUNIT_ASSERT(msin.format == format && msin.width == width && msin.height == height);
-
-    // output params should now be valid
-    auto msout = filter_->getOutputParams();
-    CPPUNIT_ASSERT(msout.format >= 0 && msout.width > 0 && msout.height > 0);
-}
-
-void
-MediaFilterTest::testComplexFilterParams()
+MediaFilterTest::testFilterParams()
 {
     std::string filterSpec = "[main] [top] overlay=main_w-overlay_w-10:main_h-overlay_h-10";
...