Commit 9d36c29e authored by Hugo Lefeuvre, committed by Philippe Gorley

media_filter: remove simple filters

Change-Id: Ie45773677290111f92b81230071835ac42e7197e
Reviewed-by: Philippe Gorley <philippe.gorley@savoirfairelinux.com>
parent eda3c1a2
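
The sketch below (illustrative only, not part of the patch; 'frame' stands for a caller-allocated AVFrame and the stream parameters are placeholders) contrasts the removed single-input shortcuts with the named-input API that remains:

// Shortcuts removed by this commit:
//   filter.initialize(desc, stream);   // wrapped the single stream in a vector
//   filter.getInputParams();           // returned the "default" input parameters
//   filter.feedInput(frame);           // fed the single "default" input
//   filter.apply(frame);               // feedInput(frame) followed by readOutput()
//
// Remaining API: every graph input carries an explicit label, and the
// MediaStream handed to initialize() must use that same name ("in1" here).
rational<int> one(1);
auto in = MediaStream("in1", AV_PIX_FMT_YUV420P, one, 320, 240, one, one);
MediaFilter filter;
if (filter.initialize("[in1] scale=160:120", std::vector<MediaStream>{in}) >= 0) {
    filter.feedInput(frame, "in1");      // caller keeps ownership of 'frame'
    AVFrame* out = filter.readOutput();  // nullptr while no output is available
}
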
@@ -49,15 +49,6 @@ MediaFilter::getFilterDesc() const
return desc_;
}
int
MediaFilter::initialize(const std::string& filterDesc, MediaStream msp)
{
std::vector<MediaStream> msps;
msps.push_back(msp);
desc_ = filterDesc;
return initialize(desc_, msps);
}
int
MediaFilter::initialize(const std::string& filterDesc, std::vector<MediaStream> msps)
{
@@ -94,25 +85,20 @@ MediaFilter::initialize(const std::string& filterDesc, std::vector<MediaStream>
return fail("Size mismatch between number of inputs in filter graph and input parameter array",
AVERROR(EINVAL));
if (count > 1) {
/* Complex filter */
for (AVFilterInOut* current = inputs.get(); current; current = current->next) {
if (!current->name)
return fail("Complex filters' inputs require names", AVERROR(EINVAL));
std::string name = current->name;
const auto& it = std::find_if(msps.begin(), msps.end(), [name](const MediaStream& msp)
{ return msp.name == name; });
if (it != msps.end()) {
if ((ret = initInputFilter(current, *it, false)) < 0) {
std::string msg = "Failed to find matching parameters for: " + name;
return fail(msg, ret);
}
for (AVFilterInOut* current = inputs.get(); current; current = current->next) {
if (!current->name)
return fail("Filters require non empty names", AVERROR(EINVAL));
std::string name = current->name;
const auto& it = std::find_if(msps.begin(), msps.end(), [name](const MediaStream& msp)
{ return msp.name == name; });
if (it != msps.end()) {
if ((ret = initInputFilter(current, *it)) < 0) {
std::string msg = "Failed to initialize input: " + name;
return fail(msg, ret);
}
}
} else {
/* Simple filter */
if ((ret = initInputFilter(inputs.get(), msps[0], true)) < 0) {
return fail("Failed to create input for filter graph", ret);
} else {
std::string msg = "Failed to find matching parameters for: " + name;
return fail(msg, ret);
}
}
@@ -124,12 +110,6 @@ MediaFilter::initialize(const std::string& filterDesc, std::vector<MediaStream>
return 0;
}
MediaStream
MediaFilter::getInputParams() const
{
return getInputParams("default");
}
MediaStream
MediaFilter::getInputParams(const std::string& inputName) const
{
@@ -174,12 +154,6 @@ MediaFilter::getOutputParams() const
return output;
}
int
MediaFilter::feedInput(AVFrame* frame)
{
return feedInput(frame, "default");
}
int
MediaFilter::feedInput(AVFrame* frame, std::string inputName)
{
@@ -218,7 +192,7 @@ MediaFilter::readOutput()
if (ret >= 0) {
return frame;
} else if (ret == AVERROR(EAGAIN)) {
// return nullptr
// no data available right now, try again
} else if (ret == AVERROR_EOF) {
RING_WARN() << "Filters have reached EOF, no more frames will be output";
} else {
@@ -228,19 +202,6 @@ MediaFilter::readOutput()
return nullptr;
}
AVFrame*
MediaFilter::apply(AVFrame* frame)
{
if (inputs_.size() != 1) {
RING_ERR() << "Cannot use apply(AVFrame*) shortcut with a complex filter";
return nullptr;
}
if (feedInput(frame) < 0)
return nullptr;
return readOutput();
}
int
MediaFilter::initOutputFilter(AVFilterInOut* out)
{
@@ -270,7 +231,7 @@ MediaFilter::initOutputFilter(AVFilterInOut* out)
}
int
MediaFilter::initInputFilter(AVFilterInOut* in, MediaStream msp, bool simple)
MediaFilter::initInputFilter(AVFilterInOut* in, MediaStream msp)
{
int ret = 0;
AVBufferSrcParameters* params = av_buffersrc_parameters_alloc();
@@ -296,10 +257,7 @@ MediaFilter::initInputFilter(AVFilterInOut* in, MediaStream msp, bool simple)
AVFilterContext* buffersrcCtx = nullptr;
if (buffersrc) {
char name[128];
if (simple)
snprintf(name, sizeof(name), "buffersrc");
else
snprintf(name, sizeof(name), "buffersrc_%s_%d", in->name, in->pad_idx);
snprintf(name, sizeof(name), "buffersrc_%s_%d", in->name, in->pad_idx);
buffersrcCtx = avfilter_graph_alloc_filter(graph_, buffersrc, name);
}
if (!buffersrcCtx) {
@@ -318,10 +276,7 @@ MediaFilter::initInputFilter(AVFilterInOut* in, MediaStream msp, bool simple)
return fail("Failed to link buffer source to graph", ret);
inputs_.push_back(buffersrcCtx);
if (simple)
msp.name = "default";
else
msp.name = in->name;
msp.name = in->name;
inputParams_.push_back(msp);
return ret;
}
@@ -37,21 +37,22 @@ namespace ring {
/**
* Provides access to libavfilter.
*
* Can be used for simple filters (1 input, 1 output), or complex filters (multiple inputs, 1 output).
* Can be used for filters with unlimited number of inputs.
* Multiple outputs are not supported. They add complexity for little gain.
*
* For information on how to write a filter graph description, see:
* https://ffmpeg.org/ffmpeg-filters.html
* http://trac.ffmpeg.org/wiki/FilteringGuide
*
* For complex filters, it is required to name each filter graph input. These names are used to feed the correct input.
* It is the same name that will be passed as second argument to feedInput(AVFrame*, std::string). This is not required
* for simple filters, as there is only one input.
* It is required to name each filter graph input. These names are used to feed the correct input.
* It is the same name that will be passed as second argument to feedInput(AVFrame*, std::string).
*
* Simple filter: "scale=320:240"
* Scales the input to 320x240. No need to specify input names.
* Examples:
*
* Complex filter: "[in1] scale=iw/4:ih/4 [mid]; [in2] [mid] overlay=main_w-overlay_w-10:main_h-overlay_h-10"
* - "[in1] scale=320:240"
* Scales the input to 320x240.
*
* - "[in1] scale=iw/4:ih/4 [mid]; [in2] [mid] overlay=main_w-overlay_w-10:main_h-overlay_h-10"
* in1 will be scaled to 1/16th its size and placed over in2 in the bottom right corner. When feeding frames to
* the filter, you need to specify whether the frame is destined for in1 or in2.
*/
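
A rough feeding sketch for the two-input overlay description above (not part of this header; in1Params and in2Params are placeholder MediaStream objects named "in1" and "in2", and the frame variables are placeholders as well):

// Sketch: each frame is fed to the graph input it is destined for.
filter.initialize("[in1] scale=iw/4:ih/4 [mid]; "
                  "[in2] [mid] overlay=main_w-overlay_w-10:main_h-overlay_h-10",
                  std::vector<MediaStream>{in1Params, in2Params});
filter.feedInput(smallFrame, "in1");   // scaled to 1/16th its size, then overlaid
filter.feedInput(mainFrame, "in2");    // background video
AVFrame* out = filter.readOutput();    // nullptr until the graph produces a frame
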
@@ -65,26 +66,11 @@ class MediaFilter {
*/
std::string getFilterDesc() const;
/**
* Initializes the filter graph with 1 input.
*
* NOTE This method will fail if @filterDesc has more than 1 input.
* NOTE Wraps @msp in a vector and calls initialize.
*/
int initialize(const std::string& filterDesc, MediaStream msp);
/**
* Initializes the filter graph with one or more inputs and one output. Returns a negative code on error.
*/
int initialize(const std::string& filterDesc, std::vector<MediaStream> msps);
/**
* Returns a MediaStream object describing the input.
*
* NOTE This is a shortcut for simple filters and will fail when called on a complex filter.
*/
MediaStream getInputParams() const;
/**
* Returns a MediaStream object describing the input specified by @inputName.
*/
@@ -97,14 +83,6 @@ class MediaFilter {
*/
MediaStream getOutputParams() const;
/**
* Give the filter graph an input frame. Caller is responsible for freeing the frame.
*
* NOTE This is a wrapper for feedInput(AVFrame*, std::string)
* NOTE This is for filters with 1 input.
*/
int feedInput(AVFrame* frame);
/**
* Give the specified source filter an input frame. Caller is responsible for freeing the frame.
*
@@ -121,16 +99,6 @@ class MediaFilter {
*/
AVFrame* readOutput();
/**
* Passes a frame through a simple filter (1 input, 1 output).
*
* This is a shortcut for feedInput(AVFrame*)+readOutput().
*
* NOTE Returns nullptr if the filter graph has multiple inputs/outputs.
* NOTE Caller is responsible for freeing the input and output frames.
*/
AVFrame* apply(AVFrame* frame);
private:
NON_COPYABLE(MediaFilter);
@@ -142,7 +110,7 @@ class MediaFilter {
/**
* Initializes an input of filter graph.
*/
int initInputFilter(AVFilterInOut* in, MediaStream msp, bool simple);
int initInputFilter(AVFilterInOut* in, MediaStream msp);
/**
* Convenience method that prints @msg and returns err.
@@ -339,37 +339,31 @@ MediaRecorder::setupVideoOutput()
MediaStream encoderStream;
const MediaStream& peer = streams_[true][true];
const MediaStream& local = streams_[true][false];
int ret = -1;
// vp8 supports only yuv420p
videoFilter_.reset(new MediaFilter);
switch (nbReceivedVideoStreams_) {
case 1:
encoderStream = (peer.width > 0 && peer.height > 0 ? peer : local);
if (videoFilter_->initialize("format=pix_fmts=yuv420p", encoderStream) < 0) {
RING_ERR() << "Failed to initialize video filter";
encoderStream.format = -1; // invalidate stream
} else {
encoderStream = videoFilter_->getOutputParams();
}
ret = videoFilter_->initialize("[v:main] format=pix_fmts=yuv420p",
std::vector<MediaStream>{peer.isValid() ? peer : local});
break;
case 2: // overlay local video over peer video
if (videoFilter_->initialize(buildVideoFilter(),
std::vector<MediaStream>{peer, local}) < 0) {
RING_ERR() << "Failed to initialize video filter";
encoderStream.format = -1; // invalidate stream
} else {
encoderStream = videoFilter_->getOutputParams();
}
ret = videoFilter_->initialize(buildVideoFilter(),
std::vector<MediaStream>{peer, local});
break;
default:
RING_ERR() << "Recording more than 2 video streams is not supported";
break;
}
if (encoderStream.format < 0)
return encoderStream;
if (ret >= 0) {
encoderStream = videoFilter_->getOutputParams();
RING_DBG() << "Recorder output: " << encoderStream;
} else {
RING_ERR() << "Failed to initialize video filter";
}
RING_DBG() << "Recorder output: " << encoderStream;
return encoderStream;
}
@@ -406,39 +400,32 @@ MediaRecorder::setupAudioOutput()
MediaStream encoderStream;
const MediaStream& peer = streams_[false][true];
const MediaStream& local = streams_[false][false];
std::string filter = "aresample=osr=48000:ocl=stereo:osf=s16";
int ret = -1;
// resample to common audio format, so any player can play the file
audioFilter_.reset(new MediaFilter);
switch (nbReceivedAudioStreams_) {
case 1:
encoderStream = (peer.sampleRate > 0 && peer.nbChannels > 0 ? peer : local);
if (audioFilter_->initialize("aresample=osr=48000:ocl=stereo:osf=s16",
encoderStream) < 0) {
RING_ERR() << "Failed to initialize audio filter";
encoderStream.format = -1; // invalidate stream
} else {
encoderStream = audioFilter_->getOutputParams();
}
filter.insert(0, "[a:1] ");
ret = audioFilter_->initialize(filter, std::vector<MediaStream>{peer.isValid() ? peer : local});
break;
case 2: // mix both audio streams
if (audioFilter_->initialize("[a:1][a:2] amix,aresample=osr=48000:ocl=stereo:osf=s16",
std::vector<MediaStream>{peer, local}) < 0) {
RING_ERR() << "Failed to initialize audio filter";
encoderStream.format = -1; // invalidate stream
} else {
encoderStream = audioFilter_->getOutputParams();
}
filter.insert(0, "[a:1][a:2] amix,");
ret = audioFilter_->initialize(filter, std::vector<MediaStream>{peer, local});
break;
default:
RING_ERR() << "Recording more than 2 audio streams is not supported";
encoderStream.format = -1; // invalidate stream
break;
}
if (encoderStream.format < 0)
return encoderStream;
if (ret >= 0) {
encoderStream = audioFilter_->getOutputParams();
RING_DBG() << "Recorder output: " << encoderStream;
} else {
RING_ERR() << "Failed to initialize audio filter";
}
RING_DBG() << "Recorder output: " << encoderStream;
return encoderStream;
}
@@ -38,18 +38,14 @@ public:
void tearDown();
private:
void testSimpleVideoFilter();
void testSimpleAudioFilter();
void testComplexVideoFilter();
void testSimpleFilterParams();
void testComplexFilterParams();
void testAudioFilter();
void testVideoFilter();
void testFilterParams();
CPPUNIT_TEST_SUITE(MediaFilterTest);
CPPUNIT_TEST(testSimpleVideoFilter);
CPPUNIT_TEST(testSimpleAudioFilter);
CPPUNIT_TEST(testComplexVideoFilter);
CPPUNIT_TEST(testSimpleFilterParams);
CPPUNIT_TEST(testComplexFilterParams);
CPPUNIT_TEST(testAudioFilter);
CPPUNIT_TEST(testVideoFilter);
CPPUNIT_TEST(testFilterParams);
CPPUNIT_TEST_SUITE_END();
std::unique_ptr<MediaFilter> filter_;
@@ -113,44 +109,9 @@ fill_samples(uint16_t* samples, int sampleRate, int nbSamples, int nbChannels, f
}
void
MediaFilterTest::testSimpleVideoFilter()
MediaFilterTest::testAudioFilter()
{
std::string filterSpec = "scale=200x100";
// constants
const constexpr int width = 320;
const constexpr int height = 240;
const constexpr AVPixelFormat format = AV_PIX_FMT_YUV420P;
// prepare video frame
frame_ = av_frame_alloc();
frame_->format = format;
frame_->width = width;
frame_->height = height;
// construct the filter parameters
rational<int> one = rational<int>(1);
auto params = MediaStream("vf", format, one, width, height, one, one);
// allocate and fill frame buffers
CPPUNIT_ASSERT(av_frame_get_buffer(frame_, 32) >= 0);
fill_yuv_image(frame_->data, frame_->linesize, frame_->width, frame_->height, 0);
// prepare filter
CPPUNIT_ASSERT(filter_->initialize(filterSpec, params) >= 0);
// apply filter
frame_ = filter_->apply(frame_);
CPPUNIT_ASSERT(frame_);
// check if the filter worked
CPPUNIT_ASSERT(frame_->width == 200 && frame_->height == 100);
}
void
MediaFilterTest::testSimpleAudioFilter()
{
std::string filterSpec = "aformat=sample_fmts=u8";
std::string filterSpec = "[in1] aformat=sample_fmts=u8";
// constants
const constexpr int nbSamples = 100;
@@ -158,6 +119,7 @@ MediaFilterTest::testSimpleAudioFilter()
const constexpr int sampleRate = 44100;
const constexpr enum AVSampleFormat format = AV_SAMPLE_FMT_S16;
// prepare audio frame
frame_ = av_frame_alloc();
frame_->format = format;
@@ -167,17 +129,21 @@ MediaFilterTest::testSimpleAudioFilter()
frame_->channels = av_get_channel_layout_nb_channels(channelLayout);
// construct the filter parameters
auto params = MediaStream("af", format, rational<int>(1, 1), sampleRate, frame_->channels);
auto params = MediaStream("in1", format, rational<int>(1, sampleRate), sampleRate, frame_->channels);
// allocate and fill frame buffers
CPPUNIT_ASSERT(av_frame_get_buffer(frame_, 0) >= 0);
fill_samples(reinterpret_cast<uint16_t*>(frame_->data[0]), sampleRate, nbSamples, frame_->channels, 440.0);
// prepare filter
CPPUNIT_ASSERT(filter_->initialize(filterSpec, params) >= 0);
std::vector<MediaStream> vec;
vec.push_back(params);
CPPUNIT_ASSERT(filter_->initialize(filterSpec, vec) >= 0);
// apply filter
frame_ = filter_->apply(frame_);
CPPUNIT_ASSERT(filter_->feedInput(frame_, "in1") >= 0);
av_frame_free(&frame_);
frame_ = filter_->readOutput();
CPPUNIT_ASSERT(frame_);
// check if the filter worked
@@ -185,7 +151,7 @@ MediaFilterTest::testSimpleAudioFilter()
}
void
MediaFilterTest::testComplexVideoFilter()
MediaFilterTest::testVideoFilter()
{
std::string filterSpec = "[main] [top] overlay=main_w-overlay_w-10:main_h-overlay_h-10";
std::string main = "main";
@@ -238,36 +204,7 @@ MediaFilterTest::testComplexVideoFilter()
}
void
MediaFilterTest::testSimpleFilterParams()
{
std::string filterSpec = "scale=200x100";
// constants
const constexpr int width = 320;
const constexpr int height = 240;
const constexpr AVPixelFormat format = AV_PIX_FMT_YUV420P;
// construct the filter parameters
rational<int> one = rational<int>(1);
auto params = MediaStream("vf", format, one, width, height, one, one);
// returned params should be invalid
CPPUNIT_ASSERT(filter_->getOutputParams().format < 0);
// prepare filter
CPPUNIT_ASSERT(filter_->initialize(filterSpec, params) >= 0);
// check input params
auto msin = filter_->getInputParams();
CPPUNIT_ASSERT(msin.format == format && msin.width == width && msin.height == height);
// output params should now be valid
auto msout = filter_->getOutputParams();
CPPUNIT_ASSERT(msout.format >= 0 && msout.width > 0 && msout.height > 0);
}
void
MediaFilterTest::testComplexFilterParams()
MediaFilterTest::testFilterParams()
{
std::string filterSpec = "[main] [top] overlay=main_w-overlay_w-10:main_h-overlay_h-10";