Commit ea2933d4 authored by Philippe Gorley, committed by Sébastien Blin

media: add filters


Adds a dependency on libavfilter, which provides many useful utilities
such as hardware encoding, audio mixing, and video rotation.

Access to libavfilter is done through the MediaFilter class, which takes
a filter graph string and AVFrames as input and outputs filtered
AVFrames.

Adds unit tests for MediaFilter.

Change-Id: I0bf3869a9f19ad9cbbe1fc5e2d37eb83ece66ccc
Reviewed-by: Sebastien Blin <sebastien.blin@savoirfairelinux.com>
parent 5b60066f
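For orientation before the diff, here is a minimal usage sketch of the new MediaFilter API (illustration only; inputFrame is a hypothetical, caller-owned AVFrame* already filled with 640x480 YUV420P data; the full interface is in media_filter.h below):

#include "libav_deps.h"   // project convention: FFmpeg headers are included first
#include "media_filter.h" // the new class introduced by this commit

// Minimal sketch: scale the hypothetical "inputFrame" down to 320x240.
ring::MediaFilter filter;
ring::MediaFilterParameters params(AV_PIX_FMT_YUV420P,         // pixel format
                                   ring::rational<int>(1, 30), // time base
                                   640, 480,                   // input width/height
                                   ring::rational<int>(1),     // sample aspect ratio
                                   ring::rational<int>(30));   // frame rate
if (filter.initialize("scale=320:240", params) >= 0) {
    filter.feedInput(inputFrame);          // caller keeps ownership of inputFrame
    AVFrame* scaled = filter.readOutput(); // caller owns the returned frame
    if (scaled)
        av_frame_free(&scaled);
}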
@@ -449,6 +449,8 @@ PKG_CHECK_MODULES(LIBAVFORMAT, libavformat >= 56.40.101,, AC_MSG_ERROR([Missing
 PKG_CHECK_MODULES(LIBAVDEVICE, libavdevice >= 56.4.100,, AC_MSG_ERROR([Missing libavdevice development files]))
+PKG_CHECK_MODULES(LIBAVFILTER, libavfilter >= 5.40.101,, AC_MSG_ERROR([Missing libavfilter development files]))
 PKG_CHECK_MODULES(LIBSWSCALE, libswscale >= 3.1.101,, AC_MSG_ERROR([Missing libswscale development files]))
 dnl Video is default-enabled
@@ -3,7 +3,7 @@ FFMPEG_URL := https://git.ffmpeg.org/gitweb/ffmpeg.git/snapshot/$(FFMPEG_HASH).t
 PKGS+=ffmpeg
-ifeq ($(call need_pkg,"libavutil >= 55.75.100 libavcodec >= 57.106.101 libavformat >= 57.82.100 libavdevice >= 57.8.101 libswscale >= 4.7.103"),)
+ifeq ($(call need_pkg,"libavutil >= 55.75.100 libavcodec >= 57.106.101 libavformat >= 57.82.100 libavdevice >= 57.8.101 libavfilter >= 6.105.100 libswscale >= 4.7.103"),)
 PKGS_FOUND += ffmpeg
 endif
@@ -21,6 +21,7 @@ FFMPEGCONF += \
 --enable-swscale \
 --enable-protocols \
 --enable-bsfs \
+--enable-filters \
 --disable-programs
 #enable muxers/demuxers
@@ -18,7 +18,8 @@ libmedia_la_SOURCES = \
 media_codec.cpp \
 system_codec_container.cpp \
 srtp.c \
-recordable.cpp
+recordable.cpp \
+media_filter.cpp
 noinst_HEADERS = \
 rtp_session.h \
@@ -34,7 +35,8 @@ noinst_HEADERS = \
 system_codec_container.h \
 srtp.h \
 recordable.h \
-decoder_finder.h
+decoder_finder.h \
+media_filter.h
 libmedia_la_LIBADD = \
 ./audio/libaudio.la
@@ -44,12 +46,12 @@ libmedia_la_libADD = \
 ./video/libvideo.la
 endif
-libmedia_la_LDFLAGS = @LIBAVCODEC_LIBS@ @LIBAVFORMAT_LIBS@ @LIBAVDEVICE_LIBS@ @LIBSWSCALE_LIBS@ @LIBAVUTIL_LIBS@
+libmedia_la_LDFLAGS = @LIBAVCODEC_LIBS@ @LIBAVFORMAT_LIBS@ @LIBAVDEVICE_LIBS@ @LIBAVFILTER_LIBS@ @LIBSWSCALE_LIBS@ @LIBAVUTIL_LIBS@
 if HAVE_WIN32
 libmedia_la_LDFLAGS += -lws2_32 -lwsock32 -lshlwapi
 endif
-AM_CFLAGS=@LIBAVCODEC_CFLAGS@ @LIBAVFORMAT_CFLAGS@ @LIBAVDEVICE_CFLAGS@ @LIBSWSCALE_CFLAGS@
+AM_CFLAGS=@LIBAVCODEC_CFLAGS@ @LIBAVFORMAT_CFLAGS@ @LIBAVDEVICE_CFLAGS@ @LIBAVFILTER_CFLAGS@ @LIBSWSCALE_CFLAGS@
-AM_CXXFLAGS=@LIBAVCODEC_CFLAGS@ @LIBAVFORMAT_CFLAGS@ @LIBAVDEVICE_CFLAGS@ @LIBSWSCALE_CFLAGS@
+AM_CXXFLAGS=@LIBAVCODEC_CFLAGS@ @LIBAVFORMAT_CFLAGS@ @LIBAVDEVICE_CFLAGS@ @LIBAVFILTER_CFLAGS@ @LIBSWSCALE_CFLAGS@
@@ -26,6 +26,7 @@
 extern "C" {
 #include <libavcodec/avcodec.h>
+#include <libavfilter/avfilter.h>
 #include <libavformat/avformat.h>
 #include <libavdevice/avdevice.h>
 #include <libswscale/swscale.h>
@@ -141,6 +141,9 @@ init_once()
 #endif
 avdevice_register_all();
 avformat_network_init();
+#if LIBAVFILTER_VERSION_INT < AV_VERSION_INT(7, 13, 100)
+avfilter_register_all();
+#endif
 #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 9, 100)
 av_lockmgr_register(avcodecManageMutex);
/*
* Copyright (C) 2018 Savoir-faire Linux Inc.
*
* Author: Philippe Gorley <philippe.gorley@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "libav_deps.h" // MUST BE INCLUDED FIRST
#include "logger.h"
#include "media_filter.h"
extern "C" {
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
}
#include <functional>
#include <memory>
#include <sstream>
namespace ring {
MediaFilter::MediaFilter()
{}
MediaFilter::~MediaFilter()
{
clean();
}
std::string
MediaFilter::getFilterDesc() const
{
return desc_;
}
int
MediaFilter::initialize(const std::string& filterDesc, MediaFilterParameters mfp)
{
std::vector<MediaFilterParameters> mfps;
mfps.push_back(mfp);
desc_ = filterDesc;
return initialize(desc_, mfps);
}
int
MediaFilter::initialize(const std::string& filterDesc, std::vector<MediaFilterParameters> mfps)
{
int ret = 0;
desc_ = filterDesc;
graph_ = avfilter_graph_alloc();
if (!graph_)
return fail("Failed to allocate filter graph", AVERROR(ENOMEM));
AVFilterInOut* in;
AVFilterInOut* out;
if ((ret = avfilter_graph_parse2(graph_, desc_.c_str(), &in, &out)) < 0)
return fail("Failed to parse filter graph", ret);
using AVFilterInOutPtr = std::unique_ptr<AVFilterInOut, std::function<void(AVFilterInOut*)>>;
AVFilterInOutPtr outputs(out, [](AVFilterInOut* f){ avfilter_inout_free(&f); });
AVFilterInOutPtr inputs(in, [](AVFilterInOut* f){ avfilter_inout_free(&f); });
if (outputs && outputs->next)
return fail("Filters with multiple outputs are not supported", AVERROR(ENOTSUP));
if ((ret = initOutputFilter(outputs.get())) < 0)
return fail("Failed to create output for filter graph", ret);
// make sure inputs linked list is the same size as mfps
size_t count = 0;
AVFilterInOut* dummyInput = inputs.get();
while (dummyInput && ++count) // increment count before evaluating its value
dummyInput = dummyInput->next;
if (count != mfps.size())
return fail("Size mismatch between number of inputs in filter graph and input parameter array",
AVERROR(EINVAL));
int index = 0;
for (AVFilterInOut* current = inputs.get(); current; current = current->next)
if ((ret = initInputFilter(current, mfps[index++])) < 0)
return fail("Failed to create input for filter graph", ret);
if ((ret = avfilter_graph_config(graph_, nullptr)) < 0)
return fail("Failed to configure filter graph", ret);
RING_DBG() << "Filter graph initialized with: " << desc_;
initialized_ = true;
return 0;
}
int
MediaFilter::feedInput(AVFrame* frame)
{
int ret = 0;
if (inputs_.size() == 0)
return fail("No inputs found", AVERROR(EINVAL));
auto filterCtx = inputs_[0];
if (!filterCtx)
return fail("No inputs found", AVERROR(EINVAL));
if ((ret = av_buffersrc_write_frame(filterCtx, frame)) < 0)
return fail("Could not pass frame to filters", ret);
return 0;
}
int
MediaFilter::feedInput(AVFrame* frame, std::string inputName)
{
int ret = 0;
for (size_t i = 0; i < inputs_.size(); ++i) {
auto filterCtx = inputs_[i];
int requested = av_buffersrc_get_nb_failed_requests(filterCtx);
if (requested > 0)
RING_WARN() << inputNames_[i] << " filter needs more input to produce output";
if (inputNames_[i] != inputName)
continue;
if ((ret = av_buffersrc_write_frame(filterCtx, frame)) < 0)
return fail("Could not pass frame to filters", ret);
else
return 0;
}
std::stringstream ss;
ss << "Specified filter (" << inputName << ") not found";
return fail(ss.str(), AVERROR(EINVAL));
}
AVFrame*
MediaFilter::readOutput()
{
int ret = 0;
AVFrame* frame = av_frame_alloc();
ret = av_buffersink_get_frame_flags(output_, frame, 0);
if (ret >= 0) {
return frame;
} else if (ret == AVERROR(EAGAIN)) {
RING_WARN() << "No frame available in sink: " << output_->filter->name
<< " (" << output_->name << "): send more input";
} else if (ret == AVERROR_EOF) {
RING_WARN() << "Filters have reached EOF, no more frames will be output";
} else {
fail("Error occurred while pulling from filter graph", ret);
}
av_frame_free(&frame);
return NULL;
}
int
MediaFilter::initOutputFilter(AVFilterInOut* out)
{
int ret = 0;
const AVFilter* buffersink;
AVFilterContext* buffersinkCtx = nullptr;
AVMediaType mediaType = avfilter_pad_get_type(out->filter_ctx->input_pads, out->pad_idx);
if (mediaType == AVMEDIA_TYPE_VIDEO)
buffersink = avfilter_get_by_name("buffersink");
else
buffersink = avfilter_get_by_name("abuffersink");
if ((ret = avfilter_graph_create_filter(&buffersinkCtx, buffersink, "out",
nullptr, nullptr, graph_)) < 0) {
avfilter_free(buffersinkCtx);
return fail("Failed to create buffer sink", ret);
}
if ((ret = avfilter_link(out->filter_ctx, out->pad_idx, buffersinkCtx, 0)) < 0) {
avfilter_free(buffersinkCtx);
return fail("Could not link buffer sink to graph", ret);
}
output_ = buffersinkCtx;
return ret;
}
int
MediaFilter::initInputFilter(AVFilterInOut* in, MediaFilterParameters mfp)
{
int ret = 0;
bool simple = !in->name; // simple filters don't require the graph input to be labelled
AVBufferSrcParameters* params = av_buffersrc_parameters_alloc();
if (!params)
return -1;
const AVFilter* buffersrc;
AVMediaType mediaType = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
params->format = mfp.format;
params->time_base.num = mfp.timeBase.numerator();
params->time_base.den = mfp.timeBase.denominator();
if (mediaType == AVMEDIA_TYPE_VIDEO) {
params->width = mfp.width;
params->height = mfp.height;
params->sample_aspect_ratio.num = mfp.aspectRatio.numerator();
params->sample_aspect_ratio.den = mfp.aspectRatio.denominator();
params->frame_rate.num = mfp.frameRate.numerator();
params->frame_rate.den = mfp.frameRate.denominator();
buffersrc = avfilter_get_by_name("buffer");
} else {
params->sample_rate = mfp.sampleRate;
params->channel_layout = av_get_default_channel_layout(mfp.nbChannels);
buffersrc = avfilter_get_by_name("abuffer");
}
AVFilterContext* buffersrcCtx = nullptr;
if (buffersrc) {
char name[128];
if (simple)
snprintf(name, sizeof(name), "buffersrc");
else
snprintf(name, sizeof(name), "buffersrc_%s_%d", in->name, in->pad_idx);
buffersrcCtx = avfilter_graph_alloc_filter(graph_, buffersrc, name);
}
if (!buffersrcCtx) {
av_free(params);
return fail("Failed to allocate filter graph input", AVERROR(ENOMEM));
}
ret = av_buffersrc_parameters_set(buffersrcCtx, params);
av_free(params);
if (ret < 0)
return fail("Failed to set filter graph input parameters", ret);
if ((ret = avfilter_init_str(buffersrcCtx, nullptr)) < 0)
return fail("Failed to initialize buffer source", ret);
if ((ret = avfilter_link(buffersrcCtx, 0, in->filter_ctx, in->pad_idx)) < 0)
return fail("Failed to link buffer source to graph", ret);
inputs_.push_back(buffersrcCtx);
if (simple)
inputNames_.push_back("default");
else
inputNames_.push_back(in->name);
return ret;
}
int
MediaFilter::fail(std::string msg, int err)
{
if (!msg.empty())
RING_ERR() << msg << ": " << libav_utils::getError(err);
failed_ = true;
//clean();
return err;
}
void
MediaFilter::clean()
{
avfilter_graph_free(&graph_);
}
} // namespace ring
/*
* Copyright (C) 2018 Savoir-faire Linux Inc.
*
* Author: Philippe Gorley <philippe.gorley@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#pragma once
#include "config.h"
#include "noncopyable.h"
#include "rational.h"
#include <map>
#include <string>
#include <vector>
class AVFilterContext;
class AVFilterGraph;
namespace ring {
/**
* Contains necessary parameters for a filter graph input.
*/
struct MediaFilterParameters {
/* Video and audio */
int format {-1}; // Default is an invalid FFmpeg format (both audio and video)
rational<int> timeBase;
/* Video */
int width {0};
int height {0};
rational<int> aspectRatio;
rational<int> frameRate;
MediaFilterParameters(int fmt, rational<int> tb, int w, int h, rational<int> sar, rational<int> fr)
: format(fmt)
, timeBase(tb)
, width(w)
, height(h)
, aspectRatio(sar)
, frameRate(fr)
{}
/* Audio */
int sampleRate {0};
int nbChannels {0};
MediaFilterParameters(int fmt, rational<int> tb, int sr, int channels)
: format(fmt)
, timeBase(tb)
, sampleRate(sr)
, nbChannels(channels)
{}
};
/**
* Provides access to libavfilter.
*
* Can be used for simple filters (1 input, 1 output), or complex filters (multiple inputs, 1 output).
* Multiple outputs are not supported. They add complexity for little gain.
*
* For information on how to write a filter graph description, see:
* https://ffmpeg.org/ffmpeg-filters.html
* http://trac.ffmpeg.org/wiki/FilteringGuide
*
* For complex filters, it is required to name each filter graph input. These names are used to feed the correct input.
* It is the same name that will be passed as second argument to feedInput(AVFrame*, std::string). This is not required
* for simple filters, as there is only one input.
*
* Simple filter: "scale=320:240"
* Scales the input to 320x240. No need to specify input names.
*
* Complex filter: "[in1] scale=iw/4:ih/4 [mid]; [in2] [mid] overlay=main_w-overlay_w-10:main_h-overlay_h-10"
* in1 will be scaled to 1/16th its size and placed over in2 in the bottom right corner. When feeding frames to
* the filter, you need to specify whether the frame is destined for in1 or in2.
*/
class MediaFilter {
public:
MediaFilter();
~MediaFilter();
/**
* Returns the current filter graph string.
*/
std::string getFilterDesc() const;
/**
* Initializes the filter graph with 1 input.
*
* NOTE This method will fail if @filterDesc has more than 1 input.
* NOTE Wraps mfp in a vector and calls initialize.
*/
int initialize(const std::string& filterDesc, MediaFilterParameters mfp);
/**
* Initializes the filter graph with one or more inputs and one output. Returns a negative code on error.
*
* NOTE @mfps must be in the same order as the inputs in @filterDesc
*/
int initialize(const std::string& filterDesc, std::vector<MediaFilterParameters> mfps);
/**
* Give the filter graph an input frame. Caller is responsible for freeing the frame.
*
* NOTE This is for filters with 1 input.
*/
int feedInput(AVFrame* frame);
/**
* Give the specified source filter an input frame. Caller is responsible for freeing the frame.
*
* NOTE Will fail if @inputName is not found in the graph.
*/
int feedInput(AVFrame* frame, std::string inputName);
/**
* Pull a frame from the filter graph. Caller owns the frame reference.
*
* Returns AVERROR(EAGAIN) if filter graph requires more input.
*/
AVFrame* readOutput(); // frame reference belongs to caller
private:
NON_COPYABLE(MediaFilter);
/**
* Initializes output of filter graph.
*/
int initOutputFilter(AVFilterInOut* out);
/**
* Initializes an input of filter graph.
*/
int initInputFilter(AVFilterInOut* in, MediaFilterParameters mfp);
/**
* Convenience method that prints @msg and returns err.
*
* NOTE @msg should not be null.
*/
int fail(std::string msg, int err);
/**
* Frees resources used by MediaFilter.
*/
void clean();
/**
* Filter graph pointer.
*/
AVFilterGraph* graph_ = nullptr;
/**
* Filter graph output. Corresponds to a buffersink/abuffersink filter.
*/
AVFilterContext* output_;
/**
* List of filter graph inputs. Each corresponds to a buffer/abuffer filter.
*/
std::vector<AVFilterContext*> inputs_;
/**
* List of filter graph input names. Same order as @inputs_.
*/
std::vector<std::string> inputNames_;
/**
* Filter graph string.
*/
std::string desc_ {};
/**
* Flag to know whether or not the filter graph was initialized.
*/
bool initialized_ {false};
/**
* Flag to know whether or not there was a failure during initialization or processing.
*/
bool failed_ {false};
};
}; // namespace ring
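Before the test changes, a hedged sketch of driving a complex (multi-input) graph, mirroring the class comment above; mainFrame, topFrame, mainParams and topParams are hypothetical caller-prepared values built like the simple case:

// Sketch only: composite a small "top" frame over a larger "main" frame.
const std::string graph =
    "[main] [top] overlay=main_w-overlay_w-10:main_h-overlay_h-10";
std::vector<ring::MediaFilterParameters> params = {mainParams, topParams}; // same order as [main] [top]
ring::MediaFilter filter;
if (filter.initialize(graph, params) >= 0) {
    filter.feedInput(mainFrame, "main"); // the label selects the matching buffer source
    filter.feedInput(topFrame, "top");
    AVFrame* composited = filter.readOutput(); // caller owns the returned frame
    if (composited)
        av_frame_free(&composited);
}

The unit test below (testComplexVideoFilter) exercises the same pattern with concrete frames.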
@@ -61,4 +61,10 @@ ut_string_utils_SOURCES = string_utils/testString_utils.cpp
 check_PROGRAMS += ut_video_input
 ut_video_input_SOURCES = media/video/testVideo_input.cpp
+#
+# media_filter
+#
+check_PROGRAMS += ut_media_filter
+ut_media_filter_SOURCES = media/test_media_filter.cpp
 TESTS = $(check_PROGRAMS)
/*
* Copyright (C) 2018 Savoir-faire Linux Inc.
*
* Author: Philippe Gorley <philippe.gorley@savoirfairelinux.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <cppunit/TestAssert.h>
#include <cppunit/TestFixture.h>
#include <cppunit/extensions/HelperMacros.h>
#include "dring.h"
#include "libav_deps.h"
#include "media_filter.h"
#include "../../test_runner.h"
namespace ring { namespace test {
class MediaFilterTest : public CppUnit::TestFixture {
public:
static std::string name() { return "media_filter"; }
void setUp();
void tearDown();
private:
void testSimpleVideoFilter();
void testSimpleAudioFilter();
void testComplexVideoFilter();
CPPUNIT_TEST_SUITE(MediaFilterTest);
CPPUNIT_TEST(testSimpleVideoFilter);
CPPUNIT_TEST(testSimpleAudioFilter);
CPPUNIT_TEST(testComplexVideoFilter);
CPPUNIT_TEST_SUITE_END();
std::unique_ptr<MediaFilter> filter_;
AVFrame* frame_ = nullptr;
AVFrame* extra_ = nullptr; // used for filters with multiple inputs
};
CPPUNIT_TEST_SUITE_NAMED_REGISTRATION(MediaFilterTest, MediaFilterTest::name());
void
MediaFilterTest::setUp()
{
DRing::init(DRing::InitFlag(DRing::DRING_FLAG_DEBUG | DRing::DRING_FLAG_CONSOLE_LOG));
libav_utils::ring_avcodec_init();
filter_.reset(new MediaFilter);
}
void
MediaFilterTest::tearDown()
{
av_frame_free(&frame_);
av_frame_free(&extra_);
DRing::fini();
}
static void
fill_yuv_image(uint8_t *data[4], int linesize[4], int width, int height, int frame_index)
{
int x, y;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
data[0][y * linesize[0] + x] = x + y + frame_index * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
}
}
}
static void
fill_samples(uint16_t* samples, int sampleRate, int nbSamples, int nbChannels, float tone)
{
const constexpr float pi = 3.14159265358979323846264338327950288; // M_PI
const float tincr = 2 * pi * tone / sampleRate;
float t = 0;
for (int i = 0; i < 200; ++i) {
for (int j = 0; j < nbSamples; ++j) {
samples[2 * j] = static_cast<int>(sin(t) * 10000);
for (int k = 1; k < nbChannels; ++k) {
samples[2 * j + k] = samples[2 * j];
}
t += tincr;
}
}
}
void
MediaFilterTest::testSimpleVideoFilter()
{
std::string filterSpec = "scale=200x100";
// constants
const constexpr int width = 320;
const constexpr int height = 240;
const constexpr AVPixelFormat format = AV_PIX_FMT_YUV420P;
// prepare video frame
frame_ = av_frame_alloc();
frame_->format = format;
frame_->width = width;
frame_->height = height;
// construct the filter parameters
rational<int> one = rational<int>(1);
auto params = MediaFilterParameters(format, one, width, height, one, one);
// allocate and fill frame buffers
CPPUNIT_ASSERT(av_frame_get_buffer(frame_, 32) >= 0);
fill_yuv_image(frame_->data, frame_->linesize, frame_->width, frame_->height, 0);
// prepare filter
CPPUNIT_ASSERT(filter_->initialize(filterSpec, params) >= 0);
// apply filter
CPPUNIT_ASSERT(filter_->feedInput(frame_) >= 0);
frame_ = filter_->readOutput();
CPPUNIT_ASSERT(frame_);
// check if the filter worked
CPPUNIT_ASSERT(frame_->width == 200 && frame_->height == 100);
}
void
MediaFilterTest::testSimpleAudioFilter()
{
std::string filterSpec = "aformat=sample_fmts=u8";
// constants
const constexpr int nbSamples = 100;
const constexpr int64_t channelLayout = AV_CH_LAYOUT_STEREO;
const constexpr int sampleRate = 44100;
const constexpr enum AVSampleFormat format = AV_SAMPLE_FMT_S16;
// prepare audio frame
frame_ = av_frame_alloc();
frame_->format = format;
frame_->channel_layout = channelLayout;
frame_->nb_samples = nbSamples;
frame_->sample_rate = sampleRate;
frame_->channels = av_get_channel_layout_nb_channels(channelLayout);
// construct the filter parameters
auto params = MediaFilterParameters(format, rational<int>(1, 1), sampleRate, frame_->channels);
// allocate and fill frame buffers
CPPUNIT_ASSERT(av_frame_get_buffer(frame_, 0) >= 0);
fill_samples(reinterpret_cast<uint16_t*>(frame_->data[0]), sampleRate, nbSamples, frame_->channels, 440.0);
// prepare filter
CPPUNIT_ASSERT(filter_->initialize(filterSpec, params) >= 0);
// apply filter
CPPUNIT_ASSERT(filter_->feedInput(frame_) >= 0);
frame_ = filter_->readOutput();
CPPUNIT_ASSERT(frame_);
// check if the filter worked
CPPUNIT_ASSERT(frame_->format == AV_SAMPLE_FMT_U8);
}
void
MediaFilterTest::testComplexVideoFilter()
{
std::string filterSpec = "[main] [top] overlay=main_w-overlay_w-10:main_h-overlay_h-10";
std::string main = "main";
std::string top = "top";
// constants
const constexpr int width1 = 320;
const constexpr int height1 = 240;
const constexpr int width2 = 30;
const constexpr int height2 = 30;
const constexpr AVPixelFormat format = AV_PIX_FMT_YUV420P;
// prepare video frame
frame_ = av_frame_alloc();
frame_->format = format;
frame_->width = width1;
frame_->height = height1;
extra_ = av_frame_alloc();
extra_->format = format;
extra_->width = width2;
extra_->height = height2;
// construct the filter parameters
rational<int> one = rational<int>(1);
auto params1 = MediaFilterParameters(format, one, width1, height1, one, one);
auto params2 = MediaFilterParameters(format, one, width2, height2, one, one);
// allocate and fill frame buffers
CPPUNIT_ASSERT(av_frame_get_buffer(frame_, 32) >= 0);
fill_yuv_image(frame_->data, frame_->linesize, frame_->width, frame_->height, 0);
CPPUNIT_ASSERT(av_frame_get_buffer(extra_, 32) >= 0);
fill_yuv_image(extra_->data, extra_->linesize, extra_->width, extra_->height, 0);
// prepare filter
auto vec = std::vector<MediaFilterParameters>();
vec.push_back(params1);
vec.push_back(params2);
CPPUNIT_ASSERT(filter_->initialize(filterSpec, vec) >= 0);
// apply filter
CPPUNIT_ASSERT(filter_->feedInput(frame_, main) >= 0);
CPPUNIT_ASSERT(filter_->feedInput(extra_, top) >= 0);
av_frame_free(&frame_);
av_frame_free(&extra_);
frame_ = filter_->readOutput();
CPPUNIT_ASSERT(frame_);
// check if the filter worked
CPPUNIT_ASSERT(frame_->width == width1 && frame_->height == height1);
}
}} // namespace ring::test
RING_TEST_RUNNER(ring::test::MediaFilterTest::name());