Commit ac1940dc authored by Aline Gondim Santos

jenkins: linux-gnu, armeabi-v7, windows

Change-Id: I572b43a41344bfe438e18a2a5892b6b7be416e36
parent 9dcf4309
Showing changed files with 1147 additions and 995 deletions
# Based on the .clang-format for Qt Creator
#
# This is for clang-format >= 10.0.
#
# https://releases.llvm.org/10.0.0/tools/clang/docs/ClangFormatStyleOptions.html
#
---
Language: Cpp
AccessModifierOffset: -4
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: DontAlign
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: Inline
AllowShortIfStatementsOnASingleLine: false
AllowShortLambdasOnASingleLine: Inline
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterReturnType: TopLevelDefinitions
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
    AfterClass: true
    AfterControlStatement: false
    AfterEnum: false
    AfterFunction: true
    AfterNamespace: false
    AfterObjCDeclaration: false
    AfterStruct: true
    AfterUnion: false
    BeforeCatch: false
    BeforeElse: false
    IndentBraces: false
    SplitEmptyFunction: false
    SplitEmptyRecord: false
    SplitEmptyNamespace: false
BreakBeforeBinaryOperators: All
BreakBeforeBraces: Custom
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeComma
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 100
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
    - forever # avoids { wrapped to next line
    - foreach
    - Q_FOREACH
    - BOOST_FOREACH
IncludeCategories:
    - Regex: '^<Q.*'
      Priority: 200
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: false
IndentWidth: 4
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ""
MacroBlockEnd: ""
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBlockIndentWidth: 4
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 150
PenaltyBreakBeforeFirstCallParameter: 300
PenaltyBreakComment: 500
PenaltyBreakFirstLessLess: 400
PenaltyBreakString: 600
PenaltyExcessCharacter: 50
PenaltyReturnTypeOnItsOwnLine: 300
PointerAlignment: Left
ReflowComments: true
SortIncludes: false
SortUsingDeclarations: false
SpaceAfterCStyleCast: true
SpaceAfterTemplateKeyword: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 4
UseTab: Never
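
# For reference, a config like this is typically applied in place (a sketch,
# not part of the original file; assumes clang-format >= 10 on PATH, run from
# the repository root so --style=file finds this .clang-format; the source
# globs are hypothetical):
#   clang-format -i --style=file GreenScreen/*.cpp GreenScreen/*.h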
/build/
/contrib/
/simpleplugin/
modelEvaluation.py
/loader/
*msvc*
*android-toolchain-*
config.mak
*Libs*
@@ -6,14 +6,14 @@ set (Version 1.0)
project(${ProjectName} VERSION ${Version})
set (DAEMON ./../../daemon)
set (DAEMON ${PROJECT_SOURCE_DIR}/../../daemon)
set (PLUGIN_NAME GreenScreen)
set (JPL_FILE_NAME ${PLUGIN_NAME}.jpl)
set (DAEMON_SRC ${DAEMON}/src)
set (CONTRIB_PATH ${DAEMON}/contrib)
set (DESTINATION_PATH ./../build/)
set (PLUGINS_LIB ../lib)
set (PLUGINS_LIB ${PROJECT_SOURCE_DIR}/../lib)
set (JPL_DIRECTORY ${PROJECT_BINARY_DIR}/jpl)
set (LIBS_DIR ${PROJECT_SOURCE_DIR}/../contrib/Libs)
if(WIN32)
message(OS:\ WINDOWS\ ${CMAKE_SYSTEM_PROCESSOR})
@@ -23,8 +23,7 @@ if(WIN32)
set (CONTRIB_PLATFORM_CURT x64)
set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-windows)
set (LIBRARY_FILE_NAME ${PLUGIN_NAME}.dll)
set (LIBS_DIR $ENV{HOME}/Documents/GITHUB/Libs)
set (OPENCV $ENV{HOME}/Documents/GITHUB/opencv/build-bash/)
set (LIBS_BIN_DIR $ENV{PLUGIN_ENV})
set (FFMPEG ${CONTRIB_PATH}/build/ffmpeg/Build/win32/x64)
endif()
@@ -34,7 +33,7 @@ if(UNIX)
set (CONTRIB_PLATFORM_CURT ${CMAKE_SYSTEM_PROCESSOR})
set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-linux-gnu)
set (LIBRARY_FILE_NAME lib${PLUGIN_NAME}.so)
set (LIBS_DIR /home/${USER}/Libs)
set (LIBS_BIN_DIR /home/${USER}/Libs)
endif()
@@ -99,43 +98,43 @@ add_library(${ProjectName} SHARED ${plugin_SRC}
${plugin_HDR}
)
if (WIN32)
target_include_directories(${ProjectName} PUBLIC ${PROJECT_BINARY_DIR}
${PROJECT_SOURCE_DIR}
${PLUGINS_LIB}
${DAEMON_SRC}
${CONTRIB_PATH}
${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include
${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4
${OPENCV}/install/include
${FFMPEG}/include
${PLUGINS_LIB}
${LIBS_DIR}
${CONTRIB_PATH}/build/opencv/build/install/include
${LIBS_DIR}/${TENSORFLOW}/include
${LIBS_DIR}/${TENSORFLOW}/include/third_party/eigen3
${LIBS_DIR}/${TENSORFLOW}/include/flatbuffers
)
if (WIN32)
target_link_directories(${ProjectName} PUBLIC ${CONTRIB_PATH}
${LIBS_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
${OPENCV}/install/x64/vc16/staticlib
${LIBS_BIN_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
${CONTRIB_PATH}/build/opencv/build/lib/Release
${CONTRIB_PATH}/build/opencv/build/3rdparty/lib/Release
${FFMPEG}/bin
${LIBS_DIR}
${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib
${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty
${LIBS_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
)
target_link_libraries(${ProjectName} PUBLIC swscale avutil libpng opencv_imgcodecs411 opencv_imgproc411 opencv_core411 ${TFLIB} zlib)
endif()
if (UNIX)
target_include_directories(${ProjectName} PUBLIC ${PROJECT_BINARY_DIR}
${PROJECT_SOURCE_DIR}
${PLUGINS_LIB}
${DAEMON_SRC}
${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include
${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4
${LIBS_DIR}/${TENSORFLOW}/include
${LIBS_DIR}/${TENSORFLOW}/include/third_party/eigen3
${LIBS_DIR}/${TENSORFLOW}/include/flatbuffers
)
link_directories(${ProjectName} PUBLIC ${CONTRIB_PATH}
${LIBS_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
${OPENCV}/install/x64/vc16/staticlib
${FFMPEG}/bin
${LIBS_DIR}
${LIBS_BIN_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib
${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty
${LIBS_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
)
target_link_libraries(${ProjectName} PUBLIC swscale avutil libpng opencv_imgcodecs opencv_imgproc opencv_core ${TFLIB})
endif()
@@ -145,9 +144,9 @@ add_custom_command(
PRE_BUILD
COMMAND ${CMAKE_COMMAND} -E remove_directory -r ${JPL_DIRECTORY}
COMMAND ${CMAKE_COMMAND} -E remove_directory -r ${JPL_DIRECTORY}/../../../build/${ProjectName}
COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/../../../build/${ProjectName}
COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_SOURCE_DIR}/data ${JPL_DIRECTORY}/data
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBS_DIR}/${TENSORFLOW}/lib/ ${JPL_DIRECTORY}/lib
COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBS_BIN_DIR}/${TENSORFLOW}/lib/ ${JPL_DIRECTORY}/lib
COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/data/models
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/modelsSRC/${model} ${JPL_DIRECTORY}/data/models
COMMAND ${CMAKE_COMMAND} -E rename ${JPL_DIRECTORY}/data/models/${model} ${JPL_DIRECTORY}/data/models/mModel${modelType}
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/manifest.json ${JPL_DIRECTORY}
@@ -160,17 +159,19 @@ if (WIN32)
add_custom_command(
TARGET ${ProjectName}
POST_BUILD
COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/../../../build/x64-windows/${TENSORFLOW}
COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${ProjectName}.lib ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${LIBRARY_FILE_NAME} ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
COMMAND python ${PROJECT_SOURCE_DIR}/../assemblePlugin.py --plugins=GreenScreen
COMMAND python ${PROJECT_SOURCE_DIR}/../assemble-plugin.py --plugins=GreenScreen --extraPath=${TENSORFLOW}
COMMENT "Generating JPL archive"
)
else()
add_custom_command(
TARGET ${ProjectName}
POST_BUILD
COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/../../../build/x86_64-linux-gnu/${TENSORFLOW}
COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${LIBRARY_FILE_NAME} ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
COMMAND python ${PROJECT_SOURCE_DIR}/../assemblePlugin.py --plugins=GreenScreen
COMMAND python ${PROJECT_SOURCE_DIR}/../assemble-plugin.py --plugins=GreenScreen --extraPath=${TENSORFLOW}
COMMENT "Generating JPL archive"
)
@@ -15,20 +15,19 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
* USA.
*/
#include "TFInference.h"
// Std libraries
#include <fstream>
#include <numeric>
#include <iostream>
#include <numeric>
#include <stdlib.h>
#ifdef TFLITE
// Tensorflow headers
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/builtin_op_data.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
@@ -49,7 +48,9 @@ const char sep = separator();
const std::string TAG = "FORESEG";
namespace jami {
TensorflowInference::TensorflowInference(TFModel tfModel) : tfModel(tfModel) {}
TensorflowInference::TensorflowInference(TFModel tfModel)
: tfModel(tfModel)
{}
TensorflowInference::~TensorflowInference() {}
@@ -70,7 +71,6 @@ TensorflowInference::loadModel()
throw std::runtime_error("Failed to load the model file");
}
Plog::log(Plog::LogPriority::INFO, "TENSOR", "MODEL LOADED");
}
void
TensorflowInference::buildInterpreter()
@@ -89,13 +89,11 @@ TensorflowInference::buildInterpreter()
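// Note on the reflowed block below: buildInterpreter() may hand the graph to
// the Android NNAPI delegate; if ModifyGraphWithDelegate() fails it only logs
// the error, otherwise it allocates tensors and proceeds.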
if (interpreter->ModifyGraphWithDelegate(optionalNnApiDelegate) != kTfLiteOk) {
Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER ERROR!!!");
}
else {
} else {
Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER SET");
allocateTensors();
}
}
else {
} else {
allocateTensors();
}
}
@@ -129,8 +127,7 @@ TensorflowInference::describeModelTensors() const
int output = interpreter->outputs()[0];
oss << "input 0 index: " << input << std::endl;
oss << "output 0 index: " << output << std::endl;
oss << "=============== input dimensions ==============="
<< std::endl;
oss << "=============== input dimensions ===============" << std::endl;
Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str());
// get input dimension from the input tensor metadata
// assuming one input only
@@ -173,8 +170,7 @@ TensorflowInference::describeTensor(std::string prefix, int index) const
for (size_t i = 0; i < nbDimensions; i++) {
if (i == dimensions.size() - 1) {
tensorDescription << dimensions[i];
}
else {
} else {
tensorDescription << dimensions[i] << " x ";
}
}
@@ -203,7 +199,9 @@ TensorflowInference::runGraph()
{
for (size_t i = 0; i < tfModel.numberOfRuns; i++) {
if (interpreter->Invoke() != kTfLiteOk) {
Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "A problem occurred when running the graph");
Plog::log(Plog::LogPriority::INFO,
"RUN GRAPH",
"A problem occurred when running the graph");
}
}
}
@@ -224,7 +222,9 @@ void
TensorflowInference::LoadGraph()
{
tensorflow::GraphDef graph_def;
tensorflow::Status load_graph_status = tensorflow::ReadBinaryProto(tensorflow::Env::Default(), tfModel.modelPath, &graph_def);
tensorflow::Status load_graph_status = tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
tfModel.modelPath,
&graph_def);
if (!load_graph_status.ok()) {
allocated_ = false;
Plog::log(Plog::LogPriority::INFO, "LOAD GRAPH", "A problem occurred when loading the graph");
@@ -232,11 +232,14 @@ TensorflowInference::LoadGraph()
}
Plog::log(Plog::LogPriority::INFO, "LOAD GRAPH", "graph loaded");
// Plog::log(Plog::LogPriority::INFO, "GRAPH SIZE: ", std::to_string(graph_def.node_size()));
// for (auto& node : *graph_def.mutable_node())
// Plog::log(Plog::LogPriority::INFO, "GRAPH SIZE: ",
// std::to_string(graph_def.node_size())); for (auto& node :
// *graph_def.mutable_node())
// {
// Plog::log(Plog::LogPriority::INFO, "GRAPH NODE: ", node.name().c_str());
// // Plog::log(Plog::LogPriority::INFO, "\tNODE SIZE: ", node.().c_str());
// Plog::log(Plog::LogPriority::INFO, "GRAPH NODE: ",
// node.name().c_str());
// // Plog::log(Plog::LogPriority::INFO, "\tNODE SIZE: ",
// node.().c_str());
// }
PluginParameters* parameters = getGlobalPluginParameters();
@@ -245,8 +248,7 @@ TensorflowInference::LoadGraph()
if (parameters->useGPU) {
options.config.mutable_gpu_options()->set_allow_growth(true);
options.config.mutable_gpu_options()->set_per_process_gpu_memory_fraction(0.3);
}
else {
} else {
#ifdef WIN32
options.config.mutable_device_count()->insert({"CPU", 1});
options.config.mutable_device_count()->insert({"GPU", 0});
@@ -258,7 +260,9 @@ TensorflowInference::LoadGraph()
(&session)->reset(tensorflow::NewSession(options));
tensorflow::Status session_create_status = session->Create(graph_def);
if (!session_create_status.ok()) {
Plog::log(Plog::LogPriority::INFO, "INIT SESSION", "A problem occurred when initializing the session");
Plog::log(Plog::LogPriority::INFO,
"INIT SESSION",
"A problem occurred when initializing the session");
allocated_ = true;
return;
}
@@ -272,9 +276,14 @@ TensorflowInference::runGraph()
{
for (size_t i = 0; i < tfModel.numberOfRuns; i++) {
// Actually run the image through the model.
tensorflow::Status run_status = session->Run({{tfModel.inputLayer, imageTensor}}, {tfModel.outputLayer}, {}, &outputs);
tensorflow::Status run_status = session->Run({{tfModel.inputLayer, imageTensor}},
{tfModel.outputLayer},
{},
&outputs);
if (!run_status.ok()) {
Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "A problem occurred when running the graph");
Plog::log(Plog::LogPriority::INFO,
"RUN GRAPH",
"A problem occurred when running the graph");
}
}
}
@@ -287,4 +296,4 @@ TensorflowInference::init()
}
#endif
}
} // namespace jami
@@ -61,14 +61,13 @@ struct SessionOptions;
class TensorShape;
class Env;
enum DataType : int;
} // namespace namespace tensorflow
} // namespace tensorflow
#endif
namespace jami
namespace jami {
class TensorflowInference
{
class TensorflowInference {
public:
/**
* @brief TensorflowInference
@@ -150,4 +149,4 @@ protected:
bool allocated_ = false;
};
}
} // namespace jami
@@ -25,8 +25,11 @@
#include <vector>
#include "pluginParameters.h"
struct TFModelConfiguration {
TFModelConfiguration (std::string& model): modelPath{model} {}
struct TFModelConfiguration
{
TFModelConfiguration(std::string& model)
: modelPath {model}
{}
std::string modelPath;
std::vector<unsigned int> normalizationValues;
std::vector<int> dims = {1, 385, 385, 3}; // model Input dimensions
@@ -48,14 +51,24 @@ struct TFModelConfiguration {
std::string inputLayer = "sub_2";
std::string outputLayer = "float_segments";
#endif // TFLITE
};
struct TFModel : TFModelConfiguration {
TFModel(std::string&& model, std::string&& labels): TFModelConfiguration(model), labelsPath{labels}{}
TFModel(std::string& model, std::string& labels): TFModelConfiguration(model), labelsPath{labels}{}
TFModel(std::string&& model): TFModelConfiguration(model) {}
TFModel(std::string& model): TFModelConfiguration(model) {}
struct TFModel : TFModelConfiguration
{
TFModel(std::string&& model, std::string&& labels)
: TFModelConfiguration(model)
, labelsPath {labels}
{}
TFModel(std::string& model, std::string& labels)
: TFModelConfiguration(model)
, labelsPath {labels}
{}
TFModel(std::string&& model)
: TFModelConfiguration(model)
{}
TFModel(std::string& model)
: TFModelConfiguration(model)
{}
std::string labelsPath = " ";
unsigned int labelsPadding = 16;
#! /bin/bash
# Build the plugin for the project
if [ -z $DAEMON ]; then
export OSTYPE
ARCH=$(arch)
# Flags:
# -p: number of processors to use
# -c: Runtime plugin cpu/gpu setting.
# -t: target platform.
if [ -z "${DAEMON}" ]; then
DAEMON="./../../daemon"
echo "DAEMON not provided, building for ${DAEMON}"
echo "DAEMON not provided, building with ${DAEMON}"
fi
PLUGIN_NAME="GreenScreen"
JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
JPL_FILE_NAME="${PLUGIN_NAME}.jpl"
SO_FILE_NAME="lib${PLUGIN_NAME}.so"
DAEMON_SRC="${DAEMON}/src"
CONTRIB_PATH="${DAEMON}/contrib"
DESTINATION_PATH="./../build/"
PLUGINS_LIB="../lib"
LIBS_DIR="/home/${USER}/Libs"
LIBS_DIR="./../contrib/Libs"
if [ -z "${TF_LIBS_DIR}" ]; then
TF_LIBS_DIR="./../../../Libs"
fi
echo "Building with ${TF_LIBS_DIR}"
PLATFORM="linux-gnu"
PROCESSOR='GPU'
while getopts t:c:p OPT; do
case "$OPT" in
t)
PLATFORM="${OPTARG}"
if [ -z "${TF}" ]; then
if [ "$PLATFORM" = 'linux-gnu' ]; then
TF="_tensorflow_cc"
elif [ "$PLATFORM" = 'android' ]; then
TF="_tensorflowLite"
fi
fi
;;
c)
PROCESSOR="${OPTARG}"
;;
p)
;;
\?)
exit 1
;;
esac
done
if [ -z "${TF}" ]; then
TF="_tensorflow_cc"
fi
echo "Building with ${TF}"
mkdir ./data/models
if [[ "${TF}" = "_tensorflow_cc" ]] && [[ "${PLATFORM}" = "linux-gnu" ]]
then
if [ -z "$CUDALIBS" ]; then
rm -r ./data/models
echo "CUDALIBS must point to CUDA 10.1!"
exit
fi
if [ -z "$CUDNN" ]; then
rm -r ./data/models
echo "CUDNN must point to libcudnn.so 7!"
exit
fi
echo "Building for ${PROCESSOR}"
CONTRIB_PLATFORM_CURT=x86_64
CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-linux-gnu
CONTRIB_PLATFORM_CURT=${ARCH}
CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-${PLATFORM}
DESTINATION_PATH="./../build/${CONTRIB_PLATFORM}/${TF}"
mkdir -p "lib/${CONTRIB_PLATFORM}"
mkdir -p "${DESTINATION_PATH}"
mkdir -p lib/${CONTRIB_PLATFORM}
mkdir -p ${DESTINATION_PATH}/jpl
# Compile
clang++ -std=c++17 -shared -fPIC \
-Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
-Wall -Wextra \
-Wno-unused-variable \
-Wno-unused-function \
-Wno-unused-parameter \
-D"${PROCESSOR}" \
-I"." \
-I"${DAEMON_SRC}" \
-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
-I"${LIBS_DIR}/${TF}/include" \
-I"${LIBS_DIR}/${TF}/include/third_party/eigen3" \
-I"${PLUGINS_LIB}" \
main.cpp \
videoSubscriber.cpp \
pluginProcessor.cpp \
pluginMediaHandler.cpp \
TFInference.cpp \
pluginInference.cpp \
pluginParameters.cpp \
-L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
-L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/" \
-L"${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}-gpu61/" \
-lswscale \
-lavutil \
-lopencv_imgcodecs \
-lopencv_imgproc \
-lopencv_core \
-ltensorflow_cc \
-lpng \
-o "lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}"
cp "${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}-gpu61/libtensorflow_cc.so" "lib/$CONTRIB_PLATFORM/libtensorflow_cc.so.2"
cp "/usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4" "lib/$CONTRIB_PLATFORM"
cp "/usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55" "lib/$CONTRIB_PLATFORM"
cp "/usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16" "lib/$CONTRIB_PLATFORM"
cp "${CUDALIBS}/libcudart.so" "lib/$CONTRIB_PLATFORM/libcudart.so.10.0"
cp "${CUDNN}/libcublas.so.10" "lib/$CONTRIB_PLATFORM/libcublas.so.10.0"
cp "${CUDALIBS}/libcufft.so.10" "lib/$CONTRIB_PLATFORM/libcufft.so.10.0"
cp "${CUDALIBS}/libcurand.so.10" "lib/$CONTRIB_PLATFORM/libcurand.so.10.0"
cp "${CUDALIBS}/libcusolver.so.10" "lib/$CONTRIB_PLATFORM/libcusolver.so.10.0"
cp "${CUDALIBS}/libcusparse.so.10" "lib/$CONTRIB_PLATFORM/libcusparse.so.10.0"
cp "${CUDNN}/libcudnn.so.7" "lib/$CONTRIB_PLATFORM"
cp ./modelsSRC/mModel-resnet50float.pb ./data/models/mModel.pb
cp ./preferences-tfcc.json ./data/preferences.json
elif [ "${TF}" = "_tensorflowLite" ]
then
if [ "${PLATFORM}" = "linux-gnu" ]
then
CONTRIB_PLATFORM_CURT=${ARCH}
CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-${PLATFORM}
DESTINATION_PATH="./../build/${CONTRIB_PLATFORM}/${TF}"
mkdir -p "lib/${CONTRIB_PLATFORM}"
mkdir -p "${DESTINATION_PATH}"
# Compile
clang++ -std=c++17 -shared -fPIC \
@@ -30,12 +151,12 @@ clang++ -std=c++17 -shared -fPIC \
-Wno-unused-parameter \
-DTFLITE \
-I"." \
-I${DAEMON_SRC} \
-I"${DAEMON_SRC}" \
-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
-I${LIBS_DIR}/_tensorflow_distribution/include/flatbuffers \
-I${LIBS_DIR}/_tensorflow_distribution/include \
-I${PLUGINS_LIB} \
-I"${LIBS_DIR}/${TF}/include/flatbuffers" \
-I"${LIBS_DIR}/${TF}/include" \
-I"${PLUGINS_LIB}" \
main.cpp \
videoSubscriber.cpp \
pluginProcessor.cpp \
@@ -43,9 +164,9 @@ pluginMediaHandler.cpp \
TFInference.cpp \
pluginInference.cpp \
pluginParameters.cpp \
-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/ \
-L${LIBS_DIR}/_tensorflow_distribution/lib/${CONTRIB_PLATFORM}/ \
-L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
-L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/" \
-L"${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}/" \
-lswscale \
-lavutil \
-lopencv_imgcodecs \
@@ -53,26 +174,182 @@ pluginParameters.cpp \
-lopencv_core \
-ltensorflowlite \
-lpng \
-o lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}
# (above) Always put opencv_core after all other opencv libs
# (above) Always put avutil after all other ffmpeg libs
# (above) Always put png after all other libs
-o "lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}"
mkdir ./data/models
cp ${LIBS_DIR}/_tensorflow_distribution/lib/${CONTRIB_PLATFORM}/libtensorflowlite.so lib/$CONTRIB_PLATFORM
cp /usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4 lib/$CONTRIB_PLATFORM
cp /usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55 lib/$CONTRIB_PLATFORM
cp /usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16 lib/$CONTRIB_PLATFORM
cp "${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}/libtensorflowlite.so" "lib/$CONTRIB_PLATFORM"
cp "/usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4" "lib/$CONTRIB_PLATFORM"
cp "/usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55" "lib/$CONTRIB_PLATFORM"
cp "/usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16" "lib/$CONTRIB_PLATFORM"
elif [ "${PLATFORM}" = "android" ]
then
DESTINATION_PATH="./../build/android"
mkdir -p "${DESTINATION_PATH}"
if [ -z "$ANDROID_NDK" ]; then
ANDROID_NDK="/home/${USER}/Android/Sdk/ndk/21.1.6352462"
echo "ANDROID_NDK not provided, building with ${ANDROID_NDK}"
fi
#=========================================================
# Check if the ANDROID_ABI was provided
# if not, set default
#=========================================================
if [ -z "$ANDROID_ABI" ]; then
ANDROID_ABI="armeabi-v7a arm64-v8a"
echo "ANDROID_ABI not provided, building for ${ANDROID_ABI}"
fi
buildlib() {
echo "$CURRENT_ABI"
mkdir -p "lib/$CURRENT_ABI"
#=========================================================
# ANDROID TOOLS
#=========================================================
export HOST_TAG=linux-x86_64
export TOOLCHAIN=$ANDROID_NDK/toolchains/llvm/prebuilt/$HOST_TAG
if [ "$CURRENT_ABI" = armeabi-v7a ]
then
export AR=$TOOLCHAIN/bin/arm-linux-android-ar
export AS=$TOOLCHAIN/bin/arm-linux-android-as
export CC=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang
export CXX=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang++
export LD=$TOOLCHAIN/bin/arm-linux-android-ld
export RANLIB=$TOOLCHAIN/bin/arm-linux-android-ranlib
export STRIP=$TOOLCHAIN/bin/arm-linux-androideabi-strip
export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-arm/sysroot
elif [ "$CURRENT_ABI" = arm64-v8a ]
then
export AR=$TOOLCHAIN/bin/aarch64-linux-android-ar
export AS=$TOOLCHAIN/bin/aarch64-linux-android-as
export CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang
export CXX=$TOOLCHAIN/bin/aarch64-linux-android21-clang++
export LD=$TOOLCHAIN/bin/aarch64-linux-android-ld
export RANLIB=$TOOLCHAIN/bin/aarch64-linux-android-ranlib
export STRIP=$TOOLCHAIN/bin/aarch64-linux-android-strip
export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-arm64/sysroot
elif [ "$CURRENT_ABI" = x86_64 ]
then
export AR=$TOOLCHAIN/bin/x86_64-linux-android-ar
export AS=$TOOLCHAIN/bin/x86_64-linux-android-as
export CC=$TOOLCHAIN/bin/x86_64-linux-android21-clang
export CXX=$TOOLCHAIN/bin/x86_64-linux-android21-clang++
export LD=$TOOLCHAIN/bin/x86_64-linux-android-ld
export RANLIB=$TOOLCHAIN/bin/x86_64-linux-android-ranlib
export STRIP=$TOOLCHAIN/bin/x86_64-linux-android-strip
export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-x86_64/sysroot
else
echo "ABI NOT OK" >&2
rm -r lib/
rm -r ./data/models
exit 1
fi
#=========================================================
# CONTRIBS
#=========================================================
if [ "$CURRENT_ABI" = armeabi-v7a ]
then
CONTRIB_PLATFORM=arm-linux-androideabi
elif [ "$CURRENT_ABI" = arm64-v8a ]
then
CONTRIB_PLATFORM=aarch64-linux-android
elif [ "$CURRENT_ABI" = x86_64 ]
then
CONTRIB_PLATFORM=x86_64-linux-android
fi
#NDK SOURCES FOR cpufeatures
NDK_SOURCES=${ANDROID_NDK}/sources/android
#=========================================================
# LD_FLAGS
#=========================================================
if [ "$CURRENT_ABI" = armeabi-v7a ]
then
export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi/21"
elif [ "$CURRENT_ABI" = arm64-v8a ]
then
export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android/21"
elif [ "$CURRENT_ABI" = x86_64 ]
then
export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android/21"
fi
#=========================================================
# Compile CPU FEATURES, NEEDED FOR OPENCV
#=========================================================
$CC -c "$NDK_SOURCES/cpufeatures/cpu-features.c" -o cpu-features.o -o cpu-features.o --sysroot=$ANDROID_SYSROOT
#=========================================================
# Compile the plugin
#=========================================================
# Create so destination folder
$CXX --std=c++14 -O3 -g -fPIC \
-Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
-shared \
-Wall -Wextra \
-Wno-unused-variable \
-Wno-unused-function \
-Wno-unused-parameter \
-DTFLITE \
-I"." \
-I"${DAEMON_SRC}" \
-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
-I"${LIBS_DIR}/${TF}/include/flatbuffers" \
-I"${LIBS_DIR}/${TF}/include" \
-I"${PLUGINS_LIB}" \
main.cpp \
videoSubscriber.cpp \
pluginProcessor.cpp \
pluginMediaHandler.cpp \
TFInference.cpp \
pluginInference.cpp \
pluginParameters.cpp \
cpu-features.o \
-L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
-L"${TF_LIBS_DIR}/${TF}/lib/${CURRENT_ABI}/" \
-lswscale \
-lavutil \
-lopencv_imgcodecs \
-lopencv_imgproc \
-lopencv_core \
-llibpng \
-ltensorflowlite \
-llog -lz \
--sysroot=$ANDROID_SYSROOT \
-o "lib/$CURRENT_ABI/${SO_FILE_NAME}"
cp "${TF_LIBS_DIR}/${TF}/lib/${CURRENT_ABI}/libtensorflowlite.so" "lib/$CURRENT_ABI"
rm cpu-features.o
}
# Build the so
for i in ${ANDROID_ABI}; do
CURRENT_ABI=$i
buildlib
done
fi
cp ./modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite ./data/models/mModel.tflite
cp ./preferences-tflite.json ./data/preferences.json
fi
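# Package the plugin: the .jpl file is a plain zip archive holding the
# plugin's data/, manifest.json and lib/ trees.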
zip -r ${JPL_FILE_NAME} data manifest.json lib
mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/jpl/
mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/
# Cleanup
# Remove lib after compilation
rm -rf lib
rm -r ./data/models/
rm ./data/models/mModel.tflite
rm -r ./data/models
rm ./data/preferences.json
#! /bin/bash
# Build the plugin for the project
if [ -z $DAEMON ]; then
DAEMON="./../../daemon"
echo "DAEMON not provided, building for ${DAEMON}"
fi
if [ -z $ANDROID_NDK ]; then
ANDROID_NDK=/home/${USER}/Android/Sdk/ndk/21.1.6352462
echo "ANDROID_NDK not provided, building with ${ANDROID_NDK}"
fi
PLUGIN_NAME="GreenScreen"
JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
LIBS_DIR="/home/${USER}/Libs"
DAEMON_SRC="${DAEMON}/src"
CONTRIB_PATH="${DAEMON}/contrib"
DESTINATION_PATH="./../build/"
PLUGINS_LIB="../lib"
#=========================================================
# Check if the ANDROID_ABI was provided
# if not, set default
#=========================================================
if [ -z $ANDROID_ABI ]; then
ANDROID_ABI="armeabi-v7a arm64-v8a"
echo "ANDROID_ABI not provided, building for ${ANDROID_ABI}"
fi
buildlib() {
echo $CURRENT_ABI
#=========================================================
# ANDROID TOOLS
#=========================================================
export HOST_TAG=linux-x86_64
export TOOLCHAIN=$ANDROID_NDK/toolchains/llvm/prebuilt/$HOST_TAG
if [ $CURRENT_ABI = armeabi-v7a ]
then
export AR=$TOOLCHAIN/bin/arm-linux-android-ar
export AS=$TOOLCHAIN/bin/arm-linux-android-as
export CC=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang
export CXX=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang++
export LD=$TOOLCHAIN/bin/arm-linux-android-ld
export RANLIB=$TOOLCHAIN/bin/arm-linux-android-ranlib
export STRIP=$TOOLCHAIN/bin/arm-linux-androideabi-strip
export ANDROID_SYSROOT=./../../client-android/android-toolchain-21-arm/sysroot
elif [ $CURRENT_ABI = arm64-v8a ]
then
export AR=$TOOLCHAIN/bin/aarch64-linux-android-ar
export AS=$TOOLCHAIN/bin/aarch64-linux-android-as
export CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang
export CXX=$TOOLCHAIN/bin/aarch64-linux-android21-clang++
export LD=$TOOLCHAIN/bin/aarch64-linux-android-ld
export RANLIB=$TOOLCHAIN/bin/aarch64-linux-android-ranlib
export STRIP=$TOOLCHAIN/bin/aarch64-linux-android-strip
export ANDROID_SYSROOT=./../../client-android/android-toolchain-21-arm64/sysroot
elif [ $CURRENT_ABI = x86_64 ]
then
export AR=$TOOLCHAIN/bin/x86_64-linux-android-ar
export AS=$TOOLCHAIN/bin/x86_64-linux-android-as
export CC=$TOOLCHAIN/bin/x86_64-linux-android21-clang
export CXX=$TOOLCHAIN/bin/x86_64-linux-android21-clang++
export LD=$TOOLCHAIN/bin/x86_64-linux-android-ld
export RANLIB=$TOOLCHAIN/bin/x86_64-linux-android-ranlib
export STRIP=$TOOLCHAIN/bin/x86_64-linux-android-strip
export ANDROID_SYSROOT=./../../client-android/android-toolchain-21-x86_64/sysroot
else
echo "ABI NOT OK" >&2
exit 1
fi
#=========================================================
# CONTRIBS
#=========================================================
if [ $CURRENT_ABI = armeabi-v7a ]
then
CONTRIB_PLATFORM=arm-linux-androideabi
elif [ $CURRENT_ABI = arm64-v8a ]
then
CONTRIB_PLATFORM=aarch64-linux-android
elif [ $CURRENT_ABI = x86_64 ]
then
CONTRIB_PLATFORM=x86_64-linux-android
fi
# ASSETS
ANDROID_PROJECT_ASSETS=./../../client-android/ring-android/app/src/main/assets
# LIBS FOLDER
ANDROID_PROJECT_LIBS=./../../client-android/ring-android/app/src/main/libs/$CURRENT_ABI
#NDK SOURCES FOR cpufeatures
NDK_SOURCES=${ANDROID_NDK}/sources/android
#=========================================================
# LD_FLAGS
#=========================================================
if [ $CURRENT_ABI = armeabi-v7a ]
then
export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi/21"
elif [ $CURRENT_ABI = arm64-v8a ]
then
export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android/21"
elif [ $CURRENT_ABI = x86_64 ]
then
export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android/21"
fi
#=========================================================
# Compile CPU FEATURES, NEEDED FOR OPENCV
#=========================================================
$CC -c $NDK_SOURCES/cpufeatures/cpu-features.c -o cpu-features.o --sysroot=$ANDROID_SYSROOT
#=========================================================
# Compile the plugin
#=========================================================
# Create so destination folder
mkdir -p lib/$CURRENT_ABI
# Create so destination folder
$CXX --std=c++14 -O3 -g -fPIC \
-Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
-shared \
-Wall -Wextra \
-Wno-unused-variable \
-Wno-unused-function \
-Wno-unused-parameter \
-DTFLITE \
-I"." \
-I${DAEMON_SRC} \
-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
-I${LIBS_DIR}/_tensorflow_distribution/include/flatbuffers \
-I${LIBS_DIR}/_tensorflow_distribution/include \
-I${PLUGINS_LIB} \
main.cpp \
videoSubscriber.cpp \
pluginProcessor.cpp \
pluginMediaHandler.cpp \
TFInference.cpp \
pluginInference.cpp \
pluginParameters.cpp \
cpu-features.o \
-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
-L${LIBS_DIR}/_tensorflow_distribution/lib/${CURRENT_ABI}/ \
-lswscale \
-lavutil \
-lopencv_imgcodecs \
-lopencv_imgproc \
-lopencv_core \
-llibpng \
-ltensorflowlite \
-llog -lz \
--sysroot=$ANDROID_SYSROOT \
-o lib/$CURRENT_ABI/${SO_FILE_NAME}
# (above) Always put opencv_core after all other opencv libs when linking statically
# (above) Put libavutil after other ffmpeg libraries
cp ${LIBS_DIR}/_tensorflow_distribution/lib/${CURRENT_ABI}/libtensorflowlite.so lib/$CURRENT_ABI
}
mkdir ./data/models
cp ./modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite ./data/models/mModel.tflite
cp ./preferences-tflite.json ./data/preferences.json
# Build the so
for i in ${ANDROID_ABI}; do
CURRENT_ABI=$i
buildlib
done
#Export the plugin data folder
mkdir -p ${DESTINATION_PATH}/jpl/${PLUGIN_NAME}/
zip -r ${JPL_FILE_NAME} data manifest.json lib
mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/jpl/${PLUGIN_NAME}/
# Cleanup
# Remove cpu-features object after compilation
rm cpu-features.o
rm -rf lib
rm -r ./data/models
rm ./data/preferences.json
#! /bin/bash
# Build the plugin for the project
if [ -z $DAEMON ]; then
DAEMON="./../../daemon"
echo "DAEMON not provided, building for ${DAEMON}"
fi
if [ -z $CUDALIBS ]; then
CUDALIBS=~/anaconda3/envs/tf114/lib/
echo "CUDALIBS not provided, building for ${CUDALIBS}"
fi
if [ -z $PROCESSOR ]; then
PROCESSOR=GPU
echo "PROCESSOR not defined, building for GPU"
fi
PLUGIN_NAME="GreenScreen"
JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
DAEMON_SRC="${DAEMON}/src"
CONTRIB_PATH="${DAEMON}/contrib"
DESTINATION_PATH="./../build/"
PLUGINS_LIB="../lib"
LIBS_DIR="/home/${USER}/Libs"
CONTRIB_PLATFORM_CURT=x86_64
CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-linux-gnu
mkdir -p lib/${CONTRIB_PLATFORM}
mkdir -p ${DESTINATION_PATH}/jpl
# Compile
clang++ -std=c++17 -shared -fPIC \
-Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
-Wall -Wextra \
-Wno-unused-variable \
-Wno-unused-function \
-Wno-unused-parameter \
-D${PROCESSOR} \
-I"." \
-I${DAEMON_SRC} \
-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
-I${LIBS_DIR}/_tensorflow_cc/include \
-I${LIBS_DIR}/_tensorflow_cc/include/third_party/eigen3 \
-I${PLUGINS_LIB} \
main.cpp \
videoSubscriber.cpp \
pluginProcessor.cpp \
pluginMediaHandler.cpp \
TFInference.cpp \
pluginInference.cpp \
pluginParameters.cpp \
-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/ \
-L${LIBS_DIR}/_tensorflow_cc/lib/${CONTRIB_PLATFORM}-gpu61/ \
-lswscale \
-lavutil \
-lopencv_imgcodecs \
-lopencv_imgproc \
-lopencv_core \
-ltensorflow_cc \
-lpng \
-o lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}
# (above) Always put opencv_core after all other opencv libs
# (above) Always put avutil after all other ffmpeg libs
# (above) Always put png after all other libs
cp ${LIBS_DIR}/_tensorflow_cc/lib/${CONTRIB_PLATFORM}-gpu61/libtensorflow_cc.so lib/$CONTRIB_PLATFORM/libtensorflow_cc.so.2
cp /usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4 lib/$CONTRIB_PLATFORM
cp /usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55 lib/$CONTRIB_PLATFORM
cp /usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16 lib/$CONTRIB_PLATFORM
cp ${CUDALIBS}libcudart.so.10.0 lib/$CONTRIB_PLATFORM
cp ${CUDALIBS}libcublas.so.10.0 lib/$CONTRIB_PLATFORM
cp ${CUDALIBS}libcufft.so.10.0 lib/$CONTRIB_PLATFORM
cp ${CUDALIBS}libcurand.so.10.0 lib/$CONTRIB_PLATFORM
cp ${CUDALIBS}libcusolver.so.10.0 lib/$CONTRIB_PLATFORM
cp ${CUDALIBS}libcusparse.so.10.0 lib/$CONTRIB_PLATFORM
cp ${CUDALIBS}libcudnn.so.7 lib/$CONTRIB_PLATFORM
mkdir ./data/models
cp ./modelsSRC/mModel-resnet50float.pb ./data/models/mModel.pb
cp ./preferences-tfcc.json ./data/preferences.json
zip -r ${JPL_FILE_NAME} data manifest.json lib
mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/jpl/
# Cleanup
# Remove lib after compilation
rm -rf lib
rm ./data/models/mModel.pb
rm ./data/preferences.json
@@ -34,9 +34,10 @@
#define GreenScreen_VERSION_MAJOR 1
#define GreenScreen_VERSION_MINOR 0
extern "C"
{
void pluginExit(void) { }
extern "C" {
void
pluginExit(void)
{}
EXPORT_PLUGIN JAMI_PluginExitFunc
JAMI_dynPluginInit(const JAMI_PluginAPI* api)
@@ -44,11 +45,11 @@ JAMI_dynPluginInit(const JAMI_PluginAPI* api)
std::cout << "**************************" << std::endl << std::endl;
std::cout << "** GREENSCREEN PLUGIN **" << std::endl;
std::cout << "**************************" << std::endl << std::endl;
std::cout << " Version " << GreenScreen_VERSION_MAJOR << "." << GreenScreen_VERSION_MINOR << std::endl;
std::cout << " Version " << GreenScreen_VERSION_MAJOR << "." << GreenScreen_VERSION_MINOR
<< std::endl;
// If invokeService doesn't return an error
if(api)
{
if (api) {
std::map<std::string, std::string> ppm;
api->invokeService(api, "getPluginPreferences", &ppm);
std::string dataPath;
{
"name": "GreenScreen",
"version": "1.0",
"extractLibs": true,
"deps": [
"ffmpeg",
"opencv"],
"defines": [
"TFLITE=False",
"CPU=False"],
"custom_scripts": {
"pre_build": [
"mkdir msvc"
],
"build": [
"cmake --build ./msvc --config Release"
],
"post_build": []
}
}
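
For reference, the descriptor above drives the Windows build: "pre_build" creates
the msvc build directory and "build" compiles the Release configuration. A rough
manual equivalent (a sketch; the explicit configure step is an assumption, as the
descriptor leaves configuration to the plugin build tooling):

    mkdir msvc
    cmake -S . -B msvc
    cmake --build ./msvc --config Release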
@@ -15,21 +15,23 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
* USA.
*/
#include "pluginInference.h"
// Std libraries
#include "pluglog.h"
#include <cstring>
#include <numeric>
#include "pluglog.h"
const char sep = separator();
const std::string TAG = "FORESEG";
namespace jami {
PluginInference::PluginInference(TFModel model) : TensorflowInference(model)
PluginInference::PluginInference(TFModel model)
: TensorflowInference(model)
{
#ifndef TFLITE
// Initialize TENSORFLOW_CC lib
@@ -84,30 +86,26 @@ PluginInference::masksPredictions() const
int outputIndex = interpreter->outputs()[0];
std::vector<int> dims = getTensorDimensions(outputIndex);
int totalDimensions = 1;
for (size_t i = 0; i < dims.size(); i++)
{
for (size_t i = 0; i < dims.size(); i++) {
totalDimensions *= dims[i];
}
std::vector<float> out;
int type = interpreter->tensor(outputIndex)->type;
switch (type) {
case 1:
{
case 1: {
float* outputDataPointer = interpreter->typed_tensor<float>(outputIndex);
std::vector<float> output(outputDataPointer, outputDataPointer + totalDimensions);
out = std::vector<float>(output.begin(), output.end());
break;
}
case 2:
{
case 2: {
int* outputDataPointer = interpreter->typed_tensor<int>(outputIndex);
std::vector<int> output(outputDataPointer, outputDataPointer + totalDimensions);
out = std::vector<float>(output.begin(), output.end());
break;
}
case 4:
{
case 4: {
int64_t* outputDataPointer = interpreter->typed_tensor<int64_t>(outputIndex);
std::vector<int64_t> output(outputDataPointer, outputDataPointer + totalDimensions);
out = std::vector<float>(output.begin(), output.end());
@@ -137,7 +135,8 @@ PluginInference::setExpectedImageDimensions()
void
PluginInference::ReadTensorFromMat(const cv::Mat& image)
{
imageTensor = tensorflow::Tensor(tensorflow::DataType::DT_FLOAT, tensorflow::TensorShape({ 1, image.cols, image.rows, 3 }));
imageTensor = tensorflow::Tensor(tensorflow::DataType::DT_FLOAT,
tensorflow::TensorShape({1, image.cols, image.rows, 3}));
float* p = imageTensor.flat<float>().data();
cv::Mat temp(image.rows, image.cols, CV_32FC3, p);
image.convertTo(temp, CV_32FC3);
@@ -158,29 +157,25 @@ PluginInference::masksPredictions() const
int type = outputs[0].dtype();
switch (type) {
case tensorflow::DataType::DT_FLOAT:
{
case tensorflow::DataType::DT_FLOAT: {
for (int offset = 0; offset < flatSize; offset++) {
out.push_back(outputs[0].flat<float>()(offset));
}
break;
}
case tensorflow::DataType::DT_INT32:
{
case tensorflow::DataType::DT_INT32: {
for (int offset = 0; offset < flatSize; offset++) {
out.push_back(static_cast<float>(outputs[0].flat<tensorflow::int32>()(offset)));
}
break;
}
case tensorflow::DataType::DT_INT64:
{
case tensorflow::DataType::DT_INT64: {
for (int offset = 0; offset < flatSize; offset++) {
out.push_back(static_cast<float>(outputs[0].flat<tensorflow::int64>()(offset)));
}
break;
}
default:
{
default: {
for (int offset = 0; offset < flatSize; offset++) {
out.push_back(0);
}
@@ -15,7 +15,8 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
* USA.
*/
#pragma once
@@ -26,13 +27,14 @@
#include <opencv2/core.hpp>
// STL
#include <array>
#include <vector>
#include <tuple>
#include <iostream>
#include <tuple>
#include <vector>
namespace jami {
class PluginInference : public TensorflowInference {
class PluginInference : public TensorflowInference
{
public:
/**
* @brief PluginInference
@@ -63,7 +65,6 @@ public:
std::vector<float> masksPredictions() const;
/**
* @brief setExpectedImageDimensions
* Sets imageWidth and imageHeight from the sources
@@ -75,7 +76,6 @@ public:
int getImageHeight() const;
int getImageNbChannels() const;
private:
int imageWidth = 0;
int imageHeight = 0;
@@ -15,7 +15,8 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
* USA.
*/
#include "pluginMediaHandler.h"
@@ -28,8 +29,10 @@ const std::string TAG = "FORESEG";
namespace jami {
PluginMediaHandler::PluginMediaHandler(std::map<std::string, std::string>&& ppm, std::string&& datapath):
datapath_{datapath}, ppm_{ppm}
PluginMediaHandler::PluginMediaHandler(std::map<std::string, std::string>&& ppm,
std::string&& datapath)
: datapath_ {datapath}
, ppm_ {ppm}
{
setGlobalPluginParameters(ppm_);
setId(datapath_);
@@ -50,11 +53,12 @@ PluginMediaHandler::notifyAVFrameSubject(const StreamData& data, jami::avSubject
preferredStreamDirection = ppm_.at("streamslist") == "in" ? true : false;
}
oss << "preferredStreamDirection " << preferredStreamDirection << std::endl;
if (data.type == StreamType::video && !data.direction && data.direction == preferredStreamDirection) {
if (data.type == StreamType::video && !data.direction
&& data.direction == preferredStreamDirection) {
subject->attach(mVS.get()); // my image
oss << "got my sent image attached" << std::endl;
}
else if (data.type == StreamType::video && data.direction && data.direction == preferredStreamDirection)
} else if (data.type == StreamType::video && data.direction
&& data.direction == preferredStreamDirection)
subject->attach(mVS.get()); // the image I receive from the others on the call
Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
@@ -107,4 +111,4 @@ PluginMediaHandler::~PluginMediaHandler()
Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
detach();
}
}
} // namespace jami
@@ -15,7 +15,8 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
* USA.
*/
#pragma once
@@ -31,7 +32,8 @@ using avSubjectPtr = std::weak_ptr<jami::Observable<AVFrame*>>;
namespace jami {
class PluginMediaHandler : public jami::CallMediaHandler {
class PluginMediaHandler : public jami::CallMediaHandler
{
public:
PluginMediaHandler(std::map<std::string, std::string>&& ppm, std::string&& dataPath);
~PluginMediaHandler() override;
@@ -51,4 +53,4 @@ private:
const std::string datapath_;
std::map<std::string, std::string> ppm_;
};
}
} // namespace jami
@@ -42,15 +42,16 @@ setGlobalPluginParameters(std::map<std::string, std::string> pp)
}
}
void getGlobalPluginParameters(PluginParameters* mPluginParameters)
void
getGlobalPluginParameters(PluginParameters* mPluginParameters)
{
mPluginParameters->image = pluginParameters.image;
mPluginParameters->model = pluginParameters.model;
mPluginParameters->stream = pluginParameters.stream;
}
PluginParameters* getGlobalPluginParameters()
PluginParameters*
getGlobalPluginParameters()
{
return &pluginParameters;
}
\ No newline at end of file
@@ -24,7 +24,8 @@
#include <string>
#include <map>
struct PluginParameters {
struct PluginParameters
{
std::string stream = "out";
#ifdef TFLITE
bool useGPU = false;
@@ -15,7 +15,8 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
* USA.
*/
#include "pluginProcessor.h"
@@ -23,9 +24,9 @@
#include <algorithm>
#include <cstring>
// OpenCV headers
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
// Logger
#include <pluglog.h>
@@ -39,11 +40,10 @@ const std::string TAG = "FORESEG";
PluginParameters* mPluginParameters = getGlobalPluginParameters();
namespace jami
{
namespace jami {
PluginProcessor::PluginProcessor(const std::string& dataPath):
pluginInference{TFModel{dataPath + sep + "models" + sep + mPluginParameters->model}}
PluginProcessor::PluginProcessor(const std::string& dataPath)
: pluginInference {TFModel {dataPath + sep + "models" + sep + mPluginParameters->model}}
{
initModel();
setBackgroundImage(mPluginParameters->image);
@@ -60,8 +60,7 @@ PluginProcessor::setBackgroundImage(const std::string& backgroundPath)
cv::Mat newBackgroundImage = cv::imread(backgroundPath);
if (newBackgroundImage.cols == 0) {
Plog::log(Plog::LogPriority::ERR, TAG, "Background image not Loaded");
}
else {
} else {
Plog::log(Plog::LogPriority::INFO, TAG, "Background image Loaded");
cv::cvtColor(newBackgroundImage, newBackgroundImage, cv::COLOR_BGR2RGB);
newBackgroundImage.convertTo(newBackgroundImage, CV_32FC3);
@@ -80,8 +79,7 @@ PluginProcessor::initModel()
{
try {
pluginInference.init();
}
catch (std::exception& e) {
} catch (std::exception& e) {
Plog::log(Plog::LogPriority::ERR, TAG, e.what());
}
std::ostringstream oss;
@@ -89,7 +87,6 @@
Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
}
#ifdef TFLITE
void
PluginProcessor::feedInput(const cv::Mat& frame)
@@ -152,8 +149,7 @@ PluginProcessor::computePredictions()
void
PluginProcessor::printMask()
{
for (size_t i = 0; i < computedMask.size(); i++)
{
for (size_t i = 0; i < computedMask.size(); i++) {
// Log the predictions
std::ostringstream oss;
oss << "\nclass: " << computedMask[i] << std::endl;
@@ -161,14 +157,13 @@ PluginProcessor::printMask()
}
}
void
copyByLine(uchar* frameData, uchar* applyMaskData, const int lineSize, cv::Size size)
{
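// Fast path: lineSize == 3 * width means the RGB rows are contiguous, so one
// memcpy covers the whole frame; otherwise the else branch copies row by row,
// skipping each line's padding.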
if (3 * size.width == lineSize) {
std::memcpy(frameData, applyMaskData, size.height * size.width * 3);;
}
else {
std::memcpy(frameData, applyMaskData, size.height * size.width * 3);
} else {
int rows = size.height;
int offset = 0;
int maskoffset = 0;
@@ -181,10 +176,8 @@ copyByLine(uchar* frameData, uchar* applyMaskData, const int lineSize, cv::Size
}
void
PluginProcessor::drawMaskOnFrame(cv::Mat& frame,
cv::Mat& frameReduced,
std::vector<float>computedMask,
int lineSize, int angle)
PluginProcessor::drawMaskOnFrame(
cv::Mat& frame, cv::Mat& frameReduced, std::vector<float> computedMask, int lineSize, int angle)
{
if (computedMask.empty()) {
return;
@@ -203,7 +196,10 @@ PluginProcessor::drawMaskOnFrame(cv::Mat& frame,
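// TFLITE branch below: pixels the bundled DeepLab model labels 15 (the person
// class in the PASCAL VOC label map) become fully opaque; all other pixels are
// blended with the two previous masks (weights 0.6/0.3/0.1).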
if (maskImg.at<float>(j, i) == 15)
maskImg.at<float>(j, i) = 255.;
else
maskImg.at<float>(j, i) = (float)((int)((0.6 * maskImg.at<float>(j, i) + 0.3 * previousMasks[0].at<float>(j, i) + 0.1 * previousMasks[1].at<float>(j, i))) % 256);
maskImg.at<float>(j, i) = (float) ((int) ((0.6 * maskImg.at<float>(j, i)
+ 0.3 * previousMasks[0].at<float>(j, i)
+ 0.1 * previousMasks[1].at<float>(j, i)))
% 256);
}
}
#else // TFLITE
......@@ -214,8 +210,7 @@ PluginProcessor::drawMaskOnFrame(cv::Mat& frame,
if (M < 2) { // avoid detection when no one is in the frame
maskImg = 0. * maskImg;
}
else {
} else {
for (int i = 0; i < maskImg.cols; i++) {
for (int j = 0; j < maskImg.rows; j++) {
maskImg.at<float>(j, i) = (maskImg.at<float>(j, i) - m) / (M - m);
@@ -223,12 +218,13 @@ PluginProcessor::drawMaskOnFrame(cv::Mat& frame,
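// Non-TFLITE branch below: after min-max normalization, mask values under 0.4
// are zeroed, values in [0.4, 0.7) are smoothed against the two previous masks
// (weights 0.6/0.3/0.1) to reduce flicker, and the remainder become 1.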
if (maskImg.at<float>(j, i) < 0.4)
maskImg.at<float>(j, i) = 0.;
else if (maskImg.at<float>(j, i) < 0.7) {
float value = maskImg.at<float>(j, i) * 0.6 + previousMasks[0].at<float>(j, i) * 0.3 + previousMasks[1].at<float>(j, i) * 0.1;
float value = maskImg.at<float>(j, i) * 0.6
+ previousMasks[0].at<float>(j, i) * 0.3
+ previousMasks[1].at<float>(j, i) * 0.1;
maskImg.at<float>(j, i) = 0.;
if (value > 0.7)
maskImg.at<float>(j, i) = 1.;
}
else
} else
maskImg.at<float>(j, i) = 1.;
}
}
@@ -15,18 +15,19 @@
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
* USA.
*/
#pragma once
// STL
#include <condition_variable>
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>
#include <map>
// Filters
#include "pluginInference.h"
// AvFrame
......@@ -41,7 +42,8 @@ extern "C" {
namespace jami {
class PluginProcessor {
class PluginProcessor
{
public:
PluginProcessor(const std::string& dataPath);
@@ -61,7 +63,11 @@ public:
void computePredictions();
void printMask();
void drawMaskOnFrame(cv::Mat& frame, cv::Mat& frameReduced, std::vector<float>computedMask, int lineSize, int angle);
void drawMaskOnFrame(cv::Mat& frame,
cv::Mat& frameReduced,
std::vector<float> computedMask,
int lineSize,
int angle);
int getBackgroundRotation();
void setBackgroundRotation(int angle);
void setBackgroundImage(const std::string& backgroundPath);