Skip to content
Snippets Groups Projects
Commit 66b9adbf authored by Aline Gondim Santos's avatar Aline Gondim Santos
Browse files

adds gpu options for desktop

Change-Id: Ica7c2a2892919ee1f81fd9f96c174f7448ae4d3a
parent 20a28760
No related branches found
No related tags found
No related merge requests found
...@@ -219,11 +219,21 @@ namespace jami ...@@ -219,11 +219,21 @@ namespace jami
return ; //tensorflow::errors::NotFound("Failed to load compute graph at '", return ; //tensorflow::errors::NotFound("Failed to load compute graph at '",
//tfModel.modelPath.c_str(), "'"); //tfModel.modelPath.c_str(), "'");
} }
(&session)->reset(tensorflow::NewSession(tensorflow::SessionOptions()));
PluginParameters* parameters = getGlobalPluginParameters();
tensorflow::SessionOptions options;
if(parameters->useGPU)
{
options.config.mutable_gpu_options()->set_allow_growth(true);
options.config.mutable_gpu_options()->set_per_process_gpu_memory_fraction(0.5);
}
(&session)->reset(tensorflow::NewSession(options));
tensorflow::Status session_create_status = session->Create(graph_def); tensorflow::Status session_create_status = session->Create(graph_def);
if (!session_create_status.ok()) { if (!session_create_status.ok()) {
return ; return ;
} }
allocated = true; allocated = true;
} }
......
...@@ -25,6 +25,7 @@ namespace tflite ...@@ -25,6 +25,7 @@ namespace tflite
#include <tensorflow/core/framework/tensor.h> #include <tensorflow/core/framework/tensor.h>
#include <tensorflow/core/framework/types.pb.h> #include <tensorflow/core/framework/types.pb.h>
#include <tensorflow/core/platform/init_main.h> #include <tensorflow/core/platform/init_main.h>
#include <tensorflow/core/protobuf/config.pb.h>
namespace tensorflow namespace tensorflow
{ {
...@@ -32,6 +33,7 @@ namespace tensorflow ...@@ -32,6 +33,7 @@ namespace tensorflow
class Status; class Status;
class GraphDef; class GraphDef;
class Session; class Session;
struct SessionOptions;
class TensorShape; class TensorShape;
class Env; class Env;
enum DataType:int; enum DataType:int;
......
#! /bin/bash
# Build the "foregroundsegmentation" plugin:
#   1. compile the plugin sources into a shared library,
#   2. bundle it with the runtime libraries it depends on,
#   3. package everything into a .jpl archive under ${DESTINATION_PATH}.

# Abort on the first failing command so a broken build is never packaged.
set -e

# Path to the daemon sources; the caller may override it via the DAEMON
# environment variable. Quoted test avoids a syntax error when unset.
if [ -z "$DAEMON" ]; then
    DAEMON="./../../daemon"
    echo "DAEMON not provided, building for ${DAEMON}"
fi

PLUGIN_NAME="foregroundsegmentation"
JPL_FILE_NAME="${PLUGIN_NAME}.jpl"
SO_FILE_NAME="lib${PLUGIN_NAME}.so"
DAEMON_SRC="${DAEMON}/src"
CONTRIB_PATH="${DAEMON}/contrib"
DESTINATION_PATH="./../build/"
PLUGINS_LIB="../lib"
# Prebuilt TensorFlow C++ libraries live under the user's home directory.
LIBS_DIR="/home/${USER}/Libs"

CONTRIB_PLATFORM_CURT=x86_64
CONTRIB_PLATFORM="${CONTRIB_PLATFORM_CURT}-linux-gnu"

mkdir -p "lib/${CONTRIB_PLATFORM_CURT}"
mkdir -p "${DESTINATION_PATH}/${CONTRIB_PLATFORM}/jpl"

# Compile the plugin into a position-independent shared object.
# -rpath "${ORIGIN}" makes the plugin resolve its bundled libs at runtime.
clang++ -std=c++14 -shared -fPIC \
    -Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
    -Wall -Wextra \
    -Wno-unused-variable \
    -Wno-unused-function \
    -Wno-unused-parameter \
    -I"." \
    -I"${DAEMON_SRC}" \
    -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
    -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
    -I"${LIBS_DIR}/_tensorflow_cc/include" \
    -I"${LIBS_DIR}/_tensorflow_cc/include/third_party/eigen3" \
    -I"${PLUGINS_LIB}" \
    main.cpp \
    videoSubscriber.cpp \
    pluginProcessor.cpp \
    pluginMediaHandler.cpp \
    TFInference.cpp \
    pluginInference.cpp \
    pluginParameters.cpp \
    -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
    -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/" \
    -L"${LIBS_DIR}/_tensorflow_cc/lib/${CONTRIB_PLATFORM}-gpu61/" \
    -lswscale \
    -lavutil \
    -lopencv_imgcodecs \
    -lopencv_imgproc \
    -lopencv_core \
    -ltensorflow_cc \
    -lpng \
    -o "lib/${CONTRIB_PLATFORM_CURT}/${SO_FILE_NAME}"
# (above) Always put opencv_core after all other opencv libs
# (above) Always put avutil after all other ffmpeg libs
# (above) Always put png after all other libs

# Bundle the runtime dependencies next to the plugin library so the
# rpath "${ORIGIN}" set above can find them.
cp "${LIBS_DIR}/_tensorflow_cc/lib/${CONTRIB_PLATFORM}-gpu61/libtensorflow_cc.so" "lib/${CONTRIB_PLATFORM_CURT}/libtensorflow_cc.so.2"
cp "/usr/lib/${CONTRIB_PLATFORM}/libswscale.so.4" "lib/${CONTRIB_PLATFORM_CURT}"
cp "/usr/lib/${CONTRIB_PLATFORM}/libavutil.so.55" "lib/${CONTRIB_PLATFORM_CURT}"
cp "/usr/lib/${CONTRIB_PLATFORM}/libpng16.so.16" "lib/${CONTRIB_PLATFORM_CURT}"

# Package data, manifest and libraries into the .jpl archive.
zip -r "${JPL_FILE_NAME}" data manifest.json lib
mv "${JPL_FILE_NAME}" "${DESTINATION_PATH}/${CONTRIB_PLATFORM}/jpl/"

# Cleanup
# Remove lib after compilation
rm -rf lib
...@@ -159,11 +159,11 @@ namespace jami ...@@ -159,11 +159,11 @@ namespace jami
for (int offset = 0; offset < flatSize; offset++) for (int offset = 0; offset < flatSize; offset++)
{ {
// Get vaule through .flat() // Get vaule through .flat()
if (outputs[0].flat<tensorflow::int64>()(offset) == 15 or outputs[0].flat<tensorflow::int64>()(offset) == 1) // if (outputs[0].flat<tensorflow::int64>()(offset) == 15 or outputs[0].flat<tensorflow::int64>()(offset) == 1)
{ // {
oss << " " << outputs[0].flat<tensorflow::int64>()(offset); // oss << " " << outputs[0].flat<tensorflow::int64>()(offset);
Plog::log(Plog::LogPriority::INFO, "masksPredictions", oss.str()); // Plog::log(Plog::LogPriority::INFO, "masksPredictions", oss.str());
} // }
out.push_back(static_cast<float> (outputs[0].flat<tensorflow::int64>()(offset))); out.push_back(static_cast<float> (outputs[0].flat<tensorflow::int64>()(offset)));
} }
break; break;
......
...@@ -8,12 +8,14 @@ ...@@ -8,12 +8,14 @@
struct PluginParameters { struct PluginParameters {
std::string stream = "out"; std::string stream = "out";
#ifdef TFLITE #ifdef TFLITE
bool useGPU = false; //only used when on desktop
#ifdef __ANDROID #ifdef __ANDROID
std::string model = "model_256_Qlatency.tflite"; std::string model = "model_256_Qlatency.tflite";
#else #else
std::string model = "model_256_F_16.tflite"; std::string model = "model_256_F_16.tflite";
#endif #endif
#else #else
bool useGPU = true; //only used when on desktop
std::string model = "frozen_inference_graph.pb"; std::string model = "frozen_inference_graph.pb";
#endif //TFLITE #endif //TFLITE
std::string image = "background2.png"; std::string image = "background2.png";
......
...@@ -60,7 +60,6 @@ namespace jami ...@@ -60,7 +60,6 @@ namespace jami
// Plog::log(Plog::LogPriority::INFO, TAG, "inside update()"); // Plog::log(Plog::LogPriority::INFO, TAG, "inside update()");
if (isAttached) if (isAttached)
{ {
std::ostringstream oss;
//====================================================================================== //======================================================================================
// GET FRAME ROTATION // GET FRAME ROTATION
AVFrameSideData *side_data = AVFrameSideData *side_data =
...@@ -73,6 +72,7 @@ namespace jami ...@@ -73,6 +72,7 @@ namespace jami
angle = static_cast<int>(av_display_rotation_get(matrix_rotation)); angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
} }
std::ostringstream oss;
// Plog::log(Plog::LogPriority::INFO, TAG, "step GET RAW FRAME"); // Plog::log(Plog::LogPriority::INFO, TAG, "step GET RAW FRAME");
//====================================================================================== //======================================================================================
// GET RAW FRAME // GET RAW FRAME
...@@ -93,8 +93,8 @@ namespace jami ...@@ -93,8 +93,8 @@ namespace jami
cv::Mat clone = frame.clone(); cv::Mat clone = frame.clone();
//====================================================================================== //======================================================================================
// ROTATE THE FRAME // ROTATE THE FRAME
// rotateFrame(angle, clone); rotateFrame(angle, clone);
// rotateFrame(angle, frame); rotateFrame(angle, frame);
if (firstRun) if (firstRun)
{ {
...@@ -129,8 +129,8 @@ namespace jami ...@@ -129,8 +129,8 @@ namespace jami
//====================================================================================== //======================================================================================
// REPLACE AVFRAME DATA WITH FRAME DATA // REPLACE AVFRAME DATA WITH FRAME DATA
// rotateFrame(-angle, clone); rotateFrame(-angle, clone);
// rotateFrame(-angle, frame); rotateFrame(-angle, frame);
// Plog::log(Plog::LogPriority::INFO, TAG, "step REPLACE AVFRAME DATA WITH FRAME DATA"); // Plog::log(Plog::LogPriority::INFO, TAG, "step REPLACE AVFRAME DATA WITH FRAME DATA");
if (bgrFrame && bgrFrame->data[0]) if (bgrFrame && bgrFrame->data[0])
......
...@@ -67,10 +67,10 @@ For Linux: ...@@ -67,10 +67,10 @@ For Linux:
$ ./configure $ ./configure
For TFLite: For TFLite:
$ bazel build --config=v1 --define framework_shared_object=false //tensorflow:libtensorflow_cc.so $ bazel build //tensorflow/lite:libtensorflowlite.so
or or
For Tensorflow C++ API: For Tensorflow C++ API:
$ bazel build //tensorflow/lite:libtensorflowlite.so $ bazel build --config=v1 --define framework_shared_object=false //tensorflow:libtensorflow_cc.so
OBS.: If you want to build Tensorflow C++ API with GPU support, be sure to have a CUDA capable GPU and that you have OBS.: If you want to build Tensorflow C++ API with GPU support, be sure to have a CUDA capable GPU and that you have
followed all installation steps for the Nvidia drivers, CUDA Toolkit, CUDNN, Tensor RT, and that their versions followed all installation steps for the Nvidia drivers, CUDA Toolkit, CUDNN, Tensor RT, and that their versions
...@@ -87,7 +87,7 @@ TENSORFLOWLITE INCLUDES ASSEMBLE INSTRUCTIONS ...@@ -87,7 +87,7 @@ TENSORFLOWLITE INCLUDES ASSEMBLE INSTRUCTIONS
"<tensorflow>/bazel-genfiles/tensorflow/lite/" "<tensorflow>/bazel-genfiles/tensorflow/lite/"
or at: or at:
"<tensorflow>/bazel-out/<cpu>-opt/bin/tensorflow/lite/" "<tensorflow>/bazel-out/<cpu>-opt/bin/tensorflow/lite/"
(cpu may be "armeabi-v7a", "arm64-v8a", "x86_64" or "k8" depending on the build realized) (cpu may be "armeabi-v7a", "arm64-v8a", "x86_64", "x86" or "k8" depending on the build realized)
The lib in the first folder is overwritten after each build. The lib in the first folder is overwritten after each build.
The lib in the second folder is not. The lib in the second folder is not.
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment