diff --git a/ForegroundSegmentation/TFInference.cpp b/ForegroundSegmentation/TFInference.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3464069dff1d90c336aa19cc7beca2a1f0454397
--- /dev/null
+++ b/ForegroundSegmentation/TFInference.cpp
@@ -0,0 +1,197 @@
+#include "TFInference.h"
+// Std libraries
+#include <fstream>
+#include <numeric>
+#include <iostream>
+// Tensorflow headers
+#include <tensorflow/lite/builtin_op_data.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/optional_debug_tools.h>
+
+#include "pluglog.h"
+
+namespace jami
+{
+    TensorflowInference::TensorflowInference(TFModel tfModel) : tfModel(tfModel) {}
+
+    TensorflowInference::~TensorflowInference() 
+    {
+        // delete(optionalNnApiDelegate);
+    }
+
+    bool TensorflowInference::isAllocated() const 
+    {
+        return allocated;
+    }
+
+    void TensorflowInference::loadModel() 
+    {
+        flatbufferModel = tflite::FlatBufferModel::BuildFromFile(tfModel.modelPath.c_str());
+        if (!flatbufferModel) 
+        {
+            throw std::runtime_error("Failed to load the model file");
+        }
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", "MODEL LOADED" );
+    }
+
+    void TensorflowInference::buildInterpreter() 
+    {
+        // Build the interpreter
+        tflite::ops::builtin::BuiltinOpResolver resolver;
+        tflite::InterpreterBuilder builder(*flatbufferModel, resolver);
+        builder(&interpreter);
+        if(interpreter) 
+        {
+            setInterpreterSettings();
+            Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER BUILT" );
+            if (tfModel.useNNAPI)
+            {
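+                // The NNAPI delegate offloads supported ops to the Android
+                // Neural Networks API (hardware acceleration when available)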
+                TfLiteDelegate* optionalNnApiDelegate = tflite::NnApiDelegate();
+                if (interpreter->ModifyGraphWithDelegate(optionalNnApiDelegate) != kTfLiteOk)
+                {
+                    Plog::log(Plog::LogPriority::ERROR, "TENSOR", "FAILED TO APPLY THE NNAPI DELEGATE");
+                }
+                else
+                {
+                    Plog::log(Plog::LogPriority::INFO, "TENSOR", "INTERPRETER SET" );
+                    allocateTensors();
+                }
+            }
+            else
+            {
+                allocateTensors();
+            }
+        }
+    }
+
+    void TensorflowInference::setInterpreterSettings() 
+    {
+        // interpreter->UseNNAPI(tfModel.useNNAPI);
+        interpreter->SetAllowFp16PrecisionForFp32(tfModel.allowFp16PrecisionForFp32);
+        interpreter->SetNumThreads(static_cast<int>(tfModel.numberOfThreads));
+    }
+
+    void TensorflowInference::init() 
+    {
+        // Loading the model
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", "INSIDE THE INIT" );
+        loadModel();
+        buildInterpreter();
+        describeModelTensors();
+    }
+
+    void TensorflowInference::allocateTensors()
+    {
+        if (interpreter->AllocateTensors() != kTfLiteOk)
+        {
+            throw std::runtime_error("Failed to allocate tensors!");
+        }
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", "TENSORS ALLOCATED");
+        allocated = true;
+    }
+
+    void TensorflowInference::describeModelTensors() const 
+    {
+        //PrintInterpreterState(interpreter.get());
+        std::ostringstream oss;
+        oss << "=============== inputs/outputs dimensions ==============="
+                << "\n";
+        const std::vector<int> inputs = interpreter->inputs();
+        const std::vector<int> outputs = interpreter->outputs();
+        oss << "number of inputs: " << inputs.size() << std::endl;
+        oss << "number of outputs: " << outputs.size() << std::endl;
+
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str() );
+        int input = interpreter->inputs()[0];
+        int output = interpreter->outputs()[0];
+        oss << "input 0 index: " << input << std::endl;
+        oss << "output 0 index: " << output << std::endl;
+        oss << "=============== input dimensions ==============="
+                << std::endl;
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str() );
+        // get input dimension from the input tensor metadata
+        // assuming one input only
+
+        for (size_t i = 0; i < inputs.size(); i++) 
+        {
+            std::stringstream ss;
+            ss << "Input  " << i << "   âž› ";
+            describeTensor(ss.str(), interpreter->inputs()[i]);
+        }
+        oss.str("");
+        oss << "=============== output dimensions ==============="
+                << "\n";
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", oss.str() );
+        // get input dimension from the input tensor metadata
+        // assuming one input only
+        for (size_t i = 0; i < outputs.size(); i++) 
+        {
+            std::stringstream ss;
+            ss << "Output " << i << "   âž› ";
+            describeTensor(ss.str(), interpreter->outputs()[i]);
+        }
+    }
+
+    void TensorflowInference::describeTensor(std::string prefix, int index) const 
+    {
+        std::vector<int> dimensions = getTensorDimensions(index);
+        size_t nbDimensions = dimensions.size();
+
+        std::ostringstream tensorDescription;
+        tensorDescription << prefix;
+        for (size_t i = 0; i < nbDimensions; i++) 
+        {
+            if (i == dimensions.size() - 1)
+            {
+                tensorDescription << dimensions[i];
+            } else 
+            {
+                tensorDescription << dimensions[i] << " x ";
+            }
+        }
+        tensorDescription << std::endl;
+        Plog::log(Plog::LogPriority::INFO, "TENSOR", tensorDescription.str() );
+    }
+
+    std::vector<int>
+    TensorflowInference::getTensorDimensions(int index) const 
+    {
+        TfLiteIntArray *dims = interpreter->tensor(index)->dims;
+        size_t size = static_cast<size_t>(dims->size);
+        std::vector<int> result;
+        result.reserve(size);
+        for (size_t i = 0; i != size; i++) 
+        {
+            result.push_back(dims->data[i]);
+        }
+
+        return result;
+    }
+
+    void TensorflowInference::runGraph() 
+    {
+        for (size_t i = 0; i < tfModel.numberOfRuns; i++) 
+        {
+            if (interpreter->Invoke() != kTfLiteOk) 
+            {
+                Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "A problem occured when running the graph");
+            }
+            else
+            {
+                Plog::log(Plog::LogPriority::INFO, "RUN GRAPH", "TF RUN OK");
+            }
+            
+        }
+    }
+}
diff --git a/ForegroundSegmentation/TFInference.h b/ForegroundSegmentation/TFInference.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c90b46ce76e1c36770729d97b64673138843f87
--- /dev/null
+++ b/ForegroundSegmentation/TFInference.h
@@ -0,0 +1,94 @@
+#pragma once
+
+// Library headers
+#include "TFModels.h"
+#include <tensorflow/lite/delegates/nnapi/nnapi_delegate.h>
+
+// STL
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace tflite 
+{
+    class FlatBufferModel;
+    class Interpreter;
+    class StatefulNnApiDelegate;
+} // namespace tflite
+
+namespace jami 
+{
+    class TensorflowInference 
+    {
+        public:
+            /**
+             * @brief TensorflowInference
+             * Takes a supervised model where the model and labels files are defined
+             * @param model
+             */
+            TensorflowInference(TFModel model);
+            ~TensorflowInference();
+            /**
+             * @brief loadModel
+             * Load the model from the file described in the Supervised Model
+             */
+            void loadModel();
+            void buildInterpreter();
+            void setInterpreterSettings();
+
+            /**
+             * @brief allocateTensors
+             * Tries to allocate space for the tensors
+             * In case of success isAllocated() should return true
+             */
+            void allocateTensors();
+
+            /**
+             * @brief runGraph
+             * Runs the underlying graph model.numberOfRuns times,
+             * where numberOfRuns is defined in the model
+             */
+            void runGraph();
+
+            /**
+             * @brief init
+             * Initializes the model and the interpreter, allocates tensors, and loads the labels
+             */
+            void init();
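+            // Typical usage (a sketch, derived from the methods above):
+            //   TensorflowInference tfi{model};
+            //   tfi.init();  // loads the model, builds the interpreter, allocates tensors
+            //   if (tfi.isAllocated())
+            //       tfi.runGraph();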
+            // Getters
+            bool isAllocated() const;
+            // Debug methods
+            void describeModelTensors() const;
+            void describeTensor(std::string prefix, int index) const;
+
+        protected:
+
+            /**
+             * @brief getTensorDimensions
+             * Utility method to get Tensorflow Tensor dimensions
+             * Given the index of a tensor, the function returns a vector
+             * where each element is the size of the corresponding dimension;
+             * thus, vector.size() is the tensor's number of dimensions (its rank)
+             * @param index
+             * @return
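+             * e.g. for a 1x256x256x3 input tensor, this returns {1, 256, 256, 3}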
+             */
+            std::vector<int> getTensorDimensions(int index) const;
+
+            TFModel tfModel;
+            std::vector<std::string> labels;
+
+            /**
+             * @brief nbLabels
+             * The real number of labels may not match labels.size() because of padding
+             */
+            size_t nbLabels;
+
+            // Tensorflow model and interpreter
+            std::unique_ptr<tflite::FlatBufferModel> flatbufferModel;
+            std::unique_ptr<tflite::Interpreter> interpreter;
+            // std::unique_ptr<TfLiteDelegate*> optionalNnApiDelegate;
+
+            // tflite::StatefulNnApiDelegate delegate = tflite::StatefulNnApiDelegate();
+            bool allocated = false;
+    };
+}
diff --git a/ForegroundSegmentation/TFModels.h b/ForegroundSegmentation/TFModels.h
new file mode 100644
index 0000000000000000000000000000000000000000..01dc4957968ffc34d0c145b8b99389023636c221
--- /dev/null
+++ b/ForegroundSegmentation/TFModels.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// Std libraries
+#include <string>
+#include <vector>
+#include "pluginParameters.h"
+
+struct TFModelConfiguration 
+{
+    TFModelConfiguration(std::string& model): modelPath{model} {}
+    std::string modelPath;
+    std::vector<unsigned int> normalizationValues;
+
+    // Tensorflow specific settings
+
+    #ifdef __ANDROID__
+    bool useNNAPI = true;
+    #else
+    bool useNNAPI = false;
+    #endif // __ANDROID__
+
+    bool allowFp16PrecisionForFp32 = true;
+    unsigned int numberOfThreads = 4;
+
+    // User defined details
+    bool inputFloating = false;
+    unsigned int numberOfRuns = 1;
+};
+
+struct TFModel : TFModelConfiguration 
+{
+    TFModel(std::string&& model, std::string&& labels): TFModelConfiguration(model), labelsPath{labels}{}
+    TFModel(std::string& model, std::string& labels): TFModelConfiguration(model), labelsPath{labels}{}
+    TFModel(std::string&& model): TFModelConfiguration(model) {}
+    TFModel(std::string& model): TFModelConfiguration(model) {}
+    
+    std::string labelsPath = " ";
+    unsigned int labelsPadding = 16;
+};
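+
+// Construction sketch (paths are illustrative):
+//   TFModel model{dataPath + "/models/model_256_Qlatency.tflite"};
+//   model.numberOfThreads = 2;  // tune for the target device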
diff --git a/ForegroundSegmentation/build.sh b/ForegroundSegmentation/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c2d691a5ee1ae7eb2fee5e820e453ee57f2fbab9
--- /dev/null
+++ b/ForegroundSegmentation/build.sh
@@ -0,0 +1,63 @@
+#! /bin/bash
+# Build the plugin for the project
+if [ -z "$DAEMON" ]; then
+    DAEMON="./../../daemon"
+    echo "DAEMON not provided, building against ${DAEMON}"
+fi
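+# Usage sketch: DAEMON=<path-to-daemon> ./build.sh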
+
+PLUGIN_NAME="foregroungsegmentation"
+JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
+SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
+DAEMON_SRC="${DAEMON}/src"
+CONTRIB_PATH="${DAEMON}/contrib"
+# DESTINATION_PATH=/home/${USER}/Projects/ring-plugins
+DESTINATION_PATH="./../build/"
+PLUGINS_LIB="../lib"
+LIBS_DIR="/home/${USER}/Libs"
+
+CONTRIB_PLATFORM_CURT=x86_64
+CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-linux-gnu
+
+mkdir -p lib/${CONTRIB_PLATFORM_CURT}
+mkdir -p ${DESTINATION_PATH}/${CONTRIB_PLATFORM}/jpl
+
+# Compile
+clang++ -std=c++14 -shared -fPIC \
+-Wl,-Bsymbolic \
+-Wall -Wextra \
+-Wno-unused-variable \
+-Wno-unused-function \
+-Wno-unused-parameter \
+-I"." \
+-I${DAEMON_SRC} \
+-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
+-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
+-I${LIBS_DIR}/_tensorflow_distribution/include/flatbuffers \
+-I${LIBS_DIR}/_tensorflow_distribution/include \
+-I${PLUGINS_LIB} \
+main.cpp \
+videoSubscriber.cpp \
+pluginProcessor.cpp \
+pluginMediaHandler.cpp \
+TFInference.cpp \
+pluginInference.cpp \
+pluginParameters.cpp \
+-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
+-L${LIBS_DIR}/_tensorflow_distribution/lib/${CONTRIB_PLATFORM}/ \
+-lswscale \
+-lavutil \
+-lopencv_imgcodecs \
+-lopencv_imgproc \
+-lopencv_core \
+-ltensorflowlite \
+-o lib/${CONTRIB_PLATFORM_CURT}/${SO_FILE_NAME}
+# (above) Always put opencv_core after all other opencv libs when linking statically
+
+cp ${LIBS_DIR}/_tensorflow_distribution/lib/${CONTRIB_PLATFORM}/libtensorflowlite.so lib/$CONTRIB_PLATFORM_CURT
+
+zip -r ${JPL_FILE_NAME} data manifest.json lib
+mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/${CONTRIB_PLATFORM}/jpl/
+
+# Cleanup
+# Remove lib after compilation
+rm -rf lib
diff --git a/ForegroundSegmentation/buildandroid.sh b/ForegroundSegmentation/buildandroid.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2300dc3cea8403004eb70b1076ed6fb13a6ce017
--- /dev/null
+++ b/ForegroundSegmentation/buildandroid.sh
@@ -0,0 +1,181 @@
+#! /bin/bash
+# Build the plugin for the project
+if [ -z "$DAEMON" ]; then
+    DAEMON="./../../daemon"
+    echo "DAEMON not provided, building against ${DAEMON}"
+fi
+if [ -z "$ANDROID_NDK" ]; then
+    ANDROID_NDK=/home/${USER}/Android/Sdk/ndk/21.1.6352462
+    echo "ANDROID_NDK not provided, building with ${ANDROID_NDK}"
+fi
+
+PLUGIN_NAME="foregroungsegmentation"
+JPL_FILE_NAME=${PLUGIN_NAME}".jpl"
+SO_FILE_NAME="lib"${PLUGIN_NAME}".so"
+LIBS_DIR="/home/${USER}/Libs"
+DAEMON_SRC="${DAEMON}/src"
+CONTRIB_PATH="${DAEMON}/contrib"
+# DESTINATION_PATH=/home/${USER}/Projects/ring-plugins
+DESTINATION_PATH="./../build/"
+PLUGINS_LIB="../lib"
+
+#=========================================================
+#	Check if the ANDROID_ABI was provided
+#	if not, set default
+#=========================================================
+if [ -z "$ANDROID_ABI" ]; then
+    ANDROID_ABI="armeabi-v7a arm64-v8a x86_64"
+    echo "ANDROID_ABI not provided, building for ${ANDROID_ABI}"
+fi
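+# Usage sketch (paths are illustrative):
+#   DAEMON=./../../daemon ANDROID_NDK=$HOME/Android/Sdk/ndk/21.1.6352462 \
+#   ANDROID_ABI="arm64-v8a x86_64" ./buildandroid.sh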
+
+buildlib() {
+	echo $CURRENT_ABI
+	#=========================================================
+	#	ANDROID TOOLS
+	#=========================================================
+	export HOST_TAG=linux-x86_64
+	export TOOLCHAIN=$ANDROID_NDK/toolchains/llvm/prebuilt/$HOST_TAG
+
+	if [ $CURRENT_ABI = armeabi-v7a ]
+	then
+	export AR=$TOOLCHAIN/bin/arm-linux-androideabi-ar
+	export AS=$TOOLCHAIN/bin/arm-linux-androideabi-as
+	export CC=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang
+	export CXX=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang++
+	export LD=$TOOLCHAIN/bin/arm-linux-androideabi-ld
+	export RANLIB=$TOOLCHAIN/bin/arm-linux-androideabi-ranlib
+	export STRIP=$TOOLCHAIN/bin/arm-linux-androideabi-strip
+	export ANDROID_SYSROOT=/home/${USER}/Projects/ring-android-project/client-android/android-toolchain-21-arm/sysroot
+
+	elif [ $CURRENT_ABI = arm64-v8a ]
+	then
+	export AR=$TOOLCHAIN/bin/aarch64-linux-android-ar
+	export AS=$TOOLCHAIN/bin/aarch64-linux-android-as
+	export CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang
+	export CXX=$TOOLCHAIN/bin/aarch64-linux-android21-clang++
+	export LD=$TOOLCHAIN/bin/aarch64-linux-android-ld
+	export RANLIB=$TOOLCHAIN/bin/aarch64-linux-android-ranlib
+	export STRIP=$TOOLCHAIN/bin/aarch64-linux-android-strip
+	export ANDROID_SYSROOT=/home/${USER}/Projects/ring-android-project/client-android/android-toolchain-21-arm64/sysroot
+
+	elif [ $CURRENT_ABI = x86_64 ]
+	then
+	export AR=$TOOLCHAIN/bin/x86_64-linux-android-ar
+	export AS=$TOOLCHAIN/bin/x86_64-linux-android-as
+	export CC=$TOOLCHAIN/bin/x86_64-linux-android21-clang
+	export CXX=$TOOLCHAIN/bin/x86_64-linux-android21-clang++
+	export LD=$TOOLCHAIN/bin/x86_64-linux-android-ld
+	export RANLIB=$TOOLCHAIN/bin/x86_64-linux-android-ranlib
+	export STRIP=$TOOLCHAIN/bin/x86_64-linux-android-strip
+	export ANDROID_SYSROOT=/home/${USER}/Projects/ring-android-project/client-android/android-toolchain-21-x86_64/sysroot
+
+	else
+	echo "ABI NOT OK" >&2
+	exit 1
+	fi
+	
+	#=========================================================
+	#	CONTRIBS
+	#=========================================================
+	if [ $CURRENT_ABI = armeabi-v7a ]
+	then
+	CONTRIB_PLATFORM=arm-linux-androideabi
+
+	elif [ $CURRENT_ABI = arm64-v8a ]
+	then
+	CONTRIB_PLATFORM=aarch64-linux-android
+
+	elif [ $CURRENT_ABI = x86_64 ]
+	then
+	CONTRIB_PLATFORM=x86_64-linux-android
+	fi
+	
+	# ASSETS
+	ANDROID_PROJECT_ASSETS=/home/${USER}/Projects/ring-android-project/client-android/ring-android/app/src/main/assets
+	# LIBS FOLDER
+	ANDROID_PROJECT_LIBS=/home/${USER}/Projects/ring-android-project/client-android/ring-android/app/src/main/libs/$CURRENT_ABI
+	#NDK SOURCES FOR cpufeatures
+	NDK_SOURCES=${ANDROID_NDK}/sources/android
+	
+	#=========================================================
+	#	LD_FLAGS
+	#=========================================================
+	if [ $CURRENT_ABI = armeabi-v7a ]
+	then
+	export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi/21"
+	elif [ $CURRENT_ABI = arm64-v8a ]
+	then
+	export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android/21"
+	elif [ $CURRENT_ABI = x86_64 ]
+	then
+	export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android/21"
+	fi
+	
+	#=========================================================
+	#	Compile CPU FEATURES, NEEDED FOR OPENCV
+	#=========================================================
+	$CC -c $NDK_SOURCES/cpufeatures/cpu-features.c -o cpu-features.o --sysroot=$ANDROID_SYSROOT
+
+	#=========================================================
+	#	Compile the plugin
+	#=========================================================
+	
+	# Create so destination folder
+	mkdir -p lib/$CURRENT_ABI
+
+	$CXX --std=c++14 -O3 -g -fPIC \
+	-Wl,-Bsymbolic \
+	-shared \
+	-Wall -Wextra \
+	-Wno-unused-variable \
+	-Wno-unused-function \
+	-Wno-unused-parameter \
+	-I"." \
+	-I${DAEMON_SRC} \
+	-I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
+    -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
+    -I${LIBS_DIR}/_tensorflow_distribution/include/flatbuffers \
+	-I${LIBS_DIR}/_tensorflow_distribution/include \
+	-I${PLUGINS_LIB} \
+	main.cpp \
+	videoSubscriber.cpp \
+	pluginProcessor.cpp \
+	pluginMediaHandler.cpp \
+	TFInference.cpp \
+	pluginInference.cpp \
+	pluginParameters.cpp \
+	cpu-features.o \
+	-L${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/ \
+	-L${LIBS_DIR}/_tensorflow_distribution/lib/${CURRENT_ABI}/ \
+	-lswscale \
+	-lavutil \
+	-lopencv_imgcodecs \
+	-lopencv_imgproc \
+	-lopencv_core \
+	-llibpng \
+	-ltensorflowlite \
+	-llog -lz \
+	--sysroot=$ANDROID_SYSROOT \
+	-o lib/$CURRENT_ABI/${SO_FILE_NAME}
+	# (above) Always put opencv_core after all other opencv libs when linking statically
+	# (above) Put libavutil after other ffmpeg libraries
+	
+	cp ${LIBS_DIR}/_tensorflow_distribution/lib/${CURRENT_ABI}/libtensorflowlite.so lib/$CURRENT_ABI
+}
+
+# Build the so 
+for i in ${ANDROID_ABI}; do
+	CURRENT_ABI=$i
+	buildlib
+done
+
+# Package the plugin into a .jpl and export it
+mkdir -p ${DESTINATION_PATH}/jpl/${PLUGIN_NAME}/
+zip -r ${JPL_FILE_NAME} data manifest.json lib
+mv ${JPL_FILE_NAME} ${DESTINATION_PATH}/jpl/${PLUGIN_NAME}/
+
+# Cleanup
+# Remove cpu-features object after compilation
+rm cpu-features.o
+rm -rf lib
diff --git a/ForegroundSegmentation/data/backgrounds/background1.png b/ForegroundSegmentation/data/backgrounds/background1.png
new file mode 100644
index 0000000000000000000000000000000000000000..849251c69bd6a1a1ce7917b7d806cec85f0b7b01
Binary files /dev/null and b/ForegroundSegmentation/data/backgrounds/background1.png differ
diff --git a/ForegroundSegmentation/data/backgrounds/background2.png b/ForegroundSegmentation/data/backgrounds/background2.png
new file mode 100644
index 0000000000000000000000000000000000000000..849251c69bd6a1a1ce7917b7d806cec85f0b7b01
Binary files /dev/null and b/ForegroundSegmentation/data/backgrounds/background2.png differ
diff --git a/ForegroundSegmentation/data/icon.png b/ForegroundSegmentation/data/icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..f44370f1f48de6fe24377c74c1fbbcd0097e6a12
Binary files /dev/null and b/ForegroundSegmentation/data/icon.png differ
diff --git a/ForegroundSegmentation/data/models/mobilenet_v2_deeplab_v3_256_myquant.tflite b/ForegroundSegmentation/data/models/mobilenet_v2_deeplab_v3_256_myquant.tflite
new file mode 100644
index 0000000000000000000000000000000000000000..6a7f99cca4f075e3e74aa82e718c789d16224e1f
Binary files /dev/null and b/ForegroundSegmentation/data/models/mobilenet_v2_deeplab_v3_256_myquant.tflite differ
diff --git a/ForegroundSegmentation/data/models/model_256_Qlatency.tflite b/ForegroundSegmentation/data/models/model_256_Qlatency.tflite
new file mode 100644
index 0000000000000000000000000000000000000000..3d54e057ac12aab550ee2644afba1f848408dbd9
Binary files /dev/null and b/ForegroundSegmentation/data/models/model_256_Qlatency.tflite differ
diff --git a/ForegroundSegmentation/data/models/model_256_Qlatency_16.tflite b/ForegroundSegmentation/data/models/model_256_Qlatency_16.tflite
new file mode 100644
index 0000000000000000000000000000000000000000..9fb7771c97e81815b48c8829f27d877e3470692d
Binary files /dev/null and b/ForegroundSegmentation/data/models/model_256_Qlatency_16.tflite differ
diff --git a/ForegroundSegmentation/data/preferences.json b/ForegroundSegmentation/data/preferences.json
new file mode 100644
index 0000000000000000000000000000000000000000..edc5ca749056bcf8c8f02df2954e41b08697bd90
--- /dev/null
+++ b/ForegroundSegmentation/data/preferences.json
@@ -0,0 +1,32 @@
+[
+    {
+        "category" : "StreamsListPreference",
+        "type": "List",
+        "key": "streamslist",
+        "title": "Streams to transform",
+        "summary": "Select your input color",
+        "defaultValue": "out",
+        "entries": ["sent", "received"],
+        "entryValues": ["out", "in"]
+    },
+    {
+        "category" : "ModelPreference",
+        "type": "List",
+        "key": "modellist",
+        "title": "Model to load",
+        "summary": "Select the model to use",
+        "defaultValue": "model_256_Qlatency.tflite",
+        "entries": ["mv2_DLV3_256_MQ", "mv2_DLV3_256_QLATENCY_16", "mv2_DLV3_256_QLATENCY_8"],
+        "entryValues": ["mobilenet_v2_deeplab_v3_256_myquant.tflite", "model_256_Qlatency_16.tflite", "model_256_Qlatency.tflite"]
+    },
+    {
+        "category" : "ImageBackground",
+        "type": "List",
+        "key": "backgroundlist",
+        "title": "Background image",
+        "summary": "Select the image background to use",
+        "defaultValue": "background1.png",
+        "entries": ["background1", "background2"],
+        "entryValues": ["background1.png", "background2.png"]
+    }
+]
diff --git a/ForegroundSegmentation/main.cpp b/ForegroundSegmentation/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..037fbbd6b1ad7959fc88339046dd86f2725388b5
--- /dev/null
+++ b/ForegroundSegmentation/main.cpp
@@ -0,0 +1,34 @@
+#include <iostream>
+#include <string.h>
+#include <thread>
+#include <memory>
+#include "plugin/jamiplugin.h"
+#include "pluginMediaHandler.h"
+
+extern "C" 
+{
+    void pluginExit(void) { }
+
+    JAMI_PluginExitFunc JAMI_dynPluginInit(const JAMI_PluginAPI *api)
+    {
+        std::cout << "**************************************" << std::endl << std::endl;
+        std::cout << "**  FOREGROUND SEGMENTATION PLUGIN  **" << std::endl;
+        std::cout << "**************************************" << std::endl << std::endl;
+
+        // Check that the plugin API is available
+        if(api) 
+        {
+            std::map<std::string, std::string> ppm;
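+            // Query the daemon for the saved plugin preferences and for the
+            // plugin data path (models, backgrounds, icon)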
+            api->invokeService(api, "getPluginPreferences", &ppm);
+            std::string dataPath;
+            api->invokeService(api, "getPluginDataPath", &dataPath);
+            auto fmp = std::make_unique<jami::PluginMediaHandler>(std::move(ppm), std::move(dataPath));
+
+            if(!api->manageComponent(api,"CallMediaHandlerManager", fmp.release())) 
+            {
+                return pluginExit;
+            }
+        }
+        return nullptr;
+    }
+}
diff --git a/ForegroundSegmentation/manifest.json b/ForegroundSegmentation/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..587c7657586d91ee78c128f310e5cb4976076bfb
--- /dev/null
+++ b/ForegroundSegmentation/manifest.json
@@ -0,0 +1,6 @@
+{
+	"name": "foregroungsegmentation",
+	"description" : "Foreground segmentation plugin with tensorflow",
+	"version" : "1.0.0",
+	"libs" : "libtensorflowlite.so"
+}
diff --git a/ForegroundSegmentation/pluginInference.cpp b/ForegroundSegmentation/pluginInference.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..581dae5a24fd2fe9d7d850581551b17822e9546a
--- /dev/null
+++ b/ForegroundSegmentation/pluginInference.cpp
@@ -0,0 +1,137 @@
+#include "pluginInference.h"
+// Std libraries
+#include <cstring>
+#include <numeric>
+#include "pluglog.h"
+// Tensorflow headers
+#include "tensorflow/lite/interpreter.h"
+
+namespace jami 
+{
+	PluginInference::PluginInference(TFModel model) : TensorflowInference(model) {	}
+
+	PluginInference::~PluginInference(){}
+
+	void PluginInference::feedInput(std::vector<uint8_t> &in, int imageWidth,
+										int imageHeight, int imageNbChannels) 
+	{
+		auto input = getInput();
+		std::vector<int> dims = input.second;
+		// Relevant data starts from index 1, dims.at(0) = 1
+		int expectedWidth = dims.at(1);
+		int expectedHeight = dims.at(2);
+		int expectedNbChannels = dims.at(3);
+
+		if (imageNbChannels != expectedNbChannels) 
+		{
+			std::cerr << "The number of channels in the input should match the number "
+						"of channels in the model";
+		} else if (imageWidth != expectedWidth || imageHeight != expectedHeight) 
+		{
+			std::cerr << "The width and height of the input image don't match the "
+						"expected width and height of the model";
+		} else 
+		{
+			// Get the input pointer and feed it with data
+			uint8_t *inputDataPointer = input.first;
+			// Use memcpy for performance
+			std::memcpy(inputDataPointer, in.data(), in.size() * sizeof(uint8_t));
+		}
+	}
+
+	std::pair<uint8_t *, std::vector<int>> PluginInference::getInput() 
+	{
+		// We assume that we have only one input
+		// Get the input index
+		int input = interpreter->inputs()[0];
+		uint8_t *inputDataPointer = interpreter->typed_tensor<uint8_t>(input);
+		// Get the input dimensions vector
+		std::vector<int> dims = getTensorDimensions(input);
+
+		return std::make_pair(inputDataPointer, dims);
+	}
+
+	// Types returned by tensorflow:
+	// int type = interpreter->tensor(outputIndex)->type
+	// typedef enum {
+	// kTfLiteNoType = 0,
+	// kTfLiteFloat32 = 1, float
+	// kTfLiteInt32 = 2, int // int32_t
+	// kTfLiteUInt8 = 3, uint8_t
+	// kTfLiteInt64 = 4, int64_t
+	// kTfLiteString = 5, 
+	// kTfLiteBool = 6,
+	// kTfLiteInt16 = 7, int16_t
+	// kTfLiteComplex64 = 8,
+	// kTfLiteInt8 = 9, int8_t
+	// kTfLiteFloat16 = 10, float16_t
+	// } TfLiteType;
+
+	std::vector<float>
+	PluginInference::masksPredictions() const 
+	{
+        int outputIndex = interpreter->outputs()[0];
+        std::vector<int> dims = getTensorDimensions(outputIndex);
+		int totalDimensions = 1;
+		for (size_t i = 0; i < dims.size(); i++)
+		{
+			totalDimensions *= dims[i];
+		}
+		std::vector<float> out;
+		
+		int type = interpreter->tensor(outputIndex)->type;
+		switch(type)
+		{
+			case kTfLiteInt32:
+			{
+				int* outputDataPointer = interpreter->typed_tensor<int>(outputIndex);
+				std::vector<int> output(outputDataPointer, outputDataPointer + totalDimensions); // modified model
+				out = std::vector<float>(output.begin(), output.end());
+				break;
+			}
+			case kTfLiteInt64:
+			{
+				int64_t* outputDataPointer = interpreter->typed_tensor<int64_t>(outputIndex);
+				std::vector<int64_t> output(outputDataPointer, outputDataPointer + totalDimensions); // original model
+				out = std::vector<float>(output.begin(), output.end());
+				break;
+			}
+		}
+
+        return out;
+	}
+
+
+	void PluginInference::setExpectedImageDimensions() 
+	{
+		// We assume that we have only one input
+		// Get the input index
+		int input = interpreter->inputs()[0];
+		// Get the input dimensions vector
+		std::vector<int> dims = getTensorDimensions(input);
+		// Relevant data starts from index 1, dims.at(0) = 1
+		imageWidth = dims.at(1);
+		imageHeight = dims.at(2);
+		imageNbChannels = dims.at(3);
+	}
+
+	int PluginInference::getImageWidth() const 
+	{ 
+		return imageWidth; 
+	}
+
+	int PluginInference::getImageHeight() const 
+	{ 
+		return imageHeight; 
+	}
+
+	int PluginInference::getImageNbChannels() const 
+	{
+		return imageNbChannels;
+	}
+} // namespace jami
diff --git a/ForegroundSegmentation/pluginInference.h b/ForegroundSegmentation/pluginInference.h
new file mode 100644
index 0000000000000000000000000000000000000000..4829ebd64871a0a293e9aed0fd9fef823e245feb
--- /dev/null
+++ b/ForegroundSegmentation/pluginInference.h
@@ -0,0 +1,70 @@
+#pragma once
+
+#include "TFInference.h"
+
+// OpenCV headers
+#include <opencv2/core.hpp>
+// STL
+#include <array>
+#include <vector>
+#include <tuple>
+#include <iostream>
+
+namespace jami 
+{
+	class PluginInference : public TensorflowInference 
+	{
+		public:
+			/**
+			 * @brief PluginInference
+			 * Runs inference on video frames to segment the
+			 * foreground (person) from the background
+			 * @param model
+			 */
+			PluginInference(TFModel model);
+			~PluginInference();
+
+			std::vector<float> masksPredictions() const;
+
+			/**
+			 * @brief feedInput
+			 * Checks that the input image dimensions match those expected by the model
+			 * If so, copies the image data directly into the model input buffer
+			 * Otherwise, reports an error
+			 * @param in: image data
+			 * @param imageWidth
+			 * @param imageHeight
+			 * @param imageNbChannels
+			 **/
+			void feedInput(std::vector<uint8_t> &in, int imageWidth, int imageHeight,
+							int imageNbChannels);
+			/**
+			 * @brief getInput
+			 * Returns the input where to fill the data
+			 * Use this method if you know what you are doing, all the necessary checks
+			 * on dimensions must be done on your part
+			 * @return std::pair<uint8_t *, std::vector<int>>
+			 * The first element is the pointer to the input buffer
+			 * The second element is a dimensions vector that helps you
+			 * make your data size match the input size
+			 */
+			std::pair<uint8_t *, std::vector<int>> getInput();
+
+			/**
+			 * @brief setExpectedImageDimensions
+			 * Sets imageWidth and imageHeight from the sources
+			 */
+			void setExpectedImageDimensions();
+
+			// Getters
+			int getImageWidth() const;
+			int getImageHeight() const;
+			int getImageNbChannels() const;
+
+		private:
+			int imageWidth = 0;
+			int imageHeight = 0;
+			int imageNbChannels = 0;
+	};
+} // namespace jami
diff --git a/ForegroundSegmentation/pluginMediaHandler.cpp b/ForegroundSegmentation/pluginMediaHandler.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fe73f2d0f73c368a8f9632308e4d5a519ba8fffb
--- /dev/null
+++ b/ForegroundSegmentation/pluginMediaHandler.cpp
@@ -0,0 +1,84 @@
+#include "pluginMediaHandler.h"
+// Logger
+#include "pluglog.h"
+const char sep = separator();
+const std::string TAG = "GENERIC";
+
+namespace jami 
+{
+	PluginMediaHandler::PluginMediaHandler(std::map<std::string, std::string>&& ppm, std::string &&datapath):
+		datapath_{datapath}, ppm_{ppm}
+	{
+    	setGlobalPluginParameters(ppm_);
+    	setId(datapath_);
+		mpInput = std::make_shared<VideoSubscriber>(datapath_);
+		mpReceive = std::make_shared<VideoSubscriber>(datapath_);
+	}
+
+	void PluginMediaHandler::notifyAVFrameSubject(const StreamData &data, jami::avSubjectPtr subject)
+	{
+		Plog::log(Plog::LogPriority::INFO, TAG, "IN AVFRAMESUBJECT");
+		std::ostringstream oss;
+		std::string direction = data.direction ? "Receive" : "Preview";
+		oss << "NEW SUBJECT: [" << data.id << "," << direction << "]" << std::endl;
+
+
+		bool preferredStreamDirection = false;
+		if (!ppm_.empty() && ppm_.find("streamslist") != ppm_.end()) 
+		{
+			Plog::log(Plog::LogPriority::INFO, TAG, "SET PARAMETERS");
+			preferredStreamDirection = (ppm_.at("streamslist") == "in");
+		}
+		oss << "preferredStreamDirection " << preferredStreamDirection << std::endl;
+		if (data.type == StreamType::video && !data.direction && data.direction == preferredStreamDirection) 
+		{
+			subject->attach(mpInput.get()); // my image
+			oss << "got my sent image attached" << std::endl;
+		} else if (data.type == StreamType::video && data.direction && data.direction == preferredStreamDirection) 
+		{
+			subject->attach(mpReceive.get()); // the image i receive from the others on the call
+			oss << "got my received image attached" << std::endl;
+		}
+		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+	}
+
+	std::map<std::string, std::string> PluginMediaHandler::getCallMediaHandlerDetails()
+	{
+		return {{"icoPath", datapath_ + sep + "icon.png"}};
+	}
+
+	void PluginMediaHandler::setPreferenceAttribute(const std::string &key, const std::string &value)
+	{
+
+	}
+
+	bool PluginMediaHandler::preferenceMapHasKey(const std::string &key)
+	{
+		return ppm_.find(key) != ppm_.end();
+	}
+
+	void PluginMediaHandler::detach()
+	{
+		mpInput->detach();
+		mpReceive->detach();
+	}
+
+	PluginMediaHandler::~PluginMediaHandler() 
+	{
+		std::ostringstream oss;
+		oss << " ~GENERIC Plugin" << std::endl;
+		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+		detach();
+	}
+}
diff --git a/ForegroundSegmentation/pluginMediaHandler.h b/ForegroundSegmentation/pluginMediaHandler.h
new file mode 100644
index 0000000000000000000000000000000000000000..2243e6978adf5cb996fb495dcdb4e1dc7d8dd995
--- /dev/null
+++ b/ForegroundSegmentation/pluginMediaHandler.h
@@ -0,0 +1,39 @@
+#pragma once
+
+//Project
+#include "videoSubscriber.h"
+
+// Plugin
+#include "plugin/jamiplugin.h"
+#include "plugin/mediahandler.h"
+
+using avSubjectPtr = std::weak_ptr<jami::Observable<AVFrame*>>;
+
+namespace jami 
+{
+	class PluginMediaHandler : public jami::CallMediaHandler 
+	{
+		public:
+			PluginMediaHandler(std::map<std::string, std::string>&& ppm, std::string &&dataPath);
+			~PluginMediaHandler() override;
+
+			virtual void notifyAVFrameSubject(const StreamData &data, avSubjectPtr subject) override;
+			virtual std::map<std::string, std::string> getCallMediaHandlerDetails() override;
+
+			virtual void detach() override;
+			virtual void setPreferenceAttribute(const std::string& key, const std::string& value) override;
+			
+			std::shared_ptr<VideoSubscriber> mpInput;
+			std::shared_ptr<VideoSubscriber> mpReceive;
+			
+			std::string dataPath() const { return datapath_; }
+
+		private:
+        	bool preferenceMapHasKey(const std::string& key);
+
+		private:
+			const std::string datapath_;
+			std::map<std::string, std::string> ppm_;
+	};
+
+}
diff --git a/ForegroundSegmentation/pluginParameters.cpp b/ForegroundSegmentation/pluginParameters.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eb05df544d04e616d0c92bdae50415fecd0ea184
--- /dev/null
+++ b/ForegroundSegmentation/pluginParameters.cpp
@@ -0,0 +1,41 @@
+#include "pluginParameters.h"// Logger
+#include "pluglog.h"
+
+PluginParameters pluginParameters;
+
+void setGlobalPluginParameters(std::map<std::string, std::string> pp)
+{
+    Plog::log(Plog::LogPriority::INFO, "GLOBAL PARAMETERS", "IN");
+    if (!pp.empty())
+    {
+        Plog::log(Plog::LogPriority::INFO, "GLOBAL PARAMETERS", "PP NOT EMPTY");
+        if(pp.find("streamslist") != pp.end())
+        {
+            pluginParameters.stream = pp.at("streamslist");
+            Plog::log(Plog::LogPriority::INFO, "GLOBAL STREAM ", pluginParameters.stream);
+        }
+        if(pp.find("modellist") != pp.end())
+        {
+            pluginParameters.model = pp.at("modellist");
+            Plog::log(Plog::LogPriority::INFO, "GLOBAL MODEL ", pluginParameters.model);
+        }
+        if(pp.find("backgroundlist") != pp.end())
+        {
+            pluginParameters.image = pp.at("backgroundlist");
+            Plog::log(Plog::LogPriority::INFO, "GLOBAL IMAGE ", pluginParameters.image);
+        }
+    }
+}
+
+void getGlobalPluginParameters(PluginParameters* mPluginParameters)
+{
+    mPluginParameters->image = pluginParameters.image;
+    mPluginParameters->model = pluginParameters.model;
+    mPluginParameters->stream = pluginParameters.stream;
+}
+
+
+PluginParameters* getGlobalPluginParameters()
+{
+    return &pluginParameters;
+}
\ No newline at end of file
diff --git a/ForegroundSegmentation/pluginParameters.h b/ForegroundSegmentation/pluginParameters.h
new file mode 100644
index 0000000000000000000000000000000000000000..5bd5d7d3801b8eaae9e6cf5c74206e5b6adb62db
--- /dev/null
+++ b/ForegroundSegmentation/pluginParameters.h
@@ -0,0 +1,20 @@
+#ifndef _PLUGINPARAMETERS_H_
+#define _PLUGINPARAMETERS_H_
+
+#include <string>
+#include <map>
+
+
+struct PluginParameters {
+    std::string stream = "out";
+    std::string model = "model_256_Qlatency.tflite";
+    std::string image = "background1.png";
+};
+
+void setGlobalPluginParameters(std::map<std::string, std::string> pp);
+
+void getGlobalPluginParameters(PluginParameters* mPluginParameters);
+PluginParameters* getGlobalPluginParameters();
+
+#endif // _PLUGINPARAMETERS_H_
\ No newline at end of file
diff --git a/ForegroundSegmentation/pluginProcessor.cpp b/ForegroundSegmentation/pluginProcessor.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e66cf72df7bbe93cdddfcee790db8e6d685e0734
--- /dev/null
+++ b/ForegroundSegmentation/pluginProcessor.cpp
@@ -0,0 +1,156 @@
+#include "pluginProcessor.h"
+// System includes
+#include <cstring>
+// OpenCV headers
+#include <opencv2/imgproc.hpp>
+#include <opencv2/imgcodecs.hpp>
+#include <opencv2/core.hpp>
+// Logger
+#include "pluglog.h"
+// Avutil/Display for rotation
+
+extern "C" {
+#include <libavutil/display.h>
+}
+
+const char sep = separator();
+
+const std::string TAG = "GENERIC";
+
+PluginParameters* mPluginParameters = getGlobalPluginParameters(); 
+
+namespace jami 
+{
+
+	PluginProcessor::PluginProcessor(const std::string &dataPath):
+	pluginInference{TFModel{dataPath + sep + "models/" + mPluginParameters->model,
+							dataPath + sep + "models/pascal_voc_labels_list.tflite"}},
+	backgroundPath{dataPath + sep + "backgrounds" + sep + mPluginParameters->image}
+	{
+		initModel();
+		backgroundImage = cv::imread(backgroundPath);
+	}
+
+	void PluginProcessor::initModel()
+	{
+		try {
+			pluginInference.init();
+		} catch (std::exception& e) 
+		{
+			Plog::log(Plog::LogPriority::ERROR, TAG, e.what());
+		}
+		std::ostringstream oss;
+        oss << "Model is allocated " << pluginInference.isAllocated();
+        Plog::log(Plog::LogPriority::INFO, "GENERIC", oss.str());
+	}
+
+	void PluginProcessor::feedInput(const cv::Mat &frame) 
+	{
+		auto pair = pluginInference.getInput();
+		uint8_t *inputPointer = pair.first;
+		// Relevant data starts from index 1, dims.at(0) = 1
+		size_t imageWidth = static_cast<size_t>(pair.second[1]);
+		size_t imageHeight = static_cast<size_t>(pair.second[2]);
+		size_t imageNbChannels = static_cast<size_t>(pair.second[3]);
+		std::memcpy(inputPointer, frame.data,
+					imageWidth * imageHeight * imageNbChannels * sizeof(uint8_t));
+
+		inputPointer = nullptr;
+	}
+
+	void PluginProcessor::computePredictions() 
+	{
+		// Run the graph
+		pluginInference.runGraph();
+		auto predictions = pluginInference.masksPredictions();
+
+		// Save the predictions
+		computedMask = predictions;
+	}
+
+	void PluginProcessor::printMask() 
+	{
+		for (size_t i = 0; i < computedMask.size(); i++) 
+		{
+			// Log the predictions
+			std::ostringstream oss;
+			oss << "\nclass: " << computedMask[i] << std::endl;
+			Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+		}
+	}
+
+	void PluginProcessor::drawMaskOnFrame(
+		cv::Mat &frame, std::vector<float> computedMask) 
+	{
+		scaleX = (float)(backgroundImage.cols) / (float)(pluginInference.getImageWidth());
+		scaleY = (float)(backgroundImage.rows) / (float)(pluginInference.getImageHeight());
+		int absOFFSETY = 8*scaleY;
+		int absOFFSETX = 8*scaleX;
+		int OFFSETY = -absOFFSETY;
+		int OFFSETX = -absOFFSETX;
+		if (computedMask1.size() == 0)
+		{
+			computedMask3 = std::vector<float>(computedMask.size(), 0);
+			computedMask2 = std::vector<float>(computedMask.size(), 0);
+			computedMask1 = std::vector<float>(computedMask.size(), 0);
+		}
+
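+		// Pascal VOC class 15 is "person": those pixels become fully opaque
+		// foreground (255); all other pixels get a weighted blend of the three
+		// previous masks (0.6/0.3/0.1) to smooth flicker between frames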
+		std::vector<float> mFloatMask(computedMask.begin(), computedMask.end());
+		for (size_t i = 0; i < computedMask.size(); i++)
+		{
+			if(computedMask[i] == 15)
+			{
+				computedMask[i] = 255;
+				mFloatMask[i] = 255;
+			}
+			else
+			{
+				computedMask[i] = 0;
+				mFloatMask[i] = (float)((int)(0.6 * computedMask1[i] + 0.3 * computedMask2[i] + 0.1 * computedMask3[i]) % 256);
+			}			
+		}
+
+        cv::Mat maskImg(pluginInference.getImageWidth(), pluginInference.getImageHeight(), 
+							CV_32FC1, mFloatMask.data());
+
+		cv::resize(maskImg, maskImg, cv::Size(backgroundImage.cols+2*absOFFSETX, backgroundImage.rows+2*absOFFSETY));
+
+		kSize = cv::Size(maskImg.cols*0.05, maskImg.rows*0.05);
+		if(kSize.height%2 == 0)
+		{
+			kSize.height -= 1;
+		}
+		if(kSize.width%2 == 0)
+		{
+			kSize.width -= 1;
+		}
+
+		cv::GaussianBlur(maskImg, maskImg, kSize, 0);
+		
+		
+		for (int col = 0; col < frame.cols; col++)
+		{
+			for (int row = 0; row < frame.rows; row++)
+			{
+				cv::Point point(col+absOFFSETX+OFFSETX, row+absOFFSETY+OFFSETY);
+				float maskValue = maskImg.at<float>(point)/255.;
+				frame.at<cv::Vec3b>(cv::Point(col, row)) = 
+					backgroundImage.at<cv::Vec3b>(cv::Point(col, row)) * (1 - maskValue)
+					+ frame.at<cv::Vec3b>(cv::Point(col, row)) * maskValue;
+			}
+		}
+
+		computedMask3 = std::vector<float>(computedMask2.begin(), computedMask2.end());
+		computedMask2 = std::vector<float>(computedMask1.begin(), computedMask1.end());
+		computedMask1 = std::vector<float>(computedMask.begin(), computedMask.end());
+	}
+
+} // namespace jami
diff --git a/ForegroundSegmentation/pluginProcessor.h b/ForegroundSegmentation/pluginProcessor.h
new file mode 100644
index 0000000000000000000000000000000000000000..91ea19723f1bc21f4e7eceacb8041df59f8b508b
--- /dev/null
+++ b/ForegroundSegmentation/pluginProcessor.h
@@ -0,0 +1,69 @@
+#pragma once
+// STL
+#include <condition_variable>
+#include <cstdint>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <vector>
+#include <map>
+// Filters
+#include "pluginInference.h"
+// AvFrame
+extern "C" {
+#include <libavutil/frame.h>
+}
+// Plugin
+#include "plugin/jamiplugin.h"
+#include "plugin/mediahandler.h"
+// Frame scaler for frame transformations
+#include "framescaler.h"
+
+namespace jami 
+{
+	class PluginProcessor 
+	{
+		public:
+			PluginProcessor(const std::string &dataPath);			
+			//~PluginProcessor();
+
+			void initModel();
+			/**
+			 * @brief feedInput
+			 * Takes a frame and feeds it to the model storage for predictions
+			 * @param frame
+			 */
+			void feedInput(const cv::Mat &frame);
+
+			/**
+			 * @brief computePredictions
+			 * Uses the model to compute the predictions and store them in
+			 * computedPredictions
+			 */
+			void computePredictions();
+						
+			void printMask();
+			void drawMaskOnFrame(
+				cv::Mat &frame,
+				const std::vector<float> computedMask);		
+
+			// Output predictions
+			std::vector<float> computedMask;
+			std::vector<float> computedMask1;
+			std::vector<float> computedMask2;
+			std::vector<float> computedMask3;
+
+            cv::Mat backgroundImage;	
+			
+			cv::Size kSize;
+			float scaleX = 0;
+			float scaleY = 0;	
+			
+            PluginInference pluginInference;
+			std::string backgroundPath;			
+
+		private:
+            // Frame
+            cv::Mat frame;
+	};
+} // namespace jami
diff --git a/ForegroundSegmentation/videoSubscriber.cpp b/ForegroundSegmentation/videoSubscriber.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cf1b3659a8930d03526c3d061cd8d76a36ed1b31
--- /dev/null
+++ b/ForegroundSegmentation/videoSubscriber.cpp
@@ -0,0 +1,223 @@
+
+#include "videoSubscriber.h"
+// Use for display rotation matrix
+extern "C" {
+#include <libavutil/display.h>
+}
+
+// Opencv processing
+#include <opencv2/imgproc.hpp>
+#include <opencv2/imgcodecs.hpp>
+
+// LOGGING
+#include "pluglog.h"
+
+const std::string TAG = "FORESEG";
+const char sep = separator();
+
+namespace jami 
+{
+	VideoSubscriber::VideoSubscriber(const std::string &dataPath): path_{dataPath},
+	pluginProcessor{dataPath}
+	{
+		/**
+		 * Waits for new frames and then processes them
+		 * Writes the predictions in computedPredictions
+		 **/
+		processFrameThread = std::thread([this] 
+        {
+            while (running) 
+            {
+                std::unique_lock<std::mutex> l(inputLock);
+                inputCv.wait(l, [this] { return not running or newFrame; });
+                if (not running) 
+                {
+                    break;
+                }
+				pluginProcessor.feedInput(fcopy.resizedFrameRGB);
+                newFrame = false;
+                /** Unlock the mutex; this way we let the other thread
+                 *  copy new data while we are processing the old one
+                 **/
+                l.unlock();
+				pluginProcessor.computePredictions();
+            }
+        });
+	}
+
+	VideoSubscriber::~VideoSubscriber()
+	{
+		std::ostringstream oss;
+		oss << "~MediaProcessor" << std::endl;
+		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+		stop();
+		processFrameThread.join();
+	}
+
+	void VideoSubscriber::update(jami::Observable<AVFrame *> *, AVFrame *const &iFrame) 
+	{
+		if (isAttached) 
+		{
+			std::ostringstream oss;
+			//oss << "Looking for iFrame signal: ";
+			//Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+			//======================================================================================
+			// GET FRAME ROTATION
+			AVFrameSideData *side_data =
+				av_frame_get_side_data(iFrame, AV_FRAME_DATA_DISPLAYMATRIX);
+			
+			int angle{0};
+			if (side_data) 
+			{
+				auto matrix_rotation = reinterpret_cast<int32_t *>(side_data->data);
+				angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
+			}
+
+			//======================================================================================
+			// GET RAW FRAME
+			// Use a non-const Frame
+			AVFrame *incFrame = const_cast<AVFrame *>(iFrame);
+			// Convert the input frame to RGB24
+			int inputHeight = incFrame->height;
+			int inputWidth = incFrame->width;
+
+			fcopy.originalSize = cv::Size{inputWidth, inputHeight};
+            FrameUniquePtr bgrFrame = scaler.convertFormat(incFrame, AV_PIX_FMT_RGB24);
+			cv::Mat frame =
+				cv::Mat{bgrFrame->height, bgrFrame->width, CV_8UC3, bgrFrame->data[0],
+						static_cast<size_t>(bgrFrame->linesize[0])};
+			// First clone the frame, as the original one is unusable because of
+			// its linesize (stride)
+			cv::Mat clone = frame.clone();
+			//pluginProcessor.backgroundImage = frame.clone();
+			//======================================================================================
+			// ROTATE THE FRAME
+			// rotateFrame(angle, clone);
+			// rotateFrame(angle, frame);
+			
+			if (firstRun) 
+			{
+				pluginProcessor.pluginInference.setExpectedImageDimensions();
+				fcopy.resizedSize = cv::Size{pluginProcessor.pluginInference.getImageWidth(), pluginProcessor.pluginInference.getImageHeight()};
+
+				cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
+				cv::resize(pluginProcessor.backgroundImage, pluginProcessor.backgroundImage, fcopy.originalSize);
+				
+				// Print Frame dimensions
+				// std::ostringstream oss1;
+				// oss1 << "IS ALLOCATED " << pluginProcessor.pluginInference.isAllocated() << std::endl;
+				// oss1 << "FRAME[]: w: " << iFrame->width << " , h: " << iFrame->height
+					// << " , format: " << iFrame->format << std::endl;
+				// oss1 << "DESIRED WIDTH: " << pluginProcessor.pluginInference.getImageWidth() << std::endl;
+				// oss1 << "DESIRED WIDTH: " << pluginProcessor.pluginInference.getImageHeight() << std::endl;
+				// Plog::log(Plog::LogPriority::INFO, TAG, oss1.str());
+
+				firstRun = false;
+			}
+
+            auto process_start = std::chrono::system_clock::now();
+
+			if (!newFrame) 
+			{
+				std::lock_guard<std::mutex> l(inputLock);
+				cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
+				newFrame = true;
+				inputCv.notify_all();
+			}
+
+			fcopy.predictionsFrameBGR = frame;
+			// pluginProcessor.printMask();
+			pluginProcessor.drawMaskOnFrame(fcopy.predictionsFrameBGR, pluginProcessor.computedMask);
+
+
+
+			//======================================================================================
+			// REPLACE AVFRAME DATA WITH FRAME DATA
+
+			// rotateFrame(-angle, clone);
+			// rotateFrame(-angle, frame);
+
+			if (bgrFrame && bgrFrame->data[0]) 
+			{
+				uint8_t* frameData = bgrFrame->data[0];
+				if(angle == 90 || angle == -90) 
+				{
+					std::memmove(frameData, fcopy.predictionsFrameBGR.data, static_cast<size_t>(iFrame->width*iFrame->height*3) * sizeof(uint8_t));
+				}
+			}
+
+			// Copy Frame meta data
+			if (bgrFrame && incFrame) 
+			{
+				av_frame_copy_props(bgrFrame.get(), incFrame);
+				scaler.moveFrom(incFrame, bgrFrame.get());
+			}
+
+			auto process_end = std::chrono::system_clock::now();
+			std::chrono::duration<double> processing_duration = process_end-process_start;
+
+			// std::ostringstream oss;
+			oss << "Processing time: " << std::chrono::duration_cast<std::chrono::milliseconds>(processing_duration).count() << " ms\n";
+			Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+
+			// Remove the pointer
+			//incFrame = nullptr;
+		}
+	}
+
+	void VideoSubscriber::attached(jami::Observable<AVFrame *> *observable) 
+	{
+		std::ostringstream oss;
+		oss << "::Attached ! " << std::endl;
+		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+		observable_ = observable;
+		isAttached = true;
+	}
+
+	void VideoSubscriber::detached(jami::Observable<AVFrame *> *)
+	{
+		isAttached = false;
+		observable_ = nullptr;
+		std::ostringstream oss;
+		oss << "::Detached()" << std::endl;
+		Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+	}
+
+	void VideoSubscriber::detach() 
+	{
+		if (isAttached)
+		{
+			std::ostringstream oss;
+			oss << "::Calling detach()" << std::endl;
+			Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+			observable_->detach(this);
+		}
+	}
+
+	void VideoSubscriber::stop()
+	{
+		running = false;
+		inputCv.notify_all();
+	}
+
+	void VideoSubscriber::rotateFrame(int angle, cv::Mat &mat) 
+	{
+		if (angle != 0) 
+		{
+			switch (angle) 
+			{
+				case -90:
+					cv::rotate(mat, mat, cv::ROTATE_90_COUNTERCLOCKWISE);
+					break;
+				case 180:
+				case -180:
+					cv::rotate(mat, mat, cv::ROTATE_180);
+					break;
+				case 90:
+					cv::rotate(mat, mat, cv::ROTATE_90_CLOCKWISE);
+					break;
+			}
+		}
+	}
+}
+
diff --git a/ForegroundSegmentation/videoSubscriber.h b/ForegroundSegmentation/videoSubscriber.h
new file mode 100644
index 0000000000000000000000000000000000000000..de08c5a7c78ea6b48dde2ad466fc8b0234380968
--- /dev/null
+++ b/ForegroundSegmentation/videoSubscriber.h
@@ -0,0 +1,83 @@
+#pragma once
+
+// AvFrame
+extern "C" {
+    #include <libavutil/frame.h>
+}
+#include "observer.h"
+
+// STL
+#include <map>
+#include <thread>
+#include <condition_variable>
+
+// Frame Scaler
+#include "framescaler.h"
+
+// OpenCV headers
+#include <opencv2/core.hpp>
+
+// Flatbuffers / Tensorflow headers
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/optional_debug_tools.h>
+
+#include "pluginProcessor.h"
+
+namespace jami 
+{
+    class FrameCopy 
+	{
+		public:
+		// This frame is a resized version of the original in RGB format
+		cv::Mat resizedFrameRGB;
+		cv::Size resizedSize;
+		// This frame is used to draw the predictions into (RGB format)
+		cv::Mat predictionsFrameBGR;
+        cv::Size originalSize;
+	};
+
+    class VideoSubscriber : public jami::Observer<AVFrame *> 
+    {
+        public:
+            VideoSubscriber(const std::string &dataPath);
+            ~VideoSubscriber();
+
+            virtual void update(jami::Observable<AVFrame *> *, AVFrame *const &) override;
+            virtual void attached(jami::Observable<AVFrame *> *) override;
+            virtual void detached(jami::Observable<AVFrame *> *) override;
+
+            void detach();
+            void stop();
+
+
+        private:
+            // Observer pattern
+            Observable<AVFrame *> *observable_;
+            bool isAttached{false};
+
+            //Data
+            std::string path_;
+            
+            // Frame
+            FrameCopy fcopy;
+            cv::Mat frame;
+            
+            FrameScaler scaler;
+            void rotateFrame(int angle, cv::Mat &mat);
+
+            // Threading
+            std::thread processFrameThread;
+            std::mutex inputLock;
+            std::condition_variable inputCv;
+
+            // Status variables of the processing
+            bool firstRun{true};
+            bool running{true};
+            bool newFrame{false};
+
+            //std::shared_ptr<PluginProcessor> pluginProcessor;
+            PluginProcessor pluginProcessor;
+    };
+}
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2dd5f93921f9745b34646ebffb80f1f2e5139393
--- /dev/null
+++ b/README.md
@@ -0,0 +1,142 @@
+$ export DAEMON="<ring-project>/daemon"
+
+OPENCV AND OPENCV_CONTRIB BUILD INSTRUCTIONS
+
+OPENCV VERSION 4.1.1
+OPENCV_CONTRIB VERSION 4.1.1
+
+For Android:
+    $ cd <ring-project>/client-android/
+    change line 158:
+        from ../bootstrap --host=${TARGET_TUPLE} --enable-ffmpeg #--disable-opencv --disable-opencv_contrib
+        to ../bootstrap --host=${TARGET_TUPLE} --enable-ffmpeg --disable-opencv --disable-opencv_contrib
+    $ cd .. && ./make-ring.py --install --distribution=Android
+
+For Linux:
+    $ cd ${DAEMON}/contrib/native/
+    $ ./../bootstrap --enable-opencv --enable-opencv_contrib
+    $ make install
+
+
+TENSORFLOW BUILD INSTRUCTIONS
+
+TF VERSION 2.1.0
+
+Dependencies:
+    1 - python 3
+    2 - bazel 0.27.1
+
+$ git clone https://github.com/tensorflow/tensorflow.git
+$ cd tensorflow
+$ git checkout v2.1.0
+
+
+For Android:
+    Dependencies:
+        1 - Android NDK r18
+
+    $ ./configure 
+        >> Do you wish to build TensorFlow with XLA JIT support? [Y/n]: n
+        >> Do you wish to download a fresh release of clang? (Experimental) [y/N]: y
+        >> Would you like to interactively configure ./WORKSPACE for Android builds? [y/N]: y
+        >> Please specify the home path of the Android NDK to use. [Default is /home/pfreitas/Android/Sdk/ndk-bundle]: enter the path to your NDK r18 install
+
+    $ bazel build //tensorflow/lite:libtensorflowlite.so --crosstool_top=//external:android/crosstool --cpu=armeabi-v7a --host_crosstool_top=@bazel_tools//tools/cpp:toolchain --cxxopt="-std=c++11"
+
+    $ bazel build //tensorflow/lite:libtensorflowlite.so --crosstool_top=//external:android/crosstool --cpu=arm64-v8a --host_crosstool_top=@bazel_tools//tools/cpp:toolchain --cxxopt="-std=c++11"    
+
+    $ bazel build //tensorflow/lite:libtensorflowlite.so --crosstool_top=//external:android/crosstool --cpu=x86_64  --host_crosstool_top=@bazel_tools//tools/cpp:toolchain --cxxopt="-std=c++11"
+
+For Linux:
+
+    $ ./configure 
+
+    $ bazel build //tensorflow/lite:libtensorflowlite.so
+
+
+
+TENSORFLOW INCLUDES ASSEMBLY INSTRUCTIONS
+
+    Keep in mind that after each of the bazel build commands listed above, a "libtensorflowlite.so" is created at:
+        "<tensorflow>/bazel-genfiles/tensorflow/lite/"
+    or at:
+        "<tensorflow>/bazel-out/<cpu>-opt/bin/tensorflow/lite/"
+    (<cpu> may be "armeabi-v7a", "arm64-v8a", "x86_64" or "k8", depending on the build performed)
+    The lib in the first folder is overwritten by each build; the lib in the second folder is not.
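+
+    For example, to collect the arm64-v8a lib into the structure shown below
+    (paths are illustrative; adjust them to your build):
+        $ mkdir -p ~/Libs/_tensorflow_distribuiton/lib/arm64-v8a
+        $ cp <tensorflow>/bazel-out/arm64-v8a-opt/bin/tensorflow/lite/libtensorflowlite.so ~/Libs/_tensorflow_distribuiton/lib/arm64-v8a/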
+
+    Create folders and copy files so that you have the following path structure:
+
+    ~/Libs/
+            _tensorflow_distribuiton/
+                lib/
+                    arm64-v8a/
+                        libtensorflowlite.so
+                    armeabi-v7a/
+                        libtensorflowlite.so
+                    x86_64/
+                        libtensorflowlite.so
+                    x86_64-linux-gnu/
+                        libtensorflowlite.so
+                    ...
+                include/
+                    tensorflow/
+                        lite/
+                            c/
+                                builtin_op_data.h
+                                c_api_internal.h
+                            core/
+                                api/
+                                    error_reporter.h
+                                    op_resolver.h
+                                    profiler.h
+                                subgraph.h
+                            delegates/
+                                gpu/
+                                    delegate.h
+                                nnapi/
+                                    nnapi_delegate.h
+                            experimental/
+                                resource_variable/
+                                    resource_variable.h
+                            kernels/
+                                register.h
+                            nnapi/
+                                NeuralNetworksShim.h
+                                NeuralNetworksTypes.h
+                                nnapi_implementation.h
+                                nnapi_util.h
+                            schema/
+                                schema_generated.h
+                            tools/
+                                evaluation/
+                                    utils.h
+                            allocation.h
+                            builtin_op_data.h
+                            context.h
+                            external_cpu_backend_context.h
+                            interpreter.h
+                            memory_planner.h
+                            model.h
+                            mutable_op_resolver.h
+                            optional_debug_tools.h
+                            simple_memory_arena.h
+                            stderr_reporter.h
+                            string_type.h
+                            util.h
+                    flatbuffers/
+                        base.h
+                        code_generators.h
+                        flatbuffers.h
+                        flatc.h
+                        flexbuffers.h
+                        grpc.h
+                        hash.h
+                        idl.h
+                        minireflect.h
+                        reflection.h
+                        reflection_generated.h
+                        registry.h
+                        stl_emulation.h
+                        util.h
+
diff --git a/lib/framescaler.h b/lib/framescaler.h
new file mode 100644
index 0000000000000000000000000000000000000000..dbe1248ea4a9c5eb4a70ba1b8c76ad6e4ae173bd
--- /dev/null
+++ b/lib/framescaler.h
@@ -0,0 +1,89 @@
+#pragma once
+extern "C" {
+#include <libavutil/avutil.h>
+#include <libavutil/frame.h>
+#include <libavutil/pixfmt.h>
+#include <libswscale/swscale.h>
+}
+
+//STL
+#include <memory>
+#include <functional>
+
+using FrameUniquePtr = std::unique_ptr<AVFrame, void(*)(AVFrame*)>;
+
+class FrameScaler{
+public:
+    FrameScaler() : ctx_(nullptr), mode_(SWS_FAST_BILINEAR) {}
+
+    /**
+     * @brief scaleConvert
+     * Scales an AVFrame to the desired width/height.
+     * Converts the frame to another format if desiredFormat differs from the input pixel format.
+     * @param input
+     * @param desiredWidth
+     * @param desiredHeight
+     * @param desiredFormat
+     * @return the scaled/converted frame (empty if input is null)
+     */
+    FrameUniquePtr scaleConvert(const AVFrame* input, const size_t desiredWidth, const size_t desiredHeight,
+                                const AVPixelFormat desiredFormat){
+        FrameUniquePtr output{av_frame_alloc(), [](AVFrame* frame){ if(frame) {av_frame_free(&frame);} }};
+        if(input) {
+            output->width = static_cast<int>(desiredWidth);
+            output->height = static_cast<int>(desiredHeight);
+            output->format = static_cast<int>(desiredFormat);
+
+            auto output_frame = output.get();
+
+            if (av_frame_get_buffer(output_frame, 0))
+                throw std::bad_alloc();
+
+            ctx_ = sws_getCachedContext(ctx_,
+                                        input->width,
+                                        input->height,
+                                        static_cast<AVPixelFormat>(input->format),
+                                        output_frame->width,
+                                        output_frame->height,
+                                        static_cast<AVPixelFormat>(output_frame->format),
+                                        mode_,
+                                        nullptr, nullptr, nullptr);
+            if (!ctx_) {
+                throw std::bad_alloc();
+            }
+
+            sws_scale(ctx_, input->data, input->linesize, 0,
+                      input->height, output_frame->data,
+                      output_frame->linesize);
+        }
+
+        return output;
+    }
+
+    /**
+     * @brief convertFormat
+     * Converts the frame to the given pixel format, keeping its original dimensions.
+     * @param input
+     * @param pix desired pixel format
+     * @return the converted frame (null if input is null)
+     */
+    FrameUniquePtr convertFormat(const AVFrame* input, AVPixelFormat pix) {
+        return input ? scaleConvert(input, static_cast<size_t>(input->width), static_cast<size_t>(input->height), pix)
+                     : FrameUniquePtr{nullptr, [](AVFrame* frame){ (void)frame; }};
+    }
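+
+    /* Usage sketch (caller names are illustrative):
+     *   FrameScaler scaler;
+     *   // Resize and convert an incoming frame for the model input:
+     *   FrameUniquePtr small = scaler.scaleConvert(avframe, modelWidth, modelHeight, AV_PIX_FMT_RGB24);
+     *   // Only change the pixel format, keeping the original size:
+     *   FrameUniquePtr bgr = scaler.convertFormat(avframe, AV_PIX_FMT_BGR24);
+     */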
+
+    /**
+     * @brief moveFrom
+     * Moves the contents of src into dst; dst is unreferenced first.
+     * @param dst
+     * @param src
+     */
+    void moveFrom(AVFrame* dst,  AVFrame* src) {
+        if(dst && src) {
+            av_frame_unref(dst);
+            av_frame_move_ref(dst, src);
+        }
+    }
+
+protected:
+    SwsContext *ctx_;
+    int mode_;
+};
diff --git a/lib/pluglog.h b/lib/pluglog.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f1787560637034bdadee334e47ceb3abb067fc6
--- /dev/null
+++ b/lib/pluglog.h
@@ -0,0 +1,95 @@
+#ifndef PLUGLOG_H
+#define PLUGLOG_H
+#include <string>
+#include <sstream>
+
+#ifndef __ANDROID__
+#include <iostream>
+#endif
+
+#ifdef __ANDROID__
+#include <android/log.h>
+#endif
+
+inline char separator()
+{
+#ifdef _WIN32
+    return '\\';
+#else
+    return '/';
+#endif
+}
+
+class Plog{
+private:
+    Plog() = delete;
+    Plog(const Plog&) = delete;
+    Plog(Plog&&) = default;
+public:
+    enum class LogPriority{
+        /** For internal use only.  */
+        UNKNOWN,
+        /** The default priority, for internal use only.  */
+        DEFAULT,
+        /** Verbose logging. Should typically be disabled for a release apk. */
+        VERBOSE,
+        /** Debug logging. Should typically be disabled for a release apk. */
+        DEBUG,
+        /** Informational logging. Should typically be disabled for a release apk. */
+        INFO,
+        /** Warning logging. For use with recoverable failures. */
+        WARN,
+        /** Error logging. For use with unrecoverable failures. */
+        ERROR,
+        /** Fatal logging. For use when aborting. */
+        FATAL,
+        /** For internal use only.  */
+        SILENT, /* only for SetMinPriority(); must be last */
+    };
+
+    static void log(const LogPriority priority, const std::string& tag, const std::string& s) {
+
+// Android only
+#ifdef __ANDROID__
+        switch (priority) {
+        case LogPriority::DEBUG:
+            __android_log_print(ANDROID_LOG_DEBUG, tag.c_str(), ": %s", s.c_str());
+            break;
+        case LogPriority::INFO:
+            __android_log_print(ANDROID_LOG_INFO, tag.c_str(), ": %s", s.c_str());
+            break;
+        case LogPriority::WARN:
+            __android_log_print(ANDROID_LOG_WARN, tag.c_str(), ": %s", s.c_str());
+            break;
+        case LogPriority::ERROR:
+            __android_log_print(ANDROID_LOG_ERROR, tag.c_str(), ": %s", s.c_str());
+            break;
+        case LogPriority::FATAL:
+            __android_log_print(ANDROID_LOG_FATAL, tag.c_str(), ": %s", s.c_str());
+            break;
+        default:
+            break;
+        }
+
+// Anything but Android
+#else
+        switch (priority) {
+            case LogPriority::UNKNOWN:
+            case LogPriority::DEFAULT:
+            case LogPriority::VERBOSE:
+            case LogPriority::DEBUG:
+            case LogPriority::INFO:
+            case LogPriority::WARN:
+                std::cout << tag << ": " << s << std::endl;
+                break;
+            case LogPriority::ERROR:
+            case LogPriority::FATAL:
+                std::cerr << tag << ": " << s << std::endl;
+                break;
+            case LogPriority::SILENT:
+                break;
+        }
+
+#endif
+
+    }
+};
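+
+// Example call (tag and message are illustrative):
+//   Plog::log(Plog::LogPriority::INFO, "TENSOR", "interpreter ready");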
+
+#endif // PLUGLOG_H
diff --git a/lib/preferenceHelper.h b/lib/preferenceHelper.h
new file mode 100644
index 0000000000000000000000000000000000000000..a900978f2aeb4abd5512e6f1af791549626c7b57
--- /dev/null
+++ b/lib/preferenceHelper.h
@@ -0,0 +1,25 @@
+#pragma once
+
+#include <string>
+#include <set>
+
+namespace jami {
+namespace Preference {
+
+// Splits a serialized set value into its elements: the first and last
+// characters are treated as enclosing delimiters and stripped, and the
+// remainder is split on commas.
+inline std::set<std::string> parsePreferenceSetValue(const std::string& s) {
+    std::set<std::string> a;
+    if (s.size() < 2) // need at least the two enclosing delimiters
+        return a;
+    size_t startIndex{1};
+    for (size_t i{1}; i < s.size()-1; ++i) {
+        if (s[i] == ',') {
+            a.insert(s.substr(startIndex, i-startIndex));
+            startIndex = i+1;
+        } else if (i == s.size()-2) {
+            a.insert(s.substr(startIndex, s.size()-1-startIndex));
+        }
+    }
+    return a;
+}
+}
+}
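+
+// Usage sketch (delimiters are assumed to be single characters, e.g. "[a,b,c]"):
+//   auto values = jami::Preference::parsePreferenceSetValue("[a,b,c]");
+//   // values == {"a", "b", "c"}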
+