diff --git a/.gitignore b/.gitignore
index 451c56024dd7c16062abf02a3d78baf83024741e..1e8bf2c837115d068a31e9044fc19c0a038c8f9f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,7 @@
 *build-local*
 *android-toolchain-*
 config.mak
-*Libs*
+/contrib/Libs/
 *__pycache__*
 /foo/
 /.vscode/
diff --git a/GreenScreen/CMakeLists.txt b/GreenScreen/CMakeLists.txt
index 3a7890bde56fcc637bd0605082d7ce4b2d97ea1e..552a6b1827bb66d4569d5f20e1617f5bdfca6cc9 100644
--- a/GreenScreen/CMakeLists.txt
+++ b/GreenScreen/CMakeLists.txt
@@ -7,13 +7,13 @@ set (Version 1.0.2)
 project(${ProjectName} VERSION ${Version})
 
 set (DAEMON ${PROJECT_SOURCE_DIR}/../../daemon)
-set (PLUGIN_NAME GreenScreen)
-set (JPL_FILE_NAME ${PLUGIN_NAME}.jpl)
+set (JPL_FILE_NAME ${ProjectName}.jpl)
 set (DAEMON_SRC ${DAEMON}/src)
 set (CONTRIB_PATH ${DAEMON}/contrib)
 set (PLUGINS_LIB ${PROJECT_SOURCE_DIR}/../lib)
 set (JPL_DIRECTORY ${PROJECT_BINARY_DIR}/jpl)
 set (LIBS_DIR ${PROJECT_SOURCE_DIR}/../contrib/Libs)
+set (ONNX_DIR $ENV{PLUGIN_ENV}/onnxruntime)
 
 if(WIN32)
     message(OS:\  WINDOWS\ ${CMAKE_SYSTEM_PROCESSOR})
@@ -22,47 +22,24 @@ if(WIN32)
     endif()
     set (CONTRIB_PLATFORM_CURT x64)
     set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-windows)
-    set (LIBRARY_FILE_NAME ${PLUGIN_NAME}.dll)
-    set (LIBS_BIN_DIR $ENV{PLUGIN_ENV})
+    set (LIBRARY_FILE_NAME ${ProjectName}.dll)
     set (FFMPEG ${CONTRIB_PATH}/build/ffmpeg/Build/win32/x64)
 endif()
 
-if(UNIX)
-    message( FATAL_ERROR "\nUse CMake only for Windows! For linux or Android (linux host), use our bash scripts.\nPlese refer to documentation for more infos." )
-    message(OS:\  LINUX\ ${CMAKE_SYSTEM_PROCESSOR})
-    set (CONTRIB_PLATFORM_CURT ${CMAKE_SYSTEM_PROCESSOR})
-    set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-linux-gnu)
-    set (LIBRARY_FILE_NAME lib${PLUGIN_NAME}.so)
-    set (LIBS_BIN_DIR /home/${USER}/Libs)
-endif()
-
-
 message(Building:\   ${ProjectName}\   ${Version})
 message(Build\ path:\ ${PROJECT_BINARY_DIR})
 message(JPL\ assembling\ path:\ ${JPL_DIRECTORY})
 message(JPL\ path:\ ${JPL_DIRECTORY}/../../../build/${ProjectName}/${JPL_FILE_NAME})
 
-set(TENSORFLOW _tensorflow_cc)
-set(model mModel-resnet50float.pb)
-set(modelType .pb)
-set(preferencesFile preferences-tfcc.json)
-set(TFLIB libtensorflow_cc)
-set(GPU -gpu61)
-
-if (CPU)
-    set(GPU )
-    add_definitions(-DCPU)
-    message(CPU\ BUILDING!)
-endif()
-
-if (TFLITE)
-    add_definitions(-DTFLITE)
-    set(TENSORFLOW _tensorflowLite)
-    set(model mobilenet_v2_deeplab_v3_256_myquant.tflite)
-    set(modelType .tflite)
-    set(preferencesFile preferences-tflite.json)
-    set(TFLIB libtensorflowlite)
-    message(TFLITE\ BUILDING!)
+if(NVIDIA)
+    add_definitions(-DNVIDIA)
+    set(ONNX_DIR ${ONNX_DIR}/nvidia-gpu)
+    message(Provider:\ NVIDIA)
+    set(EXTRA_PATH nvidia-gpu)
+else()
+    set(ONNX_DIR ${ONNX_DIR}/cpu)
+    message(Provider:\ NONE)
+    set(EXTRA_PATH cpu)
 endif()
 
 set(CMAKE_CXX_STANDARD 17)
@@ -71,23 +48,16 @@ set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
 set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
 
 set(plugin_SRC main.cpp
-               pluginInference.cpp
                pluginMediaHandler.cpp
-               pluginParameters.cpp
-               pluginProcessor.cpp
-               TFInference.cpp
                videoSubscriber.cpp
+               pluginProcessor.cpp
                ./../lib/accel.cpp
                ./../lib/frameUtils.cpp
                )
 
-set(plugin_HDR pluginInference.h
-               pluginMediaHandler.h
-               pluginParameters.h
-               pluginProcessor.h
-               TFInference.h
-               TFModels.h
+set(plugin_HDR pluginMediaHandler.h
                videoSubscriber.h
+               pluginProcessor.h
                ./../lib/accel.h
                ./../lib/frameScaler.h
                ./../lib/frameUtils.h
@@ -109,49 +79,28 @@ target_include_directories(${ProjectName} PUBLIC ${PROJECT_BINARY_DIR}
                                                  ${CONTRIB_PATH}
                                                  ${FFMPEG}/include
                                                  ${CONTRIB_PATH}/build/opencv/build/install/include
-                                                 ${LIBS_DIR}/${TENSORFLOW}/include
-                                                 ${LIBS_DIR}/${TENSORFLOW}/include/third_party/eigen3
-                                                 ${LIBS_DIR}/${TENSORFLOW}/include/flatbuffers
+                                                 ${ONNX_DIR}/../include/session
+                                                 ${ONNX_DIR}/../include/providers/cuda
                                                  )
 target_link_directories(${ProjectName} PUBLIC ${CONTRIB_PATH}
-                                        ${LIBS_BIN_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
                                         ${CONTRIB_PATH}/build/opencv/build/lib/Release
                                         ${CONTRIB_PATH}/build/opencv/build/3rdparty/lib/Release
                                         ${FFMPEG}/bin
+                                        ${ONNX_DIR}
                                         )
 
-target_link_libraries(${ProjectName} PUBLIC swscale avutil libpng opencv_imgcodecs411 opencv_imgproc411 opencv_core411 ${TFLIB} zlib)
-endif()
-
-if (UNIX)
-target_include_directories(${ProjectName} PUBLIC ${PROJECT_BINARY_DIR}
-                                                 ${PROJECT_SOURCE_DIR}
-                                                 ${PLUGINS_LIB}
-                                                 ${DAEMON_SRC}
-                                                 ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include
-                                                 ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4
-                                                 ${LIBS_DIR}/${TENSORFLOW}/include
-                                                 ${LIBS_DIR}/${TENSORFLOW}/include/third_party/eigen3
-                                                 ${LIBS_DIR}/${TENSORFLOW}/include/flatbuffers
-                                                 )
-link_directories(${ProjectName} PUBLIC    ${CONTRIB_PATH}
-                                        ${LIBS_BIN_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
-                                        ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib
-                                        ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty
-                                        )
-target_link_libraries(${ProjectName} PUBLIC swscale avutil libpng opencv_imgcodecs opencv_imgproc opencv_core ${TFLIB})
+target_link_libraries(${ProjectName} PUBLIC swscale avutil libpng libjpeg-turbo opencv_imgcodecs411 opencv_imgproc411 opencv_core411 onnxruntime zlib)
 endif()
 
 add_custom_command(
     TARGET ${ProjectName}
     PRE_BUILD
-    COMMAND python ${PROJECT_SOURCE_DIR}/../SDK/jplManipulation.py --preassemble --plugin=GreenScreen
-    COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBS_BIN_DIR}/${TENSORFLOW}/lib/ ${JPL_DIRECTORY}/lib
-    COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/data/models
-    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/modelsSRC/${model} ${JPL_DIRECTORY}/data/models
-    COMMAND ${CMAKE_COMMAND} -E rename ${JPL_DIRECTORY}/data/models/${model} ${JPL_DIRECTORY}/data/models/mModel${modelType}
-    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/${preferencesFile} ${JPL_DIRECTORY}/data
-    COMMAND ${CMAKE_COMMAND} -E rename ${JPL_DIRECTORY}/data/${preferencesFile} ${JPL_DIRECTORY}/data/preferences.json
+    COMMAND python ${PROJECT_SOURCE_DIR}/../SDK/jplManipulation.py --preassemble --plugin=${ProjectName}
+    COMMAND ${CMAKE_COMMAND} -E copy ${ONNX_DIR}/onnxruntime.lib ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
+    COMMAND ${CMAKE_COMMAND} -E copy ${ONNX_DIR}/onnxruntime.dll ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
+    COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/data/model
+    COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_SOURCE_DIR}/modelSRC/mModel.onnx ${JPL_DIRECTORY}/data/model
+    COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_SOURCE_DIR}/preferences-onnx.json ${JPL_DIRECTORY}/data/preferences.json
     COMMENT "Assembling Plugin files"
 )
 
@@ -159,20 +108,10 @@ if (WIN32)
     add_custom_command(
         TARGET ${ProjectName}
         POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/../../../build/x64-windows/${TENSORFLOW}
         COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${ProjectName}.lib ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
         COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${LIBRARY_FILE_NAME} ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
-        COMMAND python ${PROJECT_SOURCE_DIR}/../SDK/jplManipulation.py --assemble --plugin=GreenScreen --extraPath=${TENSORFLOW}
-        COMMENT "Generating JPL archive"
-    )
-else()
-    add_custom_command(
-        TARGET ${ProjectName}
-        POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/../../../build/x86_64-linux-gnu/${TENSORFLOW}
-        COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/${LIBRARY_FILE_NAME} ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
-        COMMAND python3 ${PROJECT_SOURCE_DIR}/../SDK/jplManipulation.py --assemble --plugin=GreenScreen --extraPath=${TENSORFLOW}
+        COMMAND python ${PROJECT_SOURCE_DIR}/../SDK/jplManipulation.py --assemble --plugin=${ProjectName} --extraPath=${EXTRA_PATH}
         COMMENT "Generating JPL archive"
     )
 
-endif()
\ No newline at end of file
+endif()
diff --git a/GreenScreen/build.sh b/GreenScreen/build.sh
index fd936e8efc8bd7367a31e6f9f1880bfa0e3d2b84..04e9efbd17915643ab837aa0c702cd3926f39a3b 100755
--- a/GreenScreen/build.sh
+++ b/GreenScreen/build.sh
@@ -10,11 +10,14 @@ EXTRAPATH=''
   # -c: Runtime plugin cpu/gpu setting.
   # -t: target platform.
 
-
 if [ -z "${DAEMON}" ]; then
     DAEMON="./../../daemon"
     echo "DAEMON not provided, building with ${DAEMON}"
 fi
+if [ -z "${PROCESSOR}" ]; then
+    PROCESSOR="CPU"
+    echo "PROCESSOR not provided, building with ${PROCESSOR}"
+fi
 
 PLUGIN_NAME="GreenScreen"
 JPL_FILE_NAME="${PLUGIN_NAME}.jpl"
@@ -24,28 +27,24 @@ CONTRIB_PATH="${DAEMON}/contrib"
 PLUGINS_LIB="../lib"
 LIBS_DIR="./../contrib/Libs"
 
-if [ -z "${TF_LIBS_DIR}" ]; then
-    TF_LIBS_DIR="./../../../Libs"
+if [ "${PROCESSOR}" = "CPU" ]; then
+    ONNX_LIBS="cpu"
+elif [ "${PROCESSOR}" = "NVIDIA" ]; then
+    ONNX_LIBS="nvidia-gpu"
+    CUBLASLT="-lcublasLt"
 fi
-echo "Building with ${TF_LIBS_DIR}"
 
 PLATFORM="linux-gnu"
-PROCESSOR='GPU'
 
 while getopts t:c:p OPT; do
   case "$OPT" in
     t)
       PLATFORM="${OPTARG}"
-      if [ -z "${TF}" ]; then
-          if [ "$PLATFORM" = 'linux-gnu' ]; then
-              TF="_tensorflow_cc"
-          elif [ "$PLATFORM" = 'android' ]; then
-              TF="_tensorflowLite"
-          fi
+      if [ "${PLATFORM}" = "android" ]; then
+          ONNX_LIBS=""
       fi
     ;;
     c)
-      PROCESSOR="${OPTARG}"
     ;;
     p)
     ;;
@@ -55,30 +54,17 @@ while getopts t:c:p OPT; do
   esac
 done
 
-
-if [ -z "${TF}" ]; then
-    TF="_tensorflow_cc"
-fi
-echo "Building with ${TF}"
-
-if [[ "${TF}" = "_tensorflow_cc" ]] && [[ "${PLATFORM}" = "linux-gnu" ]]
+if [ "${PLATFORM}" = "linux-gnu" ]
 then
-    if [ -z "$CUDALIBS" ]; then
-        echo "CUDALIBS must point to CUDA 10.1!"
-        exit
-    fi
-    if [ -z "$CUDNN" ]; then
-        echo "CUDNN must point to libcudnn.so 7!"
-        exit
-    fi
-
-    echo "Building for ${PROCESSOR}"
-
     python3 ./../SDK/jplManipulation.py --preassemble --plugin=${PLUGIN_NAME} 
 
     CONTRIB_PLATFORM_CURT=${ARCH}
     CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-${PLATFORM}
-    EXTRAPATH=${TF}
+    ONNX_PATH=${EXTRALIBS_PATH}
+    if [ -z "${EXTRALIBS_PATH}" ]
+    then
+      ONNX_PATH="${CONTRIB_PATH}/${CONTRIB_PLATFORM}"
+    fi
 
     # Compile
     clang++ -std=c++17 -shared -fPIC \
@@ -87,258 +73,217 @@ then
     -Wno-unused-variable \
     -Wno-unused-function \
     -Wno-unused-parameter \
-    -D"${PROCESSOR}" \
+    -D"${PROCESSOR}" \
     -I"." \
     -I"${DAEMON_SRC}" \
     -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
     -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
-    -I"${LIBS_DIR}/${TF}/include" \
-    -I"${LIBS_DIR}/${TF}/include/third_party/eigen3" \
+    -I"${ONNX_PATH}/include/onnxruntime/session" \
+    -I"${ONNX_PATH}/include/onnxruntime/providers/cuda" \
     -I"${PLUGINS_LIB}" \
     ./../lib/accel.cpp \
     ./../lib/frameUtils.cpp \
     main.cpp \
     videoSubscriber.cpp \
-    pluginProcessor.cpp \
     pluginMediaHandler.cpp \
-    TFInference.cpp \
-    pluginInference.cpp \
-    pluginParameters.cpp \
+    pluginProcessor.cpp \
     -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
     -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/" \
-    -L"${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}-gpu61/" \
+    -L"${ONNX_PATH}/lib/onnxruntime/${ONNX_LIBS}" \
+    -L"${CUDA_HOME}/lib64/" \
     -l:libswscale.a \
     -l:libavutil.a \
     -lopencv_imgcodecs \
     -lopencv_imgproc \
     -lopencv_core \
     -llibpng \
-    -lva \
-    -ltensorflow_cc \
+    -llibjpeg-turbo \
+    -lva ${CUBLASLT} \
+    -lonnxruntime \
     -o "build-local/jpl/lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}"
 
-    cp "${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}-gpu61/libtensorflow_cc.so" "build-local/jpl/lib/$CONTRIB_PLATFORM/libtensorflow_cc.so.2"
-    cp "${CUDALIBS}/libcudart.so" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcudart.so.10.0"
-    cp "${CUDNN}/libcublas.so.10" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcublas.so.10.0"
-    cp "${CUDALIBS}/libcufft.so.10" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcufft.so.10.0"
-    cp "${CUDALIBS}/libcurand.so.10" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcurand.so.10.0"
-    cp "${CUDALIBS}/libcusolver.so.10" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcusolver.so.10.0"
-    cp "${CUDALIBS}/libcusparse.so.10" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcusparse.so.10.0"
-    cp "${CUDNN}/libcudnn.so.7" "build-local/jpl/lib/$CONTRIB_PLATFORM"
-
-    pwd
-    mkdir ./build-local/jpl/data/models
-    cp ./modelsSRC/mModel-resnet50float.pb ./build-local/jpl/data/models/mModel.pb
-    cp ./preferences-tfcc.json ./build-local/jpl/data/preferences.json
-elif [ "${TF}" = "_tensorflowLite" ]
-then
-    if [ "${PLATFORM}" = "linux-gnu" ]
+
+    cp "${ONNX_PATH}/lib/onnxruntime/${ONNX_LIBS}/libonnxruntime.so" "build-local/jpl/lib/$CONTRIB_PLATFORM/libonnxruntime.so.1.6.0"
+    if [ "${PROCESSOR}" = "NVIDIA" ]
     then
-        python3 ./../SDK/jplManipulation.py --preassemble --plugin=${PLUGIN_NAME}
+      cp "${CUDA_HOME}/lib64/libcudart.so.10.2.89" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcudart.so.10.2"
+      cp "${CUDA_HOME}/lib64/libcublas.so.10.2.3.254" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcublas.so.10"
+      cp "${CUDA_HOME}/lib64/libcufft.so.10.1.2.89" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcufft.so.10"
+      cp "${CUDA_HOME}/lib64/libcurand.so.10.1.2.89" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcurand.so.10"
+      cp "${CUDA_HOME}/lib64/libcublasLt.so.10.2.3.254" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcublasLt.so.10"
+      cp "${CUDNN_HOME}/libcudnn.so.8" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcudnn.so.8"
+      cp "${CUDNN_HOME}/libcudnn_ops_infer.so.8" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcudnn_ops_infer.so.8"
+    fi
+
+    mkdir -p "./build-local/jpl/data/model"
+    cp "./modelSRC/mModel.onnx" "./build-local/jpl/data/model/mModel.onnx"
+    cp "./preferences-onnx.json" "./build-local/jpl/data/preferences.json"
+
+elif [ "${PLATFORM}" = "android" ]
+then
+    python3 ./../SDK/jplManipulation.py --preassemble --plugin=${PLUGIN_NAME} --distribution=${PLATFORM}
+
+    if [ -z "$ANDROID_NDK" ]; then
+        ANDROID_NDK="/home/${USER}/Android/Sdk/ndk/21.1.6352462"
+        echo "ANDROID_NDK not provided, building with ${ANDROID_NDK}"
+    fi
+
+    #=========================================================
+    #    Check if the ANDROID_ABI was provided
+    #    if not, set default
+    #=========================================================
+    if [ -z "$ANDROID_ABI" ]; then
+        ANDROID_ABI="armeabi-v7a arm64-v8a x86_64"
+        echo "ANDROID_ABI not provided, building for ${ANDROID_ABI}"
+    fi
+
+    buildlib() {
+        echo "$CURRENT_ABI"
+
+        #=========================================================
+        #    ANDROID TOOLS
+        #=========================================================
+        export HOST_TAG=linux-x86_64
+        export TOOLCHAIN=$ANDROID_NDK/toolchains/llvm/prebuilt/$HOST_TAG
+
+        if [ "$CURRENT_ABI" = armeabi-v7a ]
+        then
+        export AR=$TOOLCHAIN/bin/arm-linux-android-ar
+        export AS=$TOOLCHAIN/bin/arm-linux-android-as
+        export CC=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang
+        export CXX=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang++
+        export LD=$TOOLCHAIN/bin/arm-linux-android-ld
+        export RANLIB=$TOOLCHAIN/bin/arm-linux-android-ranlib
+        export STRIP=$TOOLCHAIN/bin/arm-linux-androideabi-strip
+        export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-arm/sysroot
+
+        elif [ "$CURRENT_ABI" = arm64-v8a ]
+        then
+        export AR=$TOOLCHAIN/bin/aarch64-linux-android-ar
+        export AS=$TOOLCHAIN/bin/aarch64-linux-android-as
+        export CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang
+        export CXX=$TOOLCHAIN/bin/aarch64-linux-android21-clang++
+        export LD=$TOOLCHAIN/bin/aarch64-linux-android-ld
+        export RANLIB=$TOOLCHAIN/bin/aarch64-linux-android-ranlib
+        export STRIP=$TOOLCHAIN/bin/aarch64-linux-android-strip
+        export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-arm64/sysroot
+
+        elif [ "$CURRENT_ABI" = x86_64 ]
+        then
+        export AR=$TOOLCHAIN/bin/x86_64-linux-android-ar
+        export AS=$TOOLCHAIN/bin/x86_64-linux-android-as
+        export CC=$TOOLCHAIN/bin/x86_64-linux-android21-clang
+        export CXX=$TOOLCHAIN/bin/x86_64-linux-android21-clang++
+        export LD=$TOOLCHAIN/bin/x86_64-linux-android-ld
+        export RANLIB=$TOOLCHAIN/bin/x86_64-linux-android-ranlib
+        export STRIP=$TOOLCHAIN/bin/x86_64-linux-android-strip
+        export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-x86_64/sysroot
+
+        else
+        echo "ABI NOT OK" >&2
+        exit 1
+        fi
+
+        #=========================================================
+        #    CONTRIBS
+        #=========================================================
+        if [ "$CURRENT_ABI" = armeabi-v7a ]
+        then
+        CONTRIB_PLATFORM=arm-linux-androideabi
 
-        CONTRIB_PLATFORM_CURT=${ARCH}
-        CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-${PLATFORM}
-        EXTRAPATH="${TF}"
+        elif [ "$CURRENT_ABI" = arm64-v8a ]
+        then
+        CONTRIB_PLATFORM=aarch64-linux-android
 
-        # Compile
-        clang++ -std=c++17 -shared -fPIC \
+        elif [ "$CURRENT_ABI" = x86_64 ]
+        then
+        CONTRIB_PLATFORM=x86_64-linux-android
+        fi
+
+        #NDK SOURCES FOR cpufeatures
+        NDK_SOURCES=${ANDROID_NDK}/sources/android
+
+        #=========================================================
+        #    LD_FLAGS
+        #=========================================================
+        if [ "$CURRENT_ABI" = armeabi-v7a ]
+        then
+        export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi/21"
+        elif [ "$CURRENT_ABI" = arm64-v8a ]
+        then
+        export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android/21"
+        elif [ "$CURRENT_ABI" = x86_64 ]
+        then
+        export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android/21"
+        fi
+
+        #=========================================================
+        #    Compile CPU FEATURES, NEEDED FOR OPENCV
+        #=========================================================
+        $CC -c "$NDK_SOURCES/cpufeatures/cpu-features.c" -o cpu-features.o --sysroot=$ANDROID_SYSROOT
+
+        #=========================================================
+        #    Compile the plugin
+        #=========================================================
+
+        ONNX_PATH="${EXTRALIBS_PATH}/${CURRENT_ABI}"
+        if [ -z "${EXTRALIBS_PATH}" ]
+        then
+          ONNX_PATH="${CONTRIB_PATH}/${CONTRIB_PLATFORM}"
+        fi
+
+        # Create so destination folder
+        $CXX --std=c++17 -O3 -g -fPIC \
         -Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
+        -shared \
         -Wall -Wextra \
         -Wno-unused-variable \
         -Wno-unused-function \
         -Wno-unused-parameter \
-        -DTFLITE \
+        -DANDROID \
         -I"." \
         -I"${DAEMON_SRC}" \
         -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
         -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
-        -I"${LIBS_DIR}/${TF}/include/flatbuffers" \
-        -I"${LIBS_DIR}/${TF}/include" \
+        -I"${ONNX_PATH}/include/onnxruntime/session" \
+        -I"${ONNX_PATH}/include/onnxruntime/providers/nnapi" \
+        -I"${ONNX_PATH}/../include/onnxruntime/session" \
+        -I"${ONNX_PATH}/../include/onnxruntime/providers/nnapi" \
         -I"${PLUGINS_LIB}" \
         ./../lib/accel.cpp \
         ./../lib/frameUtils.cpp \
+        main.cpp \
         videoSubscriber.cpp \
-        pluginProcessor.cpp \
         pluginMediaHandler.cpp \
-        TFInference.cpp \
-        pluginInference.cpp \
-        pluginParameters.cpp \
-        main.cpp \
+        pluginProcessor.cpp \
+        cpu-features.o \
         -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
-        -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/" \
-        -L"${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}/" \
-        -l:libswscale.a \
-        -l:libavutil.a \
+        -L"${ONNX_PATH}/lib/" \
+        -lswscale \
+        -lavutil \
         -lopencv_imgcodecs \
         -lopencv_imgproc \
         -lopencv_core \
-        -ltensorflowlite \
         -llibpng \
-        -lva \
-        -o "build-local/jpl/lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}"
-
-        cp "${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}/libtensorflowlite.so" "build-local/jpl/lib/$CONTRIB_PLATFORM"
-
-    elif [ "${PLATFORM}" = "android" ]
-    then
-        python3 ./../SDK/jplManipulation.py --preassemble --plugin=${PLUGIN_NAME} --distribution=${PLATFORM}
-
-        if [ -z "$ANDROID_NDK" ]; then
-             ANDROID_NDK="/home/${USER}/Android/Sdk/ndk/21.1.6352462"
-            echo "ANDROID_NDK not provided, building with ${ANDROID_NDK}"
-        fi
-
-        #=========================================================
-        #    Check if the ANDROID_ABI was provided
-        #    if not, set default
-        #=========================================================
-        if [ -z "$ANDROID_ABI" ]; then
-            ANDROID_ABI="armeabi-v7a arm64-v8a"
-            echo "ANDROID_ABI not provided, building for ${ANDROID_ABI}"
-        fi
-
-        buildlib() {
-            echo "$CURRENT_ABI"
-
-            #=========================================================
-            #    ANDROID TOOLS
-            #=========================================================
-            export HOST_TAG=linux-x86_64
-            export TOOLCHAIN=$ANDROID_NDK/toolchains/llvm/prebuilt/$HOST_TAG
-
-            if [ "$CURRENT_ABI" = armeabi-v7a ]
-            then
-            export AR=$TOOLCHAIN/bin/arm-linux-android-ar
-            export AS=$TOOLCHAIN/bin/arm-linux-android-as
-            export CC=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang
-            export CXX=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang++
-            export LD=$TOOLCHAIN/bin/arm-linux-android-ld
-            export RANLIB=$TOOLCHAIN/bin/arm-linux-android-ranlib
-            export STRIP=$TOOLCHAIN/bin/arm-linux-androideabi-strip
-            export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-arm/sysroot
-
-            elif [ "$CURRENT_ABI" = arm64-v8a ]
-            then
-            export AR=$TOOLCHAIN/bin/aarch64-linux-android-ar
-            export AS=$TOOLCHAIN/bin/aarch64-linux-android-as
-            export CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang
-            export CXX=$TOOLCHAIN/bin/aarch64-linux-android21-clang++
-            export LD=$TOOLCHAIN/bin/aarch64-linux-android-ld
-            export RANLIB=$TOOLCHAIN/bin/aarch64-linux-android-ranlib
-            export STRIP=$TOOLCHAIN/bin/aarch64-linux-android-strip
-            export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-arm64/sysroot
-
-            elif [ "$CURRENT_ABI" = x86_64 ]
-            then
-            export AR=$TOOLCHAIN/bin/x86_64-linux-android-ar
-            export AS=$TOOLCHAIN/bin/x86_64-linux-android-as
-            export CC=$TOOLCHAIN/bin/x86_64-linux-android21-clang
-            export CXX=$TOOLCHAIN/bin/x86_64-linux-android21-clang++
-            export LD=$TOOLCHAIN/bin/x86_64-linux-android-ld
-            export RANLIB=$TOOLCHAIN/bin/x86_64-linux-android-ranlib
-            export STRIP=$TOOLCHAIN/bin/x86_64-linux-android-strip
-            export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-x86_64/sysroot
-
-            else
-            echo "ABI NOT OK" >&2
-            exit 1
-            fi
-
-            #=========================================================
-            #    CONTRIBS
-            #=========================================================
-            if [ "$CURRENT_ABI" = armeabi-v7a ]
-            then
-            CONTRIB_PLATFORM=arm-linux-androideabi
-
-            elif [ "$CURRENT_ABI" = arm64-v8a ]
-            then
-            CONTRIB_PLATFORM=aarch64-linux-android
-
-            elif [ "$CURRENT_ABI" = x86_64 ]
-            then
-            CONTRIB_PLATFORM=x86_64-linux-android
-            fi
-
-            #NDK SOURCES FOR cpufeatures
-            NDK_SOURCES=${ANDROID_NDK}/sources/android
-
-            #=========================================================
-            #    LD_FLAGS
-            #=========================================================
-            if [ "$CURRENT_ABI" = armeabi-v7a ]
-            then
-            export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi/21"
-            elif [ "$CURRENT_ABI" = arm64-v8a ]
-            then
-            export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android/21"
-            elif [ "$CURRENT_ABI" = x86_64 ]
-            then
-            export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android/21"
-            fi
-
-            #=========================================================
-            #    Compile CPU FEATURES, NEEDED FOR OPENCV
-            #=========================================================
-            $CC -c "$NDK_SOURCES/cpufeatures/cpu-features.c" -o cpu-features.o -o cpu-features.o --sysroot=$ANDROID_SYSROOT
-
-            #=========================================================
-            #    Compile the plugin
-            #=========================================================
-
-            # Create so destination folder
-            $CXX --std=c++17 -O3 -g -fPIC \
-            -Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
-            -shared \
-            -Wall -Wextra \
-            -Wno-unused-variable \
-            -Wno-unused-function \
-            -Wno-unused-parameter \
-            -DTFLITE \
-            -I"." \
-            -I"${DAEMON_SRC}" \
-            -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
-            -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
-            -I"${LIBS_DIR}/${TF}/include/flatbuffers" \
-            -I"${LIBS_DIR}/${TF}/include" \
-            -I"${PLUGINS_LIB}" \
-            ./../lib/accel.cpp \
-            ./../lib/frameUtils.cpp \
-            main.cpp \
-            videoSubscriber.cpp \
-            pluginProcessor.cpp \
-            pluginMediaHandler.cpp \
-            TFInference.cpp \
-            pluginInference.cpp \
-            pluginParameters.cpp \
-            cpu-features.o \
-            -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
-            -L"${TF_LIBS_DIR}/${TF}/lib/${CURRENT_ABI}/" \
-            -lswscale \
-            -lavutil \
-            -lopencv_imgcodecs \
-            -lopencv_imgproc \
-            -lopencv_core \
-            -llibpng \
-            -ltensorflowlite \
-            -llog -lz \
-            --sysroot=$ANDROID_SYSROOT \
-            -o "build-local/jpl/lib/$CURRENT_ABI/${SO_FILE_NAME}"
-
-            cp "${TF_LIBS_DIR}/${TF}/lib/${CURRENT_ABI}/libtensorflowlite.so" "build-local/jpl/lib/$CURRENT_ABI"
-            rm cpu-features.o
-        }
-
-        # Build the so
-        for i in ${ANDROID_ABI}; do
-            CURRENT_ABI=$i
-            buildlib
-        done
-    fi
-
-    mkdir ./build-local/jpl/data/models
-    cp ./modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite ./build-local/jpl/data/models/mModel.tflite
-    cp ./preferences-tflite.json ./build-local/jpl/data/preferences.json
+        -llibjpeg-turbo \
+        -llog -lz \
+        -lonnxruntime \
+        --sysroot=$ANDROID_SYSROOT \
+        -o "build-local/jpl/lib/$CURRENT_ABI/${SO_FILE_NAME}"
+
+        rm cpu-features.o
+        cp "${ONNX_PATH}/lib/libonnxruntime.so" "build-local/jpl/lib/${CURRENT_ABI}/libonnxruntime.so"
+    }
+
+    # Build the so
+    for i in ${ANDROID_ABI}; do
+        CURRENT_ABI=$i
+        buildlib
+    done
+
+    mkdir -p "./build-local/jpl/data/model"
+    cp "./modelSRC/mModel.ort" "./build-local/jpl/data/model/mModel.ort"
+    cp "./preferences-ort.json" "./build-local/jpl/data/preferences.json"
 fi
 
-python3 ./../SDK/jplManipulation.py --assemble --plugin=${PLUGIN_NAME} --distribution=${PLATFORM} --extraPath=${EXTRAPATH}
+python3 ./../SDK/jplManipulation.py --assemble --plugin=${PLUGIN_NAME} --distribution=${PLATFORM} --extraPath=${ONNX_LIBS}
diff --git a/GreenScreen/data/backgrounds/background1.png b/GreenScreen/data/backgrounds/background1.png
index 849251c69bd6a1a1ce7917b7d806cec85f0b7b01..4e3a20783f89b021c9b576207a24d156f3e523ef 100644
Binary files a/GreenScreen/data/backgrounds/background1.png and b/GreenScreen/data/backgrounds/background1.png differ
diff --git a/GreenScreen/data/backgrounds/background2.jpeg b/GreenScreen/data/backgrounds/background2.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..71e8cef8b8b1786c10d403a0815e8e4744df139b
Binary files /dev/null and b/GreenScreen/data/backgrounds/background2.jpeg differ
diff --git a/GreenScreen/data/backgrounds/background3.png b/GreenScreen/data/backgrounds/background3.png
new file mode 100644
index 0000000000000000000000000000000000000000..0f95b827ad2b5b5e7241f8da7fe10c5881055f87
Binary files /dev/null and b/GreenScreen/data/backgrounds/background3.png differ
diff --git a/GreenScreen/data/icon.png b/GreenScreen/data/icon.png
index f44370f1f48de6fe24377c74c1fbbcd0097e6a12..7e85b06cfdb29ca4765589d79942bcb0a60b9bce 100644
Binary files a/GreenScreen/data/icon.png and b/GreenScreen/data/icon.png differ
diff --git a/GreenScreen/manifest.json b/GreenScreen/manifest.json
index 14b175df10eb72b19b39234a256d3a5bbe0a9fa0..c1a2221c35b1b6804113dd3163a1674c8c5bf938 100644
--- a/GreenScreen/manifest.json
+++ b/GreenScreen/manifest.json
@@ -1,5 +1,5 @@
 {
     "name": "GreenScreen",
-    "description": "GreenScreen Plugin with Tensorflow 2.1.1",
+    "description": "GreenScreen Plugin with onnx",
     "version": "1.0.2"
 }
\ No newline at end of file
diff --git a/GreenScreen/modelSRC/mModel.onnx b/GreenScreen/modelSRC/mModel.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..0535110e4dcc516da95af5716948600c87eb0173
Binary files /dev/null and b/GreenScreen/modelSRC/mModel.onnx differ
diff --git a/GreenScreen/modelSRC/mModel.ort b/GreenScreen/modelSRC/mModel.ort
new file mode 100644
index 0000000000000000000000000000000000000000..d166b9f9d11d569b78f52a1fab7aa5f16a5a0d44
Binary files /dev/null and b/GreenScreen/modelSRC/mModel.ort differ
diff --git a/GreenScreen/package.json b/GreenScreen/package.json
index 974b1923ffc5594655db065b93448ebcc5c32fa0..373f1b831979f8723f9b86ce01f7e42fececb0fd 100644
--- a/GreenScreen/package.json
+++ b/GreenScreen/package.json
@@ -1,14 +1,13 @@
 {
     "name": "GreenScreen",
     "version": "1.0.2",
-    "extractLibs": true,
+    "extractLibs": false,
     "deps": [
         "ffmpeg",
         "opencv"
     ],
     "defines": [
-        "TFLITE=False",
-        "CPU=False"
+        "NVIDIA=False"
     ],
     "custom_scripts": {
         "pre_build": [
diff --git a/GreenScreen/pluginMediaHandler.cpp b/GreenScreen/pluginMediaHandler.cpp
index 44d68c75b63cf0e0ff9b6f3f57ec2bdcf3f2c38c..51b9edfa9a3756c4e9bb7d2df34ee0d153e6bab7 100644
--- a/GreenScreen/pluginMediaHandler.cpp
+++ b/GreenScreen/pluginMediaHandler.cpp
@@ -35,9 +35,8 @@ PluginMediaHandler::PluginMediaHandler(std::map<std::string, std::string>&& pref
     : datapath_ {datapath}
     , preferences_ {preferences}
 {
-    setGlobalPluginParameters(preferences_);
     setId(datapath_);
-    mVS = std::make_shared<VideoSubscriber>(datapath_);
+    mVS = std::make_shared<VideoSubscriber>(datapath_, preferences_.at("modellist"), preferences_.at("background"), preferences_.at("acceleration") == "1");
 }
 
 void
@@ -50,7 +49,6 @@ PluginMediaHandler::notifyAVFrameSubject(const StreamData& data, jami::avSubject
     bool preferredStreamDirection = false;
     auto it = preferences_.find("streamslist");
     if (it != preferences_.end()) {
-        Plog::log(Plog::LogPriority::INFO, TAG, "SET PARAMETERS");
         preferredStreamDirection = it->second == "in";
     }
     oss << "preferredStreamDirection " << preferredStreamDirection << std::endl;
@@ -62,6 +60,7 @@ PluginMediaHandler::notifyAVFrameSubject(const StreamData& data, jami::avSubject
     } else if (data.type == StreamType::video && data.direction
                && data.direction == preferredStreamDirection) {
         subject->attach(mVS.get()); // the image I receive from the others on the call
+        oss << "got my received image attached" << std::endl;
         attached_ = '1';
     }
 
diff --git a/GreenScreen/pluginProcessor.cpp b/GreenScreen/pluginProcessor.cpp
index 3b38830a53bc6e6c3ef51f4690f897b692f63ef9..37db9db3fff850cc818632da2bb372c846de8ffe 100644
--- a/GreenScreen/pluginProcessor.cpp
+++ b/GreenScreen/pluginProcessor.cpp
@@ -33,20 +33,24 @@
 extern "C" {
 #include <libavutil/display.h>
 }
-
 const char sep = separator();
 
 const std::string TAG = "FORESEG";
 
-PluginParameters* mPluginParameters = getGlobalPluginParameters();
-
 namespace jami {
 
-PluginProcessor::PluginProcessor(const std::string& dataPath)
-    : pluginInference {TFModel {dataPath + sep + "models" + sep + mPluginParameters->model}}
+PluginProcessor::PluginProcessor(const std::string& dataPath, const std::string& model, const std::string& backgroundImage, bool acc)
+{
+    activateAcc_ = acc;
+    initModel(dataPath+sep+"model/"+model);
+    setBackgroundImage(backgroundImage);
+}
+
+PluginProcessor::~PluginProcessor()
 {
-    initModel();
-    setBackgroundImage(mPluginParameters->image);
+    Plog::log(Plog::LogPriority::INFO, TAG, "~pluginprocessor");
+    if (session_)
+        delete session_;
 }
 
 void
@@ -75,37 +79,51 @@ PluginProcessor::setBackgroundImage(const std::string& backgroundPath)
 }
 
 void
-PluginProcessor::initModel()
+PluginProcessor::initModel(const std::string& modelPath)
 {
     try {
-        pluginInference.init();
+        auto allocator_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
+        input_tensor_ = Ort::Value::CreateTensor<float>(allocator_info, input_image_.data(), input_image_.size(), input_shape_.data(), input_shape_.size());
+        output_tensor_ = Ort::Value::CreateTensor<float>(allocator_info, results_.data(), results_.size(), output_shape_.data(), output_shape_.size());
+        sessOpt_ =  Ort::SessionOptions();
+
+#ifdef NVIDIA
+        if (activateAcc_)
+            Ort::ThrowOnError(OrtSessionOptionsAppendExecutionProvider_CUDA(sessOpt_, 0));
+#endif
+#ifdef ANDROID
+        if (activateAcc_)
+            Ort::ThrowOnError(OrtSessionOptionsAppendExecutionProvider_Nnapi(sessOpt_, 0));
+#endif
+
+        sessOpt_.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
+#ifdef WIN32
+        std::wstring wsTmp(modelPath.begin(), modelPath.end());
+        session_ = new Ort::Session(env, wsTmp.c_str(), sessOpt_);
+#else
+        session_ = new Ort::Session(env, modelPath.c_str(), sessOpt_);
+#endif
+        isAllocated_ = true;
     } catch (std::exception& e) {
         Plog::log(Plog::LogPriority::ERR, TAG, e.what());
     }
     std::ostringstream oss;
-    oss << "Model is allocated " << pluginInference.isAllocated();
+    oss << "Model is allocated " << isAllocated_;
     Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
 }
 
-#ifdef TFLITE
-void
-PluginProcessor::feedInput(const cv::Mat& frame)
+bool
+PluginProcessor::isAllocated()
 {
-    auto pair = pluginInference.getInput();
-    uint8_t* inputPointer = pair.first;
-
-    cv::Mat temp(frame.rows, frame.cols, CV_8UC3, inputPointer);
-    frame.convertTo(temp, CV_8UC3);
-
-    inputPointer = nullptr;
+    return isAllocated_;
 }
-#else
+
 void
 PluginProcessor::feedInput(const cv::Mat& frame)
 {
-    pluginInference.ReadTensorFromMat(frame);
+    cv::Mat temp(frame.rows, frame.cols, CV_32FC3, input_image_.data());
+    frame.convertTo(temp, CV_32FC3);
 }
-#endif // TFLITE
 
 int
 PluginProcessor::getBackgroundRotation()
@@ -127,11 +145,8 @@ PluginProcessor::computePredictions()
 {
     if (count == 0) {
         // Run the graph
-        pluginInference.runGraph();
-        auto predictions = pluginInference.masksPredictions();
-
-        // Save the predictions
-        computedMask = predictions;
+        session_->Run(Ort::RunOptions{nullptr}, input_names, &input_tensor_, 1, output_names, &output_tensor_, 1);
+        computedMask = std::vector(results_.begin(), results_.end());
     }
 }
 
@@ -188,30 +203,12 @@ PluginProcessor::drawMaskOnFrame(
         return;
     }
 
-    int maskSize = static_cast<int>(std::sqrt(computedMask.size()));
-    cv::Mat maskImg(maskSize, maskSize, CV_32FC1, computedMask.data());
-    cv::Mat* applyMask = &frameReduced;
-    cv::Mat output;
-
     if (count == 0) {
+        int maskSize = static_cast<int>(std::sqrt(computedMask.size()));
+        cv::Mat maskImg(maskSize, maskSize, CV_32FC1, computedMask.data());
+        cv::Mat* applyMask = &frameReduced;
+
         rotateFrame(-angle, maskImg);
-#ifdef TFLITE
-        for (int i = 0; i < maskImg.cols; i++) {
-            for (int j = 0; j < maskImg.rows; j++) {
-                if (maskImg.at<float>(j, i) == 15)
-                    maskImg.at<float>(j, i) = 1.;
-                else
-                    maskImg.at<float>(j, i) = smoothFactors[0] * previousMasks[0].at<float>(j, i)
-                                              + smoothFactors[1] * previousMasks[1].at<float>(j, i);
-            }
-        }
-        cv::morphologyEx(maskImg,
-                         maskImg,
-                         cv::MORPH_CLOSE,
-                         cv::getStructuringElement(cv::MORPH_ELLIPSE, kSize),
-                         cv::Point(-1, -1),
-                         4);
-#else
         cv::resize(maskImg, maskImg, cv::Size(frameReduced.cols, frameReduced.rows));
 
         double m, M;
@@ -238,52 +235,39 @@ PluginProcessor::drawMaskOnFrame(
                 }
             }
         }
-#endif
         if (cv::countNonZero(maskImg) != 0) {
-#ifdef TFLITE
-            cv::Mat tfMask;
-            tfMask = maskImg.clone();
-            tfMask *= 255.;
-            tfMask.convertTo(tfMask, CV_8UC1);
-            cv::threshold(tfMask, tfMask, 127, 255, cv::THRESH_BINARY);
-            if (cv::countNonZero(tfMask) != 0) {
-#endif
-                cv::Mat dilate;
-                cv::dilate(maskImg,
-                           dilate,
-                           cv::getStructuringElement(cv::MORPH_ELLIPSE, kSize),
-                           cv::Point(-1, -1),
-                           2);
-                cv::erode(maskImg,
-                          maskImg,
-                          cv::getStructuringElement(cv::MORPH_ELLIPSE, kSize),
-                          cv::Point(-1, -1),
-                          2);
-                for (int i = 0; i < maskImg.cols; i++) {
-                    for (int j = 0; j < maskImg.rows; j++) {
-                        if (dilate.at<float>(j, i) != maskImg.at<float>(j, i))
-                            maskImg.at<float>(j, i) = grabcutClass;
-                    }
+            cv::Mat dilate;
+            cv::dilate(maskImg,
+                        dilate,
+                        cv::getStructuringElement(cv::MORPH_ELLIPSE, kSize),
+                        cv::Point(-1, -1),
+                        2);
+            cv::erode(maskImg,
+                        maskImg,
+                        cv::getStructuringElement(cv::MORPH_ELLIPSE, kSize),
+                        cv::Point(-1, -1),
+                        2);
+            for (int i = 0; i < maskImg.cols; i++) {
+                for (int j = 0; j < maskImg.rows; j++) {
+                    if (dilate.at<float>(j, i) != maskImg.at<float>(j, i))
+                        maskImg.at<float>(j, i) = grabcutClass;
                 }
-                maskImg.convertTo(maskImg, CV_8UC1);
-                applyMask->convertTo(*applyMask, CV_8UC1);
-                cv::Rect rect(1, 1, maskImg.rows, maskImg.cols);
-                cv::grabCut(*applyMask,
-                            maskImg,
-                            rect,
-                            bgdModel,
-                            fgdModel,
-                            grabCutIterations,
-                            grabCutMode);
-
-                grabCutMode = cv::GC_EVAL;
-                grabCutIterations = 1;
-
-                maskImg = maskImg & 1;
-#ifdef TFLITE
-                cv::bitwise_and(maskImg, tfMask, maskImg);
             }
-#endif
+            maskImg.convertTo(maskImg, CV_8UC1);
+            applyMask->convertTo(*applyMask, CV_8UC1);
+            cv::Rect rect(1, 1, maskImg.rows, maskImg.cols);
+            cv::grabCut(*applyMask,
+                        maskImg,
+                        rect,
+                        bgdModel,
+                        fgdModel,
+                        grabCutIterations,
+                        grabCutMode);
+
+            grabCutMode = cv::GC_EVAL;
+            grabCutIterations = 1;
+
+            maskImg = maskImg & 1;
             maskImg.convertTo(maskImg, CV_32FC1);
             maskImg *= 255.;
             GaussianBlur(maskImg, maskImg, cv::Size(7, 7), 0); // float mask from 0 to 255.
@@ -309,13 +293,11 @@ PluginProcessor::drawMaskOnFrame(
     cv::merge(channels, roiMaskImg);
     cv::merge(channelsComplementary, roiMaskImgComplementary);
 
-    int origType = frameReduced.type();
-    int roiMaskType = roiMaskImg.type();
-
-    frameReduced.convertTo(output, roiMaskType);
+    cv::Mat output;
+    frameReduced.convertTo(output, roiMaskImg.type());
     output = output.mul(roiMaskImg);
     output += backgroundImage.mul(roiMaskImgComplementary);
-    output.convertTo(output, origType);
+    output.convertTo(output, frameReduced.type());
 
     cv::resize(output, output, cv::Size(frame.cols, frame.rows));
 
diff --git a/GreenScreen/pluginProcessor.h b/GreenScreen/pluginProcessor.h
index e81dd955982d180efd8bf6dd8d0545d7cc0aaa75..9b86b7bb9d4aa6c97276200d06613357e921e3b6 100644
--- a/GreenScreen/pluginProcessor.h
+++ b/GreenScreen/pluginProcessor.h
@@ -28,8 +28,6 @@
 #include <mutex>
 #include <thread>
 #include <vector>
-// Filters
-#include "pluginInference.h"
 // AvFrame
 extern "C" {
 #include <libavutil/frame.h>
@@ -38,14 +36,24 @@ extern "C" {
 #include <plugin/jamiplugin.h>
 #include <plugin/mediahandler.h>
 
+// Opencv processing
+#include <opencv2/core.hpp>
+#include <onnxruntime_cxx_api.h>
+#ifdef NVIDIA
+#include <cuda_provider_factory.h>
+#endif
+#ifdef ANDROID
+#include <nnapi_provider_factory.h>
+#endif
 namespace jami {
 
 class PluginProcessor
 {
 public:
-    PluginProcessor(const std::string& dataPath);
+    PluginProcessor(const std::string& dataPath, const std::string& model, const std::string& backgroundImage, bool acc);
+    ~PluginProcessor();
 
-    void initModel();
+    void initModel(const std::string& modelPath);
     /**
      * @brief feedInput
      * Takes a frame and feeds it to the model storage for predictions
@@ -72,6 +80,7 @@ public:
     void rotateFrame(int angle, cv::Mat& mat);
     bool hasBackground() const;
     void resetInitValues(const cv::Size& modelInputSize);
+    bool isAllocated();
 
     // Output predictions
     std::vector<float> computedMask;
@@ -81,28 +90,38 @@ public:
 
     cv::Size kSize;
 
-    PluginInference pluginInference;
     std::string backgroundPath;
-    int count = 0;
 
 private:
-    // Frame
+    int count{0};
     cv::Mat frame;
-    int backgroundRotation = 0;
-    bool hasBackground_ = false;
+    int backgroundRotation{0};
+    bool hasBackground_{false};
     cv::Mat bgdModel, fgdModel;
-    int grabCutMode = 1; // cv::GC_INIT_WITH_MASK = 1;
-    int grabCutIterations = 5;
-#ifdef TFLITE
-    int grabcutClass = 2;
-    int frameCount = 3;
-    float smoothFactors[2] = {0.3f, 0.05f};
-    float kernelSize = 0.1f;
-#else
-    int grabcutClass = 3;
-    int frameCount = 5;
+    int grabCutMode{1}; // cv::GC_INIT_WITH_MASK = 1;
+    int grabCutIterations{5};
+    int grabcutClass{3};
+    int frameCount{3};
     float smoothFactors[3] = {0.6f, 0.3f, 0.1f};
-    float kernelSize = 0.05f;
-#endif
+    float kernelSize{0.05f};
+
+    bool isAllocated_{false};
+    Ort::Env env{ORT_LOGGING_LEVEL_WARNING, "test"};
+    Ort::Value input_tensor_{nullptr};
+    std::array<int64_t, 3> input_shape_{257, 257, 3};
+
+    Ort::Value output_tensor_{nullptr};
+    std::array<int64_t, 4> output_shape_{1, 17, 17, 1};
+
+
+    std::array<float, 257 * 257 * 3> input_image_{};
+    
+    std::array<float, 17 * 17> results_{};
+    Ort::Session* session_{};
+    const char* input_names[8] = {"image:0"};
+    const char* output_names[11] = {"Identity:0"};
+    Ort::SessionOptions sessOpt_;
+
+    bool activateAcc_{false};
 };
 } // namespace jami
diff --git a/GreenScreen/preferences-onnx.json b/GreenScreen/preferences-onnx.json
new file mode 100644
index 0000000000000000000000000000000000000000..8d31f95311b15ceb75060f3cc8d12bd771aa1bd8
--- /dev/null
+++ b/GreenScreen/preferences-onnx.json
@@ -0,0 +1,45 @@
+[
+    {
+        "category" : "StreamsListPreference",
+        "type": "List",
+        "key": "streamslist",
+        "title": "Streams to transform",
+        "summary": "Select video to transform",
+        "defaultValue": "out",
+        "entries": ["sent", "received"],
+        "entryValues": ["out", "in"],
+        "scope": "plugin"
+    },
+    {
+        "category" : "acceleration",
+        "type": "List",
+        "key": "acceleration",
+        "title": "Use HW Acceleration",
+        "summary": "Define Yes or No",
+        "defaultValue": "1",
+        "entries": ["Yes", "No"],
+        "entryValues": ["1", "0"],
+        "scope": "plugin"
+    },
+    {
+        "category" : "models",
+        "type": "List",
+        "key": "modellist",
+        "title": "Model to load",
+        "summary": "Select the model to use",
+        "defaultValue": "mModel.onnx",
+        "entries": ["mModel"],
+        "entryValues": ["mModel.onnx"],
+        "scope": "plugin"
+    },
+    {
+        "category" : "backgrounds",
+        "type": "Path",
+        "mimeType": "image/png,image/jpeg,image/jpg",
+        "key": "background",
+        "title": "Background image",
+        "summary": "Select the image background to use",
+        "defaultValue": "data/backgrounds/background2.jpeg",
+        "scope": "plugin,Foreground Segmentation"
+    }
+]
diff --git a/GreenScreen/preferences-ort.json b/GreenScreen/preferences-ort.json
new file mode 100644
index 0000000000000000000000000000000000000000..f40f75b8fff1cf15b618f01aa72725f5a97e7939
--- /dev/null
+++ b/GreenScreen/preferences-ort.json
@@ -0,0 +1,45 @@
+[
+    {
+        "category" : "StreamsListPreference",
+        "type": "List",
+        "key": "streamslist",
+        "title": "Streams to transform",
+        "summary": "Select video to transform",
+        "defaultValue": "out",
+        "entries": ["sent", "received"],
+        "entryValues": ["out", "in"],
+        "scope": "plugin"
+    },
+    {
+        "category" : "acceleration",
+        "type": "List",
+        "key": "acceleration",
+        "title": "Use HW Acceleration",
+        "summary": "Define Yes or No",
+        "defaultValue": "1",
+        "entries": ["Yes", "No"],
+        "entryValues": ["1", "0"],
+        "scope": "plugin"
+    },
+    {
+        "category" : "models",
+        "type": "List",
+        "key": "modellist",
+        "title": "Model to load",
+        "summary": "Select the model to use",
+        "defaultValue": "mModel.ort",
+        "entries": ["mModel"],
+        "entryValues": ["mModel.ort"],
+        "scope": "plugin"
+    },
+    {
+        "category" : "backgrounds",
+        "type": "Path",
+        "mimeType": "image/png,image/jpeg,image/jpg",
+        "key": "background",
+        "title": "Background image",
+        "summary": "Select the image background to use",
+        "defaultValue": "data/backgrounds/background2.jpeg",
+        "scope": "plugin,Foreground Segmentation"
+    }
+]
diff --git a/GreenScreen/videoSubscriber.cpp b/GreenScreen/videoSubscriber.cpp
index 8602f6fa8ea79d9e63ec0d9830a3ab0ec8442acb..d1b40cd338d018d95ef8998691707f20092a38f8 100644
--- a/GreenScreen/videoSubscriber.cpp
+++ b/GreenScreen/videoSubscriber.cpp
@@ -39,9 +39,9 @@ const char sep = separator();
 
 namespace jami {
 
-VideoSubscriber::VideoSubscriber(const std::string& dataPath)
+VideoSubscriber::VideoSubscriber(const std::string& dataPath, const std::string& model, const std::string& backgroundImage, bool acc)
     : path_ {dataPath}
-    , pluginProcessor {dataPath}
+    , pluginProcessor {dataPath, model, backgroundImage, acc}
 {
     /**
      * Waits for new frames and then process them
@@ -78,100 +78,95 @@ VideoSubscriber::~VideoSubscriber()
 void
 VideoSubscriber::update(jami::Observable<AVFrame*>*, AVFrame* const& pluginFrame)
 {
-    if (pluginProcessor.pluginInference.isAllocated() && pluginProcessor.hasBackground()) {
-        if (!pluginFrame)
-            return;
-
-        //======================================================================================
-        // GET FRAME ROTATION
-        AVFrameSideData* side_data = av_frame_get_side_data(pluginFrame,
-                                                            AV_FRAME_DATA_DISPLAYMATRIX);
-
-        int angle {0};
-        if (side_data) {
-            auto matrix_rotation = reinterpret_cast<int32_t*>(side_data->data);
-            angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
-        }
-        delete side_data;
-
-        //======================================================================================
-        // GET RAW FRAME
-        // Use a non-const Frame
-        // Convert input frame to RGB
-        int inputHeight = pluginFrame->height;
-        int inputWidth = pluginFrame->width;
-
-        fcopy.originalSize = cv::Size {inputWidth, inputHeight};
-
-        AVFrame* temp = transferToMainMemory(pluginFrame, AV_PIX_FMT_NV12);
-        AVFrame* bgrFrame = scaler.convertFormat(temp, AV_PIX_FMT_RGB24);
-        av_frame_unref(temp);
-        av_frame_free(&temp);
-        if (!bgrFrame)
-            return;
-        cv::Mat frame = cv::Mat {bgrFrame->height,
-                                 bgrFrame->width,
-                                 CV_8UC3,
-                                 bgrFrame->data[0],
-                                 static_cast<size_t>(bgrFrame->linesize[0])};
-        // First clone the frame as the original one is unusable because of
-        // linespace
-
-        cv::Mat clone = frame.clone();
-        //======================================================================================
-
-        pluginProcessor.setBackgroundRotation(angle);
-
-        if (firstRun) {
-            pluginProcessor.pluginInference.setExpectedImageDimensions();
-            fcopy.resizedSize = cv::Size {pluginProcessor.pluginInference.getImageWidth(),
-                                          pluginProcessor.pluginInference.getImageHeight()};
-            pluginProcessor.resetInitValues(fcopy.resizedSize);
-
-            cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
-            pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
-
-            cv::resize(pluginProcessor.backgroundImage,
-                       pluginProcessor.backgroundImage,
-                       fcopy.resizedSize);
-
-            firstRun = false;
-        }
+    if (!observable_ || !pluginProcessor.isAllocated() || !pluginProcessor.hasBackground() || !pluginFrame)
+        return;
+
+    //======================================================================================
+    // GET FRAME ROTATION
+    AVFrameSideData* side_data = av_frame_get_side_data(pluginFrame,
+                                                        AV_FRAME_DATA_DISPLAYMATRIX);
+
+    int angle {0};
+    if (side_data) {
+        auto matrix_rotation = reinterpret_cast<int32_t*>(side_data->data);
+        angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
+    }
 
-        if (!newFrame) {
-            std::lock_guard<std::mutex> l(inputLock);
-            cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
-            pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
-            newFrame = true;
-            inputCv.notify_all();
-        }
+    //======================================================================================
+    // GET RAW FRAME
+    // Use a non-const Frame
+    // Convert input frame to RGB
+    int inputHeight = pluginFrame->height;
+    int inputWidth = pluginFrame->width;
+
+    fcopy.originalSize = cv::Size {inputWidth, inputHeight};
+
+    AVFrame* temp = transferToMainMemory(pluginFrame, AV_PIX_FMT_NV12);
+    AVFrame* bgrFrame = scaler.convertFormat(temp, AV_PIX_FMT_RGB24);
+    av_frame_unref(temp);
+    av_frame_free(&temp);
+    if (!bgrFrame)
+        return;
+    cv::Mat frame = cv::Mat {bgrFrame->height,
+                                bgrFrame->width,
+                                CV_8UC3,
+                                bgrFrame->data[0],
+                                static_cast<size_t>(bgrFrame->linesize[0])};
+    // First clone the frame as the original one is unusable because of
+    // linespace
+
+    cv::Mat clone = frame.clone();
+    //======================================================================================
+
+    pluginProcessor.setBackgroundRotation(angle);
+
+    if (firstRun) {
+        fcopy.resizedSize = cv::Size {257, 257};
+        pluginProcessor.resetInitValues(fcopy.resizedSize);
+
+        cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
+        pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
+
+        cv::resize(pluginProcessor.backgroundImage,
+                    pluginProcessor.backgroundImage,
+                    fcopy.resizedSize);
+
+        firstRun = false;
+    }
 
-        fcopy.predictionsFrameRGB = frame;
-        fcopy.predictionsResizedFrameRGB = fcopy.resizedFrameRGB.clone();
-        pluginProcessor.rotateFrame(-angle, fcopy.predictionsResizedFrameRGB);
-        pluginProcessor.drawMaskOnFrame(fcopy.predictionsFrameRGB,
-                                        fcopy.predictionsResizedFrameRGB,
-                                        pluginProcessor.computedMask,
-                                        bgrFrame->linesize[0],
-                                        angle);
-
-        //======================================================================================
-        // REPLACE AVFRAME DATA WITH FRAME DATA
-        if (bgrFrame->data[0]) {
-            uint8_t* frameData = bgrFrame->data[0];
-            if (angle == 90 || angle == -90) {
-                std::memmove(frameData,
-                             fcopy.predictionsFrameRGB.data,
-                             static_cast<size_t>(pluginFrame->width * pluginFrame->height * 3)
-                                 * sizeof(uint8_t));
-            }
+    if (!newFrame) {
+        std::lock_guard<std::mutex> l(inputLock);
+        cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
+        pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
+        newFrame = true;
+        inputCv.notify_all();
+    }
 
-            av_frame_copy_props(bgrFrame, pluginFrame);
-            moveFrom(pluginFrame, bgrFrame);
+    fcopy.predictionsFrameRGB = frame;
+    fcopy.predictionsResizedFrameRGB = fcopy.resizedFrameRGB.clone();
+    pluginProcessor.rotateFrame(-angle, fcopy.predictionsResizedFrameRGB);
+    pluginProcessor.drawMaskOnFrame(fcopy.predictionsFrameRGB,
+                                    fcopy.predictionsResizedFrameRGB,
+                                    pluginProcessor.computedMask,
+                                    bgrFrame->linesize[0],
+                                    angle);
+
+    //======================================================================================
+    // REPLACE AVFRAME DATA WITH FRAME DATA
+    if (bgrFrame->data[0]) {
+        uint8_t* frameData = bgrFrame->data[0];
+        if (angle == 90 || angle == -90) {
+            std::memmove(frameData,
+                            fcopy.predictionsFrameRGB.data,
+                            static_cast<size_t>(pluginFrame->width * pluginFrame->height * 3)
+                                * sizeof(uint8_t));
         }
-        av_frame_unref(bgrFrame);
-        av_frame_free(&bgrFrame);
+
+        av_frame_copy_props(bgrFrame, pluginFrame);
+        moveFrom(pluginFrame, bgrFrame);
     }
+    av_frame_unref(bgrFrame);
+    av_frame_free(&bgrFrame);
 }
 
 void
diff --git a/GreenScreen/videoSubscriber.h b/GreenScreen/videoSubscriber.h
index 8598feb48d88bb69befa0d5f88a48dcc15232ed8..97c6c86026fe257896620fd2a63d8f0a025df518 100644
--- a/GreenScreen/videoSubscriber.h
+++ b/GreenScreen/videoSubscriber.h
@@ -54,7 +54,7 @@ public:
 class VideoSubscriber : public jami::Observer<AVFrame*>
 {
 public:
-    VideoSubscriber(const std::string& dataPath);
+    VideoSubscriber(const std::string& dataPath, const std::string& model, const std::string& backgroundImage, bool acc);
     ~VideoSubscriber();
 
     virtual void update(jami::Observable<AVFrame*>*, AVFrame* const&) override;
diff --git a/HelloWorld/CenterCircleVideoSubscriber.cpp b/HelloWorld/CenterCircleVideoSubscriber.cpp
index 25c95a39675a24ffadcce2dbd865af40d54ef837..1196abffe628f469db735aee2f491466650396e4 100644
--- a/HelloWorld/CenterCircleVideoSubscriber.cpp
+++ b/HelloWorld/CenterCircleVideoSubscriber.cpp
@@ -60,7 +60,6 @@ CenterCircleVideoSubscriber::update(jami::Observable<AVFrame*>*, AVFrame* const&
         auto matrix_rotation = reinterpret_cast<int32_t*>(side_data->data);
         angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
     }
-    delete side_data;
 
     //======================================================================================
     // GET RAW FRAME
diff --git a/HelloWorld/CoinCircleVideoSubscriber.cpp b/HelloWorld/CoinCircleVideoSubscriber.cpp
index 5e963cb8e782aaa22f149e6193ed4002ce78f39c..3698e6c517680e2a85f9bcd06998e22fcc8d2de0 100644
--- a/HelloWorld/CoinCircleVideoSubscriber.cpp
+++ b/HelloWorld/CoinCircleVideoSubscriber.cpp
@@ -60,7 +60,6 @@ CoinCircleVideoSubscriber::update(jami::Observable<AVFrame*>*, AVFrame* const& p
         auto matrix_rotation = reinterpret_cast<int32_t*>(side_data->data);
         angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
     }
-    delete side_data;
 
     //======================================================================================
     // GET RAW FRAME
diff --git a/SDK/Templates/videoUpdate.txt b/SDK/Templates/videoUpdate.txt
index b72d4432f5325f5219c1561b84363ce6ef53f12b..aff2307e7d16bf5ef404a4424cfbcab0c612de8b 100644
--- a/SDK/Templates/videoUpdate.txt
+++ b/SDK/Templates/videoUpdate.txt
@@ -10,7 +10,6 @@
         auto matrix_rotation = reinterpret_cast<int32_t*>(side_data->data);
         angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
     }
-    delete side_data;
 
     //======================================================================================
     // GET RAW FRAME
diff --git a/TensorflowSegmentation/CMakeLists.txt b/TensorflowSegmentation/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..da541a77cda00a003f4100c2f9e34be491cf87f2
--- /dev/null
+++ b/TensorflowSegmentation/CMakeLists.txt
@@ -0,0 +1,178 @@
+cmake_minimum_required(VERSION 3.10)
+
+# set the project name
+set (ProjectName TensorflowSegmentation)
+set (Version 1.0.2)
+
+project(${ProjectName} VERSION ${Version})
+
+set (DAEMON ${PROJECT_SOURCE_DIR}/../../daemon)
+set (PLUGIN_NAME TensorflowSegmentation)
+set (JPL_FILE_NAME ${PLUGIN_NAME}.jpl)
+set (DAEMON_SRC ${DAEMON}/src)
+set (CONTRIB_PATH ${DAEMON}/contrib)
+set (PLUGINS_LIB ${PROJECT_SOURCE_DIR}/../lib)
+set (JPL_DIRECTORY ${PROJECT_BINARY_DIR}/jpl)
+set (LIBS_DIR ${PROJECT_SOURCE_DIR}/../contrib/Libs)
+
+if(WIN32)
+    message(OS:\  WINDOWS\ ${CMAKE_SYSTEM_PROCESSOR})
+    if (NOT CMAKE_CL_64)
+        message( FATAL_ERROR "\nUse CMake only for x64 Windows" )
+    endif()
+    set (CONTRIB_PLATFORM_CURT x64)
+    set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-windows)
+    set (LIBRARY_FILE_NAME ${PLUGIN_NAME}.dll)
+    set (LIBS_BIN_DIR $ENV{PLUGIN_ENV})
+    set (FFMPEG ${CONTRIB_PATH}/build/ffmpeg/Build/win32/x64)
+endif()
+
+if(UNIX)
+    message( FATAL_ERROR "\nUse CMake only for Windows! For linux or Android (linux host), use our bash scripts.\nPlease refer to the documentation for more info." )
+    message(OS:\  LINUX\ ${CMAKE_SYSTEM_PROCESSOR})
+    set (CONTRIB_PLATFORM_CURT ${CMAKE_SYSTEM_PROCESSOR})
+    set (CONTRIB_PLATFORM ${CONTRIB_PLATFORM_CURT}-linux-gnu)
+    set (LIBRARY_FILE_NAME lib${PLUGIN_NAME}.so)
+    set (LIBS_BIN_DIR /home/${USER}/Libs)
+endif()
+
+
+message(Building:\   ${ProjectName}\   ${Version})
+message(Build\ path:\ ${PROJECT_BINARY_DIR})
+message(JPL\ assembling\ path:\ ${JPL_DIRECTORY})
+message(JPL\ path:\ ${JPL_DIRECTORY}/../../../build/${ProjectName}/${JPL_FILE_NAME})
+
+set(TENSORFLOW _tensorflow_cc)
+set(model mModel-resnet50float.pb)
+set(modelType .pb)
+set(preferencesFile preferences-tfcc.json)
+set(TFLIB libtensorflow_cc)
+set(GPU -gpu61)
+
+if (CPU)
+    set(GPU )
+    add_definitions(-DCPU)
+    message(CPU\ BUILDING!)
+endif()
+
+if (TFLITE)
+    add_definitions(-DTFLITE)
+    set(TENSORFLOW _tensorflowLite)
+    set(model mobilenet_v2_deeplab_v3_256_myquant.tflite)
+    set(modelType .tflite)
+    set(preferencesFile preferences-tflite.json)
+    set(TFLIB libtensorflowlite)
+    message(TFLITE\ BUILDING!)
+endif()
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED True)
+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
+set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
+
+set(plugin_SRC main.cpp
+               pluginInference.cpp
+               pluginMediaHandler.cpp
+               pluginParameters.cpp
+               pluginProcessor.cpp
+               TFInference.cpp
+               videoSubscriber.cpp
+               ./../lib/accel.cpp
+               ./../lib/frameUtils.cpp
+               )
+
+set(plugin_HDR pluginInference.h
+               pluginMediaHandler.h
+               pluginParameters.h
+               pluginProcessor.h
+               TFInference.h
+               TFModels.h
+               videoSubscriber.h
+               ./../lib/accel.h
+               ./../lib/frameScaler.h
+               ./../lib/frameUtils.h
+               ./../lib/pluglog.h
+               )
+
+
+
+# add the library
+add_library(${ProjectName} SHARED ${plugin_SRC}
+                                  ${plugin_HDR}
+                                  )
+
+if (WIN32)
+target_include_directories(${ProjectName} PUBLIC ${PROJECT_BINARY_DIR}
+                                                 ${PROJECT_SOURCE_DIR}
+                                                 ${PLUGINS_LIB}
+                                                 ${DAEMON_SRC}
+                                                 ${CONTRIB_PATH}
+                                                 ${FFMPEG}/include
+                                                 ${CONTRIB_PATH}/build/opencv/build/install/include
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include/third_party/eigen3
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include/flatbuffers
+                                                 )
+target_link_directories(${ProjectName} PUBLIC ${CONTRIB_PATH}
+                                        ${LIBS_BIN_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
+                                        ${CONTRIB_PATH}/build/opencv/build/lib/Release
+                                        ${CONTRIB_PATH}/build/opencv/build/3rdparty/lib/Release
+                                        ${FFMPEG}/bin
+                                        )
+
+target_link_libraries(${ProjectName} PUBLIC swscale avutil libpng opencv_imgcodecs411 opencv_imgproc411 opencv_core411 ${TFLIB} zlib)
+endif()
+
+if (UNIX)
+target_include_directories(${ProjectName} PUBLIC ${PROJECT_BINARY_DIR}
+                                                 ${PROJECT_SOURCE_DIR}
+                                                 ${PLUGINS_LIB}
+                                                 ${DAEMON_SRC}
+                                                 ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include
+                                                 ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include/third_party/eigen3
+                                                 ${LIBS_DIR}/${TENSORFLOW}/include/flatbuffers
+                                                 )
+link_directories(${ProjectName} PUBLIC    ${CONTRIB_PATH}
+                                        ${LIBS_BIN_DIR}/${TENSORFLOW}/lib/${CONTRIB_PLATFORM}
+                                        ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib
+                                        ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty
+                                        )
+target_link_libraries(${ProjectName} PUBLIC swscale avutil libpng opencv_imgcodecs opencv_imgproc opencv_core ${TFLIB})
+endif()
+
+add_custom_command(
+    TARGET ${ProjectName}
+    PRE_BUILD
+    COMMAND python ${PROJECT_SOURCE_DIR}/../SDK/jplManipulation.py --preassemble --plugin=TensorflowSegmentation
+    COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBS_BIN_DIR}/${TENSORFLOW}/lib/ ${JPL_DIRECTORY}/lib
+    COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/data/models
+    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/modelsSRC/${model} ${JPL_DIRECTORY}/data/models
+    COMMAND ${CMAKE_COMMAND} -E rename ${JPL_DIRECTORY}/data/models/${model} ${JPL_DIRECTORY}/data/models/mModel${modelType}
+    COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/${preferencesFile} ${JPL_DIRECTORY}/data
+    COMMAND ${CMAKE_COMMAND} -E rename ${JPL_DIRECTORY}/data/${preferencesFile} ${JPL_DIRECTORY}/data/preferences.json
+    COMMENT "Assembling Plugin files"
+)
+
+if (WIN32)
+    add_custom_command(
+        TARGET ${ProjectName}
+        POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/../../../build/x64-windows/${TENSORFLOW}
+        COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${ProjectName}.lib ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
+        COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/Release/${LIBRARY_FILE_NAME} ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
+        COMMAND python ${PROJECT_SOURCE_DIR}/../SDK/jplManipulation.py --assemble --plugin=TensorflowSegmentation --extraPath=${TENSORFLOW}
+        COMMENT "Generating JPL archive"
+    )
+else()
+    add_custom_command(
+        TARGET ${ProjectName}
+        POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E make_directory ${JPL_DIRECTORY}/../../../build/x86_64-linux-gnu/${TENSORFLOW}
+        COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/${LIBRARY_FILE_NAME} ${JPL_DIRECTORY}/lib/${CONTRIB_PLATFORM}
+        COMMAND python3 ${PROJECT_SOURCE_DIR}/../SDK/jplManipulation.py --assemble --plugin=TensorflowSegmentation --extraPath=${TENSORFLOW}
+        COMMENT "Generating JPL archive"
+    )
+
+endif()
\ No newline at end of file
diff --git a/GreenScreen/TFInference.cpp b/TensorflowSegmentation/TFInference.cpp
similarity index 100%
rename from GreenScreen/TFInference.cpp
rename to TensorflowSegmentation/TFInference.cpp
diff --git a/GreenScreen/TFInference.h b/TensorflowSegmentation/TFInference.h
similarity index 100%
rename from GreenScreen/TFInference.h
rename to TensorflowSegmentation/TFInference.h
diff --git a/GreenScreen/TFModels.h b/TensorflowSegmentation/TFModels.h
similarity index 100%
rename from GreenScreen/TFModels.h
rename to TensorflowSegmentation/TFModels.h
diff --git a/TensorflowSegmentation/build.sh b/TensorflowSegmentation/build.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d40a84345c04f7f0270a59f11a112310b84bfb94
--- /dev/null
+++ b/TensorflowSegmentation/build.sh
@@ -0,0 +1,344 @@
+#! /bin/bash
+# Build the plugin for the project
+set -e
+export OSTYPE
+ARCH=$(arch)
+EXTRAPATH=''
+# Flags:
+
+  # -p: number of processors to use
+  # -c: Runtime plugin cpu/gpu setting.
+  # -t: target platform.
+
+
+if [ -z "${DAEMON}" ]; then
+    DAEMON="./../../daemon"
+    echo "DAEMON not provided, building with ${DAEMON}"
+fi
+
+PLUGIN_NAME="TensorflowSegmentation"
+JPL_FILE_NAME="${PLUGIN_NAME}.jpl"
+SO_FILE_NAME="lib${PLUGIN_NAME}.so"
+DAEMON_SRC="${DAEMON}/src"
+CONTRIB_PATH="${DAEMON}/contrib"
+PLUGINS_LIB="../lib"
+LIBS_DIR="./../contrib/Libs"
+
+if [ -z "${TF_LIBS_DIR}" ]; then
+    TF_LIBS_DIR="./../../../Libs"
+fi
+echo "Building with ${TF_LIBS_DIR}"
+
+PLATFORM="linux-gnu"
+PROCESSOR='GPU'
+
+while getopts t:c:p OPT; do
+  case "$OPT" in
+    t)
+      PLATFORM="${OPTARG}"
+      if [ -z "${TF}" ]; then
+          if [ "$PLATFORM" = 'linux-gnu' ]; then
+              TF="_tensorflow_cc"
+          elif [ "$PLATFORM" = 'android' ]; then
+              TF="_tensorflowLite"
+          fi
+      fi
+    ;;
+    c)
+      PROCESSOR="${OPTARG}"
+    ;;
+    p)
+    ;;
+    \?)
+      exit 1
+    ;;
+  esac
+done
+
+
+if [ -z "${TF}" ]; then
+    TF="_tensorflow_cc"
+fi
+echo "Building with ${TF}"
+
+if [[ "${TF}" = "_tensorflow_cc" ]] && [[ "${PLATFORM}" = "linux-gnu" ]]
+then
+    if [ -z "$CUDALIBS" ]; then
+        echo "CUDALIBS must point to CUDA 10.1!"
+        exit
+    fi
+    if [ -z "$CUDNN" ]; then
+        echo "CUDNN must point to libcudnn.so 7!"
+        exit
+    fi
+
+    echo "Building for ${PROCESSOR}"
+
+    python3 ./../SDK/jplManipulation.py --preassemble --plugin=${PLUGIN_NAME}
+
+    CONTRIB_PLATFORM_CURT=${ARCH}
+    CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-${PLATFORM}
+    EXTRAPATH=${TF}
+
+    # Compile
+    clang++ -std=c++17 -shared -fPIC \
+    -Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
+    -Wall -Wextra \
+    -Wno-unused-variable \
+    -Wno-unused-function \
+    -Wno-unused-parameter \
+    -D"${PROCESSOR}" \
+    -I"." \
+    -I"${DAEMON_SRC}" \
+    -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
+    -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
+    -I"${LIBS_DIR}/${TF}/include" \
+    -I"${LIBS_DIR}/${TF}/include/third_party/eigen3" \
+    -I"${PLUGINS_LIB}" \
+    ./../lib/accel.cpp \
+    ./../lib/frameUtils.cpp \
+    main.cpp \
+    videoSubscriber.cpp \
+    pluginProcessor.cpp \
+    pluginMediaHandler.cpp \
+    TFInference.cpp \
+    pluginInference.cpp \
+    pluginParameters.cpp \
+    -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
+    -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/" \
+    -L"${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}-gpu61/" \
+    -l:libswscale.a \
+    -l:libavutil.a \
+    -lopencv_imgcodecs \
+    -lopencv_imgproc \
+    -lopencv_core \
+    -llibpng \
+    -lva \
+    -ltensorflow_cc \
+    -o "build-local/jpl/lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}"
+
+    cp "${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}-gpu61/libtensorflow_cc.so" "build-local/jpl/lib/$CONTRIB_PLATFORM/libtensorflow_cc.so.2"
+    cp "${CUDALIBS}/libcudart.so" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcudart.so.10.0"
+    cp "${CUDNN}/libcublas.so.10" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcublas.so.10.0"
+    cp "${CUDALIBS}/libcufft.so.10" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcufft.so.10.0"
+    cp "${CUDALIBS}/libcurand.so.10" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcurand.so.10.0"
+    cp "${CUDALIBS}/libcusolver.so.10" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcusolver.so.10.0"
+    cp "${CUDALIBS}/libcusparse.so.10" "build-local/jpl/lib/$CONTRIB_PLATFORM/libcusparse.so.10.0"
+    cp "${CUDNN}/libcudnn.so.7" "build-local/jpl/lib/$CONTRIB_PLATFORM"
+
+    pwd
+    mkdir -p ./build-local/jpl/data/models
+    cp ./modelsSRC/mModel-resnet50float.pb ./build-local/jpl/data/models/mModel.pb
+    cp ./preferences-tfcc.json ./build-local/jpl/data/preferences.json
+elif [ "${TF}" = "_tensorflowLite" ]
+then
+    if [ "${PLATFORM}" = "linux-gnu" ]
+    then
+        python3 ./../SDK/jplManipulation.py --preassemble --plugin=${PLUGIN_NAME}
+
+        CONTRIB_PLATFORM_CURT=${ARCH}
+        CONTRIB_PLATFORM=${CONTRIB_PLATFORM_CURT}-${PLATFORM}
+        EXTRAPATH="${TF}"
+
+        # Compile
+        clang++ -std=c++17 -shared -fPIC \
+        -Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
+        -Wall -Wextra \
+        -Wno-unused-variable \
+        -Wno-unused-function \
+        -Wno-unused-parameter \
+        -DTFLITE \
+        -I"." \
+        -I"${DAEMON_SRC}" \
+        -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
+        -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
+        -I"${LIBS_DIR}/${TF}/include/flatbuffers" \
+        -I"${LIBS_DIR}/${TF}/include" \
+        -I"${PLUGINS_LIB}" \
+        ./../lib/accel.cpp \
+        ./../lib/frameUtils.cpp \
+        videoSubscriber.cpp \
+        pluginProcessor.cpp \
+        pluginMediaHandler.cpp \
+        TFInference.cpp \
+        pluginInference.cpp \
+        pluginParameters.cpp \
+        main.cpp \
+        -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
+        -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty/" \
+        -L"${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}/" \
+        -l:libswscale.a \
+        -l:libavutil.a \
+        -lopencv_imgcodecs \
+        -lopencv_imgproc \
+        -lopencv_core \
+        -ltensorflowlite \
+        -llibpng \
+        -lva \
+        -o "build-local/jpl/lib/${CONTRIB_PLATFORM}/${SO_FILE_NAME}"
+
+        cp "${TF_LIBS_DIR}/${TF}/lib/${CONTRIB_PLATFORM}/libtensorflowlite.so" "build-local/jpl/lib/$CONTRIB_PLATFORM"
+
+    elif [ "${PLATFORM}" = "android" ]
+    then
+        python3 ./../SDK/jplManipulation.py --preassemble --plugin=${PLUGIN_NAME} --distribution=${PLATFORM}
+
+        if [ -z "$ANDROID_NDK" ]; then
+            ANDROID_NDK="/home/${USER}/Android/Sdk/ndk/21.1.6352462"
+            echo "ANDROID_NDK not provided, building with ${ANDROID_NDK}"
+        fi
+
+        #=========================================================
+        #    Check if the ANDROID_ABI was provided
+        #    if not, set default
+        #=========================================================
+        if [ -z "$ANDROID_ABI" ]; then
+            ANDROID_ABI="armeabi-v7a arm64-v8a"
+            echo "ANDROID_ABI not provided, building for ${ANDROID_ABI}"
+        fi
+
+        buildlib() {
+            echo "$CURRENT_ABI"
+
+            #=========================================================
+            #    ANDROID TOOLS
+            #=========================================================
+            export HOST_TAG=linux-x86_64
+            export TOOLCHAIN=$ANDROID_NDK/toolchains/llvm/prebuilt/$HOST_TAG
+
+            if [ "$CURRENT_ABI" = armeabi-v7a ]
+            then
+            export AR=$TOOLCHAIN/bin/arm-linux-androideabi-ar
+            export AS=$TOOLCHAIN/bin/arm-linux-androideabi-as
+            export CC=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang
+            export CXX=$TOOLCHAIN/bin/armv7a-linux-androideabi21-clang++
+            export LD=$TOOLCHAIN/bin/arm-linux-androideabi-ld
+            export RANLIB=$TOOLCHAIN/bin/arm-linux-androideabi-ranlib
+            export STRIP=$TOOLCHAIN/bin/arm-linux-androideabi-strip
+            export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-arm/sysroot
+
+            elif [ "$CURRENT_ABI" = arm64-v8a ]
+            then
+            export AR=$TOOLCHAIN/bin/aarch64-linux-android-ar
+            export AS=$TOOLCHAIN/bin/aarch64-linux-android-as
+            export CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang
+            export CXX=$TOOLCHAIN/bin/aarch64-linux-android21-clang++
+            export LD=$TOOLCHAIN/bin/aarch64-linux-android-ld
+            export RANLIB=$TOOLCHAIN/bin/aarch64-linux-android-ranlib
+            export STRIP=$TOOLCHAIN/bin/aarch64-linux-android-strip
+            export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-arm64/sysroot
+
+            elif [ "$CURRENT_ABI" = x86_64 ]
+            then
+            export AR=$TOOLCHAIN/bin/x86_64-linux-android-ar
+            export AS=$TOOLCHAIN/bin/x86_64-linux-android-as
+            export CC=$TOOLCHAIN/bin/x86_64-linux-android21-clang
+            export CXX=$TOOLCHAIN/bin/x86_64-linux-android21-clang++
+            export LD=$TOOLCHAIN/bin/x86_64-linux-android-ld
+            export RANLIB=$TOOLCHAIN/bin/x86_64-linux-android-ranlib
+            export STRIP=$TOOLCHAIN/bin/x86_64-linux-android-strip
+            export ANDROID_SYSROOT=${DAEMON}/../client-android/android-toolchain-21-x86_64/sysroot
+
+            else
+            echo "ABI NOT OK" >&2
+            exit 1
+            fi
+
+            #=========================================================
+            #    CONTRIBS
+            #=========================================================
+            if [ "$CURRENT_ABI" = armeabi-v7a ]
+            then
+            CONTRIB_PLATFORM=arm-linux-androideabi
+
+            elif [ "$CURRENT_ABI" = arm64-v8a ]
+            then
+            CONTRIB_PLATFORM=aarch64-linux-android
+
+            elif [ "$CURRENT_ABI" = x86_64 ]
+            then
+            CONTRIB_PLATFORM=x86_64-linux-android
+            fi
+
+            #NDK SOURCES FOR cpufeatures
+            NDK_SOURCES=${ANDROID_NDK}/sources/android
+
+            #=========================================================
+            #    LD_FLAGS
+            #=========================================================
+            if [ "$CURRENT_ABI" = armeabi-v7a ]
+            then
+            export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi -L${ANDROID_SYSROOT}/usr/lib/arm-linux-androideabi/21"
+            elif [ "$CURRENT_ABI" = arm64-v8a ]
+            then
+            export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android -L${ANDROID_SYSROOT}/usr/lib/aarch64-linux-android/21"
+            elif [ "$CURRENT_ABI" = x86_64 ]
+            then
+            export EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android -L${ANDROID_SYSROOT}/usr/lib/x86_64-linux-android/21"
+            fi
+
+            #=========================================================
+            #    Compile CPU FEATURES, NEEDED FOR OPENCV
+            #=========================================================
+            $CC -c "$NDK_SOURCES/cpufeatures/cpu-features.c" -o cpu-features.o --sysroot=$ANDROID_SYSROOT
+
+            #=========================================================
+            #    Compile the plugin
+            #=========================================================
+
+            # Create so destination folder
+            $CXX --std=c++17 -O3 -g -fPIC \
+            -Wl,-Bsymbolic,-rpath,"\${ORIGIN}" \
+            -shared \
+            -Wall -Wextra \
+            -Wno-unused-variable \
+            -Wno-unused-function \
+            -Wno-unused-parameter \
+            -DTFLITE \
+            -I"." \
+            -I"${DAEMON_SRC}" \
+            -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include" \
+            -I"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4" \
+            -I"${LIBS_DIR}/${TF}/include/flatbuffers" \
+            -I"${LIBS_DIR}/${TF}/include" \
+            -I"${PLUGINS_LIB}" \
+            ./../lib/accel.cpp \
+            ./../lib/frameUtils.cpp \
+            main.cpp \
+            videoSubscriber.cpp \
+            pluginProcessor.cpp \
+            pluginMediaHandler.cpp \
+            TFInference.cpp \
+            pluginInference.cpp \
+            pluginParameters.cpp \
+            cpu-features.o \
+            -L"${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/" \
+            -L"${TF_LIBS_DIR}/${TF}/lib/${CURRENT_ABI}/" \
+            -lswscale \
+            -lavutil \
+            -lopencv_imgcodecs \
+            -lopencv_imgproc \
+            -lopencv_core \
+            -llibpng \
+            -ltensorflowlite \
+            -llog -lz \
+            --sysroot=$ANDROID_SYSROOT \
+            -o "build-local/jpl/lib/$CURRENT_ABI/${SO_FILE_NAME}"
+
+            cp "${TF_LIBS_DIR}/${TF}/lib/${CURRENT_ABI}/libtensorflowlite.so" "build-local/jpl/lib/$CURRENT_ABI"
+            rm cpu-features.o
+        }
+
+        # Build the so
+        for i in ${ANDROID_ABI}; do
+            CURRENT_ABI=$i
+            buildlib
+        done
+    fi
+
+    mkdir -p ./build-local/jpl/data/models
+    cp ./modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite ./build-local/jpl/data/models/mModel.tflite
+    cp ./preferences-tflite.json ./build-local/jpl/data/preferences.json
+fi
+
+python3 ./../SDK/jplManipulation.py --assemble --plugin=${PLUGIN_NAME} --distribution=${PLATFORM} --extraPath=${EXTRAPATH}
diff --git a/TensorflowSegmentation/data/backgrounds/background1.png b/TensorflowSegmentation/data/backgrounds/background1.png
new file mode 100644
index 0000000000000000000000000000000000000000..849251c69bd6a1a1ce7917b7d806cec85f0b7b01
Binary files /dev/null and b/TensorflowSegmentation/data/backgrounds/background1.png differ
diff --git a/GreenScreen/data/backgrounds/background2.png b/TensorflowSegmentation/data/backgrounds/background2.png
similarity index 100%
rename from GreenScreen/data/backgrounds/background2.png
rename to TensorflowSegmentation/data/backgrounds/background2.png
diff --git a/TensorflowSegmentation/data/icon.png b/TensorflowSegmentation/data/icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..f44370f1f48de6fe24377c74c1fbbcd0097e6a12
Binary files /dev/null and b/TensorflowSegmentation/data/icon.png differ
diff --git a/TensorflowSegmentation/main.cpp b/TensorflowSegmentation/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..014187eb51428cb0294dc976fab42b804f366de9
--- /dev/null
+++ b/TensorflowSegmentation/main.cpp
@@ -0,0 +1,67 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#include <iostream>
+#include <string.h>
+#include <thread>
+#include <memory>
+
+#include <plugin/jamiplugin.h>
+#include "pluginMediaHandler.h"
+
+#ifdef WIN32
+#define EXPORT_PLUGIN __declspec(dllexport)
+#else
+#define EXPORT_PLUGIN
+#endif
+
+#define TensorflowSegmentation_VERSION_MAJOR 1
+#define TensorflowSegmentation_VERSION_MINOR 0
+#define TensorflowSegmentation_VERSION_PATCH 2
+
+extern "C" {
+void
+pluginExit(void)
+{}
+
+EXPORT_PLUGIN JAMI_PluginExitFunc
+JAMI_dynPluginInit(const JAMI_PluginAPI* api)
+{
+    std::cout << "**************************" << std::endl << std::endl;
+    std::cout << "**  TensorflowSegmentation PLUGIN  **" << std::endl;
+    std::cout << "**************************" << std::endl << std::endl;
+    std::cout << " Version " << TensorflowSegmentation_VERSION_MAJOR << "." << TensorflowSegmentation_VERSION_MINOR << "."
+              << TensorflowSegmentation_VERSION_PATCH << std::endl;
+
+    // If invokeService doesn't return an error
+    if (api) {
+        std::map<std::string, std::string> preferences;
+        api->invokeService(api, "getPluginPreferences", &preferences);
+        std::string dataPath;
+        api->invokeService(api, "getPluginDataPath", &dataPath);
+        auto fmp = std::make_unique<jami::PluginMediaHandler>(std::move(preferences), std::move(dataPath));
+
+        if (!api->manageComponent(api, "CallMediaHandlerManager", fmp.release())) {
+            return pluginExit;
+        }
+    }
+    return nullptr;
+}
+}
diff --git a/TensorflowSegmentation/manifest.json b/TensorflowSegmentation/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..c35f8e81e338e52c20118ade1a32c21df2a1de64
--- /dev/null
+++ b/TensorflowSegmentation/manifest.json
@@ -0,0 +1,5 @@
+{
+    "name": "TensorflowSegmentation",
+    "description": "TensorflowSegmentation Plugin with Tensorflow 2.1.1",
+    "version": "1.0.2"
+}
\ No newline at end of file
diff --git a/GreenScreen/modelsSRC/mModel-resnet50float.pb b/TensorflowSegmentation/modelsSRC/mModel-resnet50float.pb
similarity index 100%
rename from GreenScreen/modelsSRC/mModel-resnet50float.pb
rename to TensorflowSegmentation/modelsSRC/mModel-resnet50float.pb
diff --git a/GreenScreen/modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite b/TensorflowSegmentation/modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite
similarity index 100%
rename from GreenScreen/modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite
rename to TensorflowSegmentation/modelsSRC/mobilenet_v2_deeplab_v3_256_myquant.tflite
diff --git a/GreenScreen/modelsSRC/model_256_F_16.tflite b/TensorflowSegmentation/modelsSRC/model_256_F_16.tflite
similarity index 100%
rename from GreenScreen/modelsSRC/model_256_F_16.tflite
rename to TensorflowSegmentation/modelsSRC/model_256_F_16.tflite
diff --git a/GreenScreen/modelsSRC/model_256_Qlatency.tflite b/TensorflowSegmentation/modelsSRC/model_256_Qlatency.tflite
similarity index 100%
rename from GreenScreen/modelsSRC/model_256_Qlatency.tflite
rename to TensorflowSegmentation/modelsSRC/model_256_Qlatency.tflite
diff --git a/GreenScreen/modelsSRC/model_256_Qlatency_16.tflite b/TensorflowSegmentation/modelsSRC/model_256_Qlatency_16.tflite
similarity index 100%
rename from GreenScreen/modelsSRC/model_256_Qlatency_16.tflite
rename to TensorflowSegmentation/modelsSRC/model_256_Qlatency_16.tflite
diff --git a/TensorflowSegmentation/package.json b/TensorflowSegmentation/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..7167fdc2b105dc1b2eec71a607b78e6ec3bfa808
--- /dev/null
+++ b/TensorflowSegmentation/package.json
@@ -0,0 +1,22 @@
+{
+    "name": "TensorflowSegmentation",
+    "version": "1.0.2",
+    "extractLibs": true,
+    "deps": [
+        "ffmpeg",
+        "opencv"
+    ],
+    "defines": [
+        "TFLITE=False",
+        "CPU=False"
+    ],
+    "custom_scripts": {
+        "pre_build": [
+            "mkdir msvc"
+        ],
+        "build": [
+            "cmake --build ./msvc --config Release"
+        ],
+        "post_build": []
+    }
+}
\ No newline at end of file
diff --git a/GreenScreen/pluginInference.cpp b/TensorflowSegmentation/pluginInference.cpp
similarity index 100%
rename from GreenScreen/pluginInference.cpp
rename to TensorflowSegmentation/pluginInference.cpp
diff --git a/GreenScreen/pluginInference.h b/TensorflowSegmentation/pluginInference.h
similarity index 100%
rename from GreenScreen/pluginInference.h
rename to TensorflowSegmentation/pluginInference.h
diff --git a/TensorflowSegmentation/pluginMediaHandler.cpp b/TensorflowSegmentation/pluginMediaHandler.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..44d68c75b63cf0e0ff9b6f3f57ec2bdcf3f2c38c
--- /dev/null
+++ b/TensorflowSegmentation/pluginMediaHandler.cpp
@@ -0,0 +1,116 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
+ */
+
+#include "pluginMediaHandler.h"
+// Logger
+#include "pluglog.h"
+#include <string_view>
+const char sep = separator();
+const std::string TAG = "FORESEG";
+
+#define NAME "Foreground Segmentation"
+
+namespace jami {
+
+PluginMediaHandler::PluginMediaHandler(std::map<std::string, std::string>&& preferences,
+                                       std::string&& datapath)
+    : datapath_ {datapath}
+    , preferences_ {preferences}
+{
+    setGlobalPluginParameters(preferences_);
+    setId(datapath_);
+    mVS = std::make_shared<VideoSubscriber>(datapath_);
+}
+
+void
+PluginMediaHandler::notifyAVFrameSubject(const StreamData& data, jami::avSubjectPtr subject)
+{
+    std::ostringstream oss;
+    std::string_view direction = data.direction ? "Receive" : "Preview";
+    oss << "NEW SUBJECT: [" << data.id << "," << direction << "]" << std::endl;
+
+    bool preferredStreamDirection = false;
+    auto it = preferences_.find("streamslist");
+    if (it != preferences_.end()) {
+        Plog::log(Plog::LogPriority::INFO, TAG, "SET PARAMETERS");
+        preferredStreamDirection = it->second == "in";
+    }
+    oss << "preferredStreamDirection " << preferredStreamDirection << std::endl;
+    if (data.type == StreamType::video && !data.direction
+        && data.direction == preferredStreamDirection) {
+        subject->attach(mVS.get()); // my image
+        oss << "got my sent image attached" << std::endl;
+        attached_ = '1';
+    } else if (data.type == StreamType::video && data.direction
+               && data.direction == preferredStreamDirection) {
+        subject->attach(mVS.get()); // the image I receive from the others on the call
+        attached_ = '1';
+    }
+
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+}
+
+std::map<std::string, std::string>
+PluginMediaHandler::getCallMediaHandlerDetails()
+{
+    return {{"name", NAME},
+            {"iconPath", datapath_ + sep + "icon.png"},
+            {"pluginId", id()},
+            {"attached", attached_},
+            {"dataType", "1"}};
+}
+
+void
+PluginMediaHandler::setPreferenceAttribute(const std::string& key, const std::string& value)
+{
+    auto it = preferences_.find(key);
+    if (it != preferences_.end() && it->second != value) {
+        it->second = value;
+        if (key == "background") {
+            mVS->setBackground(value);
+        }
+    }
+}
+
+bool
+PluginMediaHandler::preferenceMapHasKey(const std::string& key)
+{
+    if (key == "background") {
+        return true;
+    }
+    return false;
+}
+
+void
+PluginMediaHandler::detach()
+{
+    attached_ = '0';
+    mVS->detach();
+}
+
+PluginMediaHandler::~PluginMediaHandler()
+{
+    std::ostringstream oss;
+    oss << " ~FORESEG Plugin" << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    detach();
+}
+} // namespace jami
diff --git a/TensorflowSegmentation/pluginMediaHandler.h b/TensorflowSegmentation/pluginMediaHandler.h
new file mode 100644
index 0000000000000000000000000000000000000000..3962246230d1b1dbe4bb837996a509e3943b2de1
--- /dev/null
+++ b/TensorflowSegmentation/pluginMediaHandler.h
@@ -0,0 +1,55 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
+ */
+
+#pragma once
+
+// Project
+#include "videoSubscriber.h"
+
+// Plugin
+#include "plugin/jamiplugin.h"
+#include "plugin/mediahandler.h"
+
+using avSubjectPtr = std::weak_ptr<jami::Observable<AVFrame*>>;
+
+namespace jami {
+
+class PluginMediaHandler : public jami::CallMediaHandler
+{
+public:
+    PluginMediaHandler(std::map<std::string, std::string>&& preferences, std::string&& dataPath);
+    ~PluginMediaHandler();
+
+    virtual void notifyAVFrameSubject(const StreamData& data, avSubjectPtr subject) override;
+    virtual std::map<std::string, std::string> getCallMediaHandlerDetails() override;
+
+    virtual void detach() override;
+    virtual void setPreferenceAttribute(const std::string& key, const std::string& value) override;
+    virtual bool preferenceMapHasKey(const std::string& key) override;
+
+    std::shared_ptr<VideoSubscriber> mVS;
+
+private:
+    const std::string datapath_;
+    std::map<std::string, std::string> preferences_;
+    std::string attached_ {'0'};
+};
+} // namespace jami
diff --git a/GreenScreen/pluginParameters.cpp b/TensorflowSegmentation/pluginParameters.cpp
similarity index 100%
rename from GreenScreen/pluginParameters.cpp
rename to TensorflowSegmentation/pluginParameters.cpp
diff --git a/GreenScreen/pluginParameters.h b/TensorflowSegmentation/pluginParameters.h
similarity index 100%
rename from GreenScreen/pluginParameters.h
rename to TensorflowSegmentation/pluginParameters.h
diff --git a/TensorflowSegmentation/pluginProcessor.cpp b/TensorflowSegmentation/pluginProcessor.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3b38830a53bc6e6c3ef51f4690f897b692f63ef9
--- /dev/null
+++ b/TensorflowSegmentation/pluginProcessor.cpp
@@ -0,0 +1,343 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
+ */
+
+#include "pluginProcessor.h"
+// System includes
+#include <algorithm>
+#include <cstring>
+// OpenCV headers
+#include <opencv2/core.hpp>
+#include <opencv2/imgcodecs.hpp>
+#include <opencv2/imgproc.hpp>
+// Logger
+#include <pluglog.h>
+
+extern "C" {
+#include <libavutil/display.h>
+}
+
+const char sep = separator();
+
+const std::string TAG = "FORESEG";
+
+PluginParameters* mPluginParameters = getGlobalPluginParameters();
+
+namespace jami {
+
+PluginProcessor::PluginProcessor(const std::string& dataPath)
+    : pluginInference {TFModel {dataPath + sep + "models" + sep + mPluginParameters->model}}
+{
+    initModel();
+    setBackgroundImage(mPluginParameters->image);
+}
+
+void
+PluginProcessor::setBackgroundImage(const std::string& backgroundPath)
+{
+    cv::Size size = cv::Size {0, 0};
+
+    if (!backgroundImage.empty())
+        size = backgroundImage.size();
+
+    cv::Mat newBackgroundImage = cv::imread(backgroundPath);
+    if (newBackgroundImage.cols == 0) {
+        Plog::log(Plog::LogPriority::ERR, TAG, "Background image not Loaded");
+    } else {
+        Plog::log(Plog::LogPriority::INFO, TAG, "Background image Loaded");
+        cv::cvtColor(newBackgroundImage, newBackgroundImage, cv::COLOR_BGR2RGB);
+        newBackgroundImage.convertTo(newBackgroundImage, CV_32FC3);
+        if (size.height) {
+            cv::resize(newBackgroundImage, newBackgroundImage, size);
+            backgroundRotation = 0;
+        }
+        backgroundImage = newBackgroundImage.clone();
+        newBackgroundImage.release();
+        hasBackground_ = true;
+    }
+}
+
+void
+PluginProcessor::initModel()
+{
+    try {
+        pluginInference.init();
+    } catch (std::exception& e) {
+        Plog::log(Plog::LogPriority::ERR, TAG, e.what());
+    }
+    std::ostringstream oss;
+    oss << "Model is allocated " << pluginInference.isAllocated();
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+}
+
+#ifdef TFLITE
+void
+PluginProcessor::feedInput(const cv::Mat& frame)
+{
+    auto pair = pluginInference.getInput();
+    uint8_t* inputPointer = pair.first;
+
+    cv::Mat temp(frame.rows, frame.cols, CV_8UC3, inputPointer);
+    frame.convertTo(temp, CV_8UC3);
+
+    inputPointer = nullptr;
+}
+#else
+void
+PluginProcessor::feedInput(const cv::Mat& frame)
+{
+    pluginInference.ReadTensorFromMat(frame);
+}
+#endif // TFLITE
+
+int
+PluginProcessor::getBackgroundRotation()
+{
+    return backgroundRotation;
+}
+
+void
+PluginProcessor::setBackgroundRotation(int angle)
+{
+    if (backgroundRotation != angle && (backgroundRotation - angle) != 0) {
+        rotateFrame(backgroundRotation - angle, backgroundImage);
+        backgroundRotation = angle;
+    }
+}
+
+void
+PluginProcessor::computePredictions()
+{
+    if (count == 0) {
+        // Run the graph
+        pluginInference.runGraph();
+        auto predictions = pluginInference.masksPredictions();
+
+        // Save the predictions
+        computedMask = predictions;
+    }
+}
+
+void
+PluginProcessor::printMask()
+{
+    for (size_t i = 0; i < computedMask.size(); i++) {
+        // Log the predictions
+        std::ostringstream oss;
+        oss << "\nclass: " << computedMask[i] << std::endl;
+        Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    }
+}
+
+void
+PluginProcessor::resetInitValues(const cv::Size& modelInputSize)
+{
+    previousMasks[0] = cv::Mat(modelInputSize.height, modelInputSize.width, CV_32FC1, double(0.));
+    previousMasks[1] = cv::Mat(modelInputSize.height, modelInputSize.width, CV_32FC1, double(0.));
+    kSize = cv::Size(modelInputSize.width * kernelSize, modelInputSize.height * kernelSize);
+    if (kSize.height % 2 == 0) {
+        kSize.height -= 1;
+    }
+    if (kSize.width % 2 == 0) {
+        kSize.width -= 1;
+    }
+    count = 0;
+    grabCutMode = cv::GC_INIT_WITH_MASK;
+    grabCutIterations = 5;
+}
+
+void
+copyByLine(uchar* frameData, uchar* applyMaskData, const int lineSize, cv::Size size)
+{
+    if (3 * size.width == lineSize) {
+        std::memcpy(frameData, applyMaskData, size.height * size.width * 3);
+    } else {
+        int rows = size.height;
+        int offset = 0;
+        int maskoffset = 0;
+        for (int i = 0; i < rows; i++) {
+            std::memcpy(frameData + offset, applyMaskData + maskoffset, lineSize);
+            offset += lineSize;
+            maskoffset += 3 * size.width;
+        }
+    }
+}
+
+void
+PluginProcessor::drawMaskOnFrame(
+    cv::Mat& frame, cv::Mat& frameReduced, std::vector<float> computedMask, int lineSize, int angle)
+{
+    if (computedMask.empty()) {
+        return;
+    }
+
+    int maskSize = static_cast<int>(std::sqrt(computedMask.size()));
+    cv::Mat maskImg(maskSize, maskSize, CV_32FC1, computedMask.data());
+    cv::Mat* applyMask = &frameReduced;
+    cv::Mat output;
+
+    if (count == 0) {
+        rotateFrame(-angle, maskImg);
+#ifdef TFLITE
+        for (int i = 0; i < maskImg.cols; i++) {
+            for (int j = 0; j < maskImg.rows; j++) {
+                if (maskImg.at<float>(j, i) == 15)
+                    maskImg.at<float>(j, i) = 1.;
+                else
+                    maskImg.at<float>(j, i) = smoothFactors[0] * previousMasks[0].at<float>(j, i)
+                                              + smoothFactors[1] * previousMasks[1].at<float>(j, i);
+            }
+        }
+        cv::morphologyEx(maskImg,
+                         maskImg,
+                         cv::MORPH_CLOSE,
+                         cv::getStructuringElement(cv::MORPH_ELLIPSE, kSize),
+                         cv::Point(-1, -1),
+                         4);
+#else
+        cv::resize(maskImg, maskImg, cv::Size(frameReduced.cols, frameReduced.rows));
+
+        double m, M;
+        cv::minMaxLoc(maskImg, &m, &M);
+
+        if (M < 2) { // avoid segmentation if no one is confidently detected in the frame
+            maskImg = 0. * maskImg;
+        } else {
+            for (int i = 0; i < maskImg.cols; i++) {
+                for (int j = 0; j < maskImg.rows; j++) {
+                    maskImg.at<float>(j, i) = (maskImg.at<float>(j, i) - m) / (M - m);
+
+                    if (maskImg.at<float>(j, i) < 0.4)
+                        maskImg.at<float>(j, i) = 0.;
+                    else if (maskImg.at<float>(j, i) < 0.7) {
+                        float value = maskImg.at<float>(j, i) * smoothFactors[0]
+                                      + previousMasks[0].at<float>(j, i) * smoothFactors[1]
+                                      + previousMasks[1].at<float>(j, i) * smoothFactors[2];
+                        maskImg.at<float>(j, i) = 0.;
+                        if (value > 0.7)
+                            maskImg.at<float>(j, i) = 1.;
+                    } else
+                        maskImg.at<float>(j, i) = 1.;
+                }
+            }
+        }
+#endif
+        if (cv::countNonZero(maskImg) != 0) {
+#ifdef TFLITE
+            cv::Mat tfMask;
+            tfMask = maskImg.clone();
+            tfMask *= 255.;
+            tfMask.convertTo(tfMask, CV_8UC1);
+            cv::threshold(tfMask, tfMask, 127, 255, cv::THRESH_BINARY);
+            if (cv::countNonZero(tfMask) != 0) {
+#endif
+                cv::Mat dilate;
+                cv::dilate(maskImg,
+                           dilate,
+                           cv::getStructuringElement(cv::MORPH_ELLIPSE, kSize),
+                           cv::Point(-1, -1),
+                           2);
+                cv::erode(maskImg,
+                          maskImg,
+                          cv::getStructuringElement(cv::MORPH_ELLIPSE, kSize),
+                          cv::Point(-1, -1),
+                          2);
+                for (int i = 0; i < maskImg.cols; i++) {
+                    for (int j = 0; j < maskImg.rows; j++) {
+                        if (dilate.at<float>(j, i) != maskImg.at<float>(j, i))
+                            maskImg.at<float>(j, i) = grabcutClass;
+                    }
+                }
+                maskImg.convertTo(maskImg, CV_8UC1);
+                applyMask->convertTo(*applyMask, CV_8UC1);
+                cv::Rect rect(1, 1, maskImg.rows, maskImg.cols);
+                cv::grabCut(*applyMask,
+                            maskImg,
+                            rect,
+                            bgdModel,
+                            fgdModel,
+                            grabCutIterations,
+                            grabCutMode);
+
+                grabCutMode = cv::GC_EVAL;
+                grabCutIterations = 1;
+
+                maskImg = maskImg & 1;
+#ifdef TFLITE
+                cv::bitwise_and(maskImg, tfMask, maskImg);
+            }
+#endif
+            maskImg.convertTo(maskImg, CV_32FC1);
+            maskImg *= 255.;
+            GaussianBlur(maskImg, maskImg, cv::Size(7, 7), 0); // float mask from 0 to 255.
+            maskImg = maskImg / 255.;
+        }
+        previousMasks[1] = previousMasks[0].clone();
+        previousMasks[0] = maskImg.clone();
+    }
+
+    cv::Mat roiMaskImg = previousMasks[0].clone();
+    cv::Mat roiMaskImgComplementary = 1. - roiMaskImg; // mask from 1. to 0
+
+    std::vector<cv::Mat> channels;
+    std::vector<cv::Mat> channelsComplementary;
+
+    channels.emplace_back(roiMaskImg);
+    channels.emplace_back(roiMaskImg);
+    channels.emplace_back(roiMaskImg);
+    channelsComplementary.emplace_back(roiMaskImgComplementary);
+    channelsComplementary.emplace_back(roiMaskImgComplementary);
+    channelsComplementary.emplace_back(roiMaskImgComplementary);
+
+    cv::merge(channels, roiMaskImg);
+    cv::merge(channelsComplementary, roiMaskImgComplementary);
+
+    int origType = frameReduced.type();
+    int roiMaskType = roiMaskImg.type();
+
+    frameReduced.convertTo(output, roiMaskType);
+    output = output.mul(roiMaskImg);
+    output += backgroundImage.mul(roiMaskImgComplementary);
+    output.convertTo(output, origType);
+
+    cv::resize(output, output, cv::Size(frame.cols, frame.rows));
+
+    copyByLine(frame.data, output.data, lineSize, cv::Size(frame.cols, frame.rows));
+    count++;
+    count = count % frameCount;
+}
+
+void
+PluginProcessor::rotateFrame(int angle, cv::Mat& mat)
+{
+    if (angle == -90)
+        cv::rotate(mat, mat, cv::ROTATE_90_COUNTERCLOCKWISE);
+    else if (std::abs(angle) == 180)
+        cv::rotate(mat, mat, cv::ROTATE_180);
+    else if (angle == 90)
+        cv::rotate(mat, mat, cv::ROTATE_90_CLOCKWISE);
+}
+
+bool
+PluginProcessor::hasBackground() const
+{
+    return hasBackground_;
+}
+} // namespace jami
diff --git a/TensorflowSegmentation/pluginProcessor.h b/TensorflowSegmentation/pluginProcessor.h
new file mode 100644
index 0000000000000000000000000000000000000000..e81dd955982d180efd8bf6dd8d0545d7cc0aaa75
--- /dev/null
+++ b/TensorflowSegmentation/pluginProcessor.h
@@ -0,0 +1,108 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
+ */
+
+#pragma once
+// STL
+#include <condition_variable>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <thread>
+#include <vector>
+// Filters
+#include "pluginInference.h"
+// AvFrame
+extern "C" {
+#include <libavutil/frame.h>
+}
+// Plugin
+#include <plugin/jamiplugin.h>
+#include <plugin/mediahandler.h>
+
+namespace jami {
+
+class PluginProcessor
+{
+public:
+    PluginProcessor(const std::string& dataPath);
+
+    void initModel();
+    /**
+     * @brief feedInput
+     * Takes a frame and feeds it to the model storage for predictions
+     * @param frame
+     */
+    void feedInput(const cv::Mat& frame);
+
+    /**
+     * @brief computePredictions
+     * Uses the model to compute the predictions and store them in
+     * computedPredictions
+     */
+    void computePredictions();
+
+    void printMask();
+    void drawMaskOnFrame(cv::Mat& frame,
+                         cv::Mat& frameReduced,
+                         std::vector<float> computedMask,
+                         int lineSize,
+                         int angle);
+    int getBackgroundRotation();
+    void setBackgroundRotation(int angle);
+    void setBackgroundImage(const std::string& backgroundPath);
+    void rotateFrame(int angle, cv::Mat& mat);
+    bool hasBackground() const;
+    void resetInitValues(const cv::Size& modelInputSize);
+
+    // Output predictions
+    std::vector<float> computedMask;
+
+    cv::Mat previousMasks[2];
+    cv::Mat backgroundImage;
+
+    cv::Size kSize;
+
+    PluginInference pluginInference;
+    std::string backgroundPath;
+    int count = 0;
+
+private:
+    // Frame
+    cv::Mat frame;
+    int backgroundRotation = 0;
+    bool hasBackground_ = false;
+    cv::Mat bgdModel, fgdModel;
+    int grabCutMode = 1; // cv::GC_INIT_WITH_MASK = 1;
+    int grabCutIterations = 5;
+#ifdef TFLITE
+    int grabcutClass = 2;
+    int frameCount = 3;
+    float smoothFactors[2] = {0.3f, 0.05f};
+    float kernelSize = 0.1f;
+#else
+    int grabcutClass = 3;
+    int frameCount = 5;
+    float smoothFactors[3] = {0.6f, 0.3f, 0.1f};
+    float kernelSize = 0.05f;
+#endif
+};
+} // namespace jami
diff --git a/GreenScreen/preferences-tfcc.json b/TensorflowSegmentation/preferences-tfcc.json
similarity index 100%
rename from GreenScreen/preferences-tfcc.json
rename to TensorflowSegmentation/preferences-tfcc.json
diff --git a/GreenScreen/preferences-tflite.json b/TensorflowSegmentation/preferences-tflite.json
similarity index 100%
rename from GreenScreen/preferences-tflite.json
rename to TensorflowSegmentation/preferences-tflite.json
diff --git a/TensorflowSegmentation/videoSubscriber.cpp b/TensorflowSegmentation/videoSubscriber.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..989c607ba1e6905e9fd00a4b63f6d8c0d1f84419
--- /dev/null
+++ b/TensorflowSegmentation/videoSubscriber.cpp
@@ -0,0 +1,219 @@
+/**
+ *  Copyright (C) 2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+ * USA.
+ */
+
+#include "videoSubscriber.h"
+// Use for display rotation matrix
+extern "C" {
+#include <libavutil/display.h>
+}
+#include <accel.h>
+#include <frameUtils.h>
+
+// Opencv processing
+#include <opencv2/imgcodecs.hpp>
+#include <opencv2/imgproc.hpp>
+
+// LOGGING
+#include <pluglog.h>
+
+const std::string TAG = "FORESEG";
+const char sep = separator();
+
+namespace jami {
+
+VideoSubscriber::VideoSubscriber(const std::string& dataPath)
+    : path_ {dataPath}
+    , pluginProcessor {dataPath}
+{
+    /**
+     * Waits for new frames and then process them
+     * Writes the predictions in computedPredictions
+     **/
+    processFrameThread = std::thread([this] {
+        while (running) {
+            std::unique_lock<std::mutex> l(inputLock);
+            inputCv.wait(l, [this] { return not running or newFrame; });
+            if (not running) {
+                break;
+            }
+
+            pluginProcessor.feedInput(fcopy.resizedFrameRGB);
+            newFrame = false;
+            /** Unlock the mutex; this way we let the other thread
+             *  copy new data while we are processing the old one
+             **/
+            l.unlock();
+            pluginProcessor.computePredictions();
+        }
+    });
+}
+
+VideoSubscriber::~VideoSubscriber()
+{
+    std::ostringstream oss;
+    oss << "~MediaProcessor" << std::endl;
+    stop();
+    processFrameThread.join();
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+}
+
+void
+VideoSubscriber::update(jami::Observable<AVFrame*>*, AVFrame* const& pluginFrame)
+{
+    if (pluginProcessor.pluginInference.isAllocated() && pluginProcessor.hasBackground()) {
+        if (!pluginFrame)
+            return;
+
+        //======================================================================================
+        // GET FRAME ROTATION
+        AVFrameSideData* side_data = av_frame_get_side_data(pluginFrame,
+                                                            AV_FRAME_DATA_DISPLAYMATRIX);
+
+        int angle {0};
+        if (side_data) {
+            auto matrix_rotation = reinterpret_cast<int32_t*>(side_data->data);
+            angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
+        }
+
+        //======================================================================================
+        // GET RAW FRAME
+        // Use a non-const Frame
+        // Convert input frame to RGB
+        int inputHeight = pluginFrame->height;
+        int inputWidth = pluginFrame->width;
+
+        fcopy.originalSize = cv::Size {inputWidth, inputHeight};
+
+        AVFrame* temp = transferToMainMemory(pluginFrame, AV_PIX_FMT_NV12);
+        AVFrame* bgrFrame = scaler.convertFormat(temp, AV_PIX_FMT_RGB24);
+        av_frame_unref(temp);
+        av_frame_free(&temp);
+        if (!bgrFrame)
+            return;
+        cv::Mat frame = cv::Mat {bgrFrame->height,
+                                 bgrFrame->width,
+                                 CV_8UC3,
+                                 bgrFrame->data[0],
+                                 static_cast<size_t>(bgrFrame->linesize[0])};
+        // First clone the frame as the original one is unusable because of
+        // linesize (row stride)
+
+        cv::Mat clone = frame.clone();
+        //======================================================================================
+
+        pluginProcessor.setBackgroundRotation(angle);
+
+        if (firstRun) {
+            pluginProcessor.pluginInference.setExpectedImageDimensions();
+            fcopy.resizedSize = cv::Size {pluginProcessor.pluginInference.getImageWidth(),
+                                          pluginProcessor.pluginInference.getImageHeight()};
+            pluginProcessor.resetInitValues(fcopy.resizedSize);
+
+            cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
+            pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
+
+            cv::resize(pluginProcessor.backgroundImage,
+                       pluginProcessor.backgroundImage,
+                       fcopy.resizedSize);
+
+            firstRun = false;
+        }
+
+        if (!newFrame) {
+            std::lock_guard<std::mutex> l(inputLock);
+            cv::resize(clone, fcopy.resizedFrameRGB, fcopy.resizedSize);
+            pluginProcessor.rotateFrame(angle, fcopy.resizedFrameRGB);
+            newFrame = true;
+            inputCv.notify_all();
+        }
+
+        fcopy.predictionsFrameRGB = frame;
+        fcopy.predictionsResizedFrameRGB = fcopy.resizedFrameRGB.clone();
+        pluginProcessor.rotateFrame(-angle, fcopy.predictionsResizedFrameRGB);
+        pluginProcessor.drawMaskOnFrame(fcopy.predictionsFrameRGB,
+                                        fcopy.predictionsResizedFrameRGB,
+                                        pluginProcessor.computedMask,
+                                        bgrFrame->linesize[0],
+                                        angle);
+
+        //======================================================================================
+        // REPLACE AVFRAME DATA WITH FRAME DATA
+        if (bgrFrame->data[0]) {
+            uint8_t* frameData = bgrFrame->data[0];
+            if (angle == 90 || angle == -90) {
+                std::memmove(frameData,
+                             fcopy.predictionsFrameRGB.data,
+                             static_cast<size_t>(pluginFrame->width * pluginFrame->height * 3)
+                                 * sizeof(uint8_t));
+            }
+
+            av_frame_copy_props(bgrFrame, pluginFrame);
+            moveFrom(pluginFrame, bgrFrame);
+        }
+        av_frame_unref(bgrFrame);
+        av_frame_free(&bgrFrame);
+    }
+}
+
+void
+VideoSubscriber::attached(jami::Observable<AVFrame*>* observable)
+{
+    std::ostringstream oss;
+    oss << "::Attached ! " << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    observable_ = observable;
+}
+
+void
+VideoSubscriber::detached(jami::Observable<AVFrame*>*)
+{
+    firstRun = true;
+    observable_ = nullptr;
+    std::ostringstream oss;
+    oss << "::Detached()" << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+}
+
+void
+VideoSubscriber::detach()
+{
+    if (observable_) {
+        firstRun = true;
+        std::ostringstream oss;
+        oss << "::Calling detach()" << std::endl;
+        Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+        observable_->detach(this);
+    }
+}
+
+void
+VideoSubscriber::stop()
+{
+    running = false;
+    inputCv.notify_all();
+}
+
+void
+VideoSubscriber::setBackground(const std::string& backgroundPath)
+{
+    pluginProcessor.setBackgroundImage(backgroundPath);
+}
+} // namespace jami
diff --git a/TensorflowSegmentation/videoSubscriber.h b/TensorflowSegmentation/videoSubscriber.h
new file mode 100644
index 0000000000000000000000000000000000000000..8598feb48d88bb69befa0d5f88a48dcc15232ed8
--- /dev/null
+++ b/TensorflowSegmentation/videoSubscriber.h
@@ -0,0 +1,94 @@
+/**
+ *  Copyright (C) 2004-2020 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#pragma once
+
+// AvFrame
+extern "C" {
+#include <libavutil/frame.h>
+}
+#include <observer.h>
+
+// STL
+#include <map>
+#include <thread>
+#include <condition_variable>
+
+#include <opencv2/core.hpp>
+
+#include "pluginProcessor.h"
+#include <frameScaler.h>
+
+namespace jami {
+
+class FrameCopy
+{
+public:
+    // This frame is a resized version of the original in RGB format
+    cv::Mat resizedFrameRGB;
+    cv::Size resizedSize;
+    // This frame is used to draw predictions into in RGB format
+    cv::Mat predictionsFrameRGB;
+    cv::Size originalSize;
+    // This frame is used to draw predictions into in RGB format on a resized frame
+    cv::Mat predictionsResizedFrameRGB;
+};
+
+class VideoSubscriber : public jami::Observer<AVFrame*>
+{
+public:
+    VideoSubscriber(const std::string& dataPath);
+    ~VideoSubscriber();
+
+    virtual void update(jami::Observable<AVFrame*>*, AVFrame* const&) override;
+    virtual void attached(jami::Observable<AVFrame*>*) override;
+    virtual void detached(jami::Observable<AVFrame*>*) override;
+
+    void detach();
+    void stop();
+    void setBackground(const std::string& backgroundPath);
+
+private:
+    // Observer pattern
+    Observable<AVFrame*>* observable_{};
+
+    // Data
+    std::string path_;
+
+    // Frame
+    FrameCopy fcopy;
+    cv::Mat frame;
+
+    FrameScaler scaler;
+
+    // Threading
+    std::thread processFrameThread;
+    std::mutex inputLock;
+    std::condition_variable inputCv;
+
+    // Status variables of the processing
+    bool firstRun {true};
+    bool running {true};
+    bool newFrame {false};
+
+    // std::shared_ptr<PluginProcessor> pluginProcessor;
+    PluginProcessor pluginProcessor;
+};
+} // namespace jami
diff --git a/contrib/build-dependencies.sh b/contrib/build-dependencies.sh
index e0564f3c318ef4ebb0aecc3fe3f0a5cb84ccae09..6b5331ec1e017c2527a1d25ba68ea8dbc7929e0a 100755
--- a/contrib/build-dependencies.sh
+++ b/contrib/build-dependencies.sh
@@ -2,8 +2,6 @@
 # Build Jami daemon for architecture specified by ANDROID_ABI
 set -e
 
-tar -xf libs.tar.gz
-
 if [ -z "$ANDROID_NDK" -o -z "$ANDROID_SDK" ]; then
    echo "You must define ANDROID_NDK, ANDROID_SDK and ANDROID_ABI before starting."
    echo "They must point to your NDK and SDK directories."
diff --git a/docker/Dockerfile_android_onnxruntime b/docker/Dockerfile_android_onnxruntime
new file mode 100644
index 0000000000000000000000000000000000000000..fb7ee8e6b41436821cc0aa2dd9c35fbacd30e488
--- /dev/null
+++ b/docker/Dockerfile_android_onnxruntime
@@ -0,0 +1,74 @@
+FROM sflagsantos/onnxruntime1.6.0-android
+
+ENV DEBIAN_FRONTEND noninteractive
+ENV EXTRALIBS_PATH=/home/gradle/Libs/onnxruntime/
+
+ENV LANG en_US.utf8
+ENV LC_ALL en_US.utf8
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+        clang \
+	asciidoc \
+	autogen \
+	automake \
+	autoconf \
+	autopoint \
+	gettext \
+	ca-certificates \
+	cmake \
+	bc \
+	bison \
+	build-essential \
+	bzip2 \
+	doxygen \
+	git \
+	lib32stdc++6 \
+	lib32z1 \
+	libtool \
+	locales \
+	m4 \
+	pkg-config \
+	software-properties-common \
+	python-is-python3 \
+	ssh \
+	unzip \
+	wget \
+	curl \
+	yasm \
+	nasm \
+	zip \
+	libpcre3 \
+	libpcre3-dev \
+	ruby ruby-dev \
+        python3-distutils \
+	&& locale-gen $LANG $LC_ALL && update-locale $LANG $LC_ALL
+
+# Android SDK tools
+ENV ANDROID_HOME=/opt/android-sdk
+ENV ANDROID_SDK_ROOT=${ANDROID_HOME}
+RUN wget -O /tmp/android-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-6609375_latest.zip && \
+	mkdir -p ${ANDROID_HOME} && \
+	unzip -q -d ${ANDROID_HOME} /tmp/android-tools.zip && \
+	rm -f /tmp/android-tools.zip && \
+	chown -R root:root ${ANDROID_HOME}
+ENV PATH=${PATH}:${ANDROID_HOME}/tools/bin
+
+# Swig 4.0.1
+RUN wget -O /tmp/swig.tar.gz https://github.com/swig/swig/archive/rel-4.0.1.tar.gz && \
+	tar xzf  /tmp/swig.tar.gz -C /opt && \
+	cd /opt/swig-rel-4.0.1/ && ./autogen.sh && ./configure && make && make install && \
+	cd .. && rm -rf /opt/swig-rel-4.0.1 /tmp/swig.tar.gz
+
+# Android SDK libraries, NDK
+RUN sdkmanager --sdk_root=${ANDROID_HOME} --update
+RUN (while sleep 1; do echo "y"; done) | sdkmanager --sdk_root=${ANDROID_HOME} 'build-tools;30.0.2' \
+	'platforms;android-30'\
+	'extras;android;m2repository'\
+	'extras;google;m2repository'\
+	'ndk;21.3.6528147'
+ENV ANDROID_SDK=${ANDROID_HOME}
+ENV ANDROID_NDK=${ANDROID_HOME}/ndk/21.3.6528147
+
+# Fastlane
+RUN gem install fastlane -NV
+ENV HOME=/tmp
diff --git a/docker/Dockerfile_android b/docker/Dockerfile_android_tensorflow2.1
similarity index 100%
rename from docker/Dockerfile_android
rename to docker/Dockerfile_android_tensorflow2.1
diff --git a/docker/Dockerfile_ubuntu_18.04_onnxruntime b/docker/Dockerfile_ubuntu_18.04_onnxruntime
new file mode 100644
index 0000000000000000000000000000000000000000..a2273655eae500662ac72cc4c6a7b1318c3e689b
--- /dev/null
+++ b/docker/Dockerfile_ubuntu_18.04_onnxruntime
@@ -0,0 +1,78 @@
+FROM sflagsantos/onnxruntime1.6.0-ubuntu18.04
+
+ENV DEBIAN_FRONTEND noninteractive
+ENV CUDA_HOME=/root/Libs/cuda
+ENV CUDNN_HOME=/root/Libs/cuda
+ENV EXTRALIBS_PATH=/root/Libs/onnxruntime/
+
+# Speed up mk-build-deps
+RUN apt-get clean
+RUN apt-get update && \
+    apt-get install -y -o Acquire::Retries=2 \
+        clang \
+        g++-8 \
+        gcc-8 \
+        cpp-8 \
+        file \
+        make \
+        libc6-dev \
+        libstdc++-8-dev \
+        git \
+        autoconf \
+        automake \
+        autopoint \
+        cmake \
+        libdbus-1-dev \
+        libdbus-c++-dev \
+        libgnutls28-dev \
+        libargon2-0-dev \
+        libcanberra-gtk3-dev \
+        libclutter-gtk-1.0-dev \
+        libclutter-1.0-dev \
+        libglib2.0-dev \
+        libgtk-3-dev \
+        libnotify-dev \
+        qtbase5-dev \
+        qttools5-dev \
+        qttools5-dev-tools \
+        yasm \
+        nasm \
+        autotools-dev \
+        gettext \
+        libpulse-dev \
+        libasound2-dev \
+        libexpat1-dev \
+        libpcre3-dev \
+        libyaml-cpp-dev \
+        libboost-dev \
+        libxext-dev \
+        libxfixes-dev \
+        libspeex-dev \
+        libspeexdsp-dev \
+        uuid-dev \
+        libavcodec-dev \
+        libavutil-dev \
+        libavformat-dev \
+        libswscale-dev \
+        libavdevice-dev \
+        libopus-dev \
+        libudev-dev \
+        libgsm1-dev \
+        libjsoncpp-dev \
+        libmsgpack-dev \
+        libnatpmp-dev \
+        libayatana-appindicator3-dev \
+        libqrencode-dev \
+        libnm-dev \
+        libwebkit2gtk-4.0-dev \
+        libcrypto++-dev \
+        libva-dev \
+        libvdpau-dev \
+        libssl-dev \
+        libsndfile1-dev
+
+RUN update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-8 50
+RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 50
+
+RUN chown 1001:1001 /root
+WORKDIR /root
diff --git a/docker/Dockerfile_ubuntu_20.04 b/docker/Dockerfile_ubuntu_tensorflow2.1
similarity index 100%
rename from docker/Dockerfile_ubuntu_20.04
rename to docker/Dockerfile_ubuntu_tensorflow2.1