diff --git a/Segmentation/CMakeLists.txt b/Segmentation/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6129d6ec300ba01d58c99c85150034fe8b8e79c1
--- /dev/null
+++ b/Segmentation/CMakeLists.txt
@@ -0,0 +1,293 @@
+cmake_minimum_required (VERSION 3.10)
+
+# Set the project name and version
+set (ProjectName Segmentation)
+set (Version 2.0.0)
+
+project (${ProjectName} VERSION ${Version})
+
+# Set default options
+option (DEBUG "Build in debug mode" OFF)
+option (ENABLE_CUDA "Enable CUDA support" OFF)
+
+# Set platform to build for
+set (PLATFORM_TYPE $ENV{PLATFORM_TYPE}) # can be LINUX or ANDROID or MACOS
+if (PLATFORM_TYPE STREQUAL "ANDROID")
+    set (PLATFORM $ENV{ANDROID_ABI}) # can be arm64-v8a, armeabi-v7a or x86_64
+elseif(PLATFORM_TYPE STREQUAL "LINUX")
+    set (PLATFORM x86_64-linux-gnu) # can be x86_64-linux-gnu
+elseif(PLATFORM_TYPE STREQUAL "MACOS")
+    execute_process(
+        COMMAND sh -c "$(command -v cc || command -v gcc) -dumpmachine" # used to create the directory for daemon contribution, see daemon/contrib/bootstrap
+        OUTPUT_VARIABLE DUMP_MACHINE_RESULT
+        OUTPUT_STRIP_TRAILING_WHITESPACE # remove the trailing "\n"
+    )
+    set (PLATFORM ${DUMP_MACHINE_RESULT})
+    execute_process(
+        COMMAND sh -c "uname -m"
+        OUTPUT_VARIABLE ARCH
+        OUTPUT_STRIP_TRAILING_WHITESPACE # remove the trailing "\n"
+    )
+    set (PLATFORM_LIB_DIR "${ARCH}-apple-darwin")
+else()
+    message(FATAL_ERROR "Platform type not supported for now")
+endif ()
+
+set(CMAKE_ANDROID_ARCH_ABI ${PLATFORM})
+
+# set compilation flags based on platform
+if (PLATFORM STREQUAL "x86_64-linux-gnu")
+    set (CONTRIB_PLATFORM x86_64-linux-gnu)
+    set (distribution x86_64-linux-gnu)
+elseif (PLATFORM STREQUAL "arm64-v8a")
+    set (CONTRIB_PLATFORM aarch64-linux-android)
+    set (distribution android)
+elseif (PLATFORM STREQUAL "armeabi-v7a")
+    set (CONTRIB_PLATFORM arm-linux-androideabi)
+    set (distribution android)
+elseif (PLATFORM STREQUAL "x86_64")
+    set (CONTRIB_PLATFORM x86_64-linux-android)
+    set (distribution android)
+elseif (PLATFORM STREQUAL "x86_64-apple-darwin23.6.0")
+    set (CONTRIB_PLATFORM x86_64-apple-darwin23.6.0)
+    set (distribution x86_64-apple-darwin23.6.0)
+else()
+    set (CONTRIB_PLATFORM ${PLATFORM})
+    set (distribution ${PLATFORM})
+endif ()
+
+if (PLATFORM_TYPE STREQUAL "ANDROID")
+    # should be removed; use the toolchain from the environment instead
+    set(CMAKE_ANDROID_ARCH_ABI ${PLATFORM}) # Set the desired ABI
+    set(CMAKE_ANDROID_STL_TYPE c++_shared) # Use C++ shared library
+    set(CMAKE_ANDROID_NDK_TOOLCHAIN_FILE /opt/android/ndk/26.3.11579264/build/cmake/android.toolchain.cmake)
+    set(CMAKE_TOOLCHAIN_FILE /opt/android/ndk/26.3.11579264/build/cmake/android.toolchain.cmake)
+    set(CMAKE_C_COMPILER /opt/android/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/bin/${CONTRIB_PLATFORM}30-clang)
+    set(CMAKE_CXX_COMPILER /opt/android/ndk/26.3.11579264/toolchains/llvm/prebuilt/linux-x86_64/bin/${CONTRIB_PLATFORM}30-clang++)
+endif()
+
+message ("PLATFORM_TYPE: ${PLATFORM_TYPE}")
+message ("PLATFORM: ${PLATFORM}")
+message ("CONTRIB_PLATFORM: ${CONTRIB_PLATFORM}")
+message ("distribution: ${distribution}")
+
+# Set variables
+set (CMAKE_CXX_STANDARD 17)
+set (CMAKE_CXX_STANDARD_REQUIRED True)
+set (DAEMON ${PROJECT_SOURCE_DIR}/../daemon)
+set (CONTRIB_PATH ${DAEMON}/contrib)
+set (PLUGINS_LIB ${PROJECT_SOURCE_DIR}/../lib)
+set (CONTRIB_ROOT_PATH ${CONTRIB_PATH}/${CONTRIB_PLATFORM})
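+# $ORIGIN makes the dynamic loader search for shared libraries next to the plugin itself at runtime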
+set (CMAKE_BUILD_RPATH "$ORIGIN")
+set (SDK_PATH ${PROJECT_SOURCE_DIR}/../SDK)
+set (CMAKE_POSITION_INDEPENDENT_CODE ON)
+set (ONNX_PATH ${CONTRIB_ROOT_PATH})
+
+if (ENABLE_CUDA)
+    set (ONNX_LIBS nvidia-gpu)
+    set (ONNX_CUDA_PATH ${CONTRIB_ROOT_PATH}/lib/onnxruntime/${ONNX_LIBS}/libonnxruntime_providers_cuda.so)
+    set (ONNX_SHARED_PATH ${CONTRIB_ROOT_PATH}/lib/onnxruntime/${ONNX_LIBS}/libonnxruntime_providers_shared.so)
+else ()
+    set (ONNX_LIBS cpu)
+endif ()
+
+# Set platform specific variables
+if (PLATFORM_TYPE STREQUAL "ANDROID")
+    set (CMAKE_SHARED_LINKER_FLAGS "-Wl,-Bsymbolic")
+    set (OpenCV_DIR ${CONTRIB_PATH}/native-${CONTRIB_PLATFORM}/opencv/build)
+    set (ONNX_SO_PATH ${CONTRIB_PATH}/native-${CONTRIB_PLATFORM}/onnx/build/Linux/Release/libonnxruntime.so)
+elseif (PLATFORM_TYPE STREQUAL "LINUX")
+    set (CMAKE_SHARED_LINKER_FLAGS "-Wl,-Bsymbolic")
+    set (ONNX_SO_PATH ${CONTRIB_ROOT_PATH}/lib/onnxruntime/${ONNX_LIBS}/libonnxruntime.so)
+elseif(PLATFORM_TYPE STREQUAL "MACOS")
+    set (CMAKE_SHARED_LINKER_FLAGS "-Wl")
+    set (ONNX_SO_PATH ${CONTRIB_ROOT_PATH}/lib/onnxruntime/${ONNX_LIBS}/libonnxruntime.dylib)
+endif ()
+
+message ("ONNX_SO_PATH: ${ONNX_SO_PATH}")
+
+find_package (PkgConfig REQUIRED)
+find_package (Python3 COMPONENTS Interpreter REQUIRED) # provides ${Python3_EXECUTABLE} used by the SDK scripts below (assumes CMake >= 3.12)
+list (APPEND PKG_CONFIG_EXECUTABLE "--static") # ask pkg-config for static link flags
+
+list (APPEND CMAKE_FIND_ROOT_PATH ${CONTRIB_ROOT_PATH})
+set (CMAKE_FIND_ROOT_PATH_MODE_PACKAGE BOTH)
+list (APPEND CMAKE_PREFIX_PATH ${CONTRIB_ROOT_PATH})
+
+# Find required packages
+pkg_search_module (avformat REQUIRED IMPORTED_TARGET libavformat)
+pkg_search_module (avdevice REQUIRED IMPORTED_TARGET libavdevice)
+pkg_search_module (avfilter REQUIRED IMPORTED_TARGET libavfilter)
+pkg_search_module (avcodec REQUIRED IMPORTED_TARGET libavcodec)
+pkg_search_module (swresample REQUIRED IMPORTED_TARGET libswresample)
+pkg_search_module (swscale REQUIRED IMPORTED_TARGET libswscale)
+pkg_search_module (avutil REQUIRED IMPORTED_TARGET libavutil)
+
+find_package (OpenCV 4 REQUIRED)
+message("OpenCV_LIBS: ${OpenCV_LIBS}")
+message("OpenCV_INCLUDE: ${OpenCV_INCLUDE_DIRS}")
+
+add_definitions (-DMSGPACK_NO_BOOST)
+
+if (DEBUG)
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -fsanitize=address -Wall -Wextra -Wno-unused-parameter")
+    add_definitions (-D__DEBUG__)
+else ()
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -Wall -Wextra -Wno-unused-parameter")
+endif ()
+
+if(PLATFORM_TYPE STREQUAL "MACOS")
+    find_library (COREFOUNDATION CoreFoundation)
+    find_library (COREMEDIA CoreMedia)
+    find_library (COREVIDEO CoreVideo)
+    find_library (CORESERVICES CoreServices)
+    find_library (FOUNDATION Foundation)
+    find_library (AVFOUNDATION AVFoundation)
+    find_library (COREGRAPHICS CoreGraphics)
+    set (MACOS_LIBS
+        ${COREFOUNDATION}
+        ${COREMEDIA}
+        ${COREVIDEO}
+        ${CORESERVICES}
+        ${FOUNDATION}
+        ${AVFOUNDATION}
+        ${COREGRAPHICS}
+    )
+endif()
+# Source files
+set (plugin_SRC
+    main.cpp
+    SegmentationVideoSubscriber.cpp
+    SegmentationMediaHandler.cpp
+    ${PLUGINS_LIB}/common.cpp
+    ${PLUGINS_LIB}/accel.cpp
+    ${PLUGINS_LIB}/frameUtils.cpp
+    ${PLUGINS_LIB}/frameFilter.cpp
+)
+
+# Create shared library
+add_library (${ProjectName} SHARED ${plugin_SRC})
+
+# Include directories
+target_include_directories (${ProjectName} PUBLIC
+    ${PROJECT_SOURCE_DIR}
+    ${DAEMON}/src
+    ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include
+    ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/include/opencv4
+    ${ONNX_PATH}/include/onnxruntime/session
+    ${PLUGINS_LIB}
+)
+if (ENABLE_CUDA)
+    target_include_directories (${ProjectName} PUBLIC
+        ${ONNX_PATH}/include/onnxruntime/core/providers/cuda
+    )
+elseif (PLATFORM_TYPE STREQUAL "ANDROID")
+    target_include_directories (${ProjectName} PUBLIC
+        ${ONNX_PATH}/include/onnxruntime/providers/nnapi
+    )
+endif ()
+
+# Link directories
+link_directories (
+    ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib
+    ${CONTRIB_PATH}/${CONTRIB_PLATFORM}/lib/opencv4/3rdparty
+    ${ONNX_PATH}/lib/onnxruntime/${ONNX_LIBS}
+)
+
+if (ENABLE_CUDA)
+    link_directories (
+        /usr/local/cuda-12.6/lib64 # $ENV{CUDA_HOME}/lib64
+    )
+endif ()
+
+# Libraries to link
+set (LINK_LIBS
+    ${OpenCV_LIBS}
+    PkgConfig::avformat
+    PkgConfig::avdevice
+    PkgConfig::avfilter
+    PkgConfig::avcodec
+    PkgConfig::swresample
+    PkgConfig::swscale
+    PkgConfig::avutil
+    ${ONNX_SO_PATH}
+)
+
+# Link libraries to the target
+target_link_libraries (${ProjectName} PUBLIC ${MACOS_LIBS} ${LINK_LIBS}) # if the platform is not MACOS, MACOS_LIBS will be empty and will not affect the build
+
+if(PLATFORM_TYPE STREQUAL "MACOS")
+    set_target_properties (${ProjectName} PROPERTIES
+        LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/jpl/lib/${PLATFORM_LIB_DIR}"
+    )
+else ()
+    set_target_properties (${ProjectName} PROPERTIES
+        LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/jpl/lib/${PLATFORM}"
+    )
+endif()
+
+if(PLATFORM_TYPE STREQUAL "MACOS")
+    add_custom_command (
+        TARGET ${ProjectName}
+        POST_BUILD
+        COMMAND install_name_tool -change "@rpath/libonnxruntime.1.16.3.dylib" "@loader_path/libonnxruntime.dylib" ${PROJECT_BINARY_DIR}/jpl/lib/${PLATFORM_LIB_DIR}/libSegmentation.dylib
+        COMMENT "Relinking of onnx lib to @loader_path"
+        # install_name_tool -change A B C : Change the lib link from A to B in C
+        # use the loader_path instead of rpath else jami can't find it
+        # remove 1.16.3 from the onnx lib name to match the regex of the pluginutils used to move libs from data/lib/ABI to the plugin root folder,
+        # else it stays in the data/lib/ABI folder and is harder to locate.
+        # Would also work with libonnxruntime.dylib.1.16.3 if you also rename it in the copy part
+    )
+endif ()
+
+# JPL manipulation commands for copying runtime files and generating JPL archive
+if (PLATFORM_TYPE STREQUAL "ANDROID")
+    add_custom_command (
+        TARGET ${ProjectName}
+        POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_SOURCE_DIR}/modelSRC/yolo11n-seg.onnx ${PROJECT_BINARY_DIR}/jpl/data/model/yolo11n-seg.onnx
+        COMMAND ${CMAKE_COMMAND} -E copy ${ONNX_SO_PATH} ${PROJECT_BINARY_DIR}/jpl/lib/${PLATFORM}/libonnxruntime.so
+        COMMENT "Copying runtime files"
+    )
+elseif (PLATFORM_TYPE STREQUAL "LINUX" AND ENABLE_CUDA)
+    add_custom_command (
+        TARGET ${ProjectName}
+        POST_BUILD
+        # Rename onnx providers libs to match the regex of the pluginutils used to move libs from data/lib/ABI to the plugin root folder
+        COMMAND patchelf --replace-needed libonnxruntime_providers_cuda.so libonnxruntimeProvidersCuda.so ${PROJECT_BINARY_DIR}/jpl/lib/${PLATFORM_LIB_DIR}/libSegmentation.so
+        COMMAND patchelf --replace-needed libonnxruntime_providers_shared.so libonnxruntimeProvidersShared.so ${PROJECT_BINARY_DIR}/jpl/lib/${PLATFORM_LIB_DIR}/libSegmentation.so
+        COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_SOURCE_DIR}/modelSRC/yolo11n-seg.onnx ${PROJECT_BINARY_DIR}/jpl/data/model/yolo11n-seg.onnx
+        COMMAND ${CMAKE_COMMAND} -E copy ${ONNX_SO_PATH} ${PROJECT_BINARY_DIR}/jpl/lib/${PLATFORM}/libonnxruntime.so.1.16.3
+        COMMAND ${CMAKE_COMMAND} -E copy ${ONNX_CUDA_PATH} ${PROJECT_BINARY_DIR}/jpl/lib/${PLATFORM}/libonnxruntimeProvidersCuda.so
+        COMMAND ${CMAKE_COMMAND} -E copy ${ONNX_SHARED_PATH} ${PROJECT_BINARY_DIR}/jpl/lib/${PLATFORM}/libonnxruntimeProvidersShared.so
+        COMMENT "Copying runtime files"
+    )
+elseif(PLATFORM_TYPE STREQUAL "LINUX")
+    add_custom_command (
+        TARGET ${ProjectName}
+        POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_SOURCE_DIR}/modelSRC/yolo11n-seg.onnx ${PROJECT_BINARY_DIR}/jpl/data/model/yolo11n-seg.onnx
+        COMMAND ${CMAKE_COMMAND} -E copy ${ONNX_SO_PATH} ${PROJECT_BINARY_DIR}/jpl/lib/${PLATFORM}/libonnxruntime.so.1.16.3
+        COMMENT "Copying runtime files"
+    )
+elseif(PLATFORM_TYPE STREQUAL "MACOS")
+    add_custom_command (
+        TARGET ${ProjectName}
+        POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_SOURCE_DIR}/modelSRC/yolo11n-seg.onnx ${PROJECT_BINARY_DIR}/jpl/data/model/yolo11n-seg.onnx
+        COMMAND ${CMAKE_COMMAND} -E copy ${ONNX_SO_PATH} ${PROJECT_BINARY_DIR}/jpl/lib/${PLATFORM_LIB_DIR}/libonnxruntime.dylib
+        COMMENT "Copying runtime files"
+    )
+endif ()
+
+add_custom_command (
+    TARGET ${ProjectName}
+    PRE_BUILD
+    COMMAND ${Python3_EXECUTABLE} ${SDK_PATH}/jplManipulation.py --preassemble --plugin=${ProjectName} --distribution=${distribution} --arch=${ARCH}
+    COMMENT "Assembling Plugin files"
+)
+add_custom_command (
+    TARGET ${ProjectName}
+    POST_BUILD
+    COMMAND ${Python3_EXECUTABLE} ${SDK_PATH}/jplManipulation.py --assemble --plugin=${ProjectName} --distribution=${distribution}
+    COMMENT "Generating JPL archive"
+)
\ No newline at end of file
diff --git a/Segmentation/SegmentationMediaHandler.cpp b/Segmentation/SegmentationMediaHandler.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e8f24d159285bdd4d358b138b5a61720350494dd
--- /dev/null
+++ b/Segmentation/SegmentationMediaHandler.cpp
@@ -0,0 +1,116 @@
+/**
+ *  Copyright (C) 2024 Savoir-faire Linux Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#include "SegmentationMediaHandler.h"
+#include "pluglog.h"
+#include <string_view>
+
+const char sep = separator();
+const std::string TAG = "Segmentation";
+
+#define NAME "Segmentation"
+
+namespace jami {
+
+SegmentationMediaHandler::SegmentationMediaHandler(std::map<std::string, std::string>&& preferences,
+                                               std::string&& datapath)
+    : datapath_ {datapath}
+    , preferences_ {preferences}
+{
+    setId(datapath_);
+    mVS = std::make_shared<SegmentationVideoSubscriber>(datapath_);
+    auto it = preferences_.find("visible");
+    if (it != preferences_.end()) {
+        mVS->setVisibility(it->second);
+    } else {
+        mVS->setVisibility("all");
+    }
+}
+
+void
+SegmentationMediaHandler::notifyAVFrameSubject(const StreamData& data, jami::avSubjectPtr subject)
+{
+    std::ostringstream oss;
+    std::string_view direction = data.direction ? "Receive" : "Preview";
+    oss << "NEW SUBJECT: [" << data.id << "," << direction << "]" << std::endl;
+
+    bool preferredStreamDirection = false; // false for output; true for input
+    auto it = preferences_.find("videostream");
+    if (it != preferences_.end()) {
+        preferredStreamDirection = it->second == "1";
+    }
+    oss << "preferredStreamDirection " << preferredStreamDirection << std::endl;
+    if (data.type == StreamType::video && !data.direction
+        && data.direction == preferredStreamDirection) {
+        subject->attach(mVS.get()); // your image
+        oss << "got my sent image attached" << std::endl;
+        attached_ = "1";
+    } else if (data.type == StreamType::video && data.direction
+               && data.direction == preferredStreamDirection) {
+        subject->attach(mVS.get()); // the image you receive from others on the call
+        oss << "got received image attached" << std::endl;
+        attached_ = "1";
+    }
+
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+}
+
+std::map<std::string, std::string>
+SegmentationMediaHandler::getCallMediaHandlerDetails()
+{
+    return {{"name", NAME},
+            {"iconPath", datapath_ + sep + "icon.svg"},
+            {"pluginId", id()},
+            {"attached", attached_},
+            {"dataType", "1"}};
+}
+
+void
+SegmentationMediaHandler::setPreferenceAttribute(const std::string& key, const std::string& value)
+{
+    auto it = preferences_.find(key);
+    if (it != preferences_.end() && it->second != value) {
+        it->second = value;
+    }
+}
+
+bool
+SegmentationMediaHandler::preferenceMapHasKey(const std::string& key)
+{
+    return (key == "all" || key == "mask" || key == "box");
+}
+
+void
+SegmentationMediaHandler::detach()
+{
+    attached_ = "0";
+    std::ostringstream oss;
+    oss << "Detached from Segmentation Plugin" << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+
+    mVS->detach();
+}
+
+SegmentationMediaHandler::~SegmentationMediaHandler()
+{
+    std::ostringstream oss;
+    oss << " ~SegmentationMediaHandler from Segmentation Plugin" << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    detach();
+}
+} // namespace jami
diff --git a/Segmentation/SegmentationMediaHandler.h b/Segmentation/SegmentationMediaHandler.h
new file mode 100644
index 0000000000000000000000000000000000000000..9bd9a2bfe0d43a5301ec118be3f34c009cc953cd
--- /dev/null
+++ b/Segmentation/SegmentationMediaHandler.h
@@ -0,0 +1,49 @@
+/**
+ *  Copyright (C) 2024 Savoir-faire Linux Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#pragma once
+
+#include "SegmentationVideoSubscriber.h"
+#include "plugin/jamiplugin.h"
+#include "plugin/mediahandler.h"
+
+using avSubjectPtr = std::weak_ptr<jami::Observable<AVFrame*>>;
+
+namespace jami {
+
+class SegmentationMediaHandler : public jami::CallMediaHandler
+{
+public:
+    SegmentationMediaHandler(std::map<std::string, std::string>&& preferences, std::string&& dataPath);
+    ~SegmentationMediaHandler();
+
+    virtual void notifyAVFrameSubject(const StreamData& data, avSubjectPtr subject) override;
+    virtual std::map<std::string, std::string> getCallMediaHandlerDetails() override;
+
+    virtual void detach() override;
+    virtual void setPreferenceAttribute(const std::string& key, const std::string& value) override;
+    virtual bool preferenceMapHasKey(const std::string& key) override;
+
+    std::shared_ptr<SegmentationVideoSubscriber> mVS;
+
+private:
+    const std::string datapath_;
+    std::map<std::string, std::string> preferences_;
+    std::string attached_ {"0"};
+};
+} // namespace jami
diff --git a/Segmentation/SegmentationVideoSubscriber.cpp b/Segmentation/SegmentationVideoSubscriber.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..acab73f6ddc959ea1df2b7625c64bc5d9afb821a
--- /dev/null
+++ b/Segmentation/SegmentationVideoSubscriber.cpp
@@ -0,0 +1,627 @@
+/**
+ *  Copyright (C) 2024 Savoir-faire Linux Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#include "SegmentationVideoSubscriber.h"
+extern "C" {
+#include <libavutil/display.h>
+}
+#include <accel.h>
+#include <frameUtils.h>
+#include <frameScaler.h>
+#include <pluglog.h>
+#include <stdio.h>
+#include <opencv2/imgproc.hpp>
+const std::string TAG = "Segmentation";
+const char sep = separator();
+
+namespace jami {
+
+SegmentationVideoSubscriber::SegmentationVideoSubscriber(const std::string& dataPath)
+    : path_ {dataPath}, env(ORT_LOGGING_LEVEL_WARNING, "test")
+{
+    initModel();
+    inferenceThread = std::thread(&SegmentationVideoSubscriber::inferenceThreadFunc, this);
+}
+
+SegmentationVideoSubscriber::~SegmentationVideoSubscriber()
+{
+    {
+        std::lock_guard<std::mutex> lock(queueMutex);
+        stopThread = true;
+    }
+    queueCondVar.notify_all();
+    inferenceThread.join();
+
+    std::ostringstream oss;
+    oss << "~SegmentationVideoSubscriber" << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+}
+
+void SegmentationVideoSubscriber::inferenceThreadFunc()
+{
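+    // Consumer loop: sleeps on the condition variable until a frame is queued
+    // (or shutdown is requested), then runs inference off the video thread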
+    while (true) {
+        cv::Mat frameToProcess;
+        {
+            std::unique_lock<std::mutex> lock(queueMutex);
+            queueCondVar.wait(lock, [this] { return !frameQueue.empty() || stopThread; });
+
+            if (stopThread && frameQueue.empty())
+                break;
+
+            frameToProcess = frameQueue.front();
+            frameQueue.pop();
+        }
+
+        if (!frameToProcess.empty()) {
+            auto start = std::chrono::high_resolution_clock::now();
+            detections = Detect(frameToProcess);
+            auto end = std::chrono::high_resolution_clock::now();
+            std::chrono::duration<double> elapsed_seconds = end - start;
+            Plog::log(Plog::LogPriority::INFO, TAG, "Inference time: " + std::to_string(elapsed_seconds.count()) + "s");
+            inferenceTime = "Inference time: " + std::to_string(elapsed_seconds.count()) + "s";
+            updateTrackedObjects(detections);
+        }
+    }
+}
+
+void SegmentationVideoSubscriber::updateTrackedObjects(const std::vector<Detection>& detections) {
+    // Update the tracked objects with the new detections, to stabilize the results between frames
+    // You can adjust the max_frames_to_persist parameter to keep the objects longer
+
+    for (auto& obj : tracked_objects) {
+        obj.frames_since_seen++;
+    }
+
+    for (const auto& det : detections) {
+        float best_iou = 0.0f;
+        TrackedObject* best_match = nullptr;
+
+        for (auto& obj : tracked_objects) {
+            if (obj.detection.class_id != det.class_id)
+                continue;
+
+            float iou = calculateIoU(obj.detection.box, det.box);
+            if (iou > best_iou) {
+                best_iou = iou;
+                best_match = &obj;
+            }
+        }
+
+        if (best_match && best_iou > 0.3f) {
+            best_match->detection = det;
+            best_match->frames_since_seen = 0;
+        } else {
+            TrackedObject new_obj;
+            new_obj.id = next_id++;
+            new_obj.detection = det;
+            new_obj.frames_since_seen = 0;
+            tracked_objects.push_back(new_obj);
+        }
+    }
+
+    tracked_objects.erase(
+        std::remove_if(tracked_objects.begin(), tracked_objects.end(),
+            [this](const TrackedObject& obj) {
+                return obj.frames_since_seen > max_frames_to_persist;
+            }),
+        tracked_objects.end()
+    );
+}
+
+void
+SegmentationVideoSubscriber::initModel() {
+
+    // The model used is yolo11n-seg.onnx
+    // It is the nano version of the yolo 11 model
+    // The model is trained on the COCO dataset and is able to detect 80 classes of objects.
+    // It has been converted from PyTorch to ONNX format with IR version 9, to be compatible with onnxruntime 1.16.3
+    // the model takes a 1x3x640x640 input tensor and outputs 2 tensors of shape 1x116x8400 and 1x32x160x160:
+    // input: images, [Batch_size, channels, height, width].
+    // output0: output0, [Batch_size, 4 positions + 80 classes + 32 mask coefficients, raw_boxes].
+    // output1: output1, [Batch_size, raw_masks, height, width].
+    // To extract the mask of the detected object, the model outputs 32 mask coefficients that are multiplied by 160x160 mask prototypes.
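+    // For one detection this amounts to: coefficients (1x32) * prototypes (32x25600) -> (1x25600),
+    // reshaped to a 160x160 mask (see Detect() below).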
+
+    Plog::log(Plog::LogPriority::INFO, TAG, "Initialisation du modèle yolo11n-seg.onnx");
+    Ort::SessionOptions session_options;
+    session_options.SetIntraOpNumThreads(1);
+    const std::string& modelpath = path_ + "/model/yolo11n-seg.onnx";
+    session = std::make_unique<Ort::Session>(env, modelpath.c_str(), session_options);
+    Plog::log(Plog::LogPriority::INFO, TAG, "Modèle chargé");
+}
+
+void
+SegmentationVideoSubscriber::update(jami::Observable<AVFrame*>*, AVFrame* const& pluginFrame)
+{
+    if (!pluginFrame)
+        return;
+    frameCounter++;
+
+    //======================================================================================
+    // GET FRAME ROTATION
+    AVFrameSideData* side_data = av_frame_get_side_data(pluginFrame, AV_FRAME_DATA_DISPLAYMATRIX);
+
+    int angle {0};
+    if (side_data) {
+        auto matrix_rotation = reinterpret_cast<int32_t*>(side_data->data);
+        angle = static_cast<int>(av_display_rotation_get(matrix_rotation));
+    }
+
+    //======================================================================================
+    // GET RAW FRAME
+    // Use a non-const Frame
+    // Convert input frame to RGB
+    uniqueFramePtr rgbFrame = {transferToMainMemory(pluginFrame, AV_PIX_FMT_NV12), frameFree};
+    rgbFrame.reset(FrameScaler::convertFormat(rgbFrame.get(), AV_PIX_FMT_RGB24));
+    if (!rgbFrame.get())
+        return;
+    resultFrame = cv::Mat {rgbFrame->height,
+                           rgbFrame->width,
+                           CV_8UC3,
+                           rgbFrame->data[0],
+                           static_cast<size_t>(rgbFrame->linesize[0])};
+
+    // First clone the frame, as the original one is not directly usable
+    // because of its linesize (row padding)
+
+    processingFrame = resultFrame.clone();
+
+    rotateFrame(angle, processingFrame);
+
+    // Thread
+    {
+        std::lock_guard<std::mutex> lock(queueMutex);
+        if (frameQueue.size() >= maxQueueSize) {
+            frameQueue.pop();
+        }
+        frameQueue.push(processingFrame.clone()); // Clone is needed, otherwise there are issues when rotated by 180°
+    }
+    queueCondVar.notify_one();
+
+    drawSegmentation(angle);
+
+    rotateFrame(-angle, processingFrame);
+
+    copyByLine(rgbFrame->linesize[0]);
+
+    //======================================================================================
+    // REPLACE AVFRAME DATA WITH FRAME DATA
+    rgbFrame.reset(FrameScaler::convertFormat(rgbFrame.get(), AV_PIX_FMT_YUV420P));
+    moveFrom(pluginFrame, rgbFrame.get());
+}
+
+void
+SegmentationVideoSubscriber::drawSegmentation(const int angle)
+{
+    if (!processingFrame.empty()) {
+        if (!tracked_objects.empty()){
+            DrawFinalDet(tracked_objects);
+        }
+    }
+}
+
+std::vector<SegmentationVideoSubscriber::Detection>
+SegmentationVideoSubscriber::Detect(cv::Mat frameToProcess) {
+
+    cv::Mat image = frameToProcess.clone();
+    if (image.empty()) {
+        return detections;
+    }
+    const char* input_names[] = {"images"};
+    const char* output_names[] = {"output0", "output1"};
+
+    int img_width = image.cols;
+    int img_height = image.rows;
+
+    const int input_width = 640;
+    const int input_height = 640;
+    float scale;
+    int top, left;
+
+    cv::Mat resized_image = resizeWithPadding(image, cv::Size(input_width, input_height), scale, top, left);
+    // Convert to float32 and normalize
+    resized_image.convertTo(resized_image, CV_32F, 1.0 / 255.0);
+
+    // convert from BGR to RGB (OpenCV uses BGR by default)
+    cv::cvtColor(resized_image, resized_image, cv::COLOR_BGR2RGB);
+
+    // Transpose channels to match the model's input format (HWC -> CHW)
+    std::vector<cv::Mat> chw(3);
+    cv::split(resized_image, chw);
+    // Flatten the image to a 1D array
+    std::vector<float> input_tensor_values;
+    for (int i = 0; i < 3; ++i) {
+        input_tensor_values.insert(input_tensor_values.end(), (float*)chw[i].datastart, (float*)chw[i].dataend);
+    }
+
+    std::vector<int64_t> input_shape = {1, 3, input_height, input_width};
+    // Create input tensor
+    Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
+    Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
+        memory_info, input_tensor_values.data(), input_tensor_values.size(),
+        input_shape.data(), input_shape.size()
+    );
+    // Run inference
+    auto output_tensors = session->Run(
+        Ort::RunOptions{nullptr},
+        input_names, &input_tensor, 1,
+        output_names, 2
+    );
+
+    // Get the output tensor values
+    float* output0_data = output_tensors[0].GetTensorMutableData<float>();
+    std::vector<int64_t> output0_shape = output_tensors[0].GetTensorTypeAndShapeInfo().GetShape();
+
+    float* output1_data = output_tensors[1].GetTensorMutableData<float>();
+    std::vector<int64_t> output1_shape = output_tensors[1].GetTensorTypeAndShapeInfo().GetShape();
+
+    int num_channels = output0_shape[1];     // 116
+    int num_predictions = output0_shape[2];  // 8400
+
+    int mask_dim = 32; // Mask coefficients
+    int mask_h = output1_shape[2]; // Masks height (160)
+    int mask_w = output1_shape[3]; // Masks width (160)
+
+    // Convert output to cv::Mat
+    cv::Mat output0 = cv::Mat(num_channels, num_predictions, CV_32F, output0_data);
+    output0 = output0.t(); // Transpose for each line to be a prediction
+
+    // Convert output1 to cv::Mat
+    cv::Mat mask_protos = cv::Mat(mask_dim, mask_h * mask_w, CV_32F, output1_data);
+
+    std::vector<Detection> detections;
+    float conf_threshold = 0.25f;
+    float nms_threshold = 0.45f;
+    int num_classes = 80; // Classes of dataset COCO
+
+    // Save the predictions
+    std::vector<cv::Mat> mask_coef_vectors;
+
+    for (int i = 0; i < output0.rows; ++i) {
+        float* data = output0.ptr<float>(i);
+
+        // Coordinates of the bounding box (center x, center y, width, height)
+        float x = data[0];
+        float y = data[1];
+        float w = data[2];
+        float h = data[3];
+
+        // Classes scores
+        float* scores = &data[4];
+
+        // Get max score and class
+        auto max_class_score = std::max_element(scores, scores + num_classes);
+        float confidence = *max_class_score;
+        int class_id = std::distance(scores, max_class_score);
+
+        if (confidence >= conf_threshold) {
+            // Search box coordinates in the original image
+            float x0 = (x - w / 2.0f);
+            float y0 = (y - h / 2.0f);
+            float x1 = (x + w / 2.0f);
+            float y1 = (y + h / 2.0f);
+
+            // Adjust with padding and scale
+            x0 = (x0 - left) / scale;
+            y0 = (y0 - top) / scale;
+            x1 = (x1 - left) / scale;
+            y1 = (y1 - top) / scale;
+
+            // Stay in the image boundaries
+            x0 = std::max(0.0f, std::min(x0, static_cast<float>(img_width - 1)));
+            y0 = std::max(0.0f, std::min(y0, static_cast<float>(img_height - 1)));
+            x1 = std::max(0.0f, std::min(x1, static_cast<float>(img_width - 1)));
+            y1 = std::max(0.0f, std::min(y1, static_cast<float>(img_height - 1)));
+
+            cv::Rect box(cv::Point(static_cast<int>(x0), static_cast<int>(y0)),
+                         cv::Point(static_cast<int>(x1), static_cast<int>(y1)));
+
+            // Extract mask coefficients
+            float* mask_coef_data = &data[4 + num_classes];
+            cv::Mat mask_coef = cv::Mat(1, mask_dim, CV_32F, mask_coef_data).clone(); // Clone to copy the data
+
+            Detection det;
+            det.box = box;
+            det.confidence = confidence;
+            det.class_id = class_id;
+
+            detections.push_back(det);
+            mask_coef_vectors.push_back(mask_coef);
+        }
+    }
+    // Apply Non-Maximum Suppression (NMS) to avoid multiple detections of the same object
+    // Normally you would use the NMS function from OpenCV's DNN module, but here we implement it manually because DNN is not in the daemon contribs
+    std::vector<int> indices;
+    std::vector<cv::Rect> boxes;
+    std::vector<float> confidences;
+
+    for (const auto& det : detections) {
+        boxes.push_back(det.box);
+        confidences.push_back(det.confidence);
+    }
+
+    indices = applyManualNMS(boxes, confidences, nms_threshold);
+    // Generate the final detections
+    std::vector<Detection> final_detections;
+    for (size_t i = 0; i < indices.size(); ++i) {
+        int idx = indices[i];
+        Detection det = detections[idx];
+        cv::Mat mask_coef = mask_coef_vectors[idx];
+
+        // Generate the mask
+        cv::Mat mask = mask_coef * mask_protos; // (1, 32) x (32, 160*160) -> (1, 160*160)
+
+        cv::Mat mask_reshaped = mask.reshape(1, mask_h); // Reshape to (160, 160)
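+        // Apply the sigmoid elementwise, sigmoid(x) = 1 / (1 + exp(-x)),
+        // to map the mask logits to [0, 1] probabilities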
+        cv::Mat mask_sigmoid;
+        cv::exp(-mask_reshaped, mask_sigmoid);
+        mask_sigmoid = 1.0 / (1.0 + mask_sigmoid);
+
+        // Resize the mask to the original image size
+        cv::Mat mask_resized;
+        if (mask_sigmoid.empty()) {
+            continue;
+        }
+        cv::resize(mask_sigmoid, mask_resized, cv::Size(input_width, input_height), 0, 0, cv::INTER_LINEAR);
+
+        // Crop the mask to the bounding box
+        int x0 = std::max((det.box.x * scale + left), 0.0f);
+        int y0 = std::max((det.box.y * scale + top), 0.0f);
+        int x1 = std::min((det.box.x + det.box.width) * scale + left, (float)input_width);
+        int y1 = std::min((det.box.y + det.box.height) * scale + top, (float)input_height);
+
+        cv::Rect mask_roi = cv::Rect(cv::Point(int(x0), int(y0)), cv::Point(int(x1), int(y1)));
+
+        cv::Mat mask_cropped = mask_resized(mask_roi);
+
+        // Resize the mask to the original image size
+        cv::Mat mask_original_size;
+        if (mask_cropped.empty()) {
+            continue;
+        }
+        cv::resize(mask_cropped, mask_original_size, cv::Size(det.box.width, det.box.height), 0, 0, cv::INTER_LINEAR);
+
+        // Binarize the mask
+        cv::Mat mask_bin;
+        cv::threshold(mask_original_size, mask_bin, 0.6, 1, cv::THRESH_BINARY);
+
+        // Store the mask
+        det.mask = mask_bin;
+
+        final_detections.push_back(det);
+    }
+    return final_detections;
+}
+
+void
+SegmentationVideoSubscriber::DrawFinalDet(const std::vector<TrackedObject>& tracked_objects) {
+    // Classes names from COCO dataset (80 classes)
+    std::vector<std::string> class_names = {
+        "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train",
+        "truck", "boat", "traffic light", "fire hydrant", "stop sign",
+        "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep",
+        "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
+        "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard",
+        "sports ball", "kite", "baseball bat", "baseball glove", "skateboard",
+        "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork",
+        "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
+        "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
+        "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor",
+        "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave",
+        "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase",
+        "scissors", "teddy bear", "hair drier", "toothbrush"
+    };
+
+    for (const auto& obj : tracked_objects) {
+        // if (det.class_id != 0) continue; // To detect only persons
+        // Draw the bounding box
+        const auto& det = obj.detection;
+
+        if (det.box.x < 0 || det.box.y < 0 || det.box.x + det.box.width > processingFrame.cols || det.box.y + det.box.height > processingFrame.rows) {
+            continue; // Ignore the box if it is out of the frame
+        }
+
+        if (baseVisibility == "all" || baseVisibility == "box") {
+            // Draw the bounding box
+            cv::rectangle(processingFrame, det.box, cv::Scalar(0, 255, 0), 1);
+
+            // Display the class name and the confidence score
+            std::string label = class_names[det.class_id] + ": " + cv::format("%.2f", det.confidence);
+            int baseLine = 0;
+            cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 1, 2, &baseLine);
+            int top_label = std::max(det.box.y, labelSize.height);
+            cv::rectangle(processingFrame, cv::Point(det.box.x, top_label - labelSize.height),
+                        cv::Point(det.box.x + labelSize.width, top_label + baseLine),
+                        cv::Scalar(255, 255, 255), cv::FILLED);
+            cv::putText(processingFrame, label, cv::Point(det.box.x, top_label),
+                        cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 0, 0), 2);
+            if (det.mask.empty()) {
+                Plog::log(Plog::LogPriority::INFO, TAG, "Mask is empty");
+                continue;
+            }
+        }
+
+        if (baseVisibility == "all" || baseVisibility == "mask") {
+            // Draw the segmentation mask
+            srand (det.class_id); // Seed rand() with the class_id so the same class always gets the same color
+            cv::Mat color = cv::Mat::zeros(det.mask.size(), CV_8UC3);
+            cv::Scalar random_color = cv::Scalar(rand() % 256, rand() % 256, rand() % 256);
+            color.setTo(random_color);
+
+            // roi = Region of Interest
+            cv::Mat roi = processingFrame(det.box);
+
+            cv::Mat mask_uint8;
+            det.mask.convertTo(mask_uint8, CV_8U, 255);
+            cv::Mat mask_resized;
+            if (mask_uint8.empty()) {
+                Plog::log(Plog::LogPriority::INFO, TAG, "mask_uint8 is empty");
+                continue;
+            }
+            cv::resize(mask_uint8, mask_resized, roi.size(), 0, 0, cv::INTER_NEAREST);
+            cv::Mat blended;
+            if (mask_resized.empty()) {
+                Plog::log(Plog::LogPriority::INFO, TAG, "mask_resized is empty");
+                continue;
+            }
+            if (roi.size() != color.size()) {
+                Plog::log(Plog::LogPriority::INFO, TAG, "roi.size() != color.size()");
+                continue;
+            }
+            cv::addWeighted(roi, 1.0, color, 0.5, 0, blended);
+            if (mask_resized.size() == roi.size()) {
+                blended.copyTo(roi, mask_resized);
+            }
+        }
+    }
+    // add a rectangle with std::string inferenceTime
+    // cv::rectangle(processingFrame, cv::Point(0, 0), cv::Point(processingFrame.cols, 30), cv::Scalar(0, 0, 0), cv::FILLED);
+    // cv::putText(processingFrame, inferenceTime, cv::Point(10, 20), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(255, 255, 255), 1);
+}
+
+cv::Mat
+SegmentationVideoSubscriber::resizeWithPadding(const cv::Mat& img, const cv::Size& new_size, float& scale, int& top, int& left) {
+    // Resize the image to the input size with padding to keep the aspect ratio
+    int original_width = img.cols;
+    int original_height = img.rows;
+    int resized_width = new_size.width;
+    int resized_height = new_size.height;
+
+    scale = std::min(resized_width / (float)original_width, resized_height / (float)original_height);
+    int new_unpad_w = scale * original_width;
+    int new_unpad_h = scale * original_height;
+
+    cv::Mat resized;
+    cv::resize(img, resized, cv::Size(new_unpad_w, new_unpad_h));
+
+    int dw = resized_width - new_unpad_w;
+    int dh = resized_height - new_unpad_h;
+
+    left = dw / 2;
+    top = dh / 2;
+
+    cv::Mat padded;
+    cv::copyMakeBorder(resized, padded, top, dh - top, left, dw - left, cv::BORDER_CONSTANT, cv::Scalar(114, 114, 114));
+    return padded;
+}
+
+float
+SegmentationVideoSubscriber::calculateIoU(const cv::Rect& boxA, const cv::Rect& boxB) {
+    // Calculate the Intersection over Union (IoU) of two bounding boxes
+    int xA = std::max(boxA.x, boxB.x);
+    int yA = std::max(boxA.y, boxB.y);
+    int xB = std::min(boxA.x + boxA.width, boxB.x + boxB.width);
+    int yB = std::min(boxA.y + boxA.height, boxB.y + boxB.height);
+
+    int interArea = std::max(0, xB - xA + 1) * std::max(0, yB - yA + 1);
+    int boxAArea = boxA.width * boxA.height;
+    int boxBArea = boxB.width * boxB.height;
+
+    float iou = static_cast<float>(interArea) / static_cast<float>(boxAArea + boxBArea - interArea);
+    return iou;
+}
+
+std::vector<int>
+SegmentationVideoSubscriber::applyManualNMS(const std::vector<cv::Rect>& boxes, const std::vector<float>& confidences, float nms_threshold) {
+    // Apply Non-Maximum Suppression (NMS) to avoid multiple detections of the same object
+    std::vector<int> indices;
+    std::vector<std::pair<float, int>> sorted_boxes;
+
+    for (size_t i = 0; i < confidences.size(); ++i) {
+        sorted_boxes.push_back(std::make_pair(confidences[i], i));
+    }
+
+    std::sort(sorted_boxes.begin(), sorted_boxes.end(), [](const std::pair<float, int>& a, const std::pair<float, int>& b) {
+        return a.first > b.first;
+    });
+
+    std::vector<bool> is_suppressed(confidences.size(), false);
+    for (size_t i = 0; i < sorted_boxes.size(); ++i) {
+        int idxA = sorted_boxes[i].second;
+        if (is_suppressed[idxA]) continue;
+
+        indices.push_back(idxA);
+        for (size_t j = i + 1; j < sorted_boxes.size(); ++j) {
+            int idxB = sorted_boxes[j].second;
+            if (calculateIoU(boxes[idxA], boxes[idxB]) > nms_threshold) {
+                is_suppressed[idxB] = true;
+            }
+        }
+    }
+    return indices;
+}
+
+void
+SegmentationVideoSubscriber::rotateFrame(const int angle, cv::Mat& frame)
+{
+    // Rotate the frame by the given angle
+    // On Android, when the phone rotates, the video output stays the same; a rotation
+    // angle is stored in the frame metadata to avoid costly computations on the phone.
+    // We need to rotate the frame ourselves to use it correctly.
+    if (angle == -90)
+        cv::rotate(frame, frame, cv::ROTATE_90_COUNTERCLOCKWISE);
+    else if (std::abs(angle) == 180)
+        cv::rotate(frame, frame, cv::ROTATE_180);
+    else if (angle == 90)
+        cv::rotate(frame, frame, cv::ROTATE_90_CLOCKWISE);
+}
+
+void
+SegmentationVideoSubscriber::setVisibility(const std::string& visibility)
+{
+    char visible[5] = "all";
+    std::sscanf(visibility.c_str(), "%4s", visible);
+    baseVisibility = visible;
+}
+
+void
+SegmentationVideoSubscriber::copyByLine(const int lineSize)
+{
+    if (resultFrame.size() != processingFrame.size() || resultFrame.type() != processingFrame.type()) {
+        resultFrame = cv::Mat(processingFrame.size(), processingFrame.type());
+    }
+    processingFrame.copyTo(resultFrame);
+}
+
+void
+SegmentationVideoSubscriber::attached(jami::Observable<AVFrame*>* observable)
+{
+    std::ostringstream oss;
+    oss << "::Attached ! " << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+    observable_ = observable;
+}
+
+void
+SegmentationVideoSubscriber::detached(jami::Observable<AVFrame*>*)
+{
+    observable_ = nullptr;
+    std::ostringstream oss;
+    oss << "::Detached()" << std::endl;
+    Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+}
+
+void
+SegmentationVideoSubscriber::detach()
+{
+    if (observable_) {
+        std::ostringstream oss;
+        oss << "::Calling detach()" << std::endl;
+        Plog::log(Plog::LogPriority::INFO, TAG, oss.str());
+        observable_->detach(this);
+    }
+}
+} // namespace jami
diff --git a/Segmentation/SegmentationVideoSubscriber.h b/Segmentation/SegmentationVideoSubscriber.h
new file mode 100644
index 0000000000000000000000000000000000000000..dfd5b550fc79f6977187513dc8db26e0c20ba308
--- /dev/null
+++ b/Segmentation/SegmentationVideoSubscriber.h
@@ -0,0 +1,105 @@
+/**
+ *  Copyright (C) 2024 Savoir-faire Linux Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#pragma once
+
+extern "C" {
+#include <libavutil/frame.h>
+}
+#include <observer.h>
+#include <opencv2/core.hpp>
+#include <queue>
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+#include <onnxruntime_cxx_api.h>
+
+namespace jami {
+
+class SegmentationVideoSubscriber : public Observer<AVFrame*>
+{
+public:
+    SegmentationVideoSubscriber(const std::string& dataPath);
+    ~SegmentationVideoSubscriber();
+
+    virtual void update(Observable<AVFrame*>*, AVFrame* const&) override;
+    virtual void attached(Observable<AVFrame*>*) override;
+    virtual void detached(Observable<AVFrame*>*) override;
+
+    void detach();
+
+    void setVisibility(const std::string& visibility);
+
+private:
+    struct Detection {
+        cv::Rect box;
+        float confidence;
+        int class_id;
+        cv::Mat mask;
+    };
+    struct TrackedObject {
+        int id;
+        Detection detection;
+        int frames_since_seen;
+    };
+
+    int next_id = 0;
+    std::vector<TrackedObject> tracked_objects;
+    int max_frames_to_persist = 3;
+
+    // thread
+    void inferenceThreadFunc();
+
+    std::queue<cv::Mat> frameQueue;
+    std::thread inferenceThread;
+    std::mutex queueMutex;
+    std::condition_variable queueCondVar;
+    bool stopThread = false;
+
+    static constexpr int maxQueueSize = 1;
+
+    // Observer pattern
+    Observable<AVFrame*>* observable_{};
+
+    // Data
+    std::string path_;
+
+    // define custom variables
+    void copyByLine(const int lineSize);
+    void drawSegmentation(const int angle);
+    void rotateFrame(const int angle, cv::Mat& frame);
+    void initModel();
+    cv::Mat resizeWithPadding(const cv::Mat& img, const cv::Size& new_size, float& scale, int& top, int& left);
+    float calculateIoU(const cv::Rect& boxA, const cv::Rect& boxB);
+    std::vector<int> applyManualNMS(const std::vector<cv::Rect>& boxes, const std::vector<float>& confidences, float nms_threshold);
+
+    void DrawFinalDet(const std::vector<TrackedObject>& tracked_objects);
+    void updateTrackedObjects(const std::vector<Detection>& detections);
+    std::vector<Detection> Detect(cv::Mat frameToProcess);
+    cv::Mat processingFrame;
+    cv::Mat resultFrame;
+    int frameCounter = 0;
+    std::string baseVisibility = "all";
+
+    Ort::Env env;
+    std::unique_ptr<Ort::Session> session;
+    std::vector<Detection> detections;
+
+    std::string inferenceTime;
+};
+} // namespace jami
diff --git a/Segmentation/build.md b/Segmentation/build.md
new file mode 100644
index 0000000000000000000000000000000000000000..06faf9a78334854195f63a21cba1e8aeaea59942
--- /dev/null
+++ b/Segmentation/build.md
@@ -0,0 +1,341 @@
+# Jami Plugin Build Process
+
+This document outlines the steps to build a Jami plugin, specifically the `Segmentation` plugin.
+For signing it, please refer to `jami-plugins/sign.md`.
+
+You can build it for Linux architectures (`x86_64-linux-gnu`), MacOS architectures (`arm64` and `x86_64`) and Android architectures (`arm64-v8a`, `armeabi-v7a` and `x86_64`). The example provided uses `arm64-v8a`, but feel free to replace it with the desired architecture.
+
+## Requirements:
+
+- `git`
+- `docker` (only for Linux or android)
+- `python3`
+
+### On computer:
+
+If you want good results, you should run inference on the GPU instead of the CPU.
+Depending on your computer:
+- CPU: 0.1-0.3 s / image
+- GPU: ~0.01 s / image
+
+Both will work, but with a GPU the mask updates at 30 fps, while the CPU is limited to a lower frame rate.
+
+To use the GPU, you need to build onnxruntime with CUDA and cuDNN, which implies having an NVIDIA GPU.
+You can use CUDA Toolkit 12.6 and cuDNN 9.5.1.
+You can install them by following NVIDIA's instructions.
+
+### On Android:
+
+Depending on your phone, inference time can go from 0.3 s to 1 s.
+We tried using NNAPI to run the model on the GPU, but some operations are not supported by NNAPI.
+This forces the CPU to run those operations, creating multiple partitions in the model.
+With more than 2 partitions, NNAPI is neither faster nor recommended.
+
+> **Note:** You should disable `--use-nnapi` in `daemon/contrib/src/onnx/rules.mak`.
+
+On other models it may work better; you can check with the ONNX tools:
+```bash
+python -m onnxruntime.tools.check_onnx_model_mobile_usability your_model.onnx
+```
+
+Building the plugin directly on your computer is not recommended for compatibility reasons.
+
+## Step 1: Clone Jami Repositories
+
+Clone the Jami plugin and daemon repositories:
+
+```bash
+git clone https://review.jami.net/jami-plugins
+cd jami-plugins
+```
+
+For the daemon:
+
+```bash
+git submodule update --init --recursive
+```
+
+The structure should look like this:
+
+```
+|-jami-plugins
+	|-Segmentation
+		|-build.md
+		|-CMakeLists.txt
+		|-data
+		|-modelSRC
+			|-yolo11n-seg.onnx
+		|-*.cpp
+		|-*.h
+	|-daemon
+		|-contrib
+	|-SDK
+		|jplManipulation.py
+	|-docker
+		|-Dockerfile_ubuntu_20.04
+		|-Dockerfile_android
+	|-sign.md
+```
+
+## Step 2: Build Docker Image
+
+Build the Docker image using the provided Dockerfile:
+
+### Linux:
+```bash
+docker build -f docker/Dockerfile_ubuntu_20.04 -t {DockerNameToReplace} .
+```
+
+### Android:
+```bash
+docker build -f docker/Dockerfile_android -t {DockerNameToReplace} .
+```
+
+### MacOS:
+
+Do it on your system.
+
+## Step 3: Run Docker and Compile Dependencies
+
+Run the following Docker command to start building dependencies:
+> The dependencies are the libraries you will include in your plugin
+
+### Linux without CUDA:
+
+```bash
+docker run -t --rm \
+    -v $(pwd):/root/jami/:rw \
+    -w /root/ \
+    -e BATCH_MODE=1 \
+    {DockerNameToReplace} /bin/bash -c "
+    cd ./jami/daemon/contrib
+    mkdir -p native
+    cd native
+    ../bootstrap --disable-x264 --enable-ffmpeg --disable-dhtnet \
+                 --disable-webrtc-audio-processing --disable-argon2 \
+                 --disable-asio --enable-fmt --disable-gcrypt --disable-gmp \
+                 --disable-gnutls --disable-gpg-error --disable-gsm \
+                 --disable-http_parser --disable-jack --disable-jsoncpp \
+                 --disable-libarchive --disable-libressl --enable-msgpack \
+                 --disable-natpmp --disable-nettle --enable-opencv --enable-opendht \
+                 --disable-pjproject --disable-portaudio --disable-restinio \
+                 --disable-secp256k1 --disable-speex --disable-speexdsp --disable-upnp \
+                 --disable-uuid --disable-yaml-cpp --enable-onnx --enable-opus && make list
+    make fetch opencv opencv_contrib
+    make -j24
+"
+```
+
+### Linux with CUDA:
+
+```bash
+docker run -t --rm \
+    -v $(pwd):/root/jami/:rw \
+    -w /root/ \
+    -e BATCH_MODE=1 \
+    {DockerNameToReplace} /bin/bash -c "
+    cd ./jami/daemon/contrib
+    mkdir -p native
+    cd native
+    ../bootstrap --disable-x264 --enable-ffmpeg --disable-dhtnet \
+                 --disable-webrtc-audio-processing --disable-argon2 \
+                 --disable-asio --enable-fmt --disable-gcrypt --disable-gmp \
+                 --disable-gnutls --disable-gpg-error --disable-gsm \
+                 --disable-http_parser --disable-jack --disable-jsoncpp \
+                 --disable-libarchive --disable-libressl --enable-msgpack \
+                 --disable-natpmp --disable-nettle --enable-opencv --enable-opendht \
+                 --disable-pjproject --disable-portaudio --disable-restinio \
+                 --disable-secp256k1 --disable-speex --disable-speexdsp --disable-upnp \
+                 --disable-uuid --disable-yaml-cpp --disable-onnx --enable-opus && make list
+    make fetch opencv opencv_contrib
+    make -j24
+"
+```
+
+Then, you can build onnxruntime from source (1.21 works, but any version from 1.18.1 should work) with CUDA and cuDNN (this can take hours and a lot of RAM).
+You should not use Docker for this step, since the result must be compatible with your own system.
+
+> in a folder of your choice
+```bash
+git clone --recursive https://github.com/microsoft/onnxruntime
+cd onnxruntime
+
+# Verify that these paths are correct on your system
+
+export CUDA_HOME=/usr/local/cuda-12.6
+export CUDNN_HOME=/usr/lib/x86_64-linux-gnu
+export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH
+export PATH=$CUDA_HOME/bin:$PATH
+pip3 install -r requirements.txt
+
+./build.sh --config Release --build_shared_lib --use_cuda --cuda_home $CUDA_HOME --cudnn_home $CUDNN_HOME
+
+```
+It builds the three following libraries:
+- `libonnxruntime.so`
+- `libonnxruntime_providers_cuda.so`
+- `libonnxruntime_providers_shared.so`
+
+You have to add the onnx files to the compilation: you can either update the CMakeLists to match their path, or move the files to the right place.
+
+Here, we assume you built onnxruntime in your home directory; adapt the path if needed.
+
+In jami-plugins:
+```bash
+sudo mkdir -p ./daemon/contrib/x86_64-linux-gnu/lib/onnxruntime/nvidia-gpu
+sudo cp ~/onnxruntime/build/Linux/Release/libonnxruntime.so ./daemon/contrib/x86_64-linux-gnu/lib/onnxruntime/nvidia-gpu
+sudo cp ~/onnxruntime/build/Linux/Release/libonnxruntime_providers_cuda.so ./daemon/contrib/x86_64-linux-gnu/lib/onnxruntime/nvidia-gpu
+sudo cp ~/onnxruntime/build/Linux/Release/libonnxruntime_providers_shared.so ./daemon/contrib/x86_64-linux-gnu/lib/onnxruntime/nvidia-gpu
+sudo cp ~/onnxruntime/build/Linux/Release/* ./daemon/contrib/x86_64-linux-gnu/lib/onnxruntime/nvidia-gpu
+sudo cp ~/onnxruntime/build/Linux/Release/lib/* ./daemon/contrib/x86_64-linux-gnu/lib/onnxruntime/nvidia-gpu
+```
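+
+You can then check that the three libraries landed where `CMakeLists.txt` expects them (paths assume the defaults above):
+
+```bash
+ls ./daemon/contrib/x86_64-linux-gnu/lib/onnxruntime/nvidia-gpu/
+# should list libonnxruntime.so, libonnxruntime_providers_cuda.so and libonnxruntime_providers_shared.so
+```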
+
+set "ENABLE_CUDA" ON in CMakeLists.txt
+
+> **Note:** When you install the plugin with CUDA in Jami, the plugin manager can fail to find the `libonnxruntime_providers_cuda` and `libonnxruntime_providers_shared` libs. You can fix this by moving the libraries to the same directory as `libonnxruntime.so`: `mv ~/.local/share/jami/plugins/Segmentation/lib/x86_64-linux-gnu/libonnxruntime_providers_* ~/.local/share/jami/plugins/Segmentation`
+
+### Android:
+```bash
+docker run -t --rm \
+    -v $(pwd):/home/gradle/plugins:rw \
+    -w /home/gradle \
+    -e BATCH_MODE=1 \
+    {DockerNameToReplace} /bin/bash -c "
+    cd ./plugins/contrib
+    ANDROID_ABI='arm64-v8a' sh build-dependencies.sh
+    "
+```
+
+### MacOS:
+
+```bash
+cd ./daemon/contrib
+mkdir native
+cd native
+../bootstrap --disable-x264 --enable-ffmpeg --disable-dhtnet \
+                 --disable-webrtc-audio-processing --disable-argon2 \
+                 --disable-asio --enable-fmt --disable-gcrypt --disable-gmp \
+                 --disable-gnutls --disable-gpg-error --disable-gsm \
+                 --disable-http_parser --disable-jack --disable-jsoncpp \
+                 --disable-libarchive --disable-libressl --enable-msgpack \
+                 --disable-natpmp --disable-nettle --enable-opencv --enable-opendht \
+                 --disable-pjproject --disable-portaudio --disable-restinio \
+                 --disable-secp256k1 --disable-speex --disable-speexdsp --disable-upnp \
+                 --disable-uuid --disable-yaml-cpp --enable-onnx --enable-opus
+make list
+make fetch opencv opencv_contrib
+make
+```
+
+> **Note:** The build process for the `contrib` dependencies (particularly ONNX) may take a long time (~20 minutes).
+
+## Step 4: Build the Plugin
+
+Use the following command to build the `Segmentation` plugin:
+
+### Linux:
+```bash
+docker run -t --rm \
+    -v $(pwd):/root/jami/:rw \
+    -w /root/ \
+    -e BATCH_MODE=1 \
+    {DockerNameToReplace} /bin/bash -c "
+    cd jami
+    PLATFORM_TYPE="LINUX" python3 build-plugin.py --projects='Segmentation' --distribution=unix
+"
+```
+> **Note:** If OpenCV fails to build due to a missing `libade.a`, create an empty file to bypass the issue:
+
+```bash
+sudo touch ./daemon/contrib/x86_64-linux-gnu/lib/opencv4/3rdparty/libade.a
+```
+
+### Android:
+```bash
+docker run -t --rm \
+    -v $(pwd):/home/gradle/plugins:rw \
+    -w /home/gradle \
+    -e BATCH_MODE=1 \
+    {DockerNameToReplace} /bin/bash -c "
+    export DAEMON=/home/gradle/plugins
+    cd ./plugins
+    PLATFORM_TYPE="ANDROID" ANDROID_ABI="arm64-v8a" python3 build-plugin.py --projects=Segmentation --distribution=android
+    "
+```
+
+### MacOS:
+
+```bash
+PLATFORM_TYPE="APPLE" python3 build-plugin.py --projects=Segmentation --distribution=osx
+```
+> **Note:** If OpenCV fails to build due to a missing `libade.a`, create an empty file to bypass the issue:
+
+```bash
+sudo touch ./daemon/contrib/{YOUR-ARCH e.g. x86_64-apple-darwin}/lib/opencv4/3rdparty/libade.a
+```
+
+## Step 5: Change Ownership of the Built Plugin
+
+The plugin was built as `root` inside the Docker container. To manipulate the output files, change their ownership to the current user:
+
+### Linux or Android:
+```bash
+sudo chown -R ${USER}:${USER} build/
+```
+
+### MacOS:
+
+Not needed, since you built directly on your system without Docker.
+
+## You have now successfully built your plugin!
+
+The JPL archive is in `build/{distribution}`.
+To add it to Jami, you now need to sign it by referring to `jami-plugins/sign.md`.
+
+## Additional documentation:
+
+### For a better understanding of plugins, here is what you should know:
+
+The plugins are designed to be compatible with everyone's setup. This is challenging because there are many different systems, and each user may have different libraries installed.
+
+### About plugin building:
+The file used for the compilation is the `CMakeLists.txt` in `Segmentation`.
+It assembles the files, creates the library, and calls the SDK scripts to manipulate the JPL.
+
+### About JPL Archive:
+In your folder `build/{distribution}`, you have the JPL archive, Jami's plugin format.
+
+You can extract it like a zip, or view its pre-compression contents in the folder `jami-plugins/Segmentation/build-local/jpl`.
+Inside, you'll find your libraries in `jpl/lib/{arch}`.
+Among them is `libSegmentation.so`; as in every plugin, there is a `lib{PluginName}.so`.
+In Segmentation, you will also find `libonnxruntime.so`, the library needed to run ONNX Runtime.
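+
+For instance, to list the archive contents without extracting it (a quick check; we assume here the archive is named `Segmentation.jpl`):
+
+```bash
+unzip -l build/{distribution}/Segmentation.jpl
+```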
+
+### About Libraries:
+
+Libraries are built using `Segmentation/CMakeLists.txt`.
+
+If you want your plugin to work on other computers, you need to understand that the plugin library (`libSegmentation.so`) must carry every needed dependency: system libraries are linked dynamically, while additional libraries like OpenCV are linked statically.
+
+System libraries must be dynamically linked because the user is supposed to have them installed, and statically linking them might cause compatibility issues.
+
+However, users won’t have every additional library at the required version, so you need to include the exact version in your plugin by statically linking it.
+
+Some useful commands:
+- See the linked shared libraries with `ldd libSegmentation.so`.
+  > Helps verify that every dynamic library is found on the system.
+  > It can't show which libraries are statically linked, but you can check whether a given symbol is defined.
+
+- See symbols in the library: `nm libSegmentation.so | grep {YourSymbol}`.
+  > Helps with missing symbols during loading in Jami. "U" means undefined, and "T" means they are in the text section. See `man nm` for details.
+
+- Symbols can be hard to read. You can demangle them with `c++filt {YourSymbol}`.
+  > The symbols represent the low-level names of the functions.
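+
+Putting these together, one way to list the undefined symbols with demangled names (`nm -D` reads the dynamic symbol table, which also works on stripped libraries):
+
+```bash
+nm -D libSegmentation.so | grep ' U ' | c++filt | head
+```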
+
+### About the plugin's code
+
+The plugin is mainly written in C++, located in the `jami-plugins/Segmentation` folder.
+The media handler file is used to interact with Jami's API (chat, audio, and video).
+> e.g. receiving the frames of the video stream before transformation and sending back the transformed frames.
+
+The media subscriber file is used to modify the objects it receives.
+> e.g. drawing a mask over the segmented areas using OpenCV, as in the sketch below.
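+
+As an illustration (not the plugin's actual code), here is a minimal, self-contained OpenCV sketch of this kind of transformation; the function name and the overlay colour are hypothetical:
+
+```cpp
+#include <opencv2/core.hpp>
+
+// Blend a translucent colour over the pixels selected by a binary mask.
+// `frame` is a BGR video frame; `mask` is single-channel, 255 where segmented.
+void drawMaskOverlay(cv::Mat& frame, const cv::Mat& mask)
+{
+    cv::Mat overlay(frame.size(), frame.type(), cv::Scalar(0, 255, 0)); // solid green layer
+    cv::Mat blended;
+    cv::addWeighted(frame, 0.6, overlay, 0.4, 0.0, blended); // 60/40 blend
+    blended.copyTo(frame, mask);                             // replace only masked pixels
+}
+```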
+
diff --git a/Segmentation/data/icon.png b/Segmentation/data/icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..437e40834746471d23081d0643d6f6f59b648c1c
Binary files /dev/null and b/Segmentation/data/icon.png differ
diff --git a/Segmentation/data/icon.svg b/Segmentation/data/icon.svg
new file mode 100644
index 0000000000000000000000000000000000000000..4ced6a7b018b11349e925af8f5a3322639ccbaf0
--- /dev/null
+++ b/Segmentation/data/icon.svg
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg width="300px" height="300px" viewBox="0 0 300 300" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+    <title>O</title>
+    <defs>
+        <linearGradient x1="50.001267%" y1="120.94311%" x2="50.001267%" y2="-257.222291%" id="linearGradient-1">
+            <stop stop-color="#EFB000" offset="0%"></stop>
+            <stop stop-color="#F1B904" offset="2.798313%"></stop>
+            <stop stop-color="#F6CF0E" offset="11.34%"></stop>
+            <stop stop-color="#FAE016" offset="21.42%"></stop>
+            <stop stop-color="#FDEB1B" offset="33.9%"></stop>
+            <stop stop-color="#FFF21E" offset="51.4%"></stop>
+            <stop stop-color="#FFF41F" offset="100%"></stop>
+        </linearGradient>
+        <linearGradient x1="49.9829639%" y1="-278.222509%" x2="49.9829639%" y2="99.9773676%" id="linearGradient-2">
+            <stop stop-color="#EFB000" offset="80.23%"></stop>
+            <stop stop-color="#F6CD0D" offset="85.32%"></stop>
+            <stop stop-color="#FBE317" offset="90.44%"></stop>
+            <stop stop-color="#FEF01D" offset="95.41%"></stop>
+            <stop stop-color="#FFF41F" offset="100%"></stop>
+        </linearGradient>
+    </defs>
+    <g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
+        <g id="O" fill-rule="nonzero">
+            <g transform="translate(2.000000, 0.000000)" id="Path">
+                <path d="M295.571429,152.285714 C295.571429,152.214286 295.571429,152.142857 295.571429,152.071429 C295.571429,150.428571 295.571429,148.714286 295.5,146.928571 C295.5,146.214286 295.5,145.428571 295.5,144.714286 L295.428571,144.714286 C293.857143,105.928571 280.071429,50.7142857 210.642857,15.1428571 C135.928571,-23.0714286 70.0714286,23.3571429 70.0714286,23.3571429 C70.1428571,23.3571429 70.1428571,23.3571429 70.2142857,23.3571429 C110.857143,16.2857143 172.357143,46.8571429 206.285714,90.9285714 C211.571429,97.7857143 216.214286,104.928571 219.928571,112.357143 C226.214286,124.642857 228.928571,136.214286 229.428571,146.642857 C229.428571,147.071429 229.5,147.571429 229.5,148 C229.571429,151.357143 229.5,154.714286 229.142857,157.857143 L229.142857,157.857143 C227.357143,176.214286 219,194.071429 205,208.142857 C173.428571,239.642857 121.142857,240.142857 89.5714286,208.571429 L89.4285714,209.142857 C123.357143,253.214286 184.857143,283.785714 225.5,276.714286 C235.071429,270.785714 244.071429,263.785714 252.357143,255.5 C279.428571,228.428571 293.714286,193.428571 295.357143,157.928571 L295.5,157.928571 C295.571429,156.428571 295.571429,154.928571 295.571429,153.428571 C295.571429,153 295.571429,152.642857 295.571429,152.285714 Z" fill="url(#linearGradient-1)"></path>
+                <path d="M225.5,276.642857 C184.857143,283.714286 123.357143,253.142857 89.4285714,209.071429 C84.1428571,202.214286 79.5,195.071429 75.7857143,187.642857 C69.5,175.357143 66.7857143,163.785714 66.2857143,153.357143 C66.2857143,152.928571 66.2142857,152.428571 66.2142857,152 C66.1428571,148.642857 66.2142857,145.285714 66.5714286,142.142857 L66.5714286,142.142857 C68.3571429,123.785714 76.7142857,105.928571 90.7142857,91.8571429 C122.285714,60.3571429 174.571429,59.8571429 206.142857,91.4285714 L206.285714,90.8571429 C172.357143,46.8571429 110.857143,16.2857143 70.2142857,23.3571429 C60.6428571,29.2857143 51.6428571,36.2857143 43.3571429,44.5714286 C16.2857143,71.6428571 2,106.642857 0.357142857,142.142857 L0.214285714,142.142857 C0.142857143,143.642857 0.142857143,145.142857 0.142857143,146.642857 C0.142857143,147 0.142857143,147.357143 0.142857143,147.714286 C0.142857143,147.785714 0.142857143,147.857143 0.142857143,147.928571 C0.142857143,149.571429 0.142857143,151.285714 0.214285714,153.071429 C0.214285714,153.785714 0.214285714,154.571429 0.285714286,155.285714 L0.357142857,155.285714 C1.92857143,194.071429 15.7142857,249.285714 85.1428571,284.857143 C159.857143,323.142857 225.714286,276.714286 225.714286,276.714286 C225.571429,276.642857 225.571429,276.642857 225.5,276.642857 Z" fill="url(#linearGradient-2)"></path>
+            </g>
+        </g>
+    </g>
+</svg>
\ No newline at end of file
diff --git a/Segmentation/data/preferences.json b/Segmentation/data/preferences.json
new file mode 100644
index 0000000000000000000000000000000000000000..e93b302b8cdf47f615f644137bc520d053ce7f47
--- /dev/null
+++ b/Segmentation/data/preferences.json
@@ -0,0 +1,36 @@
+[
+    {
+        "type": "List",
+        "key": "visible",
+        "title": "Visibility",
+        "summary": "select what you want to see",
+        "defaultValue": "all",
+        "scope": "plugin,Segmentation",
+        "entryValues": [
+            "all",
+            "mask",
+            "box"
+        ],
+        "entries": [
+            "all",
+            "mask",
+            "box"
+        ]
+    },
+    {
+        "type": "List",
+        "key": "videostream",
+        "title": "Video stream",
+        "summary": "Select stream",
+        "defaultValue": "0",
+        "scope": "plugin",
+        "entryValues": [
+            "0",
+            "1"
+        ],
+        "entries": [
+            "sent",
+            "received"
+        ]
+    }
+]
\ No newline at end of file
diff --git a/Segmentation/main.cpp b/Segmentation/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0637edb477681b5bffa57b7872d438ee5d9fe903
--- /dev/null
+++ b/Segmentation/main.cpp
@@ -0,0 +1,71 @@
+/**
+ *  Copyright (C) 2020-2021 Savoir-faire Linux Inc.
+ *
+ *  Author: Aline Gondim Santos <aline.gondimsantos@savoirfairelinux.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 3 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
+ */
+
+#include <iostream>
+#include <string.h>
+#include <thread>
+#include <memory>
+#include <plugin/jamiplugin.h>
+
+#include "SegmentationMediaHandler.h"
+
+#ifdef WIN32
+#define EXPORT_PLUGIN __declspec(dllexport)
+#else
+#define EXPORT_PLUGIN
+#endif
+
+#define Segmentation_VERSION_MAJOR 1
+#define Segmentation_VERSION_MINOR 0
+#define Segmentation_VERSION_PATCH 1
+
+extern "C" {
+void
+pluginExit(void)
+{}
+
+EXPORT_PLUGIN JAMI_PluginExitFunc
+JAMI_dynPluginInit(const JAMI_PluginAPI* api)
+{
+    std::cout << "******************" << std::endl;
+    std::cout << "**  Segmentation  **" << std::endl;
+    std::cout << "******************" << std::endl << std::endl;
+    std::cout << "Version " << Segmentation_VERSION_MAJOR << "." << Segmentation_VERSION_MINOR << "."
+              << Segmentation_VERSION_PATCH << std::endl;
+
+    // Check that the plugin API is available
+    if (api) {
+        std::map<std::string, std::string> preferences;
+        api->invokeService(api, "getPluginPreferences", &preferences);
+        std::string dataPath;
+        api->invokeService(api, "getPluginDataPath", &dataPath);
+
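+        // On success, manageComponent takes ownership of the handler,
+        // hence the release() on the unique_ptr below.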
+        auto fmpSegmentationMediaHandler
+            = std::make_unique<jami::SegmentationMediaHandler>(std::move(preferences),
+                                                             std::move(dataPath));
+        if (api->manageComponent(api,
+                                 "CallMediaHandlerManager",
+                                 fmpSegmentationMediaHandler.release())) {
+            return nullptr;
+        }
+    }
+    return pluginExit;
+}
+}
diff --git a/Segmentation/manifest.json b/Segmentation/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..3bdbcbc7f8fd34e8126a0e2d22e5d541b7915fbb
--- /dev/null
+++ b/Segmentation/manifest.json
@@ -0,0 +1,8 @@
+{
+    "id" : "Segmentation",
+    "name": "Segmentation",
+    "description": "Segmentation run yolo model on video and show the Segmentation result",
+    "version": "1.0.0",
+    "iconPath" : "icon.svg",
+    "backgroundPath" : "background.jpg"
+}
\ No newline at end of file
diff --git a/Segmentation/modelSRC/yolo11n-seg.onnx b/Segmentation/modelSRC/yolo11n-seg.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..a90efd01c4359a04236e27d1b456f8a09534331d
Binary files /dev/null and b/Segmentation/modelSRC/yolo11n-seg.onnx differ
diff --git a/Segmentation/package.json b/Segmentation/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..3ec47b72672dcca9baa43af33fac19633259e0c7
--- /dev/null
+++ b/Segmentation/package.json
@@ -0,0 +1,20 @@
+{
+    "name": "Segmentation",
+    "version": "1.0.0",
+    "extractLibs": false,
+    "deps": [
+        "fmt",
+        "ffmpeg",
+        "opencv"
+    ],
+    "defines": [],
+    "custom_scripts": {
+        "pre_build": [
+            "mkdir msvc"
+        ],
+        "build": [
+            "cmake --build ./msvc --config Release"
+        ],
+        "post_build": []
+    }
+}
diff --git a/build-plugin.py b/build-plugin.py
index e4d932d514362fb4749ad9ae4227cca417fc800c..f9d8275b4fb847b18e8588ea3951ea4c7a3d00fb 100755
--- a/build-plugin.py
+++ b/build-plugin.py
@@ -36,7 +36,7 @@ IOS_DISTRIBUTION_NAME = "ios"
 OSX_DISTRIBUTION_NAME = "osx"
 ANDROID_DISTRIBUTION_NAME = "android"
 WIN32_DISTRIBUTION_NAME = "win32"
-UNIX_DISTRIBUTION_NAME = "unix"
+UBUNTU_DISTRIBUTION_NAME = "ubuntu"
 
 def parse():
     parser = argparse.ArgumentParser(description='Builds Plugins projects')
@@ -75,7 +75,7 @@ def validate_args(parsed_args):
 
     # Filter unsupported distributions.
     supported_distros = [
-        ANDROID_DISTRIBUTION_NAME, UNIX_DISTRIBUTION_NAME,
+        ANDROID_DISTRIBUTION_NAME, UBUNTU_DISTRIBUTION_NAME,
         WIN32_DISTRIBUTION_NAME, OSX_DISTRIBUTION_NAME
     ]
 
@@ -119,7 +119,7 @@ def choose_distribution():
     return 'Unknown'
 
 
-def buildPlugin(pluginPath, distribution, arch=None):
+def buildPlugin(pluginPath):
     # Change the current working directory to pluginPath
     os.chdir(pluginPath)
 
@@ -131,12 +131,7 @@ def buildPlugin(pluginPath, distribution, arch=None):
     os.chdir('build-local')
 
     # Prepare build-local
-    if distribution == ANDROID_DISTRIBUTION_NAME:
-        abi=os.environ["ANDROID_ABI"]
-        ndk=os.environ["ANDROID_NDK"]
-        os.system(f'cmake .. -DCMAKE_TOOLCHAIN_FILE={ndk}/build/cmake/android.toolchain.cmake -DANDROID_PLATFORM=24 -DANDROID_ABI={abi}')
-    else:
-        os.system(f'cmake ..')
+    os.system('cmake ..')
 
     # Run the cmake build command
     os.system('cmake --build .')
@@ -147,7 +142,7 @@ def main():
 
     for i, plugin in enumerate(args.projects):
         os.chdir(currentDir + "/" + plugin)
-        buildPlugin(currentDir + "/" + plugin, args.distribution)
+        buildPlugin(currentDir + "/" + plugin)
 
 if __name__ == "__main__":
     main()