diff --git a/doc/trace/tracepoints.rst b/doc/trace/tracepoints.rst
index 9d3066ab8ff93ce1c8f2aa7719f8a653cdb8af12..6b170f972bbd951825b750483a3c1a2131828832 100644
--- a/doc/trace/tracepoints.rst
+++ b/doc/trace/tracepoints.rst
@@ -33,13 +33,13 @@ Enabling tracepoints
 --------------------
 
 To enable tracepoints in Jami, you should configure the project using the
-``--enable-tracepoint`` feature.  You also need ``lttng-ust >= 2.13.0`` and
+``--enable-tracepoints`` feature.  You also need ``lttng-ust >= 2.13.0`` and
 ``liburcu >= 0.13.0``.
 
 How to define a tracepoint
 --------------------------
 
-To define a new tracepoints, you need to add the definition in src/jami/tracepoint.h
+To define a new tracepoint, you need to add the definition in src/jami/tracepoint-def.h
 
 It's recommended to use the ``LTTNG_UST_TRACEPOINT_EVENT`` macro and avoid using
 the others except if you know what you're doing.
@@ -56,7 +56,7 @@ NOTE!  As the documentation of LTTng says, the concatenation of provider name +
 tracepoint name must **not exceed 254 characters** or you will get bite.
 
 For example, here's the definition of a tracepoint for the scheduled executor in
-src/jami/tracepoint.h::
+src/jami/tracepoint-def.h::
 
   LTTNG_UST_TRACEPOINT_EVENT(
     jami,
diff --git a/src/conference.cpp b/src/conference.cpp
index 4421fef686269f4299b50a49e6249e54dcfc55be..10041a18feb5ea1a4e8ecfdaff8ab1cf4cc11d3e 100644
--- a/src/conference.cpp
+++ b/src/conference.cpp
@@ -30,6 +30,7 @@
 #include "sip/siptransport.h"
 
 #include "client/videomanager.h"
+#include "tracepoint.h"
 #ifdef ENABLE_VIDEO
 #include "call.h"
 #include "video/video_input.h"
@@ -255,6 +256,7 @@ Conference::Conference(const std::shared_ptr<Account>& account)
 
     parser_.onVoiceActivity(
         [&](const auto& streamId, bool state) { setVoiceActivity(streamId, state); });
+    jami_tracepoint(conference_begin, id_.c_str());
 }
 
 Conference::~Conference()
@@ -306,6 +308,7 @@ Conference::~Conference()
         confAVStreams.clear();
     }
 #endif // ENABLE_PLUGIN
+    jami_tracepoint(conference_end, id_.c_str());
 }
 
 Conference::State
@@ -655,6 +658,10 @@ Conference::addParticipant(const std::string& participant_id)
 {
     JAMI_DBG("Adding call %s to conference %s", participant_id.c_str(), id_.c_str());
 
+    jami_tracepoint(conference_add_participant,
+                    id_.c_str(),
+                    participant_id.c_str());
+
     {
         std::lock_guard<std::mutex> lk(participantsMtx_);
         if (!participants_.insert(participant_id).second)
diff --git a/src/jami/tracepoint-def.h b/src/jami/tracepoint-def.h
index e6505756bbee0585d49e502d3f61bd95e8fe5465..cb812f6eee1dab1c7c6b7f90667f2639c10749ae 100644
--- a/src/jami/tracepoint-def.h
+++ b/src/jami/tracepoint-def.h
@@ -143,6 +143,92 @@ LTTNG_UST_TRACEPOINT_EVENT(
     )
 )
 
+LTTNG_UST_TRACEPOINT_EVENT(
+    jami,
+    audio_input_read_from_device_end,
+    LTTNG_UST_TP_ARGS(
+        const char*, id
+    ),
+    LTTNG_UST_TP_FIELDS(
+            lttng_ust_field_integer(uint64_t, id, strtoull(id, NULL, 16))
+    )
+)
+
+LTTNG_UST_TRACEPOINT_EVENT(
+    jami,
+    audio_layer_put_recorded_end,
+    LTTNG_UST_TP_ARGS(
+    ),
+    LTTNG_UST_TP_FIELDS(
+    )
+)
+
+LTTNG_UST_TRACEPOINT_EVENT(
+    jami,
+    audio_layer_get_to_play_end,
+    LTTNG_UST_TP_ARGS(
+    ),
+    LTTNG_UST_TP_FIELDS(
+    )
+)
+
+LTTNG_UST_TRACEPOINT_EVENT(
+    jami,
+    call_start,
+    LTTNG_UST_TP_ARGS(
+            const char*, id
+    ),
+    LTTNG_UST_TP_FIELDS(
+            lttng_ust_field_integer(uint64_t, id, strtoull(id, NULL, 16))
+    )
+)
+
+LTTNG_UST_TRACEPOINT_EVENT(
+    jami,
+    call_end,
+    LTTNG_UST_TP_ARGS(
+            const char*, id
+    ),
+    LTTNG_UST_TP_FIELDS(
+            lttng_ust_field_integer(uint64_t, id, strtoull(id, NULL, 16))
+    )
+)
+
+LTTNG_UST_TRACEPOINT_EVENT(
+    jami,
+    conference_begin,
+    LTTNG_UST_TP_ARGS(
+            const char*, id
+    ),
+    LTTNG_UST_TP_FIELDS(
+            lttng_ust_field_integer(uint64_t, id, strtoull(id, NULL, 16))
+    )
+)
+
+LTTNG_UST_TRACEPOINT_EVENT(
+    jami,
+    conference_end,
+    LTTNG_UST_TP_ARGS(
+            const char*, id
+    ),
+    LTTNG_UST_TP_FIELDS(
+            lttng_ust_field_integer(uint64_t, id, strtoull(id, NULL, 16))
+    )
+)
+
+LTTNG_UST_TRACEPOINT_EVENT(
+    jami,
+    conference_add_participant,
+    LTTNG_UST_TP_ARGS(
+            const char*, conference_id,
+            const char*, participant_id
+    ),
+    LTTNG_UST_TP_FIELDS(
+            lttng_ust_field_integer(uint64_t, id, strtoull(conference_id, NULL, 16))
+            lttng_ust_field_integer(uint64_t, participant_id, strtoull(participant_id, NULL, 16))
+    )
+)
+
 #endif /* TRACEPOINT_DEF_H */
 
 #include <lttng/tracepoint-event.h>
diff --git a/src/media/audio/audio_input.cpp b/src/media/audio/audio_input.cpp
index 03027026dac2a87b306da3f3e29eec7924897a48..32dd5a203c8a080ce98bd90d83babaced7c27a87 100644
--- a/src/media/audio/audio_input.cpp
+++ b/src/media/audio/audio_input.cpp
@@ -29,6 +29,7 @@
 #include "ringbuffer.h"
 #include "ringbufferpool.h"
 #include "smartools.h"
+#include "tracepoint.h"
 
 #include <future>
 #include <memory>
@@ -136,6 +137,8 @@ AudioInput::readFromDevice()
     if (bufferPool.getInternalAudioFormat() != format_)
         audioFrame = resampler_->resample(std::move(audioFrame), format_);
     resizer_->enqueue(std::move(audioFrame));
+
+    jami_tracepoint(audio_input_read_from_device_end, id_.c_str());
 }
 
 void
diff --git a/src/media/audio/audiolayer.cpp b/src/media/audio/audiolayer.cpp
index 29382c04d2536dbfad26bb87e5475f15cd488540..f876c6b7854174665e43464ed300a8d53309f2fe 100644
--- a/src/media/audio/audiolayer.cpp
+++ b/src/media/audio/audiolayer.cpp
@@ -29,6 +29,7 @@
 #include "client/ring_signal.h"
 
 #include "audio-processing/null_audio_processor.h"
+#include "tracepoint.h"
 #if HAVE_WEBRTC_AP
 #include "audio-processing/webrtc.h"
 #endif
@@ -132,9 +133,9 @@ static inline bool
 shouldUseAudioProcessorEchoCancel(bool hasNativeAEC, const std::string& echoCancellerPref)
 {
     return
-        // user doesn't care which and there is a system AEC
+        // user doesn't care which and there is no system AEC
         (echoCancellerPref == "auto" && !hasNativeAEC)
-        // use specifically wants audioProcessor
+        // user specifically wants audioProcessor
         or (echoCancellerPref == "audioProcessor");
 }
 
@@ -158,6 +159,8 @@ AudioLayer::createAudioProcessor()
     auto nb_channels = std::max(audioFormat_.nb_channels, audioInputFormat_.nb_channels);
     auto sample_rate = std::max(audioFormat_.sample_rate, audioInputFormat_.sample_rate);
 
+    // round sample_rate up to the next multiple of 16000 (exact multiples are bumped up too — confirm intended), then clamp.
+    sample_rate = 16000u * ((sample_rate / 16000u) + 1u);
     sample_rate = std::clamp(sample_rate, 16000u, 48000u);
 
     AudioFormat formatForProcessor {sample_rate, nb_channels};
@@ -318,6 +321,8 @@ AudioLayer::getToPlay(AudioFormat format, size_t writableSamples)
             break;
     }
 
+    jami_tracepoint(audio_layer_get_to_play_end);
+
     return playbackBuf;
 }
 
@@ -334,6 +339,8 @@ AudioLayer::putRecorded(std::shared_ptr<AudioFrame>&& frame)
     } else {
         mainRingBuffer_->put(std::move(frame));
     }
+
+    jami_tracepoint(audio_layer_put_recorded_end);
 }
 
 } // namespace jami
diff --git a/src/sip/sipcall.cpp b/src/sip/sipcall.cpp
index d2025c6bd84a10992bfa99a320259a4686792ef8..32fe89cdd58bd802cf530ed48b76ba1791793f2c 100644
--- a/src/sip/sipcall.cpp
+++ b/src/sip/sipcall.cpp
@@ -64,6 +64,8 @@
 #include <opendht/thread_pool.h>
 #include <fmt/ranges.h>
 
+#include "tracepoint.h"
+
 namespace jami {
 
 using sip_utils::CONST_PJ_STR;
@@ -106,6 +108,8 @@ SIPCall::SIPCall(const std::shared_ptr<SIPAccountBase>& account,
     , enableIce_(account->isIceForMediaEnabled())
     , srtpEnabled_(account->isSrtpEnabled())
 {
+    jami_tracepoint(call_start, callId.c_str());
+
     if (account->getUPnPActive())
         upnp_.reset(new upnp::Controller());
 
diff --git a/tools/trace/audio/.gitignore b/tools/trace/audio/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..019d491803716a6dd31a9e66003add5bb21e900d
--- /dev/null
+++ b/tools/trace/audio/.gitignore
@@ -0,0 +1 @@
+out.png
diff --git a/tools/trace/audio/audio.py b/tools/trace/audio/audio.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8a04fa4202c8ddab34240f1fdafc869f74460ed
--- /dev/null
+++ b/tools/trace/audio/audio.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python3
+"""
+Quick-and-dirty script to plot lttng trace data of the jami audio system
+
+Depends on `python3-bt2` package:
+https://babeltrace.org/docs/v2.0/python/bt2/
+
+If using a virtual environment, you need to link the system babeltrace2 python3
+  package to your virtual environment, something like:
+`ln -s /usr/lib/python3/dist-packages/bt2 "$VIRTUAL_ENV"/lib/python3*/site-packages/`
+
+"""
+
+import re
+import dataclasses
+from typing import Dict, List
+import sys
+
+import bt2
+import matplotlib.pyplot as plt
+
+
+@dataclasses.dataclass
+class Intervals:
+    """Keep track of timestamps and the intervals between them"""
+
+    intervals: List[int] = dataclasses.field(default_factory=list)
+    times: List[int] = dataclasses.field(default_factory=list)
+    last_timestamp: int = 0
+
+    def add_event(self, timestamp: int):
+        """Add a new timestamp and calculate the interval"""
+        if self.last_timestamp == 0:
+            self.intervals.append(0)
+        else:
+            self.intervals.append(timestamp - self.last_timestamp)
+
+        self.times.append(timestamp)
+
+        self.last_timestamp = timestamp
+
+    def filter(self, earliest: int, latest: int):
+        """Filter out entries that are not in between `earliest` and `latest` timestamps"""
+        new_intervals = []
+        new_times = []
+        for i, val in enumerate(self.times):
+            if val > earliest and val < latest:
+                new_times.append(val)
+                new_intervals.append(self.intervals[i])
+
+        self.times = new_times
+        self.intervals = new_intervals
+
+
+def analyze(filename: str):
+    """Process trace file"""
+    # running_averages: Dict[str, RunningAverage] = {}
+    intervals: Dict[str, Intervals] = {}
+    desired_event_regex = re.compile("jami:(call|audio|conference)")
+
+    for message in bt2.TraceCollectionMessageIterator(filename):
+        # pylint: disable=protected-access
+        if not isinstance(message, bt2._EventMessageConst):
+            continue
+        ns_from_origin = message.default_clock_snapshot.ns_from_origin
+        name = message.event.name
+
+        if not desired_event_regex.match(name):
+            continue
+
+        if "id" in message.event.payload_field and name != "jami:conference_add_participant":
+            name = f"{name}({str(message.event.payload_field['id'])})"
+
+        if name not in intervals:
+            # running_averages[name] = RunningAverage()
+            intervals[name] = Intervals()
+
+        # running_averages[name].add_val(ns_from_origin)
+        intervals[name].add_event(ns_from_origin)
+
+        # print(f"{ns_from_origin / 10e9:.9f}: {name}")
+
+    for key, val in intervals.items():
+        print(
+            f"event: {key}, average: {(sum(val.intervals) / len(val.intervals)) / 1e9:.9f}"
+        )
+
+    earliest_recorded = intervals["jami:audio_layer_put_recorded_end"].times[0]
+    latest_recorded = intervals["jami:audio_layer_put_recorded_end"].times[-1]
+
+    earliest_recorded += int(1e9)  # start graph 1 second later
+
+    print(
+        f"earliest: {earliest_recorded / 1e9 :.9f}, latest: {latest_recorded / 1e9:.9f}"
+    )
+
+    for _, interval_val in intervals.items():
+        interval_val.filter(earliest_recorded, latest_recorded)
+
+    # convert from unix timestamp to seconds since start
+    for _, interval_val in intervals.items():
+        for i, _ in enumerate(interval_val.times):
+            interval_val.times[i] -= earliest_recorded
+
+    plot(intervals, filename)
+
+
+def plot(intervals: Dict[str, Intervals], filename: str):
+    """Plot audio event data"""
+    add_part = intervals["jami:conference_add_participant"]
+
+    fig, axis = plt.subplots(figsize=(20, 9))
+
+    axis.set_xlabel("seconds since start")
+    axis.set_ylabel("interval between events (seconds)")
+    axis.set_title("jami:audio*")
+
+    # only plot audio events
+    events_to_plot_regex = re.compile("jami:audio")
+
+    # plot each desired interval
+    for interval_key, interval_val in intervals.items():
+        if not events_to_plot_regex.match(interval_key):
+            continue
+
+        axis.plot(
+            [i / 1e9 for i in interval_val.times],
+            [i / 1e9 for i in interval_val.intervals],
+            label=interval_key,
+        )
+
+    # generate vertical line for each time a new participant is added to conference
+    for time in add_part.times:
+        axis.axvline(
+            time / 1e9,
+            color="black",
+            ls="--",
+        )
+
+    axis.legend()
+
+    # save it as out.png (use a distinct name: don't shadow the `filename` parameter)
+    out_filename = "out.png"
+    fig.savefig(out_filename, dpi=100)
+    print(f"png saved as {out_filename}")
+
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        print("usage: python3 audio.py <lttng trace directory>", file=sys.stderr)
+        sys.exit(-1)
+    analyze(sys.argv[1])
diff --git a/tools/trace/audio/lttng-setup.sh b/tools/trace/audio/lttng-setup.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c9ada94ea3be4033dcd6ec55b9b114d1e619a307
--- /dev/null
+++ b/tools/trace/audio/lttng-setup.sh
@@ -0,0 +1,65 @@
+#!/bin/sh
+
+# this script sets up an lttng session and enables triggers for call start and conference end
+# ideally, this script would be more configurable, right now it's hardcoded and hacky
+
+if lttng status | grep -q "Recording session"; then
+    echo "lttng session already active, exiting"
+    exit 1
+fi
+
+SESSION="jami"
+
+# Prepare a capture session
+
+lttng create "$SESSION"
+
+# Enable kernel and user channels
+
+# lttng enable-channel -k --subbuf-size 4096K kernel
+lttng enable-channel -u --subbuf-size 1024K user
+
+# Enable all kernel syscalls tracing
+# lttng enable-event --kernel --syscall --all --channel=kernel
+
+# Enable sched_* (scheduler) functions tracing
+# lttng enable-event --kernel --channel=kernel "$(lttng list --kernel  | grep sched_ | awk '{ print $1 }' | xargs  | sed -e 's/ /,/g')"
+
+# Enable net_* functions tracing
+# lttng enable-event --kernel --channel=kernel "$(lttng list --kernel  | grep net_ | awk '{ print $1 }' | xargs  | sed -e 's/ /,/g')"
+
+# Userspace tracing
+# Add support for tracef() traces
+# You need to link application with @-llttng-ust@
+# lttng enable-event --userspace 'lttng_ust_tracef:*' --channel=user
+
+# Add context for libc (including malloc, etc)
+export LD_PRELOAD="$LD_PRELOAD":liblttng-ust-libc-wrapper.so
+lttng enable-event --userspace  'lttng_ust_libc:*' --channel=user
+lttng add-context --userspace -t vtid -t procname
+
+# Add context for pthread (including mutexes)
+export LD_PRELOAD="$LD_PRELOAD":liblttng-ust-pthread-wrapper.so
+lttng enable-event --userspace 'lttng_ust_pthread:*' --channel=user
+
+# Add context for function profiling
+export LD_PRELOAD="$LD_PRELOAD":liblttng-ust-cyg-profile.so
+lttng enable-event --userspace --all --channel=user
+lttng add-context --userspace -t vpid -t vtid -t procname
+
+# loop over lines in our for-loop, since triggers might have spaces in their names
+OLDIFS="$IFS"
+IFS='
+'
+
+# remove all triggers
+for trigger in $(lttng list-triggers | sed -nr 's/- name: (.*)$/\1/p'); do
+    echo "removing trigger: $trigger"
+    lttng remove-trigger "$trigger"
+done
+
+IFS=$OLDIFS
+
+# add start and end trigger
+lttng add-trigger --name "jami call start" --condition=event-rule-matches --type=user --name='jami:call_start' --action=start-session "$SESSION"
+lttng add-trigger --name "jami conference end" --condition=event-rule-matches --type=user --name="jami:conference_end" --action=stop-session "$SESSION"