Commit 4bade6fa authored by Sébastien Blin's avatar Sébastien Blin

sip: negotiate both UDP and TCP for the control channel

NOTE: SIP over TCP is disabled for now on Windows, waiting for
TLS 1.3 support. To re-enable it, check the #ifdef _WIN32 in
ice_transport.cpp

Our pjsip version supports RFC 6544. With this patch, when
starting a call, the daemon uses two ICE sessions for the SIP
channel — one negotiating a UDP socket and the other a TCP socket —
and transmits both SDPs over the DHT.

If both negotiations succeed, TCP is preferred and will be used
to transmit SIP messages and the VCard. This should solve the
30-second timeout on bad networks.

Note that the media channel is still using UDP to transmit audio
and video.

MAJOR CHANGE: the SIP channel uses TLS on top of TCP, not DTLS,
so the transport is considered reliable.

There are also many changes in rfc6544.patch to link it to rfc6062.
The patch still needs to be cleaned up; see the TODO notes.

This also seems to fix the ICE shutdown at the end of the call
(after the idle timeout).

Change-Id: I01210da3abfcc448071268b4e1e38abdd58f9f05
Gitlab: #103
Gitlab: #108
parent 5fdb9649
......@@ -35,7 +35,6 @@ bash -c "%PATCH_CMD% %UNIXPATH%pjproject/fix_ioqueue_ipv6_sendto.patch"
bash -c "%PATCH_CMD% %UNIXPATH%pjproject/add_dtls_transport.patch"
bash -c "%PATCH_CMD% %UNIXPATH%pjproject/rfc6544.patch"
bash -c "%PATCH_CMD% %UNIXPATH%pjproject/ice_config.patch"
bash -c "%PATCH_CMD% %UNIXPATH%pjproject/win32_ice_tcp_temp_fix.patch"
%APPLY_CMD% %SRC%\pjproject\win32_vs_gnutls.patch
%APPLY_CMD% %SRC%\pjproject\win_config.patch
......
This diff is collapsed.
From 5f288fe0067f995b91ea87ba4ed19fd65b75ff31 Mon Sep 17 00:00:00 2001
From: Andreas Traczyk <andreas.traczyk@savoirfairelinux.com>
Date: Tue, 11 Jun 2019 16:47:06 -0400
Subject: [PATCH] fix for windows GetAdaptersAddresses
---
pjnath/src/pjnath/ice_strans.c | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/pjnath/src/pjnath/ice_strans.c b/pjnath/src/pjnath/ice_strans.c
index 6172172..33ac521 100644
--- a/pjnath/src/pjnath/ice_strans.c
+++ b/pjnath/src/pjnath/ice_strans.c
@@ -1645,9 +1645,7 @@ pj_ice_strans_sendto2(pj_ice_strans *ice_st, unsigned comp_id, const void *data,
dest_addr_len = dst_addr_len;
}
- pj_stun_sock_info stun_sock_info;
- pj_stun_sock_get_info(comp->stun[tp_idx].sock, &stun_sock_info);
- pj_bool_t add_header = stun_sock_info.conn_type != PJ_STUN_TP_UDP;
+ pj_bool_t add_header = comp->ice_st->cfg.stun_tp->conn_type == PJ_STUN_TP_TCP;
if (add_header) {
//TCP
/*
@@ -1864,9 +1862,7 @@ static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
if (comp->stun[tp_idx].sock) {
pj_ssize_t sent_size;
- pj_stun_sock_info stun_sock_info;
- pj_stun_sock_get_info(comp->stun[tp_idx].sock, &stun_sock_info);
- pj_bool_t add_header = stun_sock_info.conn_type != PJ_STUN_TP_UDP;
+ pj_bool_t add_header = comp->ice_st->cfg.stun_tp->conn_type == PJ_STUN_TP_TCP;
if (add_header) {
//TCP
/*
--
2.7.4
......@@ -67,14 +67,17 @@ public:
static constexpr uint16_t IPV4_HEADER_SIZE = 20; // Size in bytes of IPv4 packet header
static constexpr uint16_t UDP_HEADER_SIZE = 8; // Size in bytes of UDP header
IceSocketTransport(std::shared_ptr<IceTransport>& ice, int comp_id)
IceSocketTransport(std::shared_ptr<IceTransport>& ice, int comp_id, bool reliable = false)
: compId_ {comp_id}
, ice_ {ice} {}
, ice_ {ice}
, reliable_ {reliable} {}
bool isReliable() const override {
return false; // we consider that a ICE transport is never reliable (UDP support only)
return reliable_;
}
void shutdown() override;
bool isInitiator() const override;
int maxPayload() const override;
......@@ -94,6 +97,7 @@ public:
private:
const int compId_;
std::shared_ptr<IceTransport> ice_;
bool reliable_;
};
};
......@@ -26,7 +26,6 @@
#include "upnp/upnp_control.h"
#include <pjlib.h>
#include <msgpack.hpp>
#include <map>
#include <atomic>
......@@ -92,8 +91,6 @@ public:
MutexGuard lk{mutex_};
stream_.clear();
stream_ << data;
notified_ = true;
cv_.notify_one();
}
......@@ -107,18 +104,15 @@ public:
}
template <typename Duration> bool wait(Duration timeout) {
std::lock(apiMutex_, mutex_);
MutexGuard lk_api{apiMutex_, std::adopt_lock};
MutexLock lk{mutex_, std::adopt_lock};
MutexLock lk{mutex_};
auto a = cv_.wait_for(lk, timeout,
[this] { return stop_ or /*(data.size() != 0)*/ !stream_.eof(); });
[this] { return stop_ or !stream_.eof(); });
return a;
}
std::size_t read(char *output, std::size_t size) {
std::lock(apiMutex_, mutex_);
MutexGuard lk_api{apiMutex_, std::adopt_lock};
MutexLock lk{mutex_, std::adopt_lock};
MutexLock lk{mutex_};
if (stream_.eof()) return 0;
cv_.wait(lk, [&, this] {
if (stop_)
return true;
......@@ -129,29 +123,17 @@ public:
}
void stop() noexcept {
{
MutexGuard lk{mutex_};
if (stop_)
return;
stop_ = true;
}
stop_ = true;
cv_.notify_all();
// Make sure that no thread is blocked into read() or wait() methods
MutexGuard lk_api{apiMutex_};
}
private:
PeerChannel(const PeerChannel &o) = delete;
PeerChannel &operator=(const PeerChannel &o) = delete;
std::mutex apiMutex_{};
std::mutex mutex_{};
std::condition_variable cv_{};
std::stringstream stream_{};
bool stop_{false};
bool notified_{false};
std::vector<char> data;
friend void operator<<(std::vector<char> &, PeerChannel &);
};
......@@ -213,6 +195,8 @@ public:
pj_ice_strans_cfg config_;
std::string last_errmsg_;
std::atomic_bool is_stopped_ {false};
struct Packet {
Packet(void *pkt, pj_size_t size)
: data{reinterpret_cast<char *>(pkt), reinterpret_cast<char *>(pkt) + size} { }
......@@ -220,7 +204,6 @@ public:
};
std::vector<PeerChannel> peerChannels_;
std::mutex apiMutex_;
struct ComponentIO {
std::mutex mutex;
......@@ -869,13 +852,11 @@ IceTransport::Impl::onReceiveData(unsigned comp_id, void *pkt, pj_size_t size)
if (on_recv_cb_) {
on_recv_cb_();
}
if (io.cb) {
io.cb((uint8_t*)pkt, size);
} else {
MutexLock lk{apiMutex_};
auto &channel = peerChannels_.at(comp_id);
lk.unlock();
channel << std::string(reinterpret_cast<const char *>(pkt), size);
peerChannels_.at(comp_id-1) << std::string(reinterpret_cast<const char *>(pkt), size);
}
}
......@@ -907,6 +888,13 @@ IceTransport::isRunning() const
return pimpl_->_isRunning();
}
bool
IceTransport::isStopped() const
{
std::lock_guard<std::mutex> lk {pimpl_->iceMutex_};
return pimpl_->is_stopped_;
}
bool
IceTransport::isFailed() const
{
......@@ -950,12 +938,14 @@ IceTransport::start(const Attribute& rem_attrs, const std::vector<IceCandidate>&
{
if (not isInitialized()) {
JAMI_ERR("[ice:%p] not initialized transport", this);
pimpl_->is_stopped_ = true;
return false;
}
// pj_ice_strans_start_ice crashes if remote candidates array is empty
if (rem_candidates.empty()) {
JAMI_ERR("[ice:%p] start failed: no remote candidates", this);
pimpl_->is_stopped_ = true;
return false;
}
......@@ -969,62 +959,49 @@ IceTransport::start(const Attribute& rem_attrs, const std::vector<IceCandidate>&
if (status != PJ_SUCCESS) {
pimpl_->last_errmsg_ = sip_utils::sip_strerror(status);
JAMI_ERR("[ice:%p] start failed: %s", this, pimpl_->last_errmsg_.c_str());
pimpl_->is_stopped_ = true;
return false;
}
return true;
}
bool
IceTransport::start(const std::vector<uint8_t>& rem_data)
IceTransport::start(const SDP& sdp)
{
std::string rem_ufrag;
std::string rem_pwd;
std::vector<IceCandidate> rem_candidates;
auto data = reinterpret_cast<const char*>(rem_data.data());
auto size = rem_data.size();
try {
std::size_t offset = 0;
auto result = msgpack::unpack(data, size, offset);
auto version = result.get().as<uint8_t>();
JAMI_DBG("[ice:%p] rx msg v%u", this, version);
if (version == 1) {
result = msgpack::unpack(data, size, offset);
std::tie(rem_ufrag, rem_pwd) = result.get().as<std::pair<std::string, std::string>>();
result = msgpack::unpack(data, size, offset);
auto comp_cnt = result.get().as<uint8_t>();
while (comp_cnt-- > 0) {
result = msgpack::unpack(data, size, offset);
IceCandidate cand;
for (const auto& line : result.get().as<std::vector<std::string>>()) {
if (getCandidateFromSDP(line, cand))
rem_candidates.emplace_back(cand);
}
}
} else {
JAMI_ERR("[ice:%p] invalid msg version", this);
return false;
}
} catch (const msgpack::unpack_error& e) {
JAMI_ERR("[ice:%p] remote msg unpack error: %s", this, e.what());
if (not isInitialized()) {
JAMI_ERR("[ice:%p] not initialized transport", this);
pimpl_->is_stopped_ = true;
return false;
}
if (rem_ufrag.empty() or rem_pwd.empty() or rem_candidates.empty()) {
JAMI_ERR("[ice:%p] invalid remote attributes", this);
JAMI_DBG("[ice:%p] negotiation starting (%zu remote candidates)", this, sdp.candidates.size());
pj_str_t ufrag, pwd;
std::vector<IceCandidate> rem_candidates;
rem_candidates.reserve(sdp.candidates.size());
IceCandidate cand;
for (const auto &line : sdp.candidates) {
if (getCandidateFromSDP(line, cand))
rem_candidates.emplace_back(cand);
}
auto status = pj_ice_strans_start_ice(pimpl_->icest_.get(),
pj_strset(&ufrag, (char*)sdp.ufrag.c_str(), sdp.ufrag.size()),
pj_strset(&pwd, (char*)sdp.pwd.c_str(), sdp.pwd.size()),
rem_candidates.size(),
rem_candidates.data());
if (status != PJ_SUCCESS) {
pimpl_->last_errmsg_ = sip_utils::sip_strerror(status);
JAMI_ERR("[ice:%p] start failed: %s", this, pimpl_->last_errmsg_.c_str());
pimpl_->is_stopped_ = true;
return false;
}
if (pimpl_->onlyIPv4Private_)
JAMI_WARN("[ice:%p] no public IPv4 found, your connection may fail!", this);
return start({rem_ufrag, rem_pwd}, rem_candidates);
return true;
}
bool
IceTransport::stop()
{
pimpl_->is_stopped_ = true;
if (isStarted()) {
auto status = pj_ice_strans_stop_ice(pimpl_->icest_.get());
if (status != PJ_SUCCESS) {
......@@ -1036,6 +1013,14 @@ IceTransport::stop()
return true;
}
void
IceTransport::cancelOperations()
{
    // Wake up and release every reader currently blocked in a component's
    // peer channel (read()/wait()), so the transport can be torn down
    // without waiting for I/O timeouts.
    for (auto& channel : pimpl_->peerChannels_)
        channel.stop();
}
IpAddr
IceTransport::getLocalAddress(unsigned comp_id) const
{
......@@ -1139,20 +1124,29 @@ IceTransport::registerPublicIP(unsigned compId, const IpAddr& publicIP)
}
std::vector<uint8_t>
IceTransport::packIceMsg() const
IceTransport::packIceMsg(uint8_t version) const
{
static constexpr uint8_t ICE_MSG_VERSION = 1;
if (not isInitialized())
return {};
std::stringstream ss;
msgpack::pack(ss, ICE_MSG_VERSION);
msgpack::pack(ss, std::make_pair(pimpl_->local_ufrag_, pimpl_->local_pwd_));
msgpack::pack(ss, static_cast<uint8_t>(pimpl_->component_count_));
for (unsigned i=0; i<pimpl_->component_count_; i++)
msgpack::pack(ss, getLocalCandidates(i));
if (version == 1) {
msgpack::pack(ss, version);
msgpack::pack(ss, std::make_pair(pimpl_->local_ufrag_, pimpl_->local_pwd_));
msgpack::pack(ss, static_cast<uint8_t>(pimpl_->component_count_));
for (unsigned i=0; i<pimpl_->component_count_; i++)
msgpack::pack(ss, getLocalCandidates(i));
} else {
SDP sdp;
sdp.ufrag = pimpl_->local_ufrag_;
sdp.pwd = pimpl_->local_pwd_;
for (unsigned i = 0; i < pimpl_->component_count_; i++) {
auto candidates = getLocalCandidates(i);
sdp.candidates.reserve(sdp.candidates.size() + candidates.size());
sdp.candidates.insert(sdp.candidates.end(), candidates.begin(), candidates.end());
}
msgpack::pack(ss, sdp);
}
auto str(ss.str());
return std::vector<uint8_t>(str.begin(), str.end());
}
......@@ -1274,10 +1268,7 @@ IceTransport::recv(int comp_id, unsigned char* buf, size_t len)
ssize_t
IceTransport::recvfrom(int comp_id, char *buf, size_t len) {
MutexLock lk{pimpl_->apiMutex_};
auto &channel = pimpl_->peerChannels_.at(comp_id);
lk.unlock();
return channel.read(buf, len);
return pimpl_->peerChannels_.at(comp_id).read(buf, len);
}
void
......@@ -1357,19 +1348,60 @@ IceTransport::waitForNegotiation(unsigned timeout)
ssize_t
IceTransport::isDataAvailable(int comp_id)
{
MutexLock lk{pimpl_->apiMutex_};
auto &channel = pimpl_->peerChannels_.at(comp_id);
lk.unlock();
return channel.isDataAvailable();
return pimpl_->peerChannels_.at(comp_id).isDataAvailable();
}
ssize_t
IceTransport::waitForData(int comp_id, unsigned int timeout, std::error_code& ec)
{
MutexLock lk{pimpl_->apiMutex_};
auto &channel = pimpl_->peerChannels_.at(comp_id);
lk.unlock();
return channel.wait(std::chrono::milliseconds(timeout));
return pimpl_->peerChannels_.at(comp_id).wait(std::chrono::milliseconds(timeout));
}
// Parse a serialized ICE message into a list of SDP objects.
// Two wire formats are accepted:
//  - legacy "version 1": a leading integer version, then a (ufrag, pwd)
//    pair, then a component count followed by one candidate list per
//    component;
//  - current format: each entry is a msgpack-encoded SDP struct.
// @param msg The raw payload received from the peer (e.g. over the DHT)
// @return the list of decoded SDPs; decoding stops at the first unpack error
std::vector<SDP>
IceTransport::parseSDPList(const std::vector<uint8_t>& msg)
{
std::vector<SDP> sdp_list;

// Feed the whole payload to the streaming unpacker.
msgpack::unpacker pac;
pac.reserve_buffer(msg.size());
memcpy(pac.buffer(), msg.data(), msg.size());
pac.buffer_consumed(msg.size());
msgpack::object_handle oh;
while (auto result = pac.next(oh)) {
try {
SDP sdp;
if (oh.get().type == msgpack::type::POSITIVE_INTEGER) {
// Version 1: the leading integer is the version tag.
result = pac.next(oh);
if (!result) break;
std::tie(sdp.ufrag, sdp.pwd) = oh.get().as<std::pair<std::string, std::string>>();

result = pac.next(oh);
if (!result) break;
auto comp_cnt = oh.get().as<uint8_t>();
while (comp_cnt-- > 0) {
result = pac.next(oh);
// NOTE(review): a truncated stream here only exits the inner
// loop, so a partially-filled SDP is still appended below —
// confirm this is intended.
if (!result) break;
auto candidates = oh.get().as<std::vector<std::string>>();
sdp.candidates.reserve(sdp.candidates.size() + candidates.size());
sdp.candidates.insert(sdp.candidates.end(), candidates.begin(), candidates.end());
}
} else {
// Current format: the object is a full SDP struct.
oh.get().convert(sdp);
}
sdp_list.emplace_back(sdp);
} catch (const msgpack::unpack_error &e) {
// Malformed entry: keep whatever was decoded so far.
break;
}
}

return sdp_list;
}
bool
IceTransport::isTCPEnabled()
{
    // True iff this transport was configured to negotiate TCP candidates.
    const auto& cfg = pimpl_->config_;
    return cfg.protocol == PJ_ICE_TP_TCP;
}
//==============================================================================
......@@ -1431,6 +1463,12 @@ IceSocketTransport::isInitiator() const
return ice_->isInitiator();
}
// Shut the transport down by unblocking every thread waiting on the
// underlying ICE peer channels, so reads/waits return promptly instead of
// running until their timeout.
void
IceSocketTransport::shutdown()
{
ice_->cancelOperations();
}
int
IceSocketTransport::maxPayload() const
{
......@@ -1442,6 +1480,7 @@ IceSocketTransport::maxPayload() const
int
IceSocketTransport::waitForData(unsigned ms_timeout, std::error_code& ec) const
{
    // A transport that is no longer running can never produce data:
    // fail fast instead of blocking for the full timeout.
    if (not ice_->isRunning())
        return -1;
    return ice_->waitForData(compId_, ms_timeout, ec);
}
......@@ -1460,13 +1499,21 @@ IceSocketTransport::write(const ValueType* buf, std::size_t len, std::error_code
std::size_t
IceSocketTransport::read(ValueType* buf, std::size_t len, std::error_code& ec)
{
auto res = ice_->recv(compId_, buf, len);
if (res < 0) {
ec.assign(errno, std::generic_category());
return 0;
if (!ice_->isRunning()) return 0;
try {
auto res = reliable_
? ice_->recvfrom(compId_, reinterpret_cast<char *>(buf), len)
: ice_->recv(compId_, buf, len);
if (res < 0) {
ec.assign(errno, std::generic_category());
return 0;
}
ec.clear();
return res;
} catch (const std::exception &e) {
JAMI_ERR("IceSocketTransport::read exception: %s", e.what());
}
ec.clear();
return res;
return 0;
}
IpAddr
......
......@@ -29,6 +29,7 @@
#include <functional>
#include <memory>
#include <msgpack.hpp>
#include <vector>
namespace jami {
......@@ -73,6 +74,14 @@ struct IceTransportOptions {
bool aggressive {false}; // If we use the aggressive nomination strategy
};
// ICE negotiation parameters exchanged with the peer (serialized with
// msgpack, e.g. over the DHT).
struct SDP {
std::string ufrag;  // local ICE username fragment
std::string pwd;    // local ICE password
std::vector<std::string> candidates;  // candidate lines, one per "a=candidate" entry
MSGPACK_DEFINE(ufrag, pwd, candidates)
};
class IceTransport {
public:
using Attribute = struct {
......@@ -85,7 +94,6 @@ public:
*/
IceTransport(const char* name, int component_count, bool master,
const IceTransportOptions& options = {});
/**
* Get current state
*/
......@@ -100,13 +108,18 @@ public:
*/
bool start(const Attribute& rem_attrs,
const std::vector<IceCandidate>& rem_candidates);
bool start(const std::vector<uint8_t>& attrs_candidates);
bool start(const SDP& sdp);
/**
* Stop a started or completed transport.
*/
bool stop();
/**
* Cancel operations
*/
void cancelOperations();
/**
* Returns true if ICE transport has been initialized
* [mutex protected]
......@@ -125,6 +138,12 @@ public:
*/
bool isRunning() const;
/**
* Return true if a start operations fails or if stop() has been called
* [mutex protected]
*/
bool isStopped() const;
/**
* Returns true if ICE transport is in failure state
* [mutex protected]
......@@ -156,7 +175,7 @@ public:
/**
* Returns serialized ICE attributes and candidates.
*/
std::vector<uint8_t> packIceMsg() const;
std::vector<uint8_t> packIceMsg(uint8_t version = 1) const;
bool getCandidateFromSDP(const std::string& line, IceCandidate& cand);
......@@ -188,6 +207,15 @@ public:
bool setSlaveSession();
bool setInitiatorSession();
/**
* Get SDP messages list
* @param msg The payload to parse
* @return the list of SDP messages
*/
static std::vector<SDP> parseSDPList(const std::vector<uint8_t>& msg);
bool isTCPEnabled();
private:
class Impl;
std::unique_ptr<Impl> pimpl_;
......
......@@ -131,6 +131,7 @@ ip_utils::getAnyHostAddr(pj_uint16_t family)
IpAddr
ip_utils::getLocalAddr(pj_uint16_t family)
{
sip_utils::register_thread();
if (family == pj_AF_UNSPEC()) {
family = pj_AF_INET6();
}
......
......@@ -138,6 +138,7 @@ struct JamiAccount::PendingCall
{
std::chrono::steady_clock::time_point start;
std::shared_ptr<IceTransport> ice_sp;
std::shared_ptr<IceTransport> ice_tcp_sp;
std::weak_ptr<SIPCall> call;
std::future<size_t> listen_key;
dht::InfoHash call_key;
......@@ -411,6 +412,30 @@ JamiAccount::newOutgoingCall(const std::string& toUrl,
return call;
}
// Start the UDP and/or TCP ICE negotiations from the remote's serialized
// SDP list. One SDP per transport protocol may be present; the failure
// flags are set for each negotiation that could not be started.
void
initICE(const std::vector<uint8_t> &msg, const std::shared_ptr<IceTransport> &ice,
        const std::shared_ptr<IceTransport> &ice_tcp, bool &udp_failed, bool &tcp_failed)
{
    const auto sdps = IceTransport::parseSDPList(msg);
    for (const auto &remote : sdps) {
        if (remote.candidates.empty())
            continue;
        // The first candidate line tells which transport this SDP describes.
        const bool forTcp = remote.candidates[0].find("TCP") != std::string::npos;
        if (forTcp)
            tcp_failed = (ice_tcp && !ice_tcp->start(remote));
        else
            udp_failed = (ice && !ice->start(remote));
    }
    // During the ICE reply we can start the ICE negotiation
    if (tcp_failed && ice_tcp) {
        ice_tcp->stop();
        JAMI_WARN("ICE over TCP not started, will only use UDP");
    }
}
void
JamiAccount::startOutgoingCall(const std::shared_ptr<SIPCall>& call, const std::string& toUri)
{
......@@ -455,9 +480,16 @@ JamiAccount::startOutgoingCall(const std::shared_ptr<SIPCall>& call, const std::
return;
}
auto ice_config = getIceOptions();