From e83a10069e95dc37d7684135f3e924284b37ba61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Blin?= <sebastien.blin@savoirfairelinux.com> Date: Thu, 16 May 2019 14:03:05 -0400 Subject: [PATCH] sip: negotiate both UDP and TCP for the control channel NOTE: SIP over TCP is disabled for now on Windows, waiting for TLS 1.3 support. To re-enable it, check the #ifdef _WIN32 in ice_transport.cpp Our pjsip version supports RFC6544. With this patch, when starting a call, the daemon is using two ICE sessions for the SIP channel. One is negotiating a UDP socket, and the other a TCP socket and transmits both SDP on the DHT. If both negotiations succeed, TCP is preferred and will be used to transmit SIP messages and the VCard. This should solve the 30 seconds timeout on bad networks. Note that the media channel is still using UDP to transmit audio and video. MAJOR CHANGE: the SIP channel uses TLS on top of TCP, no DTLS, so the transport is considered reliable. Also lots of changes in rfc6544.patch to link to rfc6062. 
The patch needs to be cleaned, cf TODO notes Also this seems to fix the ICE shutdown at the end of the call (after the IDLE Timeout) Change-Id: I55c5f51377fd8787bc951d6d282eec46f8eaf977 Gitlab: #103 Gitlab: #108 --- contrib/src/pjproject/fetch_and_patch.bat | 1 - contrib/src/pjproject/rfc6544.patch | 1237 ++++++++++++----- .../pjproject/win32_ice_tcp_temp_fix.patch | 38 - src/ice_socket.h | 8 +- src/ice_transport.cpp | 189 ++- src/ice_transport.h | 29 +- src/jamidht/jamiaccount.cpp | 174 ++- src/jamidht/jamiaccount.h | 1 + src/jamidht/sips_transport_ice.cpp | 39 +- src/jamidht/sips_transport_ice.h | 4 + src/peer_connection.cpp | 6 +- src/peer_connection.h | 3 +- src/security/tls_session.cpp | 43 +- src/sip/siptransport.cpp | 3 + 14 files changed, 1258 insertions(+), 517 deletions(-) delete mode 100644 contrib/src/pjproject/win32_ice_tcp_temp_fix.patch diff --git a/contrib/src/pjproject/fetch_and_patch.bat b/contrib/src/pjproject/fetch_and_patch.bat index febc0ce91c..5cb61f5665 100644 --- a/contrib/src/pjproject/fetch_and_patch.bat +++ b/contrib/src/pjproject/fetch_and_patch.bat @@ -35,7 +35,6 @@ bash -c "%PATCH_CMD% %UNIXPATH%pjproject/fix_ioqueue_ipv6_sendto.patch" bash -c "%PATCH_CMD% %UNIXPATH%pjproject/add_dtls_transport.patch" bash -c "%PATCH_CMD% %UNIXPATH%pjproject/rfc6544.patch" bash -c "%PATCH_CMD% %UNIXPATH%pjproject/ice_config.patch" -bash -c "%PATCH_CMD% %UNIXPATH%pjproject/win32_ice_tcp_temp_fix.patch" %APPLY_CMD% %SRC%\pjproject\win32_vs_gnutls.patch %APPLY_CMD% %SRC%\pjproject\win_config.patch diff --git a/contrib/src/pjproject/rfc6544.patch b/contrib/src/pjproject/rfc6544.patch index d12dfb8921..cad0f947a7 100644 --- a/contrib/src/pjproject/rfc6544.patch +++ b/contrib/src/pjproject/rfc6544.patch @@ -17,28 +17,29 @@ on behalf of Savoir-faire Linux. 
--- pjnath/include/pjnath/config.h | 13 +- - pjnath/include/pjnath/ice_session.h | 118 ++++++++++++++--- - pjnath/include/pjnath/ice_strans.h | 61 ++++++++- - pjnath/include/pjnath/stun_session.h | 66 ++++++++- - pjnath/include/pjnath/stun_sock.h | 84 +++++++++--- + pjnath/include/pjnath/ice_session.h | 166 ++- + pjnath/include/pjnath/ice_strans.h | 61 +- + pjnath/include/pjnath/stun_session.h | 82 +- + pjnath/include/pjnath/stun_sock.h | 93 +- pjnath/src/pjnath-test/concur_test.c | 8 +- pjnath/src/pjnath-test/sess_auth.c | 12 +- pjnath/src/pjnath-test/stun_sock_test.c | 7 +- - pjnath/src/pjnath/ice_session.c | 388 +++++++++++++++++++++++++++++++++++++---------------- - pjnath/src/pjnath/ice_strans.c | 741 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------- + pjnath/src/pjnath/ice_session.c | 535 +++++++-- + pjnath/src/pjnath/ice_strans.c | 958 ++++++++++++----- pjnath/src/pjnath/nat_detect.c | 12 +- pjnath/src/pjnath/stun_session.c | 12 +- - pjnath/src/pjnath/stun_sock.c | 1285 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------------------------- + pjnath/src/pjnath/stun_sock.c | 1313 ++++++++++++++++------- + pjnath/src/pjnath/stun_transaction.c | 1 + pjnath/src/pjnath/turn_session.c | 3 +- pjnath/src/pjturn-client/client_main.c | 11 +- pjnath/src/pjturn-srv/allocation.c | 2 +- pjnath/src/pjturn-srv/server.c | 2 +- - pjsip-apps/src/samples/icedemo.c | 671 +++++++++++++++++++++++++++++++++++++++++++++++++++----------------------------------------- + pjsip-apps/src/samples/icedemo.c | 671 +++++++----- pjsip/src/pjsua-lib/pjsua_core.c | 8 +- - 19 files changed, 2424 insertions(+), 1080 deletions(-) + 20 files changed, 2877 insertions(+), 1093 deletions(-) diff --git a/pjnath/include/pjnath/config.h b/pjnath/include/pjnath/config.h -index fc1e2755..6f17a663 100644 +index fc1e27550..6f17a663b 100644 --- 
a/pjnath/include/pjnath/config.h +++ b/pjnath/include/pjnath/config.h @@ -1,5 +1,5 @@ @@ -84,7 +85,7 @@ index fc1e2755..6f17a663 100644 diff --git a/pjnath/include/pjnath/ice_session.h b/pjnath/include/pjnath/ice_session.h -index fa13a3b7..2e686ea1 100644 +index fa13a3b7c..9644836d0 100644 --- a/pjnath/include/pjnath/ice_session.h +++ b/pjnath/include/pjnath/ice_session.h @@ -163,6 +163,51 @@ typedef enum pj_ice_cand_type @@ -151,7 +152,30 @@ index fa13a3b7..2e686ea1 100644 } pj_ice_sess_cand; -@@ -331,6 +381,13 @@ typedef enum pj_ice_sess_check_state +@@ -324,6 +374,22 @@ typedef enum pj_ice_sess_check_state + */ + PJ_ICE_SESS_CHECK_STATE_FROZEN, + ++ /** ++ * The following status is used when a packet sent via TURN got a ++ * "Connection reset by peer". This mean that the peer didn't allow ++ * us to connect yet. The socket will be reconnected during the next ++ * loop. ++ */ ++ PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY, ++ ++ /** ++ * TODO (sblin): REMOVE THIS! - https://github.com/coturn/coturn/issues/408 ++ * For now, this status is only used because sometimes, the first packet ++ * doesn't receive any response. So, we retry to send the packet every ++ * 50 loops. ++ */ ++ PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET, ++ + /** + * A check has not been performed for this pair, and can be + * performed as soon as it is the highest priority Waiting pair on +@@ -331,6 +397,13 @@ typedef enum pj_ice_sess_check_state */ PJ_ICE_SESS_CHECK_STATE_WAITING, @@ -165,7 +189,7 @@ index fa13a3b7..2e686ea1 100644 /** * A check has not been performed for this pair, and can be * performed as soon as it is the highest priority Waiting pair on -@@ -512,6 +569,29 @@ typedef struct pj_ice_sess_cb +@@ -512,6 +585,41 @@ typedef struct pj_ice_sess_cb void *pkt, pj_size_t size, const pj_sockaddr_t *src_addr, unsigned src_addr_len); @@ -182,6 +206,18 @@ index fa13a3b7..2e686ea1 100644 + unsigned check_id); + + /** ++ * Reconnect a resetted TCP connection and send connectivity check ++ * cf. 
PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY ++ * ++ * @param ice The ICE session. ++ * @param clist The ICE connection list ++ * @param check_id The wanted check. ++ */ ++ pj_status_t (*reconnect_tcp_connection)(pj_ice_sess *ice, ++ pj_ice_sess_checklist *clist, ++ unsigned check_id); ++ ++ /** + * Close TCP socket + * + * @param ice The ICE session. @@ -195,7 +231,7 @@ index fa13a3b7..2e686ea1 100644 } pj_ice_sess_cb; -@@ -627,6 +707,7 @@ struct pj_ice_sess +@@ -627,6 +735,7 @@ struct pj_ice_sess pj_bool_t is_destroying; /**< Destroy is called */ pj_status_t ice_status; /**< Error status. */ pj_timer_entry timer; /**< ICE timer. */ @@ -203,7 +239,7 @@ index fa13a3b7..2e686ea1 100644 pj_ice_sess_cb cb; /**< Callback. */ pj_stun_config stun_cfg; /**< STUN settings. */ -@@ -669,6 +750,7 @@ struct pj_ice_sess +@@ -669,6 +778,7 @@ struct pj_ice_sess char txt[128]; char errmsg[PJ_ERR_MSG_SIZE]; } tmp; @@ -211,7 +247,7 @@ index fa13a3b7..2e686ea1 100644 }; -@@ -826,8 +908,6 @@ PJ_DECL(pj_status_t) pj_ice_sess_change_role(pj_ice_sess *ice, +@@ -826,8 +936,6 @@ PJ_DECL(pj_status_t) pj_ice_sess_change_role(pj_ice_sess *ice, PJ_DECL(pj_status_t) pj_ice_sess_set_prefs(pj_ice_sess *ice, const pj_uint8_t prefs[4]); @@ -220,7 +256,7 @@ index fa13a3b7..2e686ea1 100644 /** * Add a candidate to this ICE session. Application must add candidates for * each components ID before it can start pairing the candidates and -@@ -846,20 +926,17 @@ PJ_DECL(pj_status_t) pj_ice_sess_set_prefs(pj_ice_sess *ice, +@@ -846,20 +954,17 @@ PJ_DECL(pj_status_t) pj_ice_sess_set_prefs(pj_ice_sess *ice, * @param rel_addr Optional related address. * @param addr_len Length of addresses. * @param p_cand_id Optional pointer to receive the candidate ID. 
@@ -249,12 +285,10 @@ index fa13a3b7..2e686ea1 100644 /** * Find default candidate for the specified component ID, using this -@@ -968,8 +1045,19 @@ PJ_DECL(pj_status_t) pj_ice_sess_on_rx_pkt(pj_ice_sess *ice, +@@ -968,7 +1073,42 @@ PJ_DECL(pj_status_t) pj_ice_sess_on_rx_pkt(pj_ice_sess *ice, const pj_sockaddr_t *src_addr, int src_addr_len); -- -- +/** + * Notification when ICE session get a new incoming connection + * @@ -268,11 +302,34 @@ index fa13a3b7..2e686ea1 100644 +PJ_DECL(void) +ice_sess_on_peer_connection(pj_ice_sess *ice, + pj_uint8_t transport_id, pj_status_t status, pj_sockaddr_t* remote_addr); + ++/** ++ * Notification when ICE session get a new resetted connection ++ * cf PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY ++ * ++ * @param ice The ICE session. ++ * @param transport_id Related transport ++ * @param remote_addr Connected remove address ++ */ ++PJ_DECL(void) ++ice_sess_on_peer_reset_connection(pj_ice_sess *ice, ++ pj_uint8_t transport_id, pj_sockaddr_t* remote_addr); ++ ++/** ++ * Notification when ICE session get a new packet ++ * Used to remove the PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET status ++ * ++ * @param ice The ICE session. 
++ * @param transport_id Related transport ++ * @param remote_addr Connected remove address ++ */ ++PJ_DECL(void) ++ice_sess_on_peer_packet(pj_ice_sess *ice, pj_uint8_t transport_id, pj_sockaddr_t* remote_addr); + /** * @} - */ diff --git a/pjnath/include/pjnath/ice_strans.h b/pjnath/include/pjnath/ice_strans.h -index b4a83067..2b491e0d 100644 +index b4a83067f..2b491e0d0 100644 --- a/pjnath/include/pjnath/ice_strans.h +++ b/pjnath/include/pjnath/ice_strans.h @@ -1,5 +1,5 @@ @@ -377,7 +434,7 @@ index b4a83067..2b491e0d 100644 /** * @} diff --git a/pjnath/include/pjnath/stun_session.h b/pjnath/include/pjnath/stun_session.h -index f8ea4d1d..1e95a297 100644 +index f8ea4d1dc..3d42af5a2 100644 --- a/pjnath/include/pjnath/stun_session.h +++ b/pjnath/include/pjnath/stun_session.h @@ -1,5 +1,5 @@ @@ -417,7 +474,7 @@ index f8ea4d1d..1e95a297 100644 /** * This is the callback to be registered to pj_stun_session, to send -@@ -307,6 +330,17 @@ typedef struct pj_stun_session_cb +@@ -307,6 +330,33 @@ typedef struct pj_stun_session_cb const pj_sockaddr_t *src_addr, unsigned src_addr_len); @@ -431,11 +488,27 @@ index f8ea4d1d..1e95a297 100644 + * @param remote_addr The remote connected + */ + void (*on_peer_connection)(pj_stun_session *sess, pj_status_t status, pj_sockaddr_t* remote_addr); ++ ++ /** ++ * Notification when STUN connection is resetted (TCP only). ++ * ++ * @param stun_session The STUN session. ++ * @param remote_addr The remote resetted ++ */ ++ void (*on_peer_reset_connection)(pj_stun_session *sess, pj_sockaddr_t* remote_addr); ++ ++ /** ++ * Notification when STUN connection is resetted (TCP only). ++ * ++ * @param stun_session The STUN session. 
++ * @param remote_addr The remote resetted ++ */ ++ void (*on_peer_packet)(pj_stun_session *sess, pj_sockaddr_t* remote_addr); + } pj_stun_session_cb; -@@ -388,15 +422,15 @@ typedef enum pj_stun_sess_msg_log_flag +@@ -388,15 +438,15 @@ typedef enum pj_stun_sess_msg_log_flag * @param grp_lock Optional group lock to be used by this session. * If NULL, the session will create one itself. * @param p_sess Pointer to receive STUN session instance. @@ -457,7 +530,7 @@ index f8ea4d1d..1e95a297 100644 /** * Destroy the STUN session and all objects created in the context of -@@ -751,6 +785,24 @@ PJ_DECL(void) pj_stun_msg_destroy_tdata(pj_stun_session *sess, +@@ -751,6 +801,24 @@ PJ_DECL(void) pj_stun_msg_destroy_tdata(pj_stun_session *sess, pj_stun_tx_data *tdata); @@ -483,7 +556,7 @@ index f8ea4d1d..1e95a297 100644 * @} */ diff --git a/pjnath/include/pjnath/stun_sock.h b/pjnath/include/pjnath/stun_sock.h -index fff4df88..d70300bb 100644 +index fff4df885..05a61bb11 100644 --- a/pjnath/include/pjnath/stun_sock.h +++ b/pjnath/include/pjnath/stun_sock.h @@ -1,5 +1,5 @@ @@ -609,7 +682,7 @@ index fff4df88..d70300bb 100644 * * @return PJ_SUCCESS if data has been sent immediately, or * PJ_EPENDING if data cannot be sent immediately. 
In -@@ -483,7 +502,36 @@ PJ_DECL(pj_status_t) pj_stun_sock_sendto(pj_stun_sock *stun_sock, +@@ -483,7 +502,45 @@ PJ_DECL(pj_status_t) pj_stun_sock_sendto(pj_stun_sock *stun_sock, unsigned pkt_len, unsigned flag, const pj_sockaddr_t *dst_addr, @@ -629,6 +702,15 @@ index fff4df88..d70300bb 100644 +pj_stun_sock_connect_active(pj_stun_sock *stun_sock, const pj_sockaddr_t *remote_addr, int af); + +/** ++ * Connect active socket to remote address ++ * @param stun_sock ++ * @param remote_addr the destination ++ * @param af address family ++ */ ++PJ_DECL(pj_status_t) ++pj_stun_sock_reconnect_active(pj_stun_sock *stun_sock, const pj_sockaddr_t *remote_addr, int af); ++ ++/** + * Close active socket + * @param stun_sock + * @param remote_addr The remote address linked @@ -648,7 +730,7 @@ index fff4df88..d70300bb 100644 /** * @} diff --git a/pjnath/src/pjnath-test/concur_test.c b/pjnath/src/pjnath-test/concur_test.c -index c3013d2a..ebe17392 100644 +index c3013d2ab..ebe173922 100644 --- a/pjnath/src/pjnath-test/concur_test.c +++ b/pjnath/src/pjnath-test/concur_test.c @@ -183,10 +183,10 @@ static int stun_destroy_test_session(struct stun_test_session *test_sess) @@ -667,7 +749,7 @@ index c3013d2a..ebe17392 100644 return -10; } diff --git a/pjnath/src/pjnath-test/sess_auth.c b/pjnath/src/pjnath-test/sess_auth.c -index 055eaad6..d1ad137a 100644 +index 055eaad61..d1ad137a3 100644 --- a/pjnath/src/pjnath-test/sess_auth.c +++ b/pjnath/src/pjnath-test/sess_auth.c @@ -248,7 +248,8 @@ static int create_std_server(pj_stun_auth_type auth_type, @@ -704,7 +786,7 @@ index 055eaad6..d1ad137a 100644 destroy_client_server(); return -270; diff --git a/pjnath/src/pjnath-test/stun_sock_test.c b/pjnath/src/pjnath-test/stun_sock_test.c -index fff4fad2..a54df74d 100644 +index fff4fad26..a54df74dc 100644 --- a/pjnath/src/pjnath-test/stun_sock_test.c +++ b/pjnath/src/pjnath-test/stun_sock_test.c @@ -255,8 +255,8 @@ static pj_status_t create_client(pj_stun_config *cfg, @@ -731,7 +813,7 @@ index 
fff4fad2..a54df74d 100644 app_perror(" error: server sending data", status); ret = -390; diff --git a/pjnath/src/pjnath/ice_session.c b/pjnath/src/pjnath/ice_session.c -index c51dba77..b9639061 100644 +index c51dba771..52fa05e3e 100644 --- a/pjnath/src/pjnath/ice_session.c +++ b/pjnath/src/pjnath/ice_session.c @@ -1,5 +1,5 @@ @@ -741,15 +823,18 @@ index c51dba77..b9639061 100644 * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com) * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org> * -@@ -45,6 +45,7 @@ static const char *check_state_name[] = +@@ -44,7 +44,10 @@ static const char *cand_type_names[] = + static const char *check_state_name[] = { "Frozen", ++ "Needs Retry", ++ "Needs First Packet", "Waiting", + "Pending", "In Progress", "Succeeded", "Failed" -@@ -75,8 +76,8 @@ enum timer_type +@@ -75,8 +78,8 @@ enum timer_type valid check for every components. */ TIMER_START_NOMINATED_CHECK,/**< Controlling agent start connectivity checks with USE-CANDIDATE flag. */ @@ -760,7 +845,15 @@ index c51dba77..b9639061 100644 }; /* Candidate type preference */ -@@ -133,6 +134,7 @@ typedef struct timer_data +@@ -123,6 +126,7 @@ typedef struct timer_data + { + pj_ice_sess *ice; + pj_ice_sess_checklist *clist; ++ unsigned first_packet_counter; // TODO (remove), for now, needed for the NEEDS_FIRST_PACKET state + } timer_data; + + +@@ -133,6 +137,7 @@ typedef struct timer_data /* Forward declarations */ static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te); @@ -768,7 +861,7 @@ index c51dba77..b9639061 100644 static void on_ice_complete(pj_ice_sess *ice, pj_status_t status); static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now); static void ice_on_destroy(void *obj); -@@ -288,10 +290,9 @@ static pj_status_t init_comp(pj_ice_sess *ice, +@@ -288,10 +293,9 @@ static pj_status_t init_comp(pj_ice_sess *ice, sess_cb.on_send_msg = &on_stun_send_msg; /* Create STUN session for this candidate */ @@ -782,7 +875,7 @@ index c51dba77..b9639061 100644 if (status 
!= PJ_SUCCESS) return status; -@@ -715,7 +716,8 @@ PJ_DEF(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice, +@@ -715,7 +719,8 @@ PJ_DEF(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice, const pj_sockaddr_t *base_addr, const pj_sockaddr_t *rel_addr, int addr_len, @@ -792,7 +885,7 @@ index c51dba77..b9639061 100644 { pj_ice_sess_cand *lcand; pj_status_t status = PJ_SUCCESS; -@@ -738,6 +740,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice, +@@ -738,6 +743,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice, lcand->comp_id = (pj_uint8_t)comp_id; lcand->transport_id = (pj_uint8_t)transport_id; lcand->type = type; @@ -800,15 +893,16 @@ index c51dba77..b9639061 100644 pj_strdup(ice->pool, &lcand->foundation, foundation); lcand->prio = CALC_CAND_PRIO(ice, type, local_pref, lcand->comp_id); pj_sockaddr_cp(&lcand->addr, addr); -@@ -959,6 +962,7 @@ static void check_set_state(pj_ice_sess *ice, pj_ice_sess_check *check, +@@ -959,6 +965,8 @@ static void check_set_state(pj_ice_sess *ice, pj_ice_sess_check *check, pj_ice_sess_check_state st, pj_status_t err_code) { + if (check->state == st) return; // nothing to do ++ if (st == PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET && check->state == PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS) return; pj_assert(check->state < PJ_ICE_SESS_CHECK_STATE_SUCCEEDED); LOG5((ice->obj_name, "Check %s: state changed from %s to %s", -@@ -1079,6 +1083,17 @@ static pj_status_t prune_checklist(pj_ice_sess *ice, +@@ -1079,6 +1087,17 @@ static pj_status_t prune_checklist(pj_ice_sess *ice, return PJNATH_EICENOHOSTCAND; } } @@ -826,7 +920,7 @@ index c51dba77..b9639061 100644 } /* Next remove a pair if its local and remote candidates are identical -@@ -1181,6 +1196,8 @@ static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te) +@@ -1181,6 +1200,8 @@ static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te) case TIMER_KEEP_ALIVE: ice_keep_alive(ice, PJ_TRUE); break; @@ -835,29 +929,7 @@ index c51dba77..b9639061 100644 case 
TIMER_NONE: /* Nothing to do, just to get rid of gcc warning */ break; -@@ -1189,6 +1206,21 @@ static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te) - pj_grp_lock_release(ice->grp_lock); - } - -+static void on_tcp_connect_timeout(pj_ice_sess* ice) { -+ pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap, &ice->timer, TIMER_NONE); -+ -+ for (int i = 0; i < ice->clist.count; ++i) { -+ pj_ice_sess_check *check = &ice->clist.checks[i]; -+ if (check->state == PJ_ICE_SESS_CHECK_STATE_PENDING) { -+ if (*ice->cb.wait_tcp_connection) { -+ (*ice->cb.close_tcp_connection)(ice, &ice->clist, i); -+ } -+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, PJ_ECANCELLED); -+ break; -+ } -+ } -+} -+ - /* Send keep-alive */ - static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now) - { -@@ -1202,9 +1234,9 @@ static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now) +@@ -1202,9 +1223,9 @@ static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now) pj_bool_t saved; pj_status_t status; @@ -870,7 +942,7 @@ index c51dba77..b9639061 100644 /* Create the Binding Indication */ status = pj_stun_session_create_ind(comp->stun_sess, -@@ -1343,7 +1375,6 @@ static pj_bool_t on_check_complete(pj_ice_sess *ice, +@@ -1343,7 +1364,6 @@ static pj_bool_t on_check_complete(pj_ice_sess *ice, check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_WAITING, 0); } } @@ -878,7 +950,43 @@ index c51dba77..b9639061 100644 LOG5((ice->obj_name, "Check %d is successful%s", GET_CHECK_ID(&ice->clist, check), (check->nominated ? 
" and nominated" : ""))); -@@ -1695,6 +1726,25 @@ PJ_DEF(pj_status_t) pj_ice_sess_create_check_list( +@@ -1609,6 +1629,35 @@ static pj_bool_t on_check_complete(pj_ice_sess *ice, + return PJ_FALSE; + } + ++static void on_tcp_connect_timeout(pj_ice_sess* ice) { ++ pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap, &ice->timer, TIMER_NONE); ++ ++ pj_bool_t first_found = PJ_FALSE, set_timer = PJ_FALSE; ++ ++ for (int i = 0; i < ice->clist.count; ++i) { ++ pj_ice_sess_check *check = &ice->clist.checks[i]; ++ if (check->state == PJ_ICE_SESS_CHECK_STATE_PENDING && !first_found) { ++ if (*ice->cb.wait_tcp_connection) (*ice->cb.close_tcp_connection)(ice, &ice->clist, i); ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, PJ_ECANCELLED); ++ on_check_complete(ice, check); ++ first_found = PJ_TRUE; ++ } else if(check->state == PJ_ICE_SESS_CHECK_STATE_PENDING) { ++ set_timer = PJ_TRUE; ++ break; ++ } ++ } ++ ++ if (set_timer && ice->timer.id == TIMER_NONE) { ++ // Reschedule ++ pj_time_val delay = {0, 0}; ++ delay.msec = 1500; ++ pj_time_val_normalize(&delay); ++ pj_timer_heap_schedule_w_grp_lock( ++ ice->stun_cfg.timer_heap, &ice->timer, &delay, ++ TIMER_CONNECTION_TIMEOUT, ice->grp_lock); ++ } ++} ++ + + /* Create checklist by pairing local candidates with remote candidates */ + PJ_DEF(pj_status_t) pj_ice_sess_create_check_list( +@@ -1695,6 +1744,25 @@ PJ_DEF(pj_status_t) pj_ice_sess_create_check_list( continue; } @@ -904,7 +1012,15 @@ index c51dba77..b9639061 100644 chk->lcand = lcand; chk->rcand = rcand; -@@ -1751,40 +1801,71 @@ PJ_DEF(pj_status_t) pj_ice_sess_create_check_list( +@@ -1739,6 +1807,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_create_check_list( + td = PJ_POOL_ZALLOC_T(ice->pool, timer_data); + td->ice = ice; + td->clist = clist; ++ td->first_packet_counter = 1; + clist->timer.user_data = (void*)td; + clist->timer.cb = &periodic_timer; + +@@ -1751,40 +1820,73 @@ PJ_DEF(pj_status_t) pj_ice_sess_create_check_list( return PJ_SUCCESS; } @@ -942,6 +1058,8 
@@ index c51dba77..b9639061 100644 + &rcand->addr, pj_sockaddr_get_len(&rcand->addr), check->tdata); + if (status != PJ_SUCCESS) { + check->tdata = NULL; ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status); ++ on_check_complete(ice, check); + pjnath_perror(ice->obj_name, "Error sending STUN request", status); + pj_log_pop_indent(); + return status; @@ -994,7 +1112,7 @@ index c51dba77..b9639061 100644 /* Attach data to be retrieved later when STUN request transaction * completes and on_stun_request_complete() callback is called. */ -@@ -1796,57 +1877,72 @@ static pj_status_t perform_check(pj_ice_sess *ice, +@@ -1796,57 +1898,84 @@ static pj_status_t perform_check(pj_ice_sess *ice, msg_data->data.req.ckid = check_id; /* Add PRIORITY */ @@ -1052,17 +1170,24 @@ index c51dba77..b9639061 100644 + if (lcand->transport == PJ_CAND_UDP) { + status = send_connectivity_check(ice, clist, check_id, nominate, msg_data); + } else if (lcand->transport == PJ_CAND_TCP_ACTIVE) { -+ pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap, &ice->timer, TIMER_NONE); -+ status = (*ice->cb.wait_tcp_connection)(ice, clist, check_id); -+ if (ice->timer.id == TIMER_NONE) { -+ pj_time_val delay = {0, 0}; -+ delay.msec = 1500; -+ pj_time_val_normalize(&delay); -+ pj_timer_heap_schedule_w_grp_lock( -+ ice->stun_cfg.timer_heap, &ice->timer, &delay, -+ TIMER_CONNECTION_TIMEOUT, ice->grp_lock); ++ ++ if (check->state == PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY) { ++ status = (*ice->cb.reconnect_tcp_connection)(ice, clist, check_id); ++ } else if (check->state == PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET) { ++ status = send_connectivity_check(ice, clist, check_id, nominate, msg_data); + } else { -+ pj_assert(!"Not expected any timer active"); ++ pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap, &ice->timer, TIMER_NONE); ++ status = (*ice->cb.wait_tcp_connection)(ice, clist, check_id); ++ if (ice->timer.id == TIMER_NONE) { ++ pj_time_val delay = {0, 0}; ++ delay.msec = 1500; ++ 
pj_time_val_normalize(&delay); ++ pj_timer_heap_schedule_w_grp_lock( ++ ice->stun_cfg.timer_heap, &ice->timer, &delay, ++ TIMER_CONNECTION_TIMEOUT, ice->grp_lock); ++ } else if (check->state == PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY) { ++ pj_assert(!"Not expected any timer active"); ++ } + } } +#else @@ -1088,8 +1213,13 @@ index c51dba77..b9639061 100644 + check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_PENDING, + status); + } else { -+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS, -+ PJ_SUCCESS); ++ if (check->rcand->type == PJ_ICE_CAND_TYPE_RELAYED) { ++ // TODO (sblin) remove this - https://github.com/coturn/coturn/issues/408 ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET, status); ++ } else { ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS, ++ PJ_SUCCESS); ++ } } - check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS, @@ -1101,16 +1231,55 @@ index c51dba77..b9639061 100644 } -@@ -1859,7 +1955,7 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th, - timer_data *td; - pj_ice_sess *ice; - pj_ice_sess_checklist *clist; -- unsigned i, start_count=0; -+ unsigned i, start_count=0, pending_count=0; - pj_status_t status; +@@ -1882,44 +2011,101 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th, + LOG5((ice->obj_name, "Starting checklist periodic check")); + pj_log_push_indent(); - td = (struct timer_data*) te->user_data; -@@ -1885,41 +1981,41 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th, ++ /* Send STUN Binding request for check with highest priority on ++ * retry state. 
++ */ ++ ++ if (start_count == 0) { ++ for (i = 0; i < clist->count; ++i) { ++ pj_ice_sess_check *check = &clist->checks[i]; ++ // Reconnect closed TURN sockets ++ if (check->state == PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY) { ++ status = perform_check(ice, clist, i, ice->is_nominating); ++ if (status != PJ_SUCCESS && status != PJ_EPENDING) { ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, ++ status); ++ on_check_complete(ice, check); ++ } ++ ++start_count; ++ break; ++ } ++ } ++ } ++ ++ if (start_count == 0) { ++ // TODO (sblin) remove - https://github.com/coturn/coturn/issues/408 ++ pj_bool_t inc_counter = PJ_TRUE; ++ for (i = 0; i < clist->count; ++i) { ++ pj_ice_sess_check *check = &clist->checks[i]; ++ if (check->state == PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET) { ++ if (inc_counter) { ++ td->first_packet_counter += 1; ++ inc_counter = PJ_FALSE; ++ } ++ if (td->first_packet_counter % 50 == 0) { ++ status = perform_check(ice, clist, i, ice->is_nominating); ++ if (status != PJ_SUCCESS && status != PJ_EPENDING) { ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, ++ status); ++ on_check_complete(ice, check); ++ } ++ } ++ ++start_count; ++ break; ++ } ++ } ++ } ++ /* Send STUN Binding request for check with highest priority on * Waiting state. 
*/ @@ -1154,12 +1323,7 @@ index c51dba77..b9639061 100644 - PJ_ICE_SESS_CHECK_STATE_FAILED, status); - on_check_complete(ice, check); - } -- -- ++start_count; -- break; -- } -- } -+ if (start_count == 0 && pending_count == 0) { ++ if (start_count == 0) { + for (i = 0; i < clist->count; ++i) { + pj_ice_sess_check *check = &clist->checks[i]; + @@ -1173,10 +1337,26 @@ index c51dba77..b9639061 100644 + break; + } + } ++ } + +- ++start_count; +- break; +- } +- } ++ if (start_count == 0) { ++ // If all sockets are pending, do nothing ++ pj_bool_t inc_counter = PJ_TRUE; ++ for (i = 0; i < clist->count; ++i) { ++ pj_ice_sess_check *check = &clist->checks[i]; ++ if (check->state == PJ_ICE_SESS_CHECK_STATE_PENDING) { ++ ++start_count; ++ break; ++ } ++ } } /* Cannot start check because there's no suitable candidate pair. -@@ -1936,8 +2032,7 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th, +@@ -1936,8 +2122,7 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th, pj_grp_lock_release(ice->grp_lock); pj_log_pop_indent(); return PJ_SUCCESS; @@ -1186,7 +1366,7 @@ index c51dba77..b9639061 100644 /* Start sending connectivity check with USE-CANDIDATE */ static void start_nominated_check(pj_ice_sess *ice) -@@ -2109,13 +2204,13 @@ PJ_DEF(pj_status_t) pj_ice_sess_start_check(pj_ice_sess *ice) +@@ -2109,13 +2294,13 @@ PJ_DEF(pj_status_t) pj_ice_sess_start_check(pj_ice_sess *ice) /* First, perform all pending triggered checks, simultaneously. 
*/ rcheck = ice->early_check.next; while (rcheck != &ice->early_check) { @@ -1207,7 +1387,7 @@ index c51dba77..b9639061 100644 } pj_list_init(&ice->early_check); -@@ -2161,7 +2256,7 @@ static pj_status_t on_stun_send_msg(pj_stun_session *sess, +@@ -2161,7 +2346,7 @@ static pj_status_t on_stun_send_msg(pj_stun_session *sess, /* Stray retransmit timer that could happen while * we're being destroyed */ pj_grp_lock_release(ice->grp_lock); @@ -1216,10 +1396,33 @@ index c51dba77..b9639061 100644 } status = (*ice->cb.on_tx_pkt)(ice, sd->comp_id, msg_data->transport_id, -@@ -2171,6 +2266,75 @@ static pj_status_t on_stun_send_msg(pj_stun_session *sess, +@@ -2171,6 +2356,136 @@ static pj_status_t on_stun_send_msg(pj_stun_session *sess, return status; } ++static pj_ice_sess_check* get_current_check_at_state(pj_ice_sess *ice, ++ pj_sockaddr_t* remote_addr, pj_ice_sess_check_state state, int* current_check) ++{ ++ if (!ice || !remote_addr) return NULL; ++ // NOTE: Multiple checks can have the same remote, we only take care of the first ++ // First, check if the TCP is really connected. If not, abort ++ pj_ice_sess_check *check = NULL; ++ for (int i = 0; i < ice->clist.count; ++i) { ++ // Find related check ++ pj_ice_sess_check *c = &ice->clist.checks[i]; ++ /* Host candidate not found this this srflx! */ ++ if (pj_sockaddr_cmp(remote_addr, &c->rcand->addr) == 0) { ++ if (c->tdata == NULL || c->state != state) ++ continue; ++ /* Match */ ++ check = c; ++ if (current_check) *current_check = i; ++ break; ++ } ++ } ++ return check; ++} ++ +void ice_sess_on_peer_connection(pj_ice_sess *ice, pj_uint8_t transport_id, + pj_status_t status, + pj_sockaddr_t* remote_addr) { @@ -1228,27 +1431,15 @@ index c51dba77..b9639061 100644 + // finished + if (!remote_addr) return; + -+ // First, check if the TCP is really connected. 
If not, abort -+ pj_ice_sess_check *check = NULL; + int current_check = -1; -+ for (int i = 0; i < ice->clist.count; ++i) { -+ // Find related check -+ pj_ice_sess_check *c = &ice->clist.checks[i]; -+ /* Host candidate not found this this srflx! */ -+ if (pj_sockaddr_cmp(remote_addr, &c->rcand->addr) == 0) { -+ if (c->tdata == NULL) { -+ continue; -+ } -+ /* Match */ -+ check = c; -+ current_check = i; -+ break; -+ } -+ } -+ -+ // NOTE: Multiple checks can have the same remote, we only take care of the first ++ pj_ice_sess_check *check = get_current_check_at_state(ice, remote_addr, ++ PJ_ICE_SESS_CHECK_STATE_PENDING, ¤t_check); + if (!check) { -+ return; ++ // Handle peer reflexive candidates (incoming are still waiting here) ++ check = get_current_check_at_state(ice, remote_addr, PJ_ICE_SESS_CHECK_STATE_WAITING, ¤t_check); ++ if (!check) { ++ return; ++ } + } + + if (status != PJ_SUCCESS) { @@ -1278,21 +1469,71 @@ index c51dba77..b9639061 100644 + // Note that USERNAME and MESSAGE-INTEGRITY will be added by the + // STUN session. 
+ -+ // Initiate STUN transaction to send the request -+ status_send_msg = pj_stun_session_send_msg( -+ comp->stun_sess, msg_data, PJ_FALSE, PJ_FALSE, &rcand->addr, -+ pj_sockaddr_get_len(&rcand->addr), check->tdata); -+ if (status_send_msg != PJ_SUCCESS) { -+ check->tdata = NULL; -+ pjnath_perror(ice->obj_name, "Error sending STUN request", status_send_msg); -+ pj_log_pop_indent(); -+ } -+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS, status); ++ // Initiate STUN transaction to send the request ++ status_send_msg = pj_stun_session_send_msg( ++ comp->stun_sess, msg_data, PJ_FALSE, PJ_FALSE, &rcand->addr, ++ pj_sockaddr_get_len(&rcand->addr), check->tdata); ++ if (status_send_msg == 120104 /* CONNECTION RESET BY PEER */ ++ && rcand->type == PJ_ICE_CAND_TYPE_RELAYED) { ++ /** ++ * This part of the code is triggered when using ICE over TCP via TURN ++ * In fact, the other peer has to authorize this peer to connect to ++ * the relayed candidate. This is done by set_perm from the other case. ++ * But from this side, we can't know if the peer has authorized us. If it's ++ * not the case, the connection will got a CONNECTION RESET BY PEER status. ++ * In this case, we can try to reconnect a bit after and this until the check ++ * reached its timeout. 
++ */ ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY, status); ++ } else if (status_send_msg != PJ_SUCCESS) { ++ check->tdata = NULL; ++ pjnath_perror(ice->obj_name, "Error sending STUN request", status_send_msg); ++ pj_log_pop_indent(); ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status); ++ on_check_complete(ice, check); ++ } else if (rcand->type == PJ_ICE_CAND_TYPE_RELAYED) { ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET, status); ++ } else { ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS, status); ++ } ++} ++ ++void ice_sess_on_peer_reset_connection(pj_ice_sess *ice, pj_uint8_t transport_id, ++ pj_sockaddr_t* remote_addr) { ++ // The TCP link is reseted ++ if (!remote_addr) return; ++ pj_ice_sess_check *check = get_current_check_at_state(ice, remote_addr, ++ PJ_ICE_SESS_CHECK_STATE_PENDING, NULL); ++ if (!check) { ++ // Just check if it's not the first packet failing ++ check = get_current_check_at_state(ice, remote_addr, ++ PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET, NULL); ++ if (!check) return; ++ } ++ ++ const pj_ice_sess_cand *rcand = check->rcand; ++ if (rcand->type == PJ_ICE_CAND_TYPE_RELAYED) { ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY, 120104); ++ } ++} ++ ++void ice_sess_on_peer_packet(pj_ice_sess *ice, pj_uint8_t transport_id, ++ pj_sockaddr_t* remote_addr) { ++ // The TCP link received its bind request response ++ if (!ice || !remote_addr) return; ++ pj_ice_sess_check *check = get_current_check_at_state(ice, remote_addr, ++ PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET, NULL); ++ if (!check) return; ++ ++ const pj_ice_sess_cand *rcand = check->rcand; ++ if (rcand->type == PJ_ICE_CAND_TYPE_RELAYED) { ++ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS, PJ_SUCCESS); ++ } +} /* This callback is called when outgoing STUN request completed */ static void on_stun_request_complete(pj_stun_session *stun_sess, -@@ -2401,7 +2565,8 @@ 
static void on_stun_request_complete(pj_stun_session *stun_sess, +@@ -2401,7 +2716,8 @@ static void on_stun_request_complete(pj_stun_session *stun_sess, &check->lcand->base_addr, &check->lcand->base_addr, pj_sockaddr_get_len(&xaddr->sockaddr), @@ -1302,7 +1543,7 @@ index c51dba77..b9639061 100644 if (status != PJ_SUCCESS) { check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status); -@@ -2516,8 +2681,8 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, +@@ -2516,8 +2832,8 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, pj_grp_lock_acquire(ice->grp_lock); if (ice->is_destroying) { @@ -1313,7 +1554,7 @@ index c51dba77..b9639061 100644 } /* -@@ -2532,9 +2697,9 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, +@@ -2532,9 +2848,9 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, prio_attr = (pj_stun_priority_attr*) pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_PRIORITY, 0); if (prio_attr == NULL) { @@ -1326,7 +1567,7 @@ index c51dba77..b9639061 100644 } /* Get USE-CANDIDATE attribute */ -@@ -2579,7 +2744,7 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, +@@ -2579,7 +2895,7 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, NULL, token, PJ_TRUE, src_addr, src_addr_len); pj_grp_lock_release(ice->grp_lock); @@ -1335,7 +1576,7 @@ index c51dba77..b9639061 100644 } } else if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLED && -@@ -2591,7 +2756,7 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, +@@ -2591,7 +2907,7 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, NULL, token, PJ_TRUE, src_addr, src_addr_len); pj_grp_lock_release(ice->grp_lock); @@ -1344,7 +1585,7 @@ index c51dba77..b9639061 100644 } else { /* Switch role to controlled */ LOG4((ice->obj_name, -@@ -2606,7 +2771,7 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, +@@ -2606,7 +2922,7 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, status = pj_stun_session_create_res(sess, 
rdata, 0, NULL, &tdata); if (status != PJ_SUCCESS) { pj_grp_lock_release(ice->grp_lock); @@ -1353,7 +1594,7 @@ index c51dba77..b9639061 100644 } if (((pj_sockaddr *)src_addr)->addr.sa_family == pj_AF_INET6()) { -@@ -2663,9 +2828,9 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, +@@ -2663,9 +2979,9 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess, msg_data->has_req_data = PJ_FALSE; /* Send the response */ @@ -1366,7 +1607,7 @@ index c51dba77..b9639061 100644 /* * Handling early check. -@@ -2839,7 +3004,6 @@ static void handle_incoming_check(pj_ice_sess *ice, +@@ -2839,7 +3155,6 @@ static void handle_incoming_check(pj_ice_sess *ice, * Note: DO NOT overwrite nominated flag if one is already set. */ c->nominated = ((rcheck->use_candidate) || c->nominated); @@ -1375,7 +1616,7 @@ index c51dba77..b9639061 100644 c->state == PJ_ICE_SESS_CHECK_STATE_WAITING) { diff --git a/pjnath/src/pjnath/ice_strans.c b/pjnath/src/pjnath/ice_strans.c -index ca15a74e..d5877e31 100644 +index ca15a74e8..ca4cbae56 100644 --- a/pjnath/src/pjnath/ice_strans.c +++ b/pjnath/src/pjnath/ice_strans.c @@ -69,6 +69,7 @@ enum tp_type @@ -1386,7 +1627,7 @@ index ca15a74e..d5877e31 100644 /* The candidate type preference when STUN candidate is used */ static pj_uint8_t srflx_pref_table[PJ_ICE_CAND_TYPE_MAX] = -@@ -102,8 +103,15 @@ static void ice_rx_data(pj_ice_sess *ice, +@@ -102,8 +103,19 @@ static void ice_rx_data(pj_ice_sess *ice, void *pkt, pj_size_t size, const pj_sockaddr_t *src_addr, unsigned src_addr_len); @@ -1397,6 +1638,10 @@ index ca15a74e..d5877e31 100644 + pj_ice_sess_checklist *clist, + unsigned check_id); + ++static pj_status_t ice_reconnect_tcp_connection(pj_ice_sess *ice, ++ pj_ice_sess_checklist *clist, ++ unsigned check_id); ++ +static pj_status_t ice_close_tcp_connection(pj_ice_sess *ice, + pj_ice_sess_checklist *clist, + unsigned check_id); @@ -1404,7 +1649,24 @@ index ca15a74e..d5877e31 100644 /* STUN socket callbacks */ /* Notification when incoming 
packet has been received. */ static pj_bool_t stun_on_rx_data(pj_stun_sock *stun_sock, -@@ -201,6 +209,10 @@ struct pj_ice_strans +@@ -115,6 +127,7 @@ static pj_bool_t stun_on_rx_data(pj_stun_sock *stun_sock, + static pj_bool_t stun_on_data_sent(pj_stun_sock *stun_sock, + pj_ioqueue_op_key_t *send_key, + pj_ssize_t sent); ++static pj_bool_t turn_on_data_sent(pj_turn_sock *turn_sock, pj_ssize_t sent); + /* Notification when the status of the STUN transport has changed. */ + static pj_bool_t stun_on_status(pj_stun_sock *stun_sock, + pj_stun_sock_op op, +@@ -130,8 +143,6 @@ static void turn_on_rx_data(pj_turn_sock *turn_sock, + static void turn_on_state(pj_turn_sock *turn_sock, pj_turn_state_t old_state, + pj_turn_state_t new_state); + +- +- + /* Forward decls */ + static void ice_st_on_destroy(void *obj); + static void destroy_ice_st(pj_ice_strans *ice_st); +@@ -201,6 +212,14 @@ struct pj_ice_strans pj_bool_t destroy_req;/**< Destroy has been called? */ pj_bool_t cb_called; /**< Init error callback called?*/ @@ -1412,10 +1674,14 @@ index ca15a74e..d5877e31 100644 + pj_bool_t is_pending; + + pj_uint8_t rtp_pkt[MAX_RTP_SIZE]; ++ ++ pj_uint8_t rx_buffer[MAX_RTP_SIZE]; ++ pj_uint16_t rx_buffer_size; ++ pj_uint16_t rx_wanted_size; }; -@@ -237,6 +249,7 @@ PJ_DEF(void) pj_ice_strans_cfg_default(pj_ice_strans_cfg *cfg) +@@ -237,6 +256,7 @@ PJ_DEF(void) pj_ice_strans_cfg_default(pj_ice_strans_cfg *cfg) pj_bzero(cfg, sizeof(*cfg)); cfg->af = pj_AF_INET(); @@ -1423,7 +1689,7 @@ index ca15a74e..d5877e31 100644 pj_stun_config_init(&cfg->stun_cfg, NULL, 0, NULL, NULL); pj_ice_strans_stun_cfg_default(&cfg->stun); pj_ice_strans_turn_cfg_default(&cfg->turn); -@@ -252,6 +265,7 @@ PJ_DEF(void) pj_ice_strans_stun_cfg_default(pj_ice_strans_stun_cfg *cfg) +@@ -252,6 +272,7 @@ PJ_DEF(void) pj_ice_strans_stun_cfg_default(pj_ice_strans_stun_cfg *cfg) pj_bzero(cfg, sizeof(*cfg)); cfg->af = pj_AF_INET(); @@ -1431,7 +1697,7 @@ index ca15a74e..d5877e31 100644 cfg->port = PJ_STUN_PORT; 
cfg->max_host_cands = 64; cfg->ignore_stun_error = PJ_FALSE; -@@ -389,6 +403,7 @@ static pj_status_t add_update_turn(pj_ice_strans *ice_st, +@@ -389,6 +410,7 @@ static pj_status_t add_update_turn(pj_ice_strans *ice_st, cand->local_pref = RELAY_PREF; cand->transport_id = tp_id; cand->comp_id = (pj_uint8_t) comp->comp_id; @@ -1439,7 +1705,18 @@ index ca15a74e..d5877e31 100644 } /* Allocate and initialize TURN socket data */ -@@ -447,6 +462,7 @@ static pj_bool_t ice_cand_equals(pj_ice_sess_cand *lcand, +@@ -396,6 +418,10 @@ static pj_status_t add_update_turn(pj_ice_strans *ice_st, + data->comp = comp; + data->transport_id = cand->transport_id; + ++ if (turn_cfg->conn_type == PJ_TURN_TP_TCP) { ++ turn_cfg->alloc_param.peer_conn_type = PJ_TURN_TP_TCP; ++ } ++ + /* Create the TURN transport */ + status = pj_turn_sock_create(&ice_st->cfg.stun_cfg, turn_cfg->af, + turn_cfg->conn_type, +@@ -447,6 +473,7 @@ static pj_bool_t ice_cand_equals(pj_ice_sess_cand *lcand, || lcand->transport_id != rcand->transport_id || lcand->local_pref != rcand->local_pref || lcand->prio != rcand->prio @@ -1447,7 +1724,7 @@ index ca15a74e..d5877e31 100644 || pj_sockaddr_cmp(&lcand->addr, &rcand->addr) != 0 || pj_sockaddr_cmp(&lcand->base_addr, &rcand->base_addr) != 0) { -@@ -456,6 +472,115 @@ static pj_bool_t ice_cand_equals(pj_ice_sess_cand *lcand, +@@ -456,6 +483,115 @@ static pj_bool_t ice_cand_equals(pj_ice_sess_cand *lcand, return PJ_TRUE; } @@ -1563,7 +1840,7 @@ index ca15a74e..d5877e31 100644 static pj_status_t add_stun_and_host(pj_ice_strans *ice_st, pj_ice_strans_comp *comp, -@@ -504,6 +629,7 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st, +@@ -504,6 +640,7 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st, cand->local_pref = SRFLX_PREF; cand->transport_id = CREATE_TP_ID(TP_STUN, idx); cand->comp_id = (pj_uint8_t) comp->comp_id; @@ -1571,7 +1848,7 @@ index ca15a74e..d5877e31 100644 /* Allocate and initialize STUN socket data */ data = 
PJ_POOL_ZALLOC_T(ice_st->pool, sock_user_data); -@@ -511,11 +637,12 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st, +@@ -511,11 +648,12 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st, data->transport_id = cand->transport_id; /* Create the STUN transport */ @@ -1589,7 +1866,7 @@ index ca15a74e..d5877e31 100644 /* Start STUN Binding resolution and add srflx candidate only if server * is set. When any error occur during STUN Binding resolution, let's -@@ -581,116 +708,43 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st, +@@ -581,116 +719,43 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st, break; } @@ -1739,7 +2016,7 @@ index ca15a74e..d5877e31 100644 } return status; -@@ -803,6 +857,13 @@ PJ_DEF(pj_status_t) pj_ice_strans_create( const char *name, +@@ -803,6 +868,13 @@ PJ_DEF(pj_status_t) pj_ice_strans_create( const char *name, return status; } @@ -1753,18 +2030,19 @@ index ca15a74e..d5877e31 100644 pj_grp_lock_add_ref(ice_st->grp_lock); pj_grp_lock_add_handler(ice_st->grp_lock, pool, ice_st, &ice_st_on_destroy); -@@ -1097,6 +1158,10 @@ PJ_DEF(pj_status_t) pj_ice_strans_init_ice(pj_ice_strans *ice_st, +@@ -1097,6 +1169,11 @@ PJ_DEF(pj_status_t) pj_ice_strans_init_ice(pj_ice_strans *ice_st, ice_cb.on_ice_complete = &on_ice_complete; ice_cb.on_rx_data = &ice_rx_data; ice_cb.on_tx_pkt = &ice_tx_pkt; +#if PJ_HAS_TCP + ice_cb.wait_tcp_connection = &ice_wait_tcp_connection; ++ ice_cb.reconnect_tcp_connection = &ice_reconnect_tcp_connection; + ice_cb.close_tcp_connection = &ice_close_tcp_connection; +#endif /* Create! 
*/ status = pj_ice_sess_create(&ice_st->cfg.stun_cfg, ice_st->obj_name, role, -@@ -1172,7 +1237,7 @@ PJ_DEF(pj_status_t) pj_ice_strans_init_ice(pj_ice_strans *ice_st, +@@ -1172,7 +1249,7 @@ PJ_DEF(pj_status_t) pj_ice_strans_init_ice(pj_ice_strans *ice_st, &cand->foundation, &cand->addr, &cand->base_addr, &cand->rel_addr, pj_sockaddr_get_len(&cand->addr), @@ -1773,7 +2051,7 @@ index ca15a74e..d5877e31 100644 if (status != PJ_SUCCESS) goto on_error; } -@@ -1460,110 +1525,162 @@ PJ_DEF(pj_status_t) pj_ice_strans_sendto( pj_ice_strans *ice_st, +@@ -1460,110 +1537,154 @@ PJ_DEF(pj_status_t) pj_ice_strans_sendto( pj_ice_strans *ice_st, const pj_sockaddr_t *dst_addr, int dst_addr_len) { @@ -1860,23 +2138,27 @@ index ca15a74e..d5877e31 100644 - - if (def_cand->status == PJ_SUCCESS) { - unsigned tp_idx = GET_TP_IDX(def_cand->transport_id); -- ++ if (ice_st->is_pending) { ++ return PJ_EPENDING; ++ } + - if (def_cand->type == PJ_ICE_CAND_TYPE_RELAYED) { -- ++ return status; ++ } + - enum { - msg_disable_ind = 0xFFFF & - ~(PJ_STUN_SESS_LOG_TX_IND| - PJ_STUN_SESS_LOG_RX_IND) - }; -- ++ pj_grp_lock_release(ice_st->grp_lock); + - /* https://trac.pjsip.org/repos/ticket/1316 */ - if (comp->turn[tp_idx].sock == NULL) { - /* TURN socket error */ - return PJ_EINVALIDOP; - } -+ if (ice_st->is_pending) { -+ return PJ_EPENDING; -+ } ++ def_cand = &comp->cand_list[comp->default_cand]; - if (!comp->turn[tp_idx].log_off) { - /* Disable logging for Send/Data indications */ @@ -1887,13 +2169,63 @@ index ca15a74e..d5877e31 100644 - msg_disable_ind); - comp->turn[tp_idx].log_off = PJ_TRUE; - } -+ return status; -+ } -+ -+ pj_grp_lock_release(ice_st->grp_lock); -+ -+ def_cand = &comp->cand_list[comp->default_cand]; -+ ++ pj_bool_t add_header = def_cand->transport != PJ_CAND_UDP; ++ pj_uint8_t* final_pkt = data; ++ unsigned final_len = data_len; ++ ++ if (add_header) { ++ // TCP ++ /* ++ * RFC6544 ICE requires an agent to demultiplex STUN and ++ * application-layer traffic, since they appear on 
the same port. This ++ * demultiplexing is described in [RFC5245] and is done using the magic ++ * cookie and other fields of the message. Stream-oriented transports ++ * introduce another wrinkle, since they require a way to frame the ++ * connection so that the application and STUN packets can be extracted ++ * in order to differentiate STUN packets from application-layer ++ * traffic. For this reason, TCP media streams utilizing ICE use the ++ * basic framing provided in RFC 4571 [RFC4571], even if the application ++ * layer protocol is not RTP. ++ */ ++ pj_uint8_t header_1 = data_len % 256; ++ pj_uint8_t header_0 = data_len >> 8; ++ final_len = 2 + data_len; ++ memcpy(&ice_st->rtp_pkt, &(header_0), sizeof(pj_uint8_t)); ++ memcpy(&ice_st->rtp_pkt[1], &(header_1), sizeof(pj_uint8_t)); ++ memcpy(&ice_st->rtp_pkt[2], (unsigned char *)data, data_len); ++ final_pkt = &ice_st->rtp_pkt; ++ } + +- status = pj_turn_sock_sendto(comp->turn[tp_idx].sock, +- (const pj_uint8_t*)data, +- (unsigned)data_len, +- dst_addr, dst_addr_len); +- return (status==PJ_SUCCESS||status==PJ_EPENDING) ? 
+- PJ_SUCCESS : status; +- } else { +- const pj_sockaddr_t *dest_addr; +- unsigned dest_addr_len; +- +- if (comp->ipv4_mapped) { +- if (comp->synth_addr_len == 0 || +- pj_sockaddr_cmp(&comp->dst_addr, dst_addr) != 0) +- { +- status = pj_sockaddr_synthesize(pj_AF_INET6(), +- &comp->synth_addr, +- dst_addr); +- if (status != PJ_SUCCESS) +- return status; +- +- pj_sockaddr_cp(&comp->dst_addr, dst_addr); +- comp->synth_addr_len = pj_sockaddr_get_len( +- &comp->synth_addr); +- } +- dest_addr = &comp->synth_addr; +- dest_addr_len = comp->synth_addr_len; +- } else { +- dest_addr = dst_addr; +- dest_addr_len = dst_addr_len; +- } + if (def_cand->status == PJ_SUCCESS) { + unsigned tp_idx = GET_TP_IDX(def_cand->transport_id); + @@ -1920,11 +2252,9 @@ index ca15a74e..d5877e31 100644 + comp->turn[tp_idx].log_off = PJ_TRUE; + } + -+ status = -+ pj_turn_sock_sendto(comp->turn[tp_idx].sock, (const pj_uint8_t *)data, -+ (unsigned)data_len, dst_addr, dst_addr_len); -+ return (status == PJ_SUCCESS || status == PJ_EPENDING) ? PJ_SUCCESS -+ : status; ++ status = pj_turn_sock_sendto(comp->turn[tp_idx].sock, final_pkt, ++ final_len, dst_addr, dst_addr_len); ++ ice_st->is_pending = ((status == PJ_EPENDING) && ice_st); + } else { + const pj_sockaddr_t *dest_addr; + unsigned dest_addr_len; @@ -1947,83 +2277,20 @@ index ca15a74e..d5877e31 100644 + dest_addr_len = dst_addr_len; + } -- status = pj_turn_sock_sendto(comp->turn[tp_idx].sock, -- (const pj_uint8_t*)data, -- (unsigned)data_len, -- dst_addr, dst_addr_len); -- return (status==PJ_SUCCESS||status==PJ_EPENDING) ? 
-- PJ_SUCCESS : status; -- } else { -- const pj_sockaddr_t *dest_addr; -- unsigned dest_addr_len; -- -- if (comp->ipv4_mapped) { -- if (comp->synth_addr_len == 0 || -- pj_sockaddr_cmp(&comp->dst_addr, dst_addr) != 0) -- { -- status = pj_sockaddr_synthesize(pj_AF_INET6(), -- &comp->synth_addr, -- dst_addr); -- if (status != PJ_SUCCESS) -- return status; -- -- pj_sockaddr_cp(&comp->dst_addr, dst_addr); -- comp->synth_addr_len = pj_sockaddr_get_len( -- &comp->synth_addr); -- } -- dest_addr = &comp->synth_addr; -- dest_addr_len = comp->synth_addr_len; -- } else { -- dest_addr = dst_addr; -- dest_addr_len = dst_addr_len; -- } -+ pj_stun_sock_info stun_sock_info; -+ pj_stun_sock_get_info(comp->stun[tp_idx].sock, &stun_sock_info); -+ pj_bool_t add_header = stun_sock_info.conn_type != PJ_STUN_TP_UDP; -+ if (add_header) { -+ //TCP -+ /* -+ * RFC6544 ICE requires an agent to demultiplex STUN and -+ * application-layer traffic, since they appear on the same port. This -+ * demultiplexing is described in [RFC5245] and is done using the magic -+ * cookie and other fields of the message. Stream-oriented transports -+ * introduce another wrinkle, since they require a way to frame the -+ * connection so that the application and STUN packets can be extracted -+ * in order to differentiate STUN packets from application-layer -+ * traffic. For this reason, TCP media streams utilizing ICE use the -+ * basic framing provided in RFC 4571 [RFC4571], even if the application -+ * layer protocol is not RTP. 
-+ */ -+ pj_uint8_t header_1 = data_len % 256; -+ pj_uint8_t header_0 = data_len >> 8; -+ pj_size_t final_size = 2 + data_len; -+ memcpy(&ice_st->rtp_pkt, &(header_0), sizeof(pj_uint8_t)); -+ memcpy(&ice_st->rtp_pkt[1], &(header_1), sizeof(pj_uint8_t)); -+ memcpy(&ice_st->rtp_pkt[2], (unsigned char *)data, data_len); -+ status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL, &ice_st->rtp_pkt, -+ (unsigned)final_size, 0, dest_addr, dest_addr_len, size); -+ if ((status == PJ_EPENDING || *size != final_size) && ice_st) { -+ ice_st->is_pending = PJ_TRUE; -+ } -+ *size -= sizeof(pj_uint16_t); -+ } else { -+ // UDP -+ status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL, data, -+ (unsigned)data_len, 0, dest_addr, -+ dest_addr_len, size); -+ if ((status == PJ_EPENDING || *size != data_len) && ice_st) { -+ ice_st->is_pending = PJ_TRUE; -+ } -+ } - - status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL, data, - (unsigned)data_len, 0, dest_addr, - dest_addr_len); - return (status==PJ_SUCCESS||status==PJ_EPENDING) ? - PJ_SUCCESS : status; - } -+ return status; ++ status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL, final_pkt, ++ final_len, 0, dest_addr, dest_addr_len, size); ++ ++ if (add_header) *size -= sizeof(pj_uint16_t); // Do not count the header ++ ice_st->is_pending = ((status == PJ_EPENDING || *size != data_len) && ice_st); + } ++ ++ return status; - } else - return PJ_EINVALIDOP; @@ -2032,7 +2299,7 @@ index ca15a74e..d5877e31 100644 } /* -@@ -1615,7 +1732,15 @@ static void on_ice_complete(pj_ice_sess *ice, pj_status_t status) +@@ -1615,7 +1736,15 @@ static void on_ice_complete(pj_ice_sess *ice, pj_status_t status) sizeof(lip), 3); pj_sockaddr_print(&check->rcand->addr, rip, sizeof(rip), 3); @@ -2049,7 +2316,7 @@ index ca15a74e..d5877e31 100644 if (tp_typ == TP_TURN) { /* Activate channel binding for the remote address * for more efficient data transfer using TURN. 
-@@ -1683,12 +1808,16 @@ static pj_status_t ice_tx_pkt(pj_ice_sess *ice, +@@ -1683,24 +1812,55 @@ static pj_status_t ice_tx_pkt(pj_ice_sess *ice, PJ_ASSERT_RETURN(comp_id && comp_id <= ice_st->comp_cnt, PJ_EINVAL); @@ -2067,7 +2334,54 @@ index ca15a74e..d5877e31 100644 pj_sockaddr_get_port(dst_addr), tp_typ)); -@@ -1724,12 +1853,52 @@ static pj_status_t ice_tx_pkt(pj_ice_sess *ice, +- if (tp_typ == TP_TURN) { +- if (comp->turn[tp_idx].sock) { +- status = pj_turn_sock_sendto(comp->turn[tp_idx].sock, +- (const pj_uint8_t*)pkt, +- (unsigned)size, +- dst_addr, dst_addr_len); +- } else { +- status = PJ_EINVALIDOP; ++ pj_bool_t add_header = comp->ice_st->cfg.stun_tp->conn_type == PJ_STUN_TP_TCP; ++ pj_uint8_t* final_pkt = pkt; ++ unsigned final_len = size; ++ pj_ssize_t sent_size; ++ ++ if (add_header) { ++ // TCP ++ /* ++ * RFC6544 ICE requires an agent to demultiplex STUN and ++ * application-layer traffic, since they appear on the same port. This ++ * demultiplexing is described in [RFC5245] and is done using the magic ++ * cookie and other fields of the message. Stream-oriented transports ++ * introduce another wrinkle, since they require a way to frame the ++ * connection so that the application and STUN packets can be extracted ++ * in order to differentiate STUN packets from application-layer ++ * traffic. For this reason, TCP media streams utilizing ICE use the ++ * basic framing provided in RFC 4571 [RFC4571], even if the application ++ * layer protocol is not RTP. 
++ */ ++ pj_uint8_t header_1 = size % 256; ++ pj_uint8_t header_0 = size >> 8; ++ final_len = 2 + size; ++ memcpy(&ice_st->rtp_pkt, &(header_0), sizeof(pj_uint8_t)); ++ memcpy(&ice_st->rtp_pkt[1], &(header_1), sizeof(pj_uint8_t)); ++ memcpy(&ice_st->rtp_pkt[2], (unsigned char *)pkt, size); ++ final_pkt = &ice_st->rtp_pkt; + } ++ ++ if (tp_typ == TP_TURN) { ++ if (comp->turn[tp_idx].sock) { ++ status = pj_turn_sock_sendto(comp->turn[tp_idx].sock, ++ final_pkt, final_len, dst_addr, dst_addr_len); ++ ice_st->is_pending = status == PJ_EPENDING; ++ } else { ++ status = PJ_EINVALIDOP; ++ } + } else if (tp_typ == TP_STUN) { + const pj_sockaddr_t *dest_addr; + unsigned dest_addr_len; +@@ -1724,12 +1884,16 @@ static pj_status_t ice_tx_pkt(pj_ice_sess *ice, dest_addr_len = dst_addr_len; } @@ -2075,45 +2389,9 @@ index ca15a74e..d5877e31 100644 - pkt, (unsigned)size, 0, - dest_addr, dest_addr_len); + if (comp->stun[tp_idx].sock) { -+ pj_ssize_t sent_size; -+ -+ pj_stun_sock_info stun_sock_info; -+ pj_stun_sock_get_info(comp->stun[tp_idx].sock, &stun_sock_info); -+ pj_bool_t add_header = stun_sock_info.conn_type != PJ_STUN_TP_UDP; -+ if (add_header) { -+ //TCP -+ /* -+ * RFC6544 ICE requires an agent to demultiplex STUN and -+ * application-layer traffic, since they appear on the same port. This -+ * demultiplexing is described in [RFC5245] and is done using the magic -+ * cookie and other fields of the message. Stream-oriented transports -+ * introduce another wrinkle, since they require a way to frame the -+ * connection so that the application and STUN packets can be extracted -+ * in order to differentiate STUN packets from application-layer -+ * traffic. For this reason, TCP media streams utilizing ICE use the -+ * basic framing provided in RFC 4571 [RFC4571], even if the application -+ * layer protocol is not RTP. 
-+ */ -+ pj_uint8_t header_1 = size % 256; -+ pj_uint8_t header_0 = size >> 8; -+ const pj_size_t final_size = 2 + size; -+ memcpy(&ice_st->rtp_pkt, &(header_0), sizeof(pj_uint8_t)); -+ memcpy(&ice_st->rtp_pkt[1], &(header_1), sizeof(pj_uint8_t)); -+ memcpy(&ice_st->rtp_pkt[2], (unsigned char *)pkt, size); -+ status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL, &ice_st->rtp_pkt, -+ (unsigned)final_size, 0, dest_addr, dest_addr_len, &sent_size); -+ if (status == PJ_EPENDING || sent_size != final_size) { -+ ice_st->is_pending = PJ_TRUE; -+ } -+ } else { -+ // UDP -+ status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL, pkt, -+ (unsigned)size, 0, dest_addr, dest_addr_len, &sent_size); -+ if (status == PJ_EPENDING || sent_size != size) { -+ ice_st->is_pending = PJ_TRUE; -+ } -+ } -+ ++ status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL, final_pkt, ++ final_len, 0, dest_addr, dest_addr_len, &sent_size); ++ ice_st->is_pending = (status == PJ_EPENDING || sent_size != final_len); + } else { + status = PJ_EINVALIDOP; + } @@ -2124,7 +2402,7 @@ index ca15a74e..d5877e31 100644 } return (status==PJ_SUCCESS||status==PJ_EPENDING) ? 
PJ_SUCCESS : status; -@@ -1755,22 +1924,115 @@ static void ice_rx_data(pj_ice_sess *ice, +@@ -1755,22 +1919,213 @@ static void ice_rx_data(pj_ice_sess *ice, } } @@ -2154,6 +2432,61 @@ index ca15a74e..d5877e31 100644 + ice_sess_on_peer_connection(ice_st->ice, data->transport_id, status, remote_addr); +} + ++static void on_peer_reset_connection(pj_stun_session* sess, pj_sockaddr_t* remote_addr) { ++ ++ sock_user_data *data; ++ pj_ice_strans_comp *comp; ++ pj_ice_strans *ice_st; ++ pj_stun_sock* stun_sock = (pj_stun_sock *)pj_stun_session_get_user_data(sess); ++ if (!stun_sock) { ++ /* We have disassociated ourselves from the STUN session */ ++ return; ++ } ++ data = (sock_user_data *)pj_stun_sock_get_user_data(stun_sock); ++ if (!data) { ++ /* We have disassociated ourselves from the STUN socket */ ++ return; ++ } ++ ++ comp = data->comp; ++ ice_st = comp->ice_st; ++ if (!ice_st || !ice_st->ice) { ++ // Incorrect ICE ++ return; ++ } ++ ++ ice_sess_on_peer_reset_connection(ice_st->ice, data->transport_id, remote_addr); ++} ++ ++static void on_peer_packet(pj_stun_session* sess, pj_sockaddr_t* remote_addr) { ++ ++ if (!sess || !remote_addr) return; ++ sock_user_data *data; ++ pj_ice_strans_comp *comp; ++ pj_ice_strans *ice_st; ++ pj_stun_sock* stun_sock = (pj_stun_sock *)pj_stun_session_get_user_data(sess); ++ if (!stun_sock) { ++ /* We have disassociated ourselves from the STUN session */ ++ return; ++ } ++ data = (sock_user_data *)pj_stun_sock_get_user_data(stun_sock); ++ if (!data) { ++ /* We have disassociated ourselves from the STUN socket */ ++ return; ++ } ++ ++ comp = data->comp; ++ if (!comp) return; ++ ice_st = comp->ice_st; ++ if (!ice_st || !ice_st->ice) { ++ // Incorrect ICE ++ return; ++ } ++ ++ ice_sess_on_peer_packet(ice_st->ice, data->transport_id, remote_addr); ++} ++ ++#if PJ_HAS_TCP +static pj_status_t ice_wait_tcp_connection(pj_ice_sess *ice, + pj_ice_sess_checklist *clist, + unsigned check_id) { @@ -2187,6 +2520,8 @@ index ca15a74e..d5877e31 100644 
+ return PJ_EINVAL; + } + pj_stun_session_callback(sess)->on_peer_connection = &on_peer_connection; ++ pj_stun_session_callback(sess)->on_peer_reset_connection = &on_peer_reset_connection; ++ pj_stun_session_callback(sess)->on_peer_packet = &on_peer_packet; + return pj_stun_sock_connect_active(st_comp->stun[idx].sock, &rcand->addr, + rcand->addr.addr.sa_family); + } @@ -2194,7 +2529,48 @@ index ca15a74e..d5877e31 100644 + return PJ_EINVAL; +} + -+#if PJ_HAS_TCP ++static pj_status_t ice_reconnect_tcp_connection(pj_ice_sess *ice, ++ pj_ice_sess_checklist *clist, ++ unsigned check_id) { ++ pj_ice_sess_check *check; ++ check = &clist->checks[check_id]; ++ const pj_ice_sess_cand *lcand; ++ lcand = check->lcand; ++ const pj_ice_sess_cand *rcand; ++ rcand = check->rcand; ++ ++ pj_ice_strans *ice_st = (pj_ice_strans *)ice->user_data; ++ pj_ice_strans_comp *st_comp = ice_st->comp[lcand->comp_id - 1]; ++ ++ int idx = -1; ++ for (int i=0; i<ice_st->cfg.stun_tp_cnt; ++i) { ++ if (ice_st->cfg.stun_tp[i].af == rcand->addr.addr.sa_family) { ++ idx = i; ++ break; ++ } ++ } ++ if (idx == -1) { ++ PJ_LOG(4, (ice_st->obj_name, "Comp %d: No STUN sock found.", ++ st_comp->comp_id)); ++ return PJ_EINVAL; ++ } ++ if (st_comp->stun[idx].sock) { ++ pj_stun_session *sess = pj_stun_sock_get_session(st_comp->stun[idx].sock); ++ if (!sess) { ++ PJ_LOG(4, (ice_st->obj_name, "Comp %d: No STUN session.", ++ st_comp->comp_id)); ++ return PJ_EINVAL; ++ } ++ pj_stun_session_callback(sess)->on_peer_connection = &on_peer_connection; ++ pj_stun_session_callback(sess)->on_peer_reset_connection = &on_peer_reset_connection; ++ pj_stun_session_callback(sess)->on_peer_packet = &on_peer_packet; ++ return pj_stun_sock_reconnect_active(st_comp->stun[idx].sock, &rcand->addr, ++ rcand->addr.addr.sa_family); ++ } ++ ++ return PJ_EINVAL; ++} ++ +static pj_status_t ice_close_tcp_connection(pj_ice_sess *ice, + pj_ice_sess_checklist *clist, + unsigned check_id) { @@ -2253,7 +2629,7 @@ index ca15a74e..d5877e31 100644 
/* We have disassociated ourselves from the STUN socket */ return PJ_FALSE; } -@@ -1814,9 +2076,17 @@ static pj_bool_t stun_on_data_sent(pj_stun_sock *stun_sock, +@@ -1814,9 +2169,34 @@ static pj_bool_t stun_on_data_sent(pj_stun_sock *stun_sock, pj_ioqueue_op_key_t *send_key, pj_ssize_t sent) { @@ -2270,11 +2646,28 @@ index ca15a74e..d5877e31 100644 + ice_st->is_pending = PJ_FALSE; + if (ice_st->cb.on_data_sent) { + (*ice_st->cb.on_data_sent)(ice_st, comp->comp_id, sent); ++ } ++ return PJ_TRUE; ++} ++ ++static pj_bool_t turn_on_data_sent(pj_turn_sock *turn_sock, pj_ssize_t sent) ++{ ++ sock_user_data *data; ++ pj_ice_strans_comp *comp; ++ pj_ice_strans *ice_st; ++ ++ data = (sock_user_data*) pj_turn_sock_get_user_data(turn_sock); ++ comp = data->comp; ++ ice_st = comp->ice_st; ++ ice_st->is_pending = PJ_FALSE; ++ ++ if (ice_st->cb.on_data_sent) { ++ (*ice_st->cb.on_data_sent)(ice_st, comp->comp_id, sent); + } return PJ_TRUE; } -@@ -2021,6 +2291,10 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock, +@@ -2021,6 +2401,10 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock, } } break; @@ -2285,7 +2678,119 @@ index ca15a74e..d5877e31 100644 } return pj_grp_lock_dec_ref(ice_st->grp_lock)? 
PJ_FALSE : PJ_TRUE; -@@ -2252,5 +2526,4 @@ static void turn_on_state(pj_turn_sock *turn_sock, pj_turn_state_t old_state, +@@ -2029,7 +2413,7 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock, + /* Callback when TURN socket has received a packet */ + static void turn_on_rx_data(pj_turn_sock *turn_sock, + void *pkt, +- unsigned pkt_len, ++ unsigned size, + const pj_sockaddr_t *peer_addr, + unsigned addr_len) + { +@@ -2055,20 +2439,93 @@ static void turn_on_rx_data(pj_turn_sock *turn_sock, + */ + if (comp->ice_st->cb.on_rx_data) { + (*comp->ice_st->cb.on_rx_data)(comp->ice_st, comp->comp_id, pkt, +- pkt_len, peer_addr, addr_len); ++ size, peer_addr, addr_len); + } + + } else { + + /* Hand over the packet to ICE */ +- status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id, +- data->transport_id, pkt, pkt_len, +- peer_addr, addr_len); +- +- if (status != PJ_SUCCESS) { +- ice_st_perror(comp->ice_st, +- "Error processing packet from TURN relay", +- status); ++ if (comp->ice_st->cfg.turn_tp->conn_type == PJ_TURN_TP_TCP && size > 0) { ++ pj_uint16_t parsed = 0; ++ pj_status_t result = PJ_TRUE; ++ pj_status_t status; ++ ++ do { ++ pj_uint16_t pkt_len = size - parsed; ++ pj_uint8_t *current_packet = ((pj_uint8_t *)(pkt)) + parsed; ++ ++ /* RFC6544, the packet is wrapped into a packet following the RFC4571 */ ++ // cf stun_sock.c:parse_rx_packet ++ pj_bool_t store_remaining = PJ_TRUE; ++ if (comp->ice_st->rx_buffer_size != 0 || comp->ice_st->rx_wanted_size != 0) { ++ // We currently have a packet to complete ++ if (comp->ice_st->rx_buffer_size == 1) { ++ // We do not know the current size, parse it. ++ pkt_len = (((pj_uint8_t *)comp->ice_st->rx_buffer)[0] << 8) + ++ ((pj_uint8_t *)current_packet)[0]; ++ comp->ice_st->rx_buffer_size = 0; // We have eaten the temp packet. 
++ current_packet = current_packet + 1; ++ parsed += 1; ++ if (pkt_len + parsed <= size) { ++ store_remaining = PJ_FALSE; ++ parsed += pkt_len; ++ } else { ++ comp->ice_st->rx_wanted_size = pkt_len; ++ } ++ } else if (pkt_len + comp->ice_st->rx_buffer_size >= comp->ice_st->rx_wanted_size) { ++ // We have enough pkt Build new packet to parse ++ store_remaining = PJ_FALSE; ++ pj_uint16_t eaten_bytes = comp->ice_st->rx_wanted_size - comp->ice_st->rx_buffer_size; ++ memcpy(comp->ice_st->rx_buffer + comp->ice_st->rx_buffer_size, ++ current_packet, eaten_bytes); ++ pkt_len = comp->ice_st->rx_wanted_size; ++ current_packet = comp->ice_st->rx_buffer; ++ parsed += eaten_bytes; ++ comp->ice_st->rx_buffer_size = 0; ++ comp->ice_st->rx_wanted_size = 0; ++ } ++ } else if (pkt_len > 1) { ++ pkt_len = (((pj_uint8_t *)current_packet)[0] << 8) + ((pj_uint8_t *)current_packet)[1]; ++ current_packet = current_packet + 2; ++ parsed += 2; ++ if (pkt_len + parsed <= size) { ++ store_remaining = PJ_FALSE; ++ parsed += pkt_len; ++ } else { ++ comp->ice_st->rx_wanted_size = pkt_len; ++ ++ } ++ } ++ if (store_remaining) { ++ pj_uint16_t stored_size = size - parsed; ++ memcpy(comp->ice_st->rx_buffer + comp->ice_st->rx_buffer_size, ++ current_packet, stored_size); ++ comp->ice_st->rx_buffer_size += stored_size; ++ status = pj_grp_lock_release(comp->ice_st->grp_lock); ++ result &= status != PJ_EGONE ? 
PJ_TRUE : PJ_FALSE; ++ status = PJ_SUCCESS; ++ break; ++ } ++ ++ status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id, ++ data->transport_id, current_packet, pkt_len, ++ peer_addr, addr_len); ++ ++ if (status != PJ_SUCCESS) { ++ ice_st_perror(comp->ice_st, ++ "Error processing packet from TURN relay", ++ status); ++ } ++ } while (parsed < size && result); ++ } else { ++ status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id, ++ data->transport_id, pkt, size, ++ peer_addr, addr_len); ++ if (status != PJ_SUCCESS) { ++ ice_st_perror(comp->ice_st, ++ "Error processing packet from TURN relay", ++ status); ++ } + } + } + +@@ -2252,5 +2709,4 @@ static void turn_on_state(pj_turn_sock *turn_sock, pj_turn_state_t old_state, pj_grp_lock_dec_ref(comp->ice_st->grp_lock); pj_log_pop_indent(); @@ -2294,7 +2799,7 @@ index ca15a74e..d5877e31 100644 +} \ No newline at end of file diff --git a/pjnath/src/pjnath/nat_detect.c b/pjnath/src/pjnath/nat_detect.c -index db0de10b..3013eeed 100644 +index db0de10bc..3013eeed2 100644 --- a/pjnath/src/pjnath/nat_detect.c +++ b/pjnath/src/pjnath/nat_detect.c @@ -329,7 +329,8 @@ PJ_DEF(pj_status_t) pj_stun_detect_nat_type2(const pj_sockaddr *server, @@ -2324,7 +2829,7 @@ index db0de10b..3013eeed 100644 goto on_error; diff --git a/pjnath/src/pjnath/stun_session.c b/pjnath/src/pjnath/stun_session.c -index 7b53aba7..2b006d91 100644 +index 7b53aba74..2b006d918 100644 --- a/pjnath/src/pjnath/stun_session.c +++ b/pjnath/src/pjnath/stun_session.c @@ -49,6 +49,8 @@ struct pj_stun_session @@ -2354,7 +2859,7 @@ index 7b53aba7..2b006d91 100644 if (grp_lock) { sess->grp_lock = grp_lock; -@@ -1511,3 +1515,9 @@ on_return: +@@ -1511,3 +1515,9 @@ PJ_DEF(pj_status_t) pj_stun_session_on_rx_pkt(pj_stun_session *sess, return status; } @@ -2365,7 +2870,7 @@ index 7b53aba7..2b006d91 100644 + return sess ? 
sess->conn_type : PJ_STUN_TP_UDP; +} diff --git a/pjnath/src/pjnath/stun_sock.c b/pjnath/src/pjnath/stun_sock.c -index 7692e6c1..7624cf25 100644 +index 7692e6c14..3b8d509ab 100644 --- a/pjnath/src/pjnath/stun_sock.c +++ b/pjnath/src/pjnath/stun_sock.c @@ -1,5 +1,5 @@ @@ -3035,7 +3540,7 @@ index 7692e6c1..7624cf25 100644 } stun_sock->tsx_id[5] = 0; -@@ -378,15 +530,238 @@ PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg, +@@ -378,15 +530,248 @@ PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg, stun_sock->ka_timer.cb = &ka_timer_cb; stun_sock->ka_timer.user_data = stun_sock; @@ -3130,7 +3635,6 @@ index 7692e6c1..7624cf25 100644 +#if PJ_HAS_TCP + } +#endif -+ + /* Check that this is STUN message */ + status = pj_stun_msg_check((const pj_uint8_t *)current_packet, pkt_len, + PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET); @@ -3184,21 +3688,29 @@ index 7692e6c1..7624cf25 100644 + result &= status != PJ_EGONE ? PJ_TRUE : PJ_FALSE; + } while (parsed < size && result); + return result; -+} -+ + } + +pj_bool_t on_data_read(pj_activesock_t *asock, void *data, pj_size_t size, + pj_status_t status, pj_size_t *remainder) { + + pj_stun_sock *stun_sock; + + stun_sock = (pj_stun_sock *)pj_activesock_get_user_data(asock); -+ if (!stun_sock) -+ return PJ_FALSE; ++ if (!stun_sock) return PJ_FALSE; + ++ pj_stun_session_cb *cb = pj_stun_session_callback(stun_sock->stun_sess); + /* Log socket error or disconnection */ + if (status != PJ_SUCCESS) { -+ if (stun_sock->conn_type == PJ_STUN_TP_UDP || status != PJ_EEOF) { ++ if (stun_sock->conn_type == PJ_STUN_TP_UDP || (status != PJ_EEOF && status != 120104)) { + PJ_PERROR(2, (stun_sock->obj_name, status, "read() error")); ++ } else if (status == 120104 /* RESET BY PEER */) { ++ for (int i = 0; i <= stun_sock->outgoing_nb; ++i) { ++ if (stun_sock->outgoing_socks[i].sock == asock) { ++ if (cb && (cb->on_peer_reset_connection)) { ++ (cb->on_peer_reset_connection)(stun_sock->stun_sess, 
stun_sock->outgoing_socks[i].addr); ++ } ++ } ++ } + } + return PJ_FALSE; + } @@ -3209,6 +3721,8 @@ index 7692e6c1..7624cf25 100644 + if (stun_sock->outgoing_socks[i].sock == asock) { + rx_addr = stun_sock->outgoing_socks[i].addr; + sock_addr_len = pj_sockaddr_get_len(rx_addr); ++ if (cb && (cb->on_peer_packet)) ++ (cb->on_peer_packet)(stun_sock->stun_sess, stun_sock->outgoing_socks[i].addr); + } + } + if (rx_addr == NULL && stun_sock->incoming_nb != -1) { @@ -3218,10 +3732,11 @@ index 7692e6c1..7624cf25 100644 + } + return parse_rx_packet(stun_sock, data, size, rx_addr, sock_addr_len); +#else ++ pj_grp_lock_release(stun_sock->grp_lock); + return PJ_FALSE; +#endif - } - ++} ++ +#if PJ_HAS_TCP +/* + * Notification when incoming TCP socket has been connected. @@ -3280,7 +3795,7 @@ index 7692e6c1..7624cf25 100644 /* Start socket. */ PJ_DEF(pj_status_t) pj_stun_sock_start( pj_stun_sock *stun_sock, const pj_str_t *domain, -@@ -504,15 +879,35 @@ PJ_DEF(pj_status_t) pj_stun_sock_destroy(pj_stun_sock *stun_sock) +@@ -504,15 +889,35 @@ PJ_DEF(pj_status_t) pj_stun_sock_destroy(pj_stun_sock *stun_sock) } stun_sock->is_destroying = PJ_TRUE; @@ -3323,7 +3838,7 @@ index 7692e6c1..7624cf25 100644 } if (stun_sock->stun_sess) { -@@ -619,12 +1014,12 @@ static pj_status_t get_mapped_addr(pj_stun_sock *stun_sock) +@@ -619,12 +1024,12 @@ static pj_status_t get_mapped_addr(pj_stun_sock *stun_sock) &tdata); if (status != PJ_SUCCESS) goto on_error; @@ -3341,7 +3856,7 @@ index 7692e6c1..7624cf25 100644 if (status != PJ_SUCCESS && status != PJ_EPENDING) goto on_error; -@@ -637,7 +1032,7 @@ on_error: +@@ -637,7 +1042,7 @@ static pj_status_t get_mapped_addr(pj_stun_sock *stun_sock) /* Get info */ PJ_DEF(pj_status_t) pj_stun_sock_get_info( pj_stun_sock *stun_sock, @@ -3350,7 +3865,7 @@ index 7692e6c1..7624cf25 100644 { int addr_len; pj_status_t status; -@@ -646,73 +1041,73 @@ PJ_DEF(pj_status_t) pj_stun_sock_get_info( pj_stun_sock *stun_sock, +@@ -646,73 +1051,73 @@ PJ_DEF(pj_status_t) 
pj_stun_sock_get_info( pj_stun_sock *stun_sock, pj_grp_lock_acquire(stun_sock->grp_lock); @@ -3480,7 +3995,7 @@ index 7692e6c1..7624cf25 100644 } pj_grp_lock_release(stun_sock->grp_lock); -@@ -726,36 +1121,235 @@ PJ_DEF(pj_status_t) pj_stun_sock_sendto( pj_stun_sock *stun_sock, +@@ -726,36 +1131,253 @@ PJ_DEF(pj_status_t) pj_stun_sock_sendto( pj_stun_sock *stun_sock, unsigned pkt_len, unsigned flag, const pj_sockaddr_t *dst_addr, @@ -3513,7 +4028,10 @@ index 7692e6c1..7624cf25 100644 if (send_key==NULL) - send_key = &stun_sock->send_key; + send_key = &stun_sock->send_key; -+ + +- size = pkt_len; +- status = pj_activesock_sendto(stun_sock->active_sock, send_key, +- pkt, &size, flag, dst_addr, addr_len); + *size = pkt_len; + if (stun_sock->conn_type == PJ_STUN_TP_UDP) { + status = pj_activesock_sendto(stun_sock->main_sock, send_key, @@ -3548,7 +4066,7 @@ index 7692e6c1..7624cf25 100644 + +#endif + } -+ + + pj_grp_lock_release(stun_sock->grp_lock); + return status; +} @@ -3556,39 +4074,22 @@ index 7692e6c1..7624cf25 100644 +#if PJ_HAS_TCP + +PJ_DECL(pj_status_t) -+pj_stun_sock_connect_active(pj_stun_sock *stun_sock, const pj_sockaddr_t *remote_addr, int af) { - -- size = pkt_len; -- status = pj_activesock_sendto(stun_sock->active_sock, send_key, -- pkt, &size, flag, dst_addr, addr_len); -+ if (stun_sock->incoming_nb != -1) { -+ // Check if not incoming, if so, already connected (mainly for PRFLX candidates) -+ for (int i = 0 ; i <= stun_sock->incoming_nb; ++i) { -+ if (pj_sockaddr_cmp(&stun_sock->incoming_socks[i].addr, remote_addr) == 0) { -+ pj_stun_session_cb *cb = pj_stun_session_callback(stun_sock->stun_sess); -+ (cb->on_peer_connection)(stun_sock->stun_sess, PJ_SUCCESS, remote_addr); -+ return PJ_SUCCESS; -+ } -+ } -+ } -+ pj_status_t status; ++pj_stun_sock_connect(pj_stun_sock *stun_sock, const pj_sockaddr_t *remote_addr, int af, int nb_check) { + + pj_grp_lock_acquire(stun_sock->grp_lock); + int sock_type = pj_SOCK_STREAM(); + -+ /* Create socket and bind 
socket */ -+ stun_sock->outgoing_nb += 1; -+ int nb_check = stun_sock->outgoing_nb; + pj_sock_t *fd = &stun_sock->outgoing_socks[nb_check].fd; + pj_activesock_t **asock = &stun_sock->outgoing_socks[nb_check].sock; + pj_sockaddr_t **addr = &stun_sock->outgoing_socks[nb_check].addr; -+ status = pj_sock_socket(af, sock_type, 0, fd); ++ ++ pj_status_t status = pj_sock_socket(af, sock_type, 0, fd); + if (status != PJ_SUCCESS) { + pj_stun_sock_destroy(stun_sock); + pj_grp_lock_release(stun_sock->grp_lock); + return status; + } - ++ + /* Apply QoS, if specified */ + status = pj_sock_apply_qos2(*fd, stun_sock->setting.qos_type, + &stun_sock->setting.qos_params, 2, stun_sock->obj_name, NULL); @@ -3677,8 +4178,40 @@ index 7692e6c1..7624cf25 100644 + + pj_grp_lock_release(stun_sock->grp_lock); + return status; - } - ++} ++ ++PJ_DECL(pj_status_t) ++pj_stun_sock_connect_active(pj_stun_sock *stun_sock, const pj_sockaddr_t *remote_addr, int af) { ++ ++ if (stun_sock->incoming_nb != -1) { ++ // Check if not incoming, if so, already connected (mainly for PRFLX candidates) ++ for (int i = 0 ; i <= stun_sock->incoming_nb; ++i) { ++ if (pj_sockaddr_cmp(&stun_sock->incoming_socks[i].addr, remote_addr) == 0) { ++ pj_stun_session_cb *cb = pj_stun_session_callback(stun_sock->stun_sess); ++ (cb->on_peer_connection)(stun_sock->stun_sess, PJ_SUCCESS, remote_addr); ++ return PJ_SUCCESS; ++ } ++ } ++ } ++ ++ /* Create socket and bind socket */ ++ stun_sock->outgoing_nb += 1; ++ int nb_check = stun_sock->outgoing_nb; ++ return pj_stun_sock_connect(stun_sock, remote_addr, af, nb_check); ++ ++} ++ ++PJ_DECL(pj_status_t) ++pj_stun_sock_reconnect_active(pj_stun_sock *stun_sock, const pj_sockaddr_t *remote_addr, int af) { ++ for (int i = 0; i <= stun_sock->outgoing_nb; ++i) { ++ if (pj_sockaddr_cmp(stun_sock->outgoing_socks[i].addr, remote_addr) == 0) { ++ pj_activesock_close(stun_sock->outgoing_socks[i].sock); ++ return pj_stun_sock_connect(stun_sock, remote_addr, af, i); ++ } ++ } ++ return 
PJ_EINVAL; ++} ++ +PJ_DECL(pj_status_t) +pj_stun_sock_close(pj_stun_sock *stun_sock, const pj_sockaddr_t *remote_addr) { + for (int i = 0; i <= stun_sock->outgoing_nb; ++i) { @@ -3720,8 +4253,8 @@ index 7692e6c1..7624cf25 100644 + if (!remote_addr) return PJ_FALSE; + (cb->on_peer_connection)(stun_sock->stun_sess, status, remote_addr); + return PJ_TRUE; -+} -+ + } + +#endif + /* This callback is called by the STUN session to send packet */ @@ -3730,7 +4263,7 @@ index 7692e6c1..7624cf25 100644 void *token, const void *pkt, pj_size_t pkt_size, -@@ -766,7 +1360,7 @@ static pj_status_t sess_on_send_msg(pj_stun_session *sess, +@@ -766,7 +1388,7 @@ static pj_status_t sess_on_send_msg(pj_stun_session *sess, pj_ssize_t size; stun_sock = (pj_stun_sock *) pj_stun_session_get_user_data(sess); @@ -3739,7 +4272,7 @@ index 7692e6c1..7624cf25 100644 /* We have been shutdown, but this callback may still get called * by retransmit timer. */ -@@ -777,15 +1371,31 @@ static pj_status_t sess_on_send_msg(pj_stun_session *sess, +@@ -777,15 +1399,31 @@ static pj_status_t sess_on_send_msg(pj_stun_session *sess, PJ_UNUSED_ARG(token); size = pkt_size; @@ -3775,7 +4308,7 @@ index 7692e6c1..7624cf25 100644 pj_status_t status, void *token, pj_stun_tx_data *tdata, -@@ -869,9 +1479,9 @@ on_return: +@@ -869,9 +1507,9 @@ static void sess_on_request_complete(pj_stun_session *sess, } /* Schedule keep-alive timer */ @@ -3787,7 +4320,7 @@ index 7692e6c1..7624cf25 100644 &stun_sock->ka_timer, 0); pj_assert(stun_sock->ka_interval != 0); -@@ -881,7 +1491,7 @@ static void start_ka_timer(pj_stun_sock *stun_sock) +@@ -881,7 +1519,7 @@ static void start_ka_timer(pj_stun_sock *stun_sock) delay.sec = stun_sock->ka_interval; delay.msec = 0; @@ -3796,7 +4329,7 @@ index 7692e6c1..7624cf25 100644 &stun_sock->ka_timer, &delay, PJ_TRUE, stun_sock->grp_lock); -@@ -889,7 +1499,7 @@ static void start_ka_timer(pj_stun_sock *stun_sock) +@@ -889,7 +1527,7 @@ static void start_ka_timer(pj_stun_sock *stun_sock) } /* 
Keep-alive timer callback */ @@ -3805,7 +4338,7 @@ index 7692e6c1..7624cf25 100644 { pj_stun_sock *stun_sock; -@@ -911,7 +1521,7 @@ static void ka_timer_cb(pj_timer_heap_t *th, pj_timer_entry *te) +@@ -911,7 +1549,7 @@ static void ka_timer_cb(pj_timer_heap_t *th, pj_timer_entry *te) } /* Callback from active socket when incoming packet is received */ @@ -3814,7 +4347,7 @@ index 7692e6c1..7624cf25 100644 void *data, pj_size_t size, const pj_sockaddr_t *src_addr, -@@ -919,8 +1529,6 @@ static pj_bool_t on_data_recvfrom(pj_activesock_t *asock, +@@ -919,8 +1557,6 @@ static pj_bool_t on_data_recvfrom(pj_activesock_t *asock, pj_status_t status) { pj_stun_sock *stun_sock; @@ -3823,7 +4356,7 @@ index 7692e6c1..7624cf25 100644 stun_sock = (pj_stun_sock*) pj_activesock_get_user_data(asock); if (!stun_sock) -@@ -932,62 +1540,11 @@ static pj_bool_t on_data_recvfrom(pj_activesock_t *asock, +@@ -932,62 +1568,11 @@ static pj_bool_t on_data_recvfrom(pj_activesock_t *asock, return PJ_TRUE; } @@ -3888,7 +4421,7 @@ index 7692e6c1..7624cf25 100644 pj_ioqueue_op_key_t *send_key, pj_ssize_t sent) { -@@ -1024,3 +1581,7 @@ static pj_bool_t on_data_sent(pj_activesock_t *asock, +@@ -1024,3 +1609,7 @@ static pj_bool_t on_data_sent(pj_activesock_t *asock, return PJ_TRUE; } @@ -3896,8 +4429,20 @@ index 7692e6c1..7624cf25 100644 +{ + return stun_sock ? 
stun_sock->stun_sess : NULL; +} +diff --git a/pjnath/src/pjnath/stun_transaction.c b/pjnath/src/pjnath/stun_transaction.c +index 28f623005..0c15763c2 100644 +--- a/pjnath/src/pjnath/stun_transaction.c ++++ b/pjnath/src/pjnath/stun_transaction.c +@@ -394,6 +394,7 @@ static void retransmit_timer_callback(pj_timer_heap_t *timer_heap, + PJ_DEF(pj_status_t) pj_stun_client_tsx_retransmit(pj_stun_client_tsx *tsx, + pj_bool_t mod_count) + { ++ if (!tsx) return PJ_EINVAL; + if (tsx->destroy_timer.id != 0) { + return PJ_SUCCESS; + } diff --git a/pjnath/src/pjnath/turn_session.c b/pjnath/src/pjnath/turn_session.c -index fcbe703e..1c0430bc 100644 +index fcbe703e8..1c0430bc7 100644 --- a/pjnath/src/pjnath/turn_session.c +++ b/pjnath/src/pjnath/turn_session.c @@ -311,7 +311,8 @@ PJ_DEF(pj_status_t) pj_turn_session_create( const pj_stun_config *cfg, @@ -3911,7 +4456,7 @@ index fcbe703e..1c0430bc 100644 do_destroy(sess); return status; diff --git a/pjnath/src/pjturn-client/client_main.c b/pjnath/src/pjturn-client/client_main.c -index 1a866722..5f013631 100644 +index 1a8667220..5f013631a 100644 --- a/pjnath/src/pjturn-client/client_main.c +++ b/pjnath/src/pjturn-client/client_main.c @@ -154,10 +154,10 @@ static int init() @@ -3941,7 +4486,7 @@ index 1a866722..5f013631 100644 case 'q': g.quit = PJ_TRUE; diff --git a/pjnath/src/pjturn-srv/allocation.c b/pjnath/src/pjturn-srv/allocation.c -index 6c9c9ce1..eea91f01 100644 +index 6c9c9ce11..eea91f01b 100644 --- a/pjnath/src/pjturn-srv/allocation.c +++ b/pjnath/src/pjturn-srv/allocation.c @@ -338,7 +338,7 @@ PJ_DEF(pj_status_t) pj_turn_allocation_create(pj_turn_transport *transport, @@ -3954,7 +4499,7 @@ index 6c9c9ce1..eea91f01 100644 goto on_error; } diff --git a/pjnath/src/pjturn-srv/server.c b/pjnath/src/pjturn-srv/server.c -index 94dda29a..95ad1793 100644 +index 94dda29a3..95ad1793d 100644 --- a/pjnath/src/pjturn-srv/server.c +++ b/pjnath/src/pjturn-srv/server.c @@ -156,7 +156,7 @@ PJ_DEF(pj_status_t) 
pj_turn_srv_create(pj_pool_factory *pf, @@ -3967,7 +4512,7 @@ index 94dda29a..95ad1793 100644 goto on_error; } diff --git a/pjsip-apps/src/samples/icedemo.c b/pjsip-apps/src/samples/icedemo.c -index 9d811374..92826fd8 100644 +index 9d811374a..92826fd8d 100644 --- a/pjsip-apps/src/samples/icedemo.c +++ b/pjsip-apps/src/samples/icedemo.c @@ -43,7 +43,7 @@ static struct app_t @@ -4717,7 +5262,7 @@ index 9d811374..92826fd8 100644 status = icedemo_init(); diff --git a/pjsip/src/pjsua-lib/pjsua_core.c b/pjsip/src/pjsua-lib/pjsua_core.c -index ea1e72b8..39d05d1e 100644 +index ea1e72b85..39d05d1ed 100644 --- a/pjsip/src/pjsua-lib/pjsua_core.c +++ b/pjsip/src/pjsua-lib/pjsua_core.c @@ -1529,10 +1529,10 @@ static void resolve_stun_entry(pjsua_stun_resolve *sess) diff --git a/contrib/src/pjproject/win32_ice_tcp_temp_fix.patch b/contrib/src/pjproject/win32_ice_tcp_temp_fix.patch deleted file mode 100644 index 069664d759..0000000000 --- a/contrib/src/pjproject/win32_ice_tcp_temp_fix.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 5f288fe0067f995b91ea87ba4ed19fd65b75ff31 Mon Sep 17 00:00:00 2001 -From: Andreas Traczyk <andreas.traczyk@savoirfairelinux.com> -Date: Tue, 11 Jun 2019 16:47:06 -0400 -Subject: [PATCH] fix for windows GetAdaptersAddresses - ---- - pjnath/src/pjnath/ice_strans.c | 8 ++------ - 1 file changed, 2 insertions(+), 6 deletions(-) - -diff --git a/pjnath/src/pjnath/ice_strans.c b/pjnath/src/pjnath/ice_strans.c -index 6172172..33ac521 100644 ---- a/pjnath/src/pjnath/ice_strans.c -+++ b/pjnath/src/pjnath/ice_strans.c -@@ -1645,9 +1645,7 @@ pj_ice_strans_sendto2(pj_ice_strans *ice_st, unsigned comp_id, const void *data, - dest_addr_len = dst_addr_len; - } - -- pj_stun_sock_info stun_sock_info; -- pj_stun_sock_get_info(comp->stun[tp_idx].sock, &stun_sock_info); -- pj_bool_t add_header = stun_sock_info.conn_type != PJ_STUN_TP_UDP; -+ pj_bool_t add_header = comp->ice_st->cfg.stun_tp->conn_type == PJ_STUN_TP_TCP; - if (add_header) { - //TCP - /* -@@ -1864,9 +1862,7 @@ 
static pj_status_t ice_tx_pkt(pj_ice_sess *ice, - if (comp->stun[tp_idx].sock) { - pj_ssize_t sent_size; - -- pj_stun_sock_info stun_sock_info; -- pj_stun_sock_get_info(comp->stun[tp_idx].sock, &stun_sock_info); -- pj_bool_t add_header = stun_sock_info.conn_type != PJ_STUN_TP_UDP; -+ pj_bool_t add_header = comp->ice_st->cfg.stun_tp->conn_type == PJ_STUN_TP_TCP; - if (add_header) { - //TCP - /* --- -2.7.4 - diff --git a/src/ice_socket.h b/src/ice_socket.h index 4810bc5471..cdccafcdd4 100644 --- a/src/ice_socket.h +++ b/src/ice_socket.h @@ -67,12 +67,13 @@ public: static constexpr uint16_t IPV4_HEADER_SIZE = 20; // Size in bytes of IPv4 packet header static constexpr uint16_t UDP_HEADER_SIZE = 8; // Size in bytes of UDP header - IceSocketTransport(std::shared_ptr<IceTransport>& ice, int comp_id) + IceSocketTransport(std::shared_ptr<IceTransport>& ice, int comp_id, bool reliable = false) : compId_ {comp_id} - , ice_ {ice} {} + , ice_ {ice} + , reliable_ {reliable} {} bool isReliable() const override { - return false; // we consider that a ICE transport is never reliable (UDP support only) + return reliable_; } bool isInitiator() const override; @@ -94,6 +95,7 @@ public: private: const int compId_; std::shared_ptr<IceTransport> ice_; + bool reliable_; }; }; diff --git a/src/ice_transport.cpp b/src/ice_transport.cpp index 0a7478751f..1bda97e7af 100644 --- a/src/ice_transport.cpp +++ b/src/ice_transport.cpp @@ -26,7 +26,6 @@ #include "upnp/upnp_control.h" #include <pjlib.h> -#include <msgpack.hpp> #include <map> #include <atomic> @@ -92,8 +91,6 @@ public: MutexGuard lk{mutex_}; stream_.clear(); stream_ << data; - - notified_ = true; cv_.notify_one(); } @@ -111,14 +108,14 @@ public: MutexGuard lk_api{apiMutex_, std::adopt_lock}; MutexLock lk{mutex_, std::adopt_lock}; auto a = cv_.wait_for(lk, timeout, - [this] { return stop_ or /*(data.size() != 0)*/ !stream_.eof(); }); + [this] { return stop_ or !stream_.eof(); }); return a; } std::size_t read(char *output, std::size_t 
size) { - std::lock(apiMutex_, mutex_); - MutexGuard lk_api{apiMutex_, std::adopt_lock}; MutexLock lk{mutex_, std::adopt_lock}; + if (stream_.eof()) return 0; + MutexGuard lk_api{apiMutex_, std::adopt_lock}; cv_.wait(lk, [&, this] { if (stop_) return true; @@ -136,9 +133,6 @@ public: stop_ = true; } cv_.notify_all(); - - // Make sure that no thread is blocked into read() or wait() methods - MutexGuard lk_api{apiMutex_}; } private: @@ -149,9 +143,6 @@ private: std::condition_variable cv_{}; std::stringstream stream_{}; bool stop_{false}; - bool notified_{false}; - - std::vector<char> data; friend void operator<<(std::vector<char> &, PeerChannel &); }; @@ -213,6 +204,8 @@ public: pj_ice_strans_cfg config_; std::string last_errmsg_; + std::atomic_bool is_stopped_ {false}; + struct Packet { Packet(void *pkt, pj_size_t size) : data{reinterpret_cast<char *>(pkt), reinterpret_cast<char *>(pkt) + size} { } @@ -869,11 +862,12 @@ IceTransport::Impl::onReceiveData(unsigned comp_id, void *pkt, pj_size_t size) if (on_recv_cb_) { on_recv_cb_(); } + if (io.cb) { io.cb((uint8_t*)pkt, size); } else { MutexLock lk{apiMutex_}; - auto &channel = peerChannels_.at(comp_id); + auto &channel = peerChannels_.at(comp_id-1); lk.unlock(); channel << std::string(reinterpret_cast<const char *>(pkt), size); } @@ -907,6 +901,13 @@ IceTransport::isRunning() const return pimpl_->_isRunning(); } +bool +IceTransport::isStopped() const +{ + std::lock_guard<std::mutex> lk {pimpl_->iceMutex_}; + return pimpl_->is_stopped_; +} + bool IceTransport::isFailed() const { @@ -950,12 +951,14 @@ IceTransport::start(const Attribute& rem_attrs, const std::vector<IceCandidate>& { if (not isInitialized()) { JAMI_ERR("[ice:%p] not initialized transport", this); + pimpl_->is_stopped_ = true; return false; } // pj_ice_strans_start_ice crashes if remote candidates array is empty if (rem_candidates.empty()) { JAMI_ERR("[ice:%p] start failed: no remote candidates", this); + pimpl_->is_stopped_ = true; return false; } @@ 
-969,62 +972,49 @@ IceTransport::start(const Attribute& rem_attrs, const std::vector<IceCandidate>& if (status != PJ_SUCCESS) { pimpl_->last_errmsg_ = sip_utils::sip_strerror(status); JAMI_ERR("[ice:%p] start failed: %s", this, pimpl_->last_errmsg_.c_str()); + pimpl_->is_stopped_ = true; return false; } return true; } bool -IceTransport::start(const std::vector<uint8_t>& rem_data) +IceTransport::start(const SDP& sdp) { - std::string rem_ufrag; - std::string rem_pwd; - std::vector<IceCandidate> rem_candidates; - - auto data = reinterpret_cast<const char*>(rem_data.data()); - auto size = rem_data.size(); - - try { - std::size_t offset = 0; - auto result = msgpack::unpack(data, size, offset); - auto version = result.get().as<uint8_t>(); - JAMI_DBG("[ice:%p] rx msg v%u", this, version); - if (version == 1) { - result = msgpack::unpack(data, size, offset); - std::tie(rem_ufrag, rem_pwd) = result.get().as<std::pair<std::string, std::string>>(); - result = msgpack::unpack(data, size, offset); - auto comp_cnt = result.get().as<uint8_t>(); - while (comp_cnt-- > 0) { - result = msgpack::unpack(data, size, offset); - IceCandidate cand; - for (const auto& line : result.get().as<std::vector<std::string>>()) { - if (getCandidateFromSDP(line, cand)) - rem_candidates.emplace_back(cand); - } - } - } else { - JAMI_ERR("[ice:%p] invalid msg version", this); - return false; - } - } catch (const msgpack::unpack_error& e) { - JAMI_ERR("[ice:%p] remote msg unpack error: %s", this, e.what()); + if (not isInitialized()) { + JAMI_ERR("[ice:%p] not initialized transport", this); + pimpl_->is_stopped_ = true; return false; } - if (rem_ufrag.empty() or rem_pwd.empty() or rem_candidates.empty()) { - JAMI_ERR("[ice:%p] invalid remote attributes", this); + JAMI_DBG("[ice:%p] negotiation starting (%zu remote candidates)", this, sdp.candidates.size()); + pj_str_t ufrag, pwd; + + std::vector<IceCandidate> rem_candidates; + rem_candidates.reserve(sdp.candidates.size()); + IceCandidate cand; + for 
(const auto &line : sdp.candidates) { + if (getCandidateFromSDP(line, cand)) + rem_candidates.emplace_back(cand); + } + auto status = pj_ice_strans_start_ice(pimpl_->icest_.get(), + pj_strset(&ufrag, (char*)sdp.ufrag.c_str(), sdp.ufrag.size()), + pj_strset(&pwd, (char*)sdp.pwd.c_str(), sdp.pwd.size()), + rem_candidates.size(), + rem_candidates.data()); + if (status != PJ_SUCCESS) { + pimpl_->last_errmsg_ = sip_utils::sip_strerror(status); + JAMI_ERR("[ice:%p] start failed: %s", this, pimpl_->last_errmsg_.c_str()); + pimpl_->is_stopped_ = true; return false; } - - if (pimpl_->onlyIPv4Private_) - JAMI_WARN("[ice:%p] no public IPv4 found, your connection may fail!", this); - - return start({rem_ufrag, rem_pwd}, rem_candidates); + return true; } bool IceTransport::stop() { + pimpl_->is_stopped_ = true; if (isStarted()) { auto status = pj_ice_strans_stop_ice(pimpl_->icest_.get()); if (status != PJ_SUCCESS) { @@ -1139,20 +1129,29 @@ IceTransport::registerPublicIP(unsigned compId, const IpAddr& publicIP) } std::vector<uint8_t> -IceTransport::packIceMsg() const +IceTransport::packIceMsg(uint8_t version) const { - static constexpr uint8_t ICE_MSG_VERSION = 1; - if (not isInitialized()) return {}; std::stringstream ss; - msgpack::pack(ss, ICE_MSG_VERSION); - msgpack::pack(ss, std::make_pair(pimpl_->local_ufrag_, pimpl_->local_pwd_)); - msgpack::pack(ss, static_cast<uint8_t>(pimpl_->component_count_)); - for (unsigned i=0; i<pimpl_->component_count_; i++) - msgpack::pack(ss, getLocalCandidates(i)); - + if (version == 1) { + msgpack::pack(ss, version); + msgpack::pack(ss, std::make_pair(pimpl_->local_ufrag_, pimpl_->local_pwd_)); + msgpack::pack(ss, static_cast<uint8_t>(pimpl_->component_count_)); + for (unsigned i=0; i<pimpl_->component_count_; i++) + msgpack::pack(ss, getLocalCandidates(i)); + } else { + SDP sdp; + sdp.ufrag = pimpl_->local_ufrag_; + sdp.pwd = pimpl_->local_pwd_; + for (unsigned i = 0; i < pimpl_->component_count_; i++) { + auto candidates = 
getLocalCandidates(i); + sdp.candidates.reserve(sdp.candidates.size() + candidates.size()); + sdp.candidates.insert(sdp.candidates.end(), candidates.begin(), candidates.end()); + } + msgpack::pack(ss, sdp); + } auto str(ss.str()); return std::vector<uint8_t>(str.begin(), str.end()); } @@ -1372,6 +1371,53 @@ IceTransport::waitForData(int comp_id, unsigned int timeout, std::error_code& ec return channel.wait(std::chrono::milliseconds(timeout)); } +std::vector<SDP> +IceTransport::parseSDPList(const std::vector<uint8_t>& msg) +{ + std::vector<SDP> sdp_list; + + msgpack::unpacker pac; + pac.reserve_buffer(msg.size()); + memcpy(pac.buffer(), msg.data(), msg.size()); + pac.buffer_consumed(msg.size()); + msgpack::object_handle oh; + + while (auto result = pac.next(oh)) { + try { + SDP sdp; + if (oh.get().type == msgpack::type::POSITIVE_INTEGER) { + // Version 1 + result = pac.next(oh); + if (!result) break; + std::tie(sdp.ufrag, sdp.pwd) = oh.get().as<std::pair<std::string, std::string>>(); + result = pac.next(oh); + if (!result) break; + auto comp_cnt = oh.get().as<uint8_t>(); + while (comp_cnt-- > 0) { + result = pac.next(oh); + if (!result) break; + auto candidates = oh.get().as<std::vector<std::string>>(); + sdp.candidates.reserve(sdp.candidates.size() + candidates.size()); + sdp.candidates.insert(sdp.candidates.end(), candidates.begin(), candidates.end()); + } + } else { + oh.get().convert(sdp); + } + sdp_list.emplace_back(sdp); + } catch (const msgpack::unpack_error &e) { + break; + } + } + + return sdp_list; +} + +bool +IceTransport::isTCPEnabled() +{ + return pimpl_->config_.protocol == PJ_ICE_TP_TCP; +} + //============================================================================== IceTransportFactory::IceTransportFactory() @@ -1442,6 +1488,7 @@ IceSocketTransport::maxPayload() const int IceSocketTransport::waitForData(unsigned ms_timeout, std::error_code& ec) const { + if (!ice_->isRunning()) return -1; return ice_->waitForData(compId_, ms_timeout, ec); } @@ 
-1460,13 +1507,21 @@ IceSocketTransport::write(const ValueType* buf, std::size_t len, std::error_code std::size_t IceSocketTransport::read(ValueType* buf, std::size_t len, std::error_code& ec) { - auto res = ice_->recv(compId_, buf, len); - if (res < 0) { - ec.assign(errno, std::generic_category()); - return 0; + if (!ice_->isRunning()) return 0; + try { + auto res = reliable_ + ? ice_->recvfrom(compId_, reinterpret_cast<char *>(buf), len) + : ice_->recv(compId_, buf, len); + if (res < 0) { + ec.assign(errno, std::generic_category()); + return 0; + } + ec.clear(); + return res; + } catch (const std::exception &e) { + JAMI_ERR("IceSocketTransport::read exception: %s", e.what()); } - ec.clear(); - return res; + return 0; } IpAddr diff --git a/src/ice_transport.h b/src/ice_transport.h index 7e862d0100..6ab72536c6 100644 --- a/src/ice_transport.h +++ b/src/ice_transport.h @@ -29,6 +29,7 @@ #include <functional> #include <memory> +#include <msgpack.hpp> #include <vector> namespace jami { @@ -73,6 +74,14 @@ struct IceTransportOptions { bool aggressive {false}; // If we use the aggressive nomination strategy }; +struct SDP { + std::string ufrag; + std::string pwd; + + std::vector<std::string> candidates; + MSGPACK_DEFINE(ufrag, pwd, candidates) +}; + class IceTransport { public: using Attribute = struct { @@ -85,7 +94,6 @@ public: */ IceTransport(const char* name, int component_count, bool master, const IceTransportOptions& options = {}); - /** * Get current state */ @@ -100,7 +108,7 @@ public: */ bool start(const Attribute& rem_attrs, const std::vector<IceCandidate>& rem_candidates); - bool start(const std::vector<uint8_t>& attrs_candidates); + bool start(const SDP& sdp); /** * Stop a started or completed transport. 
@@ -125,6 +133,12 @@ public: */ bool isRunning() const; + /** + * Return true if a start operations fails or if stop() has been called + * [mutex protected] + */ + bool isStopped() const; + /** * Returns true if ICE transport is in failure state * [mutex protected] @@ -156,7 +170,7 @@ public: /** * Returns serialized ICE attributes and candidates. */ - std::vector<uint8_t> packIceMsg() const; + std::vector<uint8_t> packIceMsg(uint8_t version = 1) const; bool getCandidateFromSDP(const std::string& line, IceCandidate& cand); @@ -188,6 +202,15 @@ public: bool setSlaveSession(); bool setInitiatorSession(); + /** + * Get SDP messages list + * @param msg The payload to parse + * @return the list of SDP messages + */ + static std::vector<SDP> parseSDPList(const std::vector<uint8_t>& msg); + + bool isTCPEnabled(); + private: class Impl; std::unique_ptr<Impl> pimpl_; diff --git a/src/jamidht/jamiaccount.cpp b/src/jamidht/jamiaccount.cpp index d31fa7aca6..dbe4dae4c3 100644 --- a/src/jamidht/jamiaccount.cpp +++ b/src/jamidht/jamiaccount.cpp @@ -138,6 +138,7 @@ struct JamiAccount::PendingCall { std::chrono::steady_clock::time_point start; std::shared_ptr<IceTransport> ice_sp; + std::shared_ptr<IceTransport> ice_tcp_sp; std::weak_ptr<SIPCall> call; std::future<size_t> listen_key; dht::InfoHash call_key; @@ -411,6 +412,30 @@ JamiAccount::newOutgoingCall(const std::string& toUrl, return call; } +void +initICE(const std::vector<uint8_t> &msg, const std::shared_ptr<IceTransport> &ice, + const std::shared_ptr<IceTransport> &ice_tcp, bool &udp_failed, bool &tcp_failed) +{ + auto sdp_list = IceTransport::parseSDPList(msg); + for (const auto &sdp : sdp_list) { + if (sdp.candidates.size() > 0) { + if (sdp.candidates[0].find("TCP") != std::string::npos) { + // It is a SDP for the TCP component + tcp_failed = (ice_tcp && !ice_tcp->start(sdp)); + } else { + // For UDP + udp_failed = (ice && !ice->start(sdp)); + } + } + } + + // During the ICE reply we can start the ICE negotiation + if 
(tcp_failed) { + ice_tcp->stop(); + JAMI_WARN("ICE over TCP not started, will only use UDP"); + } +} + void JamiAccount::startOutgoingCall(const std::shared_ptr<SIPCall>& call, const std::string& toUri) { @@ -455,9 +480,21 @@ JamiAccount::startOutgoingCall(const std::shared_ptr<SIPCall>& call, const std:: return; } + auto ice_config = getIceOptions(); + ice_config.tcpEnable = true; + ice_config.aggressive = true; // This will directly select the first candidate. + auto ice_tcp = +#ifdef _WIN32 + std::shared_ptr<IceTransport>(nullptr); +#else + createIceTransport(("sip:" + dev_call->getCallId()).c_str(), ICE_COMPONENTS, true, ice_config); +#endif + if (not ice_tcp) { + JAMI_WARN("Can't create ICE over TCP, will only use UDP"); + } call->addSubCall(*dev_call); - manager.addTask([sthis=shared(), weak_dev_call, ice, dev, toUri, peer_account] { + manager.addTask([sthis=shared(), weak_dev_call, ice, ice_tcp, dev, toUri, peer_account] { auto call = weak_dev_call.lock(); // call aborted? @@ -470,18 +507,27 @@ JamiAccount::startOutgoingCall(const std::shared_ptr<SIPCall>& call, const std:: return false; } + if (ice_tcp && ice_tcp->isFailed()) { + JAMI_WARN("[call:%s] ice tcp init failed, will only use UDP", call->getCallId().c_str()); + } + // Loop until ICE transport is initialized. // Note: we suppose that ICE init routine has a an internal timeout (bounded in time) // and we let upper layers decide when the call shall be aborded (our first check upper). 
- if (not ice->isInitialized()) + if ((not ice->isInitialized()) || (ice_tcp && !ice_tcp->isInitialized())) return true; sthis->registerDhtAddress(*ice); - + if (ice_tcp) sthis->registerDhtAddress(*ice_tcp); // Next step: sent the ICE data to peer through DHT const dht::Value::Id callvid = ValueIdDist()(sthis->rand); const auto callkey = dht::InfoHash::get("callto:" + dev.toString()); - dht::Value val { dht::IceCandidates(callvid, ice->packIceMsg()) }; + auto blob = ice->packIceMsg(); + if (ice_tcp) { + auto ice_tcp_msg = ice_tcp->packIceMsg(2); + blob.insert(blob.end(), ice_tcp_msg.begin(), ice_tcp_msg.end()); + } + dht::Value val { dht::IceCandidates(callvid, blob) }; sthis->dht_.putEncrypted( callkey, dev, @@ -498,7 +544,7 @@ JamiAccount::startOutgoingCall(const std::shared_ptr<SIPCall>& call, const std:: auto listenKey = sthis->dht_.listen<dht::IceCandidates>( callkey, - [weak_dev_call, ice, callvid, dev] (dht::IceCandidates&& msg) { + [weak_dev_call, ice, ice_tcp, callvid, dev] (dht::IceCandidates&& msg) { if (msg.id != callvid or msg.from != dev) return true; // remove unprintable characters @@ -509,7 +555,10 @@ JamiAccount::startOutgoingCall(const std::shared_ptr<SIPCall>& call, const std:: JAMI_WARN("ICE request replied from DHT peer %s\nData: %s", dev.toString().c_str(), iceData.c_str()); if (auto call = weak_dev_call.lock()) { call->setState(Call::ConnectionState::PROGRESSING); - if (!ice->start(msg.ice_data)) { + + auto udp_failed = true, tcp_failed = true; + initICE(msg.ice_data, ice, ice_tcp, udp_failed, tcp_failed); + if (udp_failed && tcp_failed) { call->onFailure(); return true; } @@ -521,7 +570,7 @@ JamiAccount::startOutgoingCall(const std::shared_ptr<SIPCall>& call, const std:: std::lock_guard<std::mutex> lock(sthis->callsMutex_); sthis->pendingCalls_.emplace_back(PendingCall{ std::chrono::steady_clock::now(), - ice, weak_dev_call, + ice, ice_tcp, weak_dev_call, std::move(listenKey), callkey, dev, @@ -1823,25 +1872,47 @@ bool 
JamiAccount::handlePendingCall(PendingCall& pc, bool incoming) { auto call = pc.call.lock(); - if (not call) + // Cleanup pending call if call is over (cancelled by user or any other reason) + if (not call || call->getState() == Call::CallState::OVER) return true; - auto ice = pc.ice_sp.get(); - if (not ice or ice->isFailed()) { - JAMI_ERR("[call:%s] Null or failed ICE transport", call->getCallId().c_str()); + if ((std::chrono::steady_clock::now() - pc.start) >= ICE_NEGOTIATION_TIMEOUT) { + JAMI_WARN("[call:%s] Timeout on ICE negotiation", call->getCallId().c_str()); call->onFailure(); return true; } - // Return to pending list if not negotiated yet and not in timeout - if (not ice->isRunning()) { - if ((std::chrono::steady_clock::now() - pc.start) >= ICE_NEGOTIATION_TIMEOUT) { - JAMI_WARN("[call:%s] Timeout on ICE negotiation", call->getCallId().c_str()); - call->onFailure(); - return true; + auto ice_tcp = pc.ice_tcp_sp.get(); + auto ice = pc.ice_sp.get(); + + bool tcp_finished = ice_tcp == nullptr || ice_tcp->isStopped(); + bool udp_finished = ice == nullptr || ice->isStopped(); + + if (not udp_finished and ice->isFailed()) { + udp_finished = true; + } + + if (not tcp_finished and ice_tcp->isFailed()) { + tcp_finished = true; + } + + // At least wait for TCP + if (not tcp_finished and not ice_tcp->isRunning()) { + return false; + } else if (tcp_finished and (not ice_tcp or not ice_tcp->isRunning())) { + // If TCP is finished but not running, wait for UDP + if (not udp_finished and ice and not ice->isRunning()) { + return false; } - // Cleanup pending call if call is over (cancelled by user or any other reason) - return call->getState() == Call::CallState::OVER; + } + + udp_finished = ice && ice->isRunning(); + tcp_finished = ice_tcp && ice_tcp->isRunning(); + // If both transport are not running, the negotiation failed + if (not udp_finished and not tcp_finished) { + JAMI_ERR("[call:%s] Both ICE negotations failed", call->getCallId().c_str()); + 
call->onFailure(); + return true; } // Securize a SIP transport with TLS (on top of ICE tranport) and assign the call with it @@ -1893,9 +1964,15 @@ JamiAccount::handlePendingCall(PendingCall& pc, bool incoming) } }; + auto best_transport = pc.ice_tcp_sp; + if (!tcp_finished) { + JAMI_DBG("TCP not running, will use SIP over UDP"); + best_transport = pc.ice_sp; + } + // Following can create a transport that need to be negotiated (TLS). // This is a asynchronous task. So we're going to process the SIP after this negotiation. - auto transport = link_->sipTransportBroker->getTlsIceTransport(pc.ice_sp, + auto transport = link_->sipTransportBroker->getTlsIceTransport(best_transport, ICE_COMP_SIP_TRANSPORT, tlsParams); if (!transport) @@ -1910,7 +1987,7 @@ JamiAccount::handlePendingCall(PendingCall& pc, bool incoming) // Be acknowledged on transport connection/disconnection auto lid = reinterpret_cast<uintptr_t>(this); auto remote_id = remote_device.toString(); - auto remote_addr = ice->getRemoteAddress(ICE_COMP_SIP_TRANSPORT); + auto remote_addr = best_transport->getRemoteAddress(ICE_COMP_SIP_TRANSPORT); auto& tr_self = *transport; transport->addStateListener(lid, [&tr_self, lid, wcall, waccount, remote_id, remote_addr](pjsip_transport_state state, @@ -1933,7 +2010,7 @@ JamiAccount::handlePendingCall(PendingCall& pc, bool incoming) call->setState(Call::ConnectionState::PROGRESSING); return true; -} + } bool JamiAccount::mapPortUPnP() @@ -2481,9 +2558,18 @@ JamiAccount::incomingCall(dht::IceCandidates&& msg, const std::shared_ptr<dht::c { auto call = Manager::instance().callFactory.newCall<SIPCall, JamiAccount>(*this, Manager::instance().getNewCallID(), Call::CallType::INCOMING); auto ice = createIceTransport(("sip:"+call->getCallId()).c_str(), ICE_COMPONENTS, false, getIceOptions()); + auto ice_config = getIceOptions(); + ice_config.tcpEnable = true; + ice_config.aggressive = true; // This will directly select the first candidate. 
+ auto ice_tcp = +#ifdef _WIN32 + std::shared_ptr<IceTransport>(nullptr); +#else + createIceTransport(("sip:" + call->getCallId()).c_str(), ICE_COMPONENTS, true, ice_config); +#endif std::weak_ptr<SIPCall> wcall = call; - Manager::instance().addTask([account=shared(), wcall, ice, msg, from_cert, from] { + Manager::instance().addTask([account=shared(), wcall, ice, ice_tcp, msg, from_cert, from] { auto call = wcall.lock(); // call aborted? @@ -2499,10 +2585,10 @@ JamiAccount::incomingCall(dht::IceCandidates&& msg, const std::shared_ptr<dht::c // Loop until ICE transport is initialized. // Note: we suppose that ICE init routine has a an internal timeout (bounded in time) // and we let upper layers decide when the call shall be aborted (our first check upper). - if (not ice->isInitialized()) + if ((not ice->isInitialized()) || (ice_tcp && !ice_tcp->isInitialized())) return true; - account->replyToIncomingIceMsg(call, ice, msg, from_cert, from); + account->replyToIncomingIceMsg(call, ice, ice_tcp, msg, from_cert, from); return false; }); } @@ -2575,6 +2661,7 @@ JamiAccount::foundPeerDevice(const std::shared_ptr<dht::crypto::Certificate>& cr void JamiAccount::replyToIncomingIceMsg(const std::shared_ptr<SIPCall>& call, const std::shared_ptr<IceTransport>& ice, + const std::shared_ptr<IceTransport>& ice_tcp, const dht::IceCandidates& peer_ice_msg, const std::shared_ptr<dht::crypto::Certificate>& from_cert, const dht::InfoHash& from_id) @@ -2591,13 +2678,20 @@ JamiAccount::replyToIncomingIceMsg(const std::shared_ptr<SIPCall>& call, } }); #endif - registerDhtAddress(*ice); + if (ice_tcp) registerDhtAddress(*ice_tcp); + + auto blob = ice->packIceMsg(); + if (ice_tcp) { + auto ice_tcp_msg = ice_tcp->packIceMsg(2); + blob.insert(blob.end(), ice_tcp_msg.begin(), ice_tcp_msg.end()); + } + // Asynchronous DHT put of our local ICE data dht_.putEncrypted( callKey_, peer_ice_msg.from, - dht::Value {dht::IceCandidates(peer_ice_msg.id, ice->packIceMsg())}, + dht::Value 
{dht::IceCandidates(peer_ice_msg.id, blob)}, [wcall](bool ok) { if (!ok) { JAMI_WARN("Can't put ICE descriptor reply on DHT"); @@ -2609,8 +2703,11 @@ JamiAccount::replyToIncomingIceMsg(const std::shared_ptr<SIPCall>& call, auto started_time = std::chrono::steady_clock::now(); - // During the ICE reply we can start the ICE negotiation - if (!ice->start(peer_ice_msg.ice_data)) { + auto sdp_list = IceTransport::parseSDPList(peer_ice_msg.ice_data); + auto udp_failed = true, tcp_failed = true; + initICE(peer_ice_msg.ice_data, ice, ice_tcp, udp_failed, tcp_failed); + + if (udp_failed && tcp_failed) { call->onFailure(EIO); return; } @@ -2620,15 +2717,16 @@ JamiAccount::replyToIncomingIceMsg(const std::shared_ptr<SIPCall>& call, // Let the call handled by the PendingCall handler loop { std::lock_guard<std::mutex> lock(callsMutex_); - pendingCalls_.emplace_back(PendingCall { - /*.start = */started_time, - /*.ice_sp = */ice, - /*.call = */wcall, - /*.listen_key = */{}, - /*.call_key = */{}, - /*.from = */peer_ice_msg.from, - /*.from_account = */from_id, - /*.from_cert = */from_cert }); + pendingCalls_.emplace_back( + PendingCall{/*.start = */ started_time, + /*.ice_sp = */ udp_failed ? nullptr : ice, + /*.ice_tcp_sp = */ tcp_failed ? 
nullptr : ice_tcp, + /*.call = */ wcall, + /*.listen_key = */ {}, + /*.call_key = */ {}, + /*.from = */ peer_ice_msg.from, + /*.from_account = */ from_id, + /*.from_cert = */ from_cert}); checkPendingCallsTask(); } } diff --git a/src/jamidht/jamiaccount.h b/src/jamidht/jamiaccount.h index 2327a038d7..eafc84794a 100644 --- a/src/jamidht/jamiaccount.h +++ b/src/jamidht/jamiaccount.h @@ -615,6 +615,7 @@ class JamiAccount : public SIPAccountBase { void saveKnownDevices() const; void replyToIncomingIceMsg(const std::shared_ptr<SIPCall>&, + const std::shared_ptr<IceTransport>&, const std::shared_ptr<IceTransport>&, const dht::IceCandidates&, const std::shared_ptr<dht::crypto::Certificate>& from_cert, diff --git a/src/jamidht/sips_transport_ice.cpp b/src/jamidht/sips_transport_ice.cpp index 963d193c97..1c5af73b24 100644 --- a/src/jamidht/sips_transport_ice.cpp +++ b/src/jamidht/sips_transport_ice.cpp @@ -236,7 +236,7 @@ SipsIceTransport::SipsIceTransport(pjsip_endpoint* endpt, std::memset(&localCertInfo_, 0, sizeof(pj_ssl_cert_info)); std::memset(&remoteCertInfo_, 0, sizeof(pj_ssl_cert_info)); - iceSocket_ = std::make_unique<IceSocketTransport>(ice_, comp_id); + iceSocket_ = std::make_unique<IceSocketTransport>(ice_, comp_id, PJSIP_TRANSPORT_IS_RELIABLE(&trData_.base)); TlsSession::TlsSessionCallbacks cbs = { /*.onStateChange = */[this](TlsSessionState state){ onTlsStateChange(state); }, @@ -249,11 +249,22 @@ SipsIceTransport::SipsIceTransport(pjsip_endpoint* endpt, if (pjsip_transport_register(base.tpmgr, &base) != PJ_SUCCESS) throw std::runtime_error("Can't register PJSIP transport."); + + if (PJSIP_TRANSPORT_IS_RELIABLE(&trData_.base)) { + eventLoop_ = std::thread([this] { + try { + eventLoop(); + } catch (const std::exception& e) { + JAMI_ERR() << "SipIceTransport: eventLoop() failure: " << e.what(); + } + }); + } } SipsIceTransport::~SipsIceTransport() { JAMI_DBG("~SipIceTransport@%p {tr=%p}", this, &trData_.base); + stopLoop_ = true; // Flush send queue with 
ENOTCONN error for (auto tdata : txQueue_) { @@ -266,6 +277,8 @@ SipsIceTransport::~SipsIceTransport() auto base = getTransportBase(); // Stop low-level transport first + tls_->shutdown(); + if (eventLoop_.joinable()) eventLoop_.join(); tls_.reset(); // If delete not trigged by pjsip_transport_destroy (happen if objet not given to pjsip) @@ -500,7 +513,10 @@ SipsIceTransport::getInfo(pj_ssl_sock_info* info, bool established) std::memset(info, 0, sizeof(*info)); info->established = established; - info->proto = PJ_SSL_SOCK_PROTO_DTLS1; + if (PJSIP_TRANSPORT_IS_RELIABLE(&trData_.base)) + info->proto = PJSIP_SSL_DEFAULT_PROTO; + else + info->proto = PJ_SSL_SOCK_PROTO_DTLS1; pj_sockaddr_cp(&info->local_addr, local_.pjPtr()); @@ -708,4 +724,23 @@ SipsIceTransport::getTlsSessionMtu() return tls_->maxPayload(); } +void +SipsIceTransport::eventLoop() +{ + while(!stopLoop_) { + std::error_code err; + if (tls_ && tls_->waitForData(100, err)) { + std::vector<uint8_t> pkt; + pkt.resize(PJSIP_MAX_PKT_LEN); + auto read = tls_->read(pkt.data(), PJSIP_MAX_PKT_LEN, err); + if (read > 0) { + pkt.resize(read); + std::lock_guard<std::mutex> l(rxMtx_); + rxPending_.emplace_back(std::move(pkt)); + scheduler_.run([this]{ handleEvents(); }); + } + } + } +} + }} // namespace jami::tls diff --git a/src/jamidht/sips_transport_ice.h b/src/jamidht/sips_transport_ice.h index 8af70f6969..c5290c8068 100644 --- a/src/jamidht/sips_transport_ice.h +++ b/src/jamidht/sips_transport_ice.h @@ -138,6 +138,10 @@ private: void onRxData(std::vector<uint8_t>&&); void onCertificatesUpdate(const gnutls_datum_t*, const gnutls_datum_t*, unsigned int); int verifyCertificate(gnutls_session_t); + + std::thread eventLoop_; + void eventLoop(); + std::atomic_bool stopLoop_ {false}; }; }} // namespace jami::tls diff --git a/src/peer_connection.cpp b/src/peer_connection.cpp index 580ffb73d0..ae924ad69a 100644 --- a/src/peer_connection.cpp +++ b/src/peer_connection.cpp @@ -334,7 +334,7 @@ 
IceSocketEndpoint::waitForData(unsigned ms_timeout, std::error_code& ec) const { if (ice_) { if (!ice_->isRunning()) return -1; - return iceIsSender ? ice_->isDataAvailable(1) : ice_->waitForData(1, ms_timeout, ec); + return iceIsSender ? ice_->isDataAvailable(compId_) : ice_->waitForData(compId_, ms_timeout, ec); } return -1; } @@ -345,7 +345,7 @@ IceSocketEndpoint::read(ValueType* buf, std::size_t len, std::error_code& ec) if (ice_) { if (!ice_->isRunning()) return 0; try { - auto res = ice_->recvfrom(1, reinterpret_cast<char *>(buf), len); + auto res = ice_->recvfrom(compId_, reinterpret_cast<char *>(buf), len); if (res < 0) ec.assign(errno, std::generic_category()); else @@ -365,7 +365,7 @@ IceSocketEndpoint::write(const ValueType* buf, std::size_t len, std::error_code& if (ice_) { if (!ice_->isRunning()) return 0; auto res = 0; - res = ice_->send(0, reinterpret_cast<const unsigned char *>(buf), len); + res = ice_->send(compId_, reinterpret_cast<const unsigned char *>(buf), len); if (res < 0) { ec.assign(errno, std::generic_category()); } else { diff --git a/src/peer_connection.h b/src/peer_connection.h index f14a54d203..001369437a 100644 --- a/src/peer_connection.h +++ b/src/peer_connection.h @@ -157,7 +157,7 @@ public: void setOnRecv(RecvCb&& cb) override { if (ice_) { - ice_->setOnRecv(0, cb); + ice_->setOnRecv(compId_, cb); } } @@ -165,6 +165,7 @@ private: std::shared_ptr<IceTransport> ice_ {nullptr}; std::atomic_bool iceStopped{false}; std::atomic_bool iceIsSender{false}; + uint8_t compId_ {0}; }; //============================================================================== diff --git a/src/security/tls_session.cpp b/src/security/tls_session.cpp index cd80be3445..4ad166f36c 100644 --- a/src/security/tls_session.cpp +++ b/src/security/tls_session.cpp @@ -234,6 +234,7 @@ public: std::unique_ptr<TlsAnonymousClientCredendials> cacred_; // ctor init. std::unique_ptr<TlsAnonymousServerCredendials> sacred_; // ctor init. 
std::unique_ptr<TlsCertificateCredendials> xcred_; // ctor init. + std::mutex sessionMutex_; gnutls_session_t session_ {nullptr}; gnutls_datum_t cookie_key_ {nullptr, 0}; gnutls_dtls_prestate_st prestate_ {}; @@ -724,13 +725,16 @@ TlsSession::TlsSessionImpl::cleanup() state_ = TlsSessionState::SHUTDOWN; // be sure to block any user operations stateCondition_.notify_all(); - if (session_) { - if (transport_.isReliable()) - gnutls_bye(session_, GNUTLS_SHUT_RDWR); - else - gnutls_bye(session_, GNUTLS_SHUT_WR); // not wait for a peer answer - gnutls_deinit(session_); - session_ = nullptr; + { + std::lock_guard<std::mutex> lk(sessionMutex_); + if (session_) { + if (transport_.isReliable()) + gnutls_bye(session_, GNUTLS_SHUT_RDWR); + else + gnutls_bye(session_, GNUTLS_SHUT_WR); // not wait for a peer answer + gnutls_deinit(session_); + session_ = nullptr; + } } if (cookie_key_.data) @@ -1218,7 +1222,7 @@ TlsSession::TlsSession(SocketType& transport, const TlsParams& params, TlsSession::~TlsSession() { - shutdown(); + if (pimpl_) shutdown(); } bool @@ -1237,8 +1241,8 @@ int TlsSession::maxPayload() const { if (pimpl_->state_ == TlsSessionState::SHUTDOWN) - throw std::runtime_error("Getting MTU from non-valid TLS session"); - return gnutls_dtls_get_data_mtu(pimpl_->session_); + throw std::runtime_error("Getting maxPayload from non-valid TLS session"); + return pimpl_->transport_.maxPayload(); } const char* @@ -1295,15 +1299,22 @@ TlsSession::read(ValueType* data, std::size_t size, std::error_code& ec) } while (true) { - auto ret = gnutls_record_recv(pimpl_->session_, data, size); + ssize_t ret; + { + std::lock_guard<std::mutex> lk(pimpl_->sessionMutex_); + if (!pimpl_->session_) return 0; + ret = gnutls_record_recv(pimpl_->session_, data, size); + } if (ret > 0) { ec.clear(); return ret; } if (ret == 0) { - JAMI_DBG("[TLS] eof"); - shutdown(); + if (pimpl_) { + JAMI_ERR("[TLS] eof"); + shutdown(); + } error = std::errc::broken_pipe; break; } else if (ret == 
GNUTLS_E_REHANDSHAKE) { @@ -1312,8 +1323,10 @@ TlsSession::read(ValueType* data, std::size_t size, std::error_code& ec) pimpl_->rxCv_.notify_one(); // unblock waiting FSM pimpl_->stateCondition_.notify_all(); } else if (gnutls_error_is_fatal(ret)) { - JAMI_ERR("[TLS] fatal error in recv: %s", gnutls_strerror(ret)); - shutdown(); + if (pimpl_ && pimpl_->state_ != TlsSessionState::SHUTDOWN) { + JAMI_ERR("[TLS] fatal error in recv: %s", gnutls_strerror(ret)); + shutdown(); + } error = std::errc::io_error; break; } diff --git a/src/sip/siptransport.cpp b/src/sip/siptransport.cpp index bfa574484a..049fab02e8 100644 --- a/src/sip/siptransport.cpp +++ b/src/sip/siptransport.cpp @@ -440,6 +440,9 @@ SipTransportBroker::getTlsIceTransport(const std::shared_ptr<jami::IceTransport> { auto ipv6 = ice->getLocalAddress(comp_id).isIpv6(); auto type = ipv6 ? PJSIP_TRANSPORT_DTLS6 : PJSIP_TRANSPORT_DTLS; + if (ice->isTCPEnabled()) { + type = ipv6 ? PJSIP_TRANSPORT_TLS6 : PJSIP_TRANSPORT_TLS; + } auto sip_ice_tr = std::unique_ptr<tls::SipsIceTransport>( new tls::SipsIceTransport(endpt_, type, params, ice, comp_id)); auto tr = sip_ice_tr->getTransportBase(); -- GitLab